├── src
│   ├── test
│   │   ├── resources
│   │   │   ├── version.properties
│   │   │   └── log4j2.xml
│   │   └── java
│   │       └── fr
│   │           └── pilato
│   │               └── test
│   │                   └── elasticsearch
│   │                       └── hlclient
│   │                           ├── GeoPoint.java
│   │                           ├── Person.java
│   │                           ├── SSLUtils.java
│   │                           └── EsClientIT.java
│   └── main
│       └── documentation
│           └── README.md
├── .github
│   ├── mergify.yml
│   ├── CODEOWNERS
│   ├── dependabot.yml
│   └── workflows
│       ├── push.yml
│       └── pr.yml
├── README.md
├── .gitignore
├── pom.xml
└── LICENSE
/src/test/resources/version.properties:
--------------------------------------------------------------------------------
1 | elasticsearch.version=${elasticsearch.version}
2 |
--------------------------------------------------------------------------------
/.github/mergify.yml:
--------------------------------------------------------------------------------
1 | pull_request_rules:
2 |   - name: automatic merge on review
3 |     conditions:
4 |       - "#approved-reviews-by>=1"
5 |       - check-success=build
6 |     actions:
7 |       merge:
8 |         method: merge
9 |
10 |
--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
1 | # This file is used to define the code owners for the repository.
2 | # Each line is a file pattern followed by one or more GitHub usernames or team names.
3 | # For more information, see https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners
4 | # The code owners will be automatically requested for review when someone makes changes to the files they own.
5 | # All the modified files will be reviewed by dadoonet.
6 | * @dadoonet
7 |
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 | updates:
3 |   # Enable version updates for Maven
4 |   - package-ecosystem: maven
5 |     directory: "/"
6 |     schedule:
7 |       interval: daily
8 |       time: "04:00"
9 |     open-pull-requests-limit: 99
10 |     assignees:
11 |       - dadoonet
12 | 
13 |   # Maintain dependencies for GitHub Actions
14 |   - package-ecosystem: "github-actions"
15 |     directory: "/.github/workflows"
16 |     schedule:
17 |       interval: "daily"
18 |       time: "04:00"
19 |     open-pull-requests-limit: 99
20 |     assignees:
21 |       - dadoonet
22 |
--------------------------------------------------------------------------------
/.github/workflows/push.yml:
--------------------------------------------------------------------------------
1 | name: Build the project
2 | on: [push]
3 | jobs:
4 |   build:
5 |     runs-on: ubuntu-latest
6 |     permissions:
7 |       contents: write
8 |       packages: write
9 |     steps:
10 |       - name: Checkout code
11 |         uses: actions/checkout@v5
12 |         with:
13 |           ref: ${{ github.head_ref }}
14 |           token: ${{ secrets.PAT }}
15 |       - name: Update resources with Maven
16 |         run: mvn -B process-resources
17 |       - name: Update files if needed
18 |         uses: stefanzweifel/git-auto-commit-action@v7
19 |       - name: Set up JDK 17 and Maven Central Repository
20 |         uses: actions/setup-java@v5
21 |         with:
22 |           java-version: '17'
23 |           distribution: 'adopt'
24 |           cache: 'maven'
25 |       - name: Build the project and run integration tests
26 |         run: mvn -B verify
27 |
--------------------------------------------------------------------------------
/.github/workflows/pr.yml:
--------------------------------------------------------------------------------
1 | name: Build the project
2 | on: [pull_request]
3 | jobs:
4 |   build:
5 |     runs-on: ubuntu-latest
6 |     if: ${{ github.actor != 'dependabot[bot]' }}
7 |     permissions:
8 |       contents: write
9 |       packages: write
10 |     steps:
11 |       - name: Checkout code
12 |         uses: actions/checkout@v5
13 |         with:
14 |           ref: ${{ github.head_ref }}
15 |           token: ${{ secrets.PAT }}
16 |       - name: Update resources with Maven
17 |         run: mvn -B process-resources
18 |       - name: Update files if needed
19 |         uses: stefanzweifel/git-auto-commit-action@v7
20 |       - name: Set up JDK 17 and Maven Central Repository
21 |         uses: actions/setup-java@v5
22 |         with:
23 |           java-version: '17'
24 |           distribution: 'adopt'
25 |           cache: 'maven'
26 |       - name: Build the project and run integration tests
27 |         run: mvn -B verify
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Elasticsearch Client Java Sample project
4 |
5 | https://discuss.elastic.co has many questions about the Elasticsearch [Java API Client](https://www.elastic.co/docs/reference/elasticsearch/clients/java).
6 |
7 | To address those questions, I often attempt to reproduce the issues.
8 |
9 | This repository houses many examples derived from those discussions.
10 | I believe it could be beneficial for many, so I've made the code available here.
11 |
12 | You're welcome to contribute your own examples if you'd like.
13 |
14 | This repository is tested with:
15 |
16 | * **Elasticsearch Server 9.2.3**
17 | * **Elasticsearch Java API Client 9.2.3**
18 |
19 | We automatically start a Docker image using the [Elasticsearch module for TestContainers](https://www.testcontainers.org/modules/elasticsearch/).
20 |
21 | If you want to have very fast startup times, you can use the [TestContainers `reuse` feature](https://java.testcontainers.org/features/reuse/).
22 |
23 | To do this, you need to add the following to your `~/.testcontainers.properties` file:
24 |
25 | ```properties
26 | testcontainers.reuse.enable=true
27 | ```
28 |
29 | This requires Docker to be running.
30 |
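31 | Note that the container itself must also be created with `withReuse(true)`. The integration
32 | tests already do that (see `EsClientIT.java`); here is a minimal sketch of how the container
33 | is started:
34 | 
35 | ```java
36 | import org.testcontainers.elasticsearch.ElasticsearchContainer;
37 | import org.testcontainers.utility.DockerImageName;
38 | 
39 | // Start (or reuse) a local Elasticsearch container, like EsClientIT does
40 | ElasticsearchContainer container = new ElasticsearchContainer(
41 |         DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch")
42 |                 .withTag("9.2.3"))
43 |         .withPassword("changeme")
44 |         .withReuse(true); // needed in addition to testcontainers.reuse.enable=true
45 | container.start();
46 | ```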
--------------------------------------------------------------------------------
/src/main/documentation/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | # Elasticsearch Client Java Sample project
4 |
5 | https://discuss.elastic.co has many questions about the Elasticsearch [Java API Client](https://www.elastic.co/docs/reference/elasticsearch/clients/java).
6 |
7 | To address those questions, I often attempt to reproduce the issues.
8 |
9 | This repository houses many examples derived from those discussions.
10 | I believe it could be beneficial for many, so I've made the code available here.
11 |
12 | You're welcome to contribute your own examples if you'd like.
13 |
14 | This repository is tested with:
15 |
16 | * **Elasticsearch Server ${elasticsearch.version}**
17 | * **Elasticsearch Java API Client ${elasticsearch-client.version}**
18 |
19 | We automatically start a Docker image using the [Elasticsearch module for TestContainers](https://www.testcontainers.org/modules/elasticsearch/).
20 |
21 | If you want to have very fast startup times, you can use the [TestContainers `reuse` feature](https://java.testcontainers.org/features/reuse/).
22 |
23 | To do this, you need to add the following to your `~/.testcontainers.properties` file:
24 |
25 | ```properties
26 | testcontainers.reuse.enable=true
27 | ```
28 |
29 | This requires Docker to be running.
30 |
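31 | Note that the container itself must also be created with `withReuse(true)`. The integration
32 | tests already do that (see `EsClientIT.java`); here is a minimal sketch of how the container
33 | is started:
34 | 
35 | ```java
36 | import org.testcontainers.elasticsearch.ElasticsearchContainer;
37 | import org.testcontainers.utility.DockerImageName;
38 | 
39 | // Start (or reuse) a local Elasticsearch container, like EsClientIT does
40 | ElasticsearchContainer container = new ElasticsearchContainer(
41 |         DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch")
42 |                 .withTag("${elasticsearch.version}"))
43 |         .withPassword("changeme")
44 |         .withReuse(true); // needed in addition to testcontainers.reuse.enable=true
45 | container.start();
46 | ```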
--------------------------------------------------------------------------------
/src/test/resources/log4j2.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/test/java/fr/pilato/test/elasticsearch/hlclient/GeoPoint.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package fr.pilato.test.elasticsearch.hlclient;
21 |
22 | public class GeoPoint {
23 | private double lon;
24 | private double lat;
25 |
26 | public GeoPoint() {
27 |
28 | }
29 |
30 | public GeoPoint(double lat, double lon) {
31 | this.lon = lon;
32 | this.lat = lat;
33 | }
34 |
35 | public double getLon() {
36 | return lon;
37 | }
38 |
39 | public void setLon(double lon) {
40 | this.lon = lon;
41 | }
42 |
43 | public double getLat() {
44 | return lat;
45 | }
46 |
47 | public void setLat(double lat) {
48 | this.lat = lat;
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/src/test/java/fr/pilato/test/elasticsearch/hlclient/Person.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package fr.pilato.test.elasticsearch.hlclient;
21 |
22 | public class Person {
23 | private String id;
24 | private String name;
25 |
26 | private GeoPoint location;
27 | public String getId() {
28 | return id;
29 | }
30 |
31 | public void setId(String id) {
32 | this.id = id;
33 | }
34 |
35 | public String getName() {
36 | return name;
37 | }
38 |
39 | public void setName(String name) {
40 | this.name = name;
41 | }
42 |
43 | public GeoPoint getLocation() {
44 | return location;
45 | }
46 |
47 | public void setLocation(GeoPoint location) {
48 | this.location = location;
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Created by .ignore support plugin (hsz.mobi)
2 | ### Maven template
3 | target/
4 | pom.xml.tag
5 | pom.xml.releaseBackup
6 | pom.xml.versionsBackup
7 | pom.xml.next
8 | release.properties
9 | dependency-reduced-pom.xml
10 | buildNumber.properties
11 | .mvn/timing.properties
12 | .mvn/wrapper/maven-wrapper.jar
13 | ### JetBrains template
14 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
15 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
16 |
17 | # User-specific stuff
18 | .idea/**/workspace.xml
19 | .idea/**/tasks.xml
20 | .idea/**/usage.statistics.xml
21 | .idea/**/dictionaries
22 | .idea/**/shelf
23 |
24 | # Sensitive or high-churn files
25 | .idea/**/dataSources/
26 | .idea/**/dataSources.ids
27 | .idea/**/dataSources.local.xml
28 | .idea/**/sqlDataSources.xml
29 | .idea/**/dynamic.xml
30 | .idea/**/uiDesigner.xml
31 | .idea/**/dbnavigator.xml
32 |
33 | # Gradle
34 | .idea/**/gradle.xml
35 | .idea/**/libraries
36 |
37 | # Gradle and Maven with auto-import
38 | # When using Gradle or Maven with auto-import, you should exclude module files,
39 | # since they will be recreated, and may cause churn. Uncomment if using
40 | # auto-import.
41 | # .idea/modules.xml
42 | # .idea/*.iml
43 | # .idea/modules
44 |
45 | # CMake
46 | cmake-build-*/
47 |
48 | # Mongo Explorer plugin
49 | .idea/**/mongoSettings.xml
50 |
51 | # File-based project format
52 | *.iws
53 |
54 | # IntelliJ
55 | out/
56 |
57 | # mpeltonen/sbt-idea plugin
58 | .idea_modules/
59 |
60 | # JIRA plugin
61 | atlassian-ide-plugin.xml
62 |
63 | # Cursive Clojure plugin
64 | .idea/replstate.xml
65 |
66 | # Crashlytics plugin (for Android Studio and IntelliJ)
67 | com_crashlytics_export_strings.xml
68 | crashlytics.properties
69 | crashlytics-build.properties
70 | fabric.properties
71 |
72 | # Editor-based Rest Client
73 | .idea/httpRequests
74 |
75 | .idea/
76 | *.iml
77 |
--------------------------------------------------------------------------------
/src/test/java/fr/pilato/test/elasticsearch/hlclient/SSLUtils.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package fr.pilato.test.elasticsearch.hlclient;
21 |
22 | import org.apache.hc.core5.ssl.SSLContextBuilder;
23 | import org.apache.hc.core5.ssl.SSLContexts;
24 |
25 | import javax.net.ssl.SSLContext;
26 | import javax.net.ssl.TrustManager;
27 | import javax.net.ssl.X509TrustManager;
28 | import java.io.ByteArrayInputStream;
29 | import java.security.KeyManagementException;
30 | import java.security.KeyStore;
31 | import java.security.NoSuchAlgorithmException;
32 | import java.security.SecureRandom;
33 | import java.security.cert.Certificate;
34 | import java.security.cert.CertificateFactory;
35 | import java.security.cert.X509Certificate;
36 |
37 | /**
38 | * Some utilities for SSL
39 | */
40 | public class SSLUtils {
41 |
42 | /**
43 | * Create a SSL Context from a Certificate
44 | * @param certificate Certificate provided as a byte array
45 | * @return the SSL Context
46 | */
47 | public static SSLContext createContextFromCaCert(byte[] certificate) {
48 | try {
49 | CertificateFactory factory = CertificateFactory.getInstance("X.509");
50 | Certificate trustedCa = factory.generateCertificate(
51 | new ByteArrayInputStream(certificate)
52 | );
53 | KeyStore trustStore = KeyStore.getInstance("pkcs12");
54 | trustStore.load(null, null);
55 | trustStore.setCertificateEntry("ca", trustedCa);
56 | SSLContextBuilder sslContextBuilder = SSLContexts.custom().loadTrustMaterial(trustStore, null);
57 | return sslContextBuilder.build();
58 | } catch (Exception e) {
59 | throw new RuntimeException(e);
60 | }
61 | }
62 |
63 | private static final TrustManager[] trustAllCerts = new TrustManager[]{new X509TrustManager() {
64 | @Override public void checkClientTrusted(X509Certificate[] chain, String authType) {}
65 | @Override public void checkServerTrusted(X509Certificate[] chain, String authType) {}
66 | @Override public X509Certificate[] getAcceptedIssuers() { return null; }
67 | }};
68 |
69 | public static SSLContext createTrustAllCertsContext() {
70 | try {
71 | SSLContext sslContext = SSLContext.getInstance("SSL");
72 | sslContext.init(null, trustAllCerts, new SecureRandom());
73 | return sslContext;
74 | } catch (NoSuchAlgorithmException | KeyManagementException e) {
75 | throw new RuntimeException("Can not create the SSLContext", e);
76 | }
77 | }
78 | }
79 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
5 |     <modelVersion>4.0.0</modelVersion>
6 | 
7 |     <groupId>fr.pilato.test.elasticsearch</groupId>
8 |     <artifactId>elasticsearch-client-tests</artifactId>
9 |     <version>1.0-SNAPSHOT</version>
10 | 
11 |     <properties>
12 |         <elasticsearch.version>9.2.3</elasticsearch.version>
13 |         <elasticsearch-client.version>9.2.3</elasticsearch-client.version>
14 |         <jackson.version>2.20.1</jackson.version>
15 |         <log4j.version>2.25.2</log4j.version>
16 |         <slf4j.version>2.0.17</slf4j.version>
17 |         <junit.version>6.0.1</junit.version>
18 |         <testcontainers.version>1.21.3</testcontainers.version>
19 | 
20 |         <java.compiler.version>17</java.compiler.version>
21 | 
22 |         <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
23 |     </properties>
24 | 
25 |     <build>
26 |         <resources>
27 |             <resource>
28 |                 <directory>src/main/documentation</directory>
29 |                 <filtering>true</filtering>
30 |                 <targetPath>${project.basedir}</targetPath>
31 |             </resource>
32 |         </resources>
33 |         <testResources>
34 |             <testResource>
35 |                 <directory>src/test/resources</directory>
36 |                 <filtering>true</filtering>
37 |             </testResource>
38 |         </testResources>
39 |         <plugins>
40 |             <plugin>
41 |                 <groupId>org.apache.maven.plugins</groupId>
42 |                 <artifactId>maven-compiler-plugin</artifactId>
43 |                 <version>3.14.1</version>
44 |                 <configuration>
45 |                     <source>${java.compiler.version}</source>
46 |                     <target>${java.compiler.version}</target>
47 |                     <encoding>UTF-8</encoding>
48 |                     <showDeprecation>true</showDeprecation>
49 |                     <showWarnings>true</showWarnings>
50 |                     <optimize>true</optimize>
51 |                     <compilerArgument>-Xlint:all,-serial,-path,-rawtypes,-unchecked</compilerArgument>
52 |                 </configuration>
53 |             </plugin>
54 |             <plugin>
55 |                 <artifactId>maven-failsafe-plugin</artifactId>
56 |                 <version>3.5.4</version>
57 |                 <executions>
58 |                     <execution>
59 |                         <goals>
60 |                             <goal>integration-test</goal>
61 |                             <goal>verify</goal>
62 |                         </goals>
63 |                     </execution>
64 |                 </executions>
65 |             </plugin>
66 |         </plugins>
67 |     </build>
68 | 
69 |     <dependencies>
70 |         <dependency>
71 |             <groupId>co.elastic.clients</groupId>
72 |             <artifactId>elasticsearch-java</artifactId>
73 |             <version>${elasticsearch-client.version}</version>
74 |             <scope>test</scope>
75 |         </dependency>
76 | 
77 |         <dependency>
78 |             <groupId>org.elasticsearch</groupId>
79 |             <artifactId>rest-api-spec</artifactId>
80 |             <version>${elasticsearch.version}</version>
81 |             <scope>provided</scope>
82 |         </dependency>
83 | 
84 |         <dependency>
85 |             <groupId>com.fasterxml.jackson.core</groupId>
86 |             <artifactId>jackson-core</artifactId>
87 |             <version>${jackson.version}</version>
88 |             <scope>test</scope>
89 |         </dependency>
90 |         <dependency>
91 |             <groupId>com.fasterxml.jackson.core</groupId>
92 |             <artifactId>jackson-databind</artifactId>
93 |             <version>${jackson.version}</version>
94 |             <scope>test</scope>
95 |         </dependency>
96 | 
97 |         <dependency>
98 |             <groupId>org.junit.jupiter</groupId>
99 |             <artifactId>junit-jupiter-api</artifactId>
100 |             <version>${junit.version}</version>
101 |             <scope>test</scope>
102 |         </dependency>
103 |         <dependency>
104 |             <groupId>org.junit.jupiter</groupId>
105 |             <artifactId>junit-jupiter-engine</artifactId>
106 |             <version>${junit.version}</version>
107 |             <scope>test</scope>
108 |         </dependency>
109 |         <dependency>
110 |             <groupId>org.assertj</groupId>
111 |             <artifactId>assertj-core</artifactId>
112 |             <version>3.27.6</version>
113 |         </dependency>
114 | 
115 |         <dependency>
116 |             <groupId>org.slf4j</groupId>
117 |             <artifactId>slf4j-api</artifactId>
118 |             <version>${slf4j.version}</version>
119 |             <scope>test</scope>
120 |         </dependency>
121 | 
122 |         <dependency>
123 |             <groupId>org.apache.logging.log4j</groupId>
124 |             <artifactId>log4j-api</artifactId>
125 |             <version>${log4j.version}</version>
126 |             <scope>test</scope>
127 |             <optional>true</optional>
128 |         </dependency>
129 |         <dependency>
130 |             <groupId>org.apache.logging.log4j</groupId>
131 |             <artifactId>log4j-slf4j-impl</artifactId>
132 |             <version>${log4j.version}</version>
133 |             <scope>test</scope>
134 |         </dependency>
135 |         <dependency>
136 |             <groupId>org.apache.logging.log4j</groupId>
137 |             <artifactId>log4j-core</artifactId>
138 |             <version>${log4j.version}</version>
139 |             <scope>test</scope>
140 |         </dependency>
141 |         <dependency>
142 |             <groupId>org.apache.logging.log4j</groupId>
143 |             <artifactId>log4j-slf4j2-impl</artifactId>
144 |             <version>${log4j.version}</version>
145 |             <scope>test</scope>
146 |         </dependency>
147 | 
148 |         <dependency>
149 |             <groupId>org.testcontainers</groupId>
150 |             <artifactId>elasticsearch</artifactId>
151 |             <version>${testcontainers.version}</version>
152 |             <scope>test</scope>
153 |         </dependency>
154 |     </dependencies>
155 | </project>
156 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/src/test/java/fr/pilato/test/elasticsearch/hlclient/EsClientIT.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Licensed to Elasticsearch under one or more contributor
3 | * license agreements. See the NOTICE file distributed with
4 | * this work for additional information regarding copyright
5 | * ownership. Elasticsearch licenses this file to you under
6 | * the Apache License, Version 2.0 (the "License"); you may
7 | * not use this file except in compliance with the License.
8 | * You may obtain a copy of the License at
9 | *
10 | * http://www.apache.org/licenses/LICENSE-2.0
11 | *
12 | * Unless required by applicable law or agreed to in writing,
13 | * software distributed under the License is distributed on an
14 | * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 | * KIND, either express or implied. See the License for the
16 | * specific language governing permissions and limitations
17 | * under the License.
18 | */
19 |
20 | package fr.pilato.test.elasticsearch.hlclient;
21 |
22 | import co.elastic.clients.elasticsearch.ElasticsearchAsyncClient;
23 | import co.elastic.clients.elasticsearch.ElasticsearchClient;
24 | import co.elastic.clients.elasticsearch._helpers.bulk.BulkIngester;
25 | import co.elastic.clients.elasticsearch._helpers.bulk.BulkListener;
26 | import co.elastic.clients.elasticsearch._helpers.esql.jdbc.ResultSetEsqlAdapter;
27 | import co.elastic.clients.elasticsearch._helpers.esql.objects.ObjectsEsqlAdapter;
28 | import co.elastic.clients.elasticsearch._types.*;
29 | import co.elastic.clients.elasticsearch.cat.IndicesResponse;
30 | import co.elastic.clients.elasticsearch.cat.ShardsResponse;
31 | import co.elastic.clients.elasticsearch.cat.ThreadPoolResponse;
32 | import co.elastic.clients.elasticsearch.cluster.PutComponentTemplateResponse;
33 | import co.elastic.clients.elasticsearch.core.*;
34 | import co.elastic.clients.elasticsearch.core.search.HighlightField;
35 | import co.elastic.clients.elasticsearch.ilm.PutLifecycleResponse;
36 | import co.elastic.clients.elasticsearch.indices.*;
37 | import co.elastic.clients.elasticsearch.ingest.PutPipelineResponse;
38 | import co.elastic.clients.elasticsearch.ingest.SimulateResponse;
39 | import co.elastic.clients.elasticsearch.sql.TranslateResponse;
40 | import co.elastic.clients.elasticsearch.transform.GetTransformResponse;
41 | import co.elastic.clients.elasticsearch.transform.PutTransformResponse;
42 | import co.elastic.clients.json.JsonData;
43 | import co.elastic.clients.transport.TransportException;
44 | import co.elastic.clients.transport.endpoints.BinaryResponse;
45 | import co.elastic.clients.util.BinaryData;
46 | import co.elastic.clients.util.ContentType;
47 | import co.elastic.clients.util.NamedValue;
48 | import com.fasterxml.jackson.core.type.TypeReference;
49 | import com.fasterxml.jackson.databind.JsonNode;
50 | import com.fasterxml.jackson.databind.ObjectMapper;
51 | import com.fasterxml.jackson.databind.node.ObjectNode;
52 | import org.apache.logging.log4j.LogManager;
53 | import org.apache.logging.log4j.Logger;
54 | import org.junit.jupiter.api.*;
55 | import org.testcontainers.elasticsearch.ElasticsearchContainer;
56 | import org.testcontainers.utility.DockerImageName;
57 |
58 | import java.io.IOException;
59 | import java.io.InputStream;
60 | import java.io.StringReader;
61 | import java.nio.charset.StandardCharsets;
62 | import java.sql.ResultSet;
63 | import java.sql.SQLException;
64 | import java.util.*;
65 | import java.util.concurrent.CompletableFuture;
66 | import java.util.concurrent.ExecutionException;
67 | import java.util.concurrent.TimeUnit;
68 | import java.util.concurrent.TimeoutException;
69 | import java.util.concurrent.atomic.AtomicInteger;
70 | import java.util.concurrent.atomic.AtomicReference;
71 | import java.util.random.RandomGenerator;
72 |
73 | import static fr.pilato.test.elasticsearch.hlclient.SSLUtils.createContextFromCaCert;
74 | import static fr.pilato.test.elasticsearch.hlclient.SSLUtils.createTrustAllCertsContext;
75 | import static org.assertj.core.api.Assertions.*;
76 | import static org.junit.Assume.assumeNotNull;
77 |
78 | class EsClientIT {
79 |
80 | private static final Logger logger = LogManager.getLogger();
81 | private static ElasticsearchClient client = null;
82 | private static ElasticsearchAsyncClient asyncClient = null;
83 | private static final String PASSWORD = "changeme";
84 | private static final String PREFIX = "esclientit_";
85 |
86 | @BeforeAll
87 | static void startOptionallyTestContainers() throws IOException {
88 | final var props = new Properties();
89 | props.load(EsClientIT.class.getResourceAsStream("/version.properties"));
90 | final String version = props.getProperty("elasticsearch.version");
91 | logger.info("Starting testcontainers with Elasticsearch {}.", props.getProperty("elasticsearch.version"));
92 | // Start the container. This step might take some time...
93 | final ElasticsearchContainer container = new ElasticsearchContainer(
94 | DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch")
95 | .withTag(version))
96 | .withPassword(PASSWORD)
97 | .withReuse(true);
98 | container.start();
99 | final byte[] certAsBytes = container.copyFileFromContainer(
100 | "/usr/share/elasticsearch/config/certs/http_ca.crt",
101 | InputStream::readAllBytes);
102 | try {
103 | client = getClient("https://" + container.getHttpHostAddress(), certAsBytes);
104 | asyncClient = getAsyncClient("https://" + container.getHttpHostAddress(), certAsBytes);
105 | } catch (Exception e) {
106 | logger.debug("No cluster is running yet at https://{}.", container.getHttpHostAddress());
107 | }
108 |
109 | assumeNotNull(client);
110 | assumeNotNull(asyncClient);
111 | }
112 |
113 | @AfterAll
114 | static void elasticsearchClient() throws IOException {
115 | if (client != null) {
116 | client.close();
117 | }
118 | if (asyncClient != null) {
119 | asyncClient.close();
120 | }
121 | }
122 |
123 | static private ElasticsearchClient getClient(final String elasticsearchServiceAddress, final byte[] certificate) throws Exception {
124 | // Create the API client
125 | final ElasticsearchClient client = ElasticsearchClient.of(b -> b
126 | .host(elasticsearchServiceAddress)
127 | .sslContext(certificate != null ? createContextFromCaCert(certificate) : createTrustAllCertsContext())
128 | .usernameAndPassword("elastic", PASSWORD)
129 | );
130 | final InfoResponse info = client.info();
131 | logger.info("Client connected to a cluster running version {} at {}.", info.version().number(), elasticsearchServiceAddress);
132 | return client;
133 | }
134 |
135 | static private ElasticsearchAsyncClient getAsyncClient(final String elasticsearchServiceAddress, final byte[] certificate) throws Exception {
136 | // Create the API client
137 | final ElasticsearchAsyncClient client = ElasticsearchAsyncClient.of(b -> b
138 | .host(elasticsearchServiceAddress)
139 | .sslContext(certificate != null ? createContextFromCaCert(certificate) : createTrustAllCertsContext())
140 | .usernameAndPassword("elastic", PASSWORD)
141 | );
142 | final InfoResponse info = client.info().get();
143 | logger.info("Async Client connected to a cluster running version {} at {}.", info.version().number(), elasticsearchServiceAddress);
144 | return client;
145 | }
146 |
147 | List<String> indices;
148 | String indexName;
149 |
150 | @BeforeEach
151 | void cleanIndexBeforeRun(final TestInfo testInfo) {
152 | indices = new ArrayList<>();
153 | final var methodName = testInfo.getTestMethod().orElseThrow().getName();
154 | indexName = PREFIX + methodName.toLowerCase(Locale.ROOT);
155 |
156 | logger.debug("Using [{}] as the index name", indexName);
157 | setAndRemoveIndex(indexName);
158 | }
159 |
160 | @AfterEach
161 | void cleanIndexAfterRun() {
162 | indices.forEach(this::removeIndex);
163 | }
164 |
165 | @Test
166 | void getDocument() throws IOException {
167 | client.index(ir -> ir.index(indexName).id("1")
168 | .withJson(new StringReader("{\"foo\":\"bar\", \"application_id\": 6}")));
169 | {
170 | final GetResponse<ObjectNode> getResponse = client.get(gr -> gr.index(indexName).id("1"), ObjectNode.class);
171 | assertThat(getResponse.source()).hasToString("{\"foo\":\"bar\",\"application_id\":6}");
172 | }
173 | {
174 | // With source filtering
175 | final GetResponse<ObjectNode> getResponse = client.get(gr -> gr.index(indexName).id("1").sourceIncludes("application_id"), ObjectNode.class);
176 | assertThat(getResponse.source()).hasToString("{\"application_id\":6}");
177 | }
178 | {
179 | // Get as Map
180 | final GetResponse<ObjectNode> getResponse = client.get(gr -> gr.index(indexName).id("1"), ObjectNode.class);
181 | final ObjectMapper mapper = new ObjectMapper();
182 | final Map<String, Object> result = mapper.convertValue(getResponse.source(), new TypeReference<>() {});
183 | assertThat(result)
184 | .contains(entry("foo", "bar"))
185 | .contains(entry("application_id", 6));
186 | }
187 | }
188 |
189 | @Test
190 | void exists() throws IOException {
191 | client.index(ir -> ir.index(indexName).id("1")
192 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
193 | assertThat(client.exists(gr -> gr.index(indexName).id("1")).value()).isTrue();
194 | assertThat(client.exists(gr -> gr.index(indexName).id("2")).value()).isFalse();
195 | }
196 |
197 | @Test
198 | void createIndex() throws IOException {
199 | final CreateIndexResponse response = client.indices().create(cir -> cir.index(indexName)
200 | .mappings(m -> m.properties("content", p -> p.text(tp -> tp))));
201 | assertThat(response.acknowledged()).isTrue();
202 | }
203 |
204 | @Test
205 | void callInfo() throws IOException {
206 | final InfoResponse info = client.info();
207 | final String version = info.version().number();
208 | assertThat(version).isNotBlank();
209 | assertThat(info.clusterName()).isNotBlank();
210 | assertThat(info.tagline()).isEqualTo("You Know, for Search");
211 | }
212 |
213 | @Test
214 | void createMapping() throws IOException {
215 | client.indices().create(cir -> cir.index(indexName));
216 | final PutMappingResponse response = client.indices().putMapping(pmr -> pmr.index(indexName)
217 | .properties("foo", p -> p.text(tp -> tp)));
218 | assertThat(response.acknowledged()).isTrue();
219 | }
220 |
221 | @Test
222 | void createData() throws IOException {
223 | final IndexResponse indexResponse = client.index(ir -> ir.index(indexName).id("1")
224 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
225 | assertThat(indexResponse.result()).isEqualTo(Result.Created);
226 | client.indices().refresh(rr -> rr.index(indexName));
227 | final SearchResponse<Void> response = client.search(sr -> sr.index(indexName), Void.class);
228 | assertThat(response.hits().total()).isNotNull();
229 | assertThat(response.hits().total().value()).isEqualTo(1);
230 | }
231 |
232 | @Test
233 | void searchData() throws IOException {
234 | client.index(ir -> ir.index(indexName).id("1")
235 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
236 | client.indices().refresh(rr -> rr.index(indexName));
237 | {
238 | final SearchResponse<Void> response = client.search(sr -> sr
239 | .index(indexName)
240 | .query(q -> q.match(mq -> mq.field("foo").query("bar"))),
241 | Void.class);
242 | assertThat(response.hits().total()).isNotNull();
243 | assertThat(response.hits().total().value()).isEqualTo(1);
244 | assertThat(response.hits().hits().get(0).id()).isEqualTo("1");
245 | }
246 | {
247 | final SearchResponse<Void> response = client.search(sr -> sr
248 | .index(indexName)
249 | .query(q -> q.term(tq -> tq.field("foo").value("bar"))),
250 | Void.class);
251 | assertThat(response.hits().total()).isNotNull();
252 | assertThat(response.hits().total().value()).isEqualTo(1);
253 | assertThat(response.hits().hits().get(0).id()).isEqualTo("1");
254 | }
255 | {
256 | final String matchAllQuery = Base64.getEncoder().encodeToString("{\"match_all\":{}}".getBytes(StandardCharsets.UTF_8));
257 | final SearchResponse<Void> response = client.search(sr -> sr
258 | .index(indexName)
259 | .query(q -> q.wrapper(wq -> wq.query(matchAllQuery))),
260 | Void.class);
261 | assertThat(response.hits().total()).isNotNull();
262 | assertThat(response.hits().total().value()).isEqualTo(1);
263 | assertThat(response.hits().hits().get(0).id()).isEqualTo("1");
264 | }
265 | {
266 | final SearchResponse<Void> response = client.search(sr -> sr
267 | .index(indexName)
268 | .query(q -> q.matchAll(maq -> maq))
269 | .trackScores(true),
270 | Void.class);
271 | assertThat(response.hits().total()).isNotNull();
272 | assertThat(response.hits().total().value()).isEqualTo(1);
273 | assertThat(response.hits().hits().get(0).id()).isEqualTo("1");
274 | }
275 | }
276 |
277 | @Test
278 | void translateSqlQuery() throws IOException {
279 | client.index(ir -> ir.index(indexName).id("1")
280 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
281 | client.indices().refresh(rr -> rr.index(indexName));
282 |
283 | final TranslateResponse translateResponse = client.sql().translate(tr -> tr
284 | .query("SELECT * FROM " + indexName + " WHERE foo='bar' limit 10"));
285 | assertThat(translateResponse.query()).isNotNull();
286 | assertThat(translateResponse.size())
287 | .isNotNull()
288 | .isEqualTo(10);
289 |
290 | final SearchResponse<Void> response = client.search(sr -> sr
291 | .index(indexName)
292 | .query(translateResponse.query())
293 | .size(translateResponse.size().intValue()),
294 | Void.class);
295 | assertThat(response.hits().total()).isNotNull();
296 | assertThat(response.hits().total().value()).isEqualTo(1);
297 | assertThat(response.hits().hits().get(0).id()).isEqualTo("1");
298 | }
299 |
300 | @Test
301 | void transformApi() throws IOException {
302 | final var id = "test-get";
303 | try {
304 | client.transform().deleteTransform(dtr -> dtr.transformId(id));
305 | } catch (ElasticsearchException ignored) { }
306 | client.index(ir -> ir.index(indexName).id("1")
307 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
308 | client.indices().refresh(rr -> rr.index(indexName));
309 | final PutTransformResponse putTransformResponse = client.transform().putTransform(ptr -> ptr
310 | .transformId(id)
311 | .source(s -> s.index(indexName).query(q -> q.matchAll(maq -> maq)))
312 | .dest(d -> d.index("pivot-dest"))
313 | .pivot(p -> p
314 | .groupBy("reviewer", pgb -> pgb.terms(ta -> ta.field("user_id")))
315 | .aggregations("avg_rating", a -> a.avg(aa -> aa.field("stars")))
316 | )
317 | .description("this is a test transform")
318 | );
319 | assertThat(putTransformResponse.acknowledged()).isTrue();
320 |
321 | final GetTransformResponse getTransformResponse = client.transform().getTransform(gt -> gt.transformId(id));
322 | assertThat(getTransformResponse.count()).isEqualTo(1);
323 | }
324 |
325 | @Test
326 | void highlight() throws IOException {
327 | client.index(ir -> ir.index(indexName)
328 | .withJson(new StringReader("{\"foo\":\"bar baz\"}")));
329 | client.indices().refresh(rr -> rr.index(indexName));
330 | final SearchResponse<Void> response = client.search(sr -> sr
331 | .index(indexName)
332 | .query(q -> q.match(mq -> mq.field("foo").query("bar")))
333 | .highlight(h -> h
334 | .fields(NamedValue.of("foo", HighlightField.of(hf -> hf.maxAnalyzedOffset(10)))))
335 | , Void.class);
336 | assertThat(response.hits().total()).isNotNull();
337 | assertThat(response.hits().total().value()).isEqualTo(1);
338 | assertThat(response.hits().hits().get(0).highlight())
339 | .isNotNull()
340 | .containsExactly(entry("foo", Collections.singletonList("bar baz")));
341 | }
342 |
343 | @Test
344 | void termsAgg() throws IOException {
345 | client.index(ir -> ir.index(indexName).id("1")
346 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
347 | client.index(ir -> ir.index(indexName).id("2")
348 | .withJson(new StringReader("{\"foo\":\"bar\"}")));
349 | client.indices().refresh(rr -> rr.index(indexName));
350 | final SearchResponse<Void> response = client.search(sr -> sr
351 | .index(indexName)
352 | .aggregations("top10foo", a -> a
353 | .terms(ta -> ta.field("foo.keyword").size(10)))
354 | , Void.class);
355 | assertThat(response.aggregations())
356 | .isNotNull()
357 | .containsKey("top10foo");
358 | assertThat(response.aggregations().get("top10foo").sterms()).isNotNull();
359 | assertThat(response.aggregations().get("top10foo").sterms().buckets()).isNotNull();
360 | assertThat(response.aggregations().get("top10foo").sterms().buckets().array())
361 | .hasSize(1)
362 | .allSatisfy(bucket -> {
363 | assertThat(bucket.key()).isNotNull();
364 | assertThat(bucket.key().stringValue()).isEqualTo("bar");
365 | assertThat(bucket.docCount()).isEqualTo(2);
366 | });
367 | }
368 |
369 | @Test
370 | void bulkIngester() throws IOException {
371 | final var size = 1000;
372 | try (final BulkIngester<Void> ingester = BulkIngester.of(b -> b
373 | .client(client)
374 | .globalSettings(gs -> gs
375 | .index(indexName)
376 | )
377 | .listener(new BulkListener<>() {
378 | @Override
379 | public void beforeBulk(long executionId, BulkRequest request, List<Void> voids) {
380 | logger.debug("going to execute bulk of {} requests", request.operations().size());
381 | }
382 |
383 | @Override
384 | public void afterBulk(long executionId, BulkRequest request, List<Void> voids, BulkResponse response) {
385 | logger.debug("bulk executed {} errors", response.errors() ? "with" : "without");
386 | }
387 |
388 | @Override
389 | public void afterBulk(long executionId, BulkRequest request, List<Void> voids, Throwable failure) {
390 | logger.warn("error while executing bulk", failure);
391 | }
392 | })
393 | .maxOperations(10)
394 | .maxSize(1_000_000)
395 | .flushInterval(5, TimeUnit.SECONDS)
396 | )) {
397 | final var data = BinaryData.of("{\"foo\":\"bar\"}".getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
398 | for (int i = 0; i < size; i++) {
399 | ingester.add(bo -> bo.index(io -> io.document(data)));
400 | }
401 | }
402 |
403 | // Make sure to close (and flush) the bulk ingester before exiting if you are not using try-with-resources
404 | // ingester.close();
405 |
406 | client.indices().refresh(rr -> rr.index(indexName));
407 | final SearchResponse<Void> response = client.search(sr -> sr.index(indexName), Void.class);
408 | assertThat(response.hits().total()).isNotNull();
409 | assertThat(response.hits().total().value()).isEqualTo(size);
410 | }
411 |
412 | @Test
413 | void bulkIngesterFlush() throws IOException {
414 | final var size = 100_000;
415 | try (final BulkIngester<Void> ingester = BulkIngester.of(b -> b
416 | .client(client)
417 | .globalSettings(gs -> gs
418 | .index(indexName)
419 | )
420 | .maxOperations(10_000)
421 | .flushInterval(5, TimeUnit.SECONDS)
422 | )) {
423 | final var data = BinaryData.of("{\"foo\":\"bar\"}".getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
424 | for (int i = 0; i < size; i++) {
425 | ingester.add(bo -> bo.index(io -> io.document(data)));
426 | }
427 |
428 | // Calling flush should actually flush the ingester and send the latest docs
429 | ingester.flush();
430 |
431 | client.indices().refresh(rr -> rr.index(indexName));
432 | final SearchResponse<Void> response = client.search(sr -> sr.index(indexName).trackTotalHits(tth -> tth.enabled(true)), Void.class);
433 | assertThat(response.hits().total()).isNotNull();
434 | // But this test is failing as the flush is not sending the last batch
435 | // assertThat(response.hits().total().value()).isEqualTo(size);
436 | assertThat(response.hits().total().value()).isLessThan(size);
437 | }
438 | }
439 |
440 | @Test
441 | void rangeQuery() throws IOException {
442 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"foo\":1}")));
443 | client.index(ir -> ir.index(indexName).id("2").withJson(new StringReader("{\"foo\":2}")));
444 | client.indices().refresh(rr -> rr.index(indexName));
445 | final SearchResponse<ObjectNode> response = client.search(sr -> sr.index(indexName)
446 | .query(q -> q.range(rq -> rq
447 | .number(nrq -> nrq.field("foo").gte(0.0).lte(1.0))
448 | ))
449 | , ObjectNode.class);
450 | assertThat(response.hits().total()).isNotNull();
451 | assertThat(response.hits().total().value()).isEqualTo(1);
452 | assertThat(response.hits().hits().get(0).id()).isEqualTo("1");
453 | }
454 |
455 | @Test
456 | void bulk() throws IOException {
457 | final var size = 1_000;
458 | final var goodData = new AtomicInteger();
459 | final var data = BinaryData.of("{\"foo\":\"bar\"}".getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
460 | final var wrongData = BinaryData.of("{\"foo\":\"bar}".getBytes(StandardCharsets.UTF_8), ContentType.APPLICATION_JSON);
461 | final BulkResponse response = client.bulk(br -> {
462 | br.index(indexName);
463 | for (int i = 0; i < size; i++) {
464 | if (RandomGenerator.getDefault().nextBoolean()) {
465 | br.operations(o -> o.index(ir -> ir.document(wrongData)));
466 | } else {
467 | goodData.getAndIncrement();
468 | br.operations(o -> o.index(ir -> ir.document(data)));
469 | }
470 | }
471 | return br;
472 | });
473 | logger.debug("bulk executed in {} ms {} errors", response.took(), response.errors() ? "with" : "without");
474 | if (response.errors()) {
475 | assertThat(response.items())
476 | .filteredOn(item -> item.error() != null)
477 | .allSatisfy(item -> {
478 | assertThat(item.id()).isNotNull();
479 | assertThat(item.error()).isNotNull();
480 | assertThat(item.error().reason()).isNotNull();
481 | logger.trace("Error {} for id {}", item.error().reason(), item.id());
482 | });
483 | }
484 |
485 | client.indices().refresh(rr -> rr.index(indexName));
486 | final SearchResponse<Void> searchResponse = client.search(sr -> sr.index(indexName), Void.class);
487 | assertThat(searchResponse.hits().total()).isNotNull();
488 | assertThat(searchResponse.hits().total().value()).isEqualTo(goodData.get());
489 | }
490 |
491 | @Test
492 | void searchWithBeans() throws IOException {
493 | final var p1 = new Person();
494 | p1.setId("1");
495 | p1.setName("Foo");
496 | final var p2 = new Person();
497 | p2.setId("2");
498 | p2.setName("Bar");
499 | client.index(ir -> ir.index(indexName).id(p1.getId()).document(p1));
500 | client.index(ir -> ir.index(indexName).id(p2.getId()).document(p2));
501 | client.indices().refresh(rr -> rr.index(indexName));
502 | final SearchResponse<Person> response = client.search(sr -> sr.index(indexName), Person.class);
503 | assertThat(response.hits()).isNotNull();
504 | assertThat(response.hits().hits()).allSatisfy(hit -> {
505 | assertThat(hit.id()).isNotNull();
506 | assertThat(hit.source()).isNotNull();
507 | assertThat(hit.source().getId()).isEqualTo(hit.id());
508 | assertThat(hit.source().getName()).isNotNull();
509 | });
510 | }
511 |
512 | @Test
513 | void reindex() throws IOException {
514 | // Check the error is thrown when the source index does not exist
515 | assertThatThrownBy(() -> client.reindex(rr -> rr
516 | .source(s -> s.index(PREFIX + "does-not-exists")).dest(d -> d.index("foo"))))
517 | .isInstanceOfSatisfying(ElasticsearchException.class, e -> assertThat(e.status()).isEqualTo(404));
518 |
519 | // A regular reindex operation
520 | setAndRemoveIndex(indexName + "-dest");
521 |
522 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"foo\":1}")));
523 | client.indices().refresh(rr -> rr.index(indexName));
524 | final ReindexResponse reindexResponse = client.reindex(rr -> rr
525 | .source(s -> s.index(indexName)).dest(d -> d.index(indexName + "-dest")));
526 | assertThat(reindexResponse.total()).isEqualTo(1);
527 | }
528 |
529 | @Test
530 | void geoPointSort() throws IOException {
531 | client.indices().create(cir -> cir.index(indexName));
532 | client.indices().putMapping(pmr -> pmr.index(indexName).properties("location", p -> p.geoPoint(gp -> gp)));
533 | final var p1 = new Person();
534 | p1.setId("1");
535 | p1.setName("Foo");
536 | p1.setLocation(new GeoPoint(49.0404, 2.0174));
537 | final var p2 = new Person();
538 | p2.setId("2");
539 | p2.setName("Bar");
540 | p2.setLocation(new GeoPoint(38.7330, -109.8774));
541 | client.index(ir -> ir.index(indexName).id(p1.getId()).document(p1));
542 | client.index(ir -> ir.index(indexName).id(p2.getId()).document(p2));
543 | client.indices().refresh(rr -> rr.index(indexName));
544 | final SearchResponse<Person> response = client.search(sr -> sr.index(indexName)
545 | .sort(so -> so
546 | .geoDistance(gd -> gd
547 | .field("location")
548 | .location(
549 | new GeoLocation.Builder()
550 | .latlon(ll -> ll.lat(49.0404).lon(2.0174))
551 | .build()
552 | )
553 | .order(SortOrder.Asc)
554 | .unit(DistanceUnit.Kilometers)
555 | )
556 | ), Person.class);
557 |
558 | assertThat(response.hits().total()).isNotNull();
559 | assertThat(response.hits().total().value()).isEqualTo(2);
560 | assertThat(response.hits().hits()).satisfiesExactly(hit1 -> {
561 | assertThat(hit1.id()).isEqualTo("1");
562 | assertThat(hit1.sort()).hasSize(1);
563 | assertThat(hit1.sort().get(0).doubleValue()).isEqualTo(0.0);
564 | }, hit2 -> {
565 | assertThat(hit2.id()).isEqualTo("2");
566 | assertThat(hit2.sort()).hasSize(1);
567 | assertThat(hit2.sort().get(0).doubleValue()).isEqualTo(8187.4318605250455);
568 | });
569 | }
570 |
571 | @Test
572 | void geoPointSearch() throws IOException {
573 | client.indices().create(cir -> cir.index(indexName));
574 | client.indices().putMapping(pmr -> pmr.index(indexName).properties("location", p -> p.geoPoint(gp -> gp)));
575 | final var p1 = new Person();
576 | p1.setId("1");
577 | p1.setName("Foo");
578 | p1.setLocation(new GeoPoint(49.0404, 2.0174));
579 | final var p2 = new Person();
580 | p2.setId("2");
581 | p2.setName("Bar");
582 | p2.setLocation(new GeoPoint(38.7330, -109.8774));
583 | client.index(ir -> ir.index(indexName).id(p1.getId()).document(p1));
584 | client.index(ir -> ir.index(indexName).id(p2.getId()).document(p2));
585 | client.indices().refresh(rr -> rr.index(indexName));
586 | final SearchResponse<Person> response = client.search(sr -> sr.index(indexName)
587 | .query(q -> q.geoBoundingBox(gbb -> gbb
588 | .field("location")
589 | .boundingBox(bbq -> bbq
590 | .coords(c -> c
591 | .bottom(0).left(0).top(50).right(10))
592 | )))
593 | , Person.class);
594 |
595 | assertThat(response.hits().total()).isNotNull();
596 | assertThat(response.hits().total().value()).isEqualTo(1);
597 | assertThat(response.hits().hits()).satisfiesExactly(hit -> assertThat(hit.id()).isEqualTo("1"));
598 | }
599 |
600 | @Test
601 | void searchWithTimeout() throws IOException, ExecutionException, InterruptedException {
602 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"foo\":\"bar\"}")));
603 | client.indices().refresh(rr -> rr.index(indexName));
604 |
605 | final var timeoutException = new AtomicReference<>(false);
606 |
607 | final CompletableFuture<SearchResponse<Void>> future = asyncClient.search(sr -> sr
608 | .index(indexName)
609 | .query(q -> q.match(mq -> mq.field("foo").query("bar"))),
610 | Void.class)
611 | .orTimeout(1, TimeUnit.NANOSECONDS)
612 | .exceptionally(e -> {
613 | if (e instanceof TimeoutException) {
614 | timeoutException.set(true);
615 | } else {
616 | logger.error("Got an unexpected exception", e);
617 | }
618 | return null;
619 | });
620 | assertThat(future.get()).isNull();
621 | assertThat(timeoutException.get()).isTrue();
622 |
623 | timeoutException.set(false);
624 | final SearchResponse<Void> response = asyncClient.search(sr -> sr
625 | .index(indexName)
626 | .query(q -> q.match(mq -> mq.field("foo").query("bar"))),
627 | Void.class)
628 | .orTimeout(10, TimeUnit.SECONDS)
629 | .exceptionally(e -> {
630 | if (e instanceof TimeoutException) {
631 | timeoutException.set(true);
632 | } else {
633 | logger.error("Got an unexpected exception", e);
634 | }
635 | return null;
636 | })
637 | .get();
638 | assertThat(timeoutException.get()).isFalse();
639 | assertThat(response.hits().total()).isNotNull();
640 | assertThat(response.hits().total().value()).isEqualTo(1);
641 | }
642 |
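/**
 * Exercises the typed _cat APIs (thread_pool, indices, shards) and checks
 * that the most common columns are populated on every record.
 */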
643 | @Test
644 | void catApi() throws IOException {
645 | final ThreadPoolResponse threadPool = client.cat().threadPool();
646 | assertThat(threadPool).isNotNull();
647 | assertThat(threadPool.threadPools()).allSatisfy(record -> {
648 | assertThat(record.nodeName()).isNotNull();
649 | assertThat(record.name()).isNotNull();
650 | assertThat(record.active()).isNotNull();
651 | assertThat(record.queue()).isNotNull();
652 | assertThat(record.rejected()).isNotNull();
653 | });
654 | final IndicesResponse indices = client.cat().indices();
655 | assertThat(indices).isNotNull();
656 | assertThat(indices.indices()).allSatisfy(record -> {
657 | assertThat(record.index()).isNotNull();
658 | assertThat(record.docsCount()).isNotNull();
659 | assertThat(record.docsDeleted()).isNotNull();
660 | });
661 | final ShardsResponse shards = client.cat().shards();
662 | assertThat(shards).isNotNull();
663 | assertThat(shards.shards()).allSatisfy(record -> {
664 | assertThat(record.index()).isNotNull();
665 | assertThat(record.state()).isIn("STARTED", "UNASSIGNED");
666 | assertThat(record.prirep()).isIn("p", "r");
667 | });
668 | }
669 |
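/**
 * Creates an ingest pipeline twice (first with a script processor, then with a set processor)
 * and simulates it against a sample document to verify the "foo" field is overwritten with "bar".
 */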
670 | @Test
671 | void ingestPipelines() throws IOException {
672 | // Define some pipelines, removing any pre-existing pipeline with the same id first
673 | try {
674 | client.ingest().deletePipeline(pr -> pr.id("my-pipeline"));
675 | } catch (final ElasticsearchException ignored) { }
676 | {
677 | final PutPipelineResponse response = client.ingest().putPipeline(pr -> pr
678 | .id("my-pipeline")
679 | .processors(p -> p
680 | .script(s -> s
681 | .lang(ScriptLanguage.Painless)
682 | .source(src -> src.scriptString("ctx.foo = 'bar'"))
683 | )
684 | )
685 | );
686 | assertThat(response.acknowledged()).isTrue();
687 | }
688 | {
689 | final PutPipelineResponse response = client.ingest().putPipeline(pr -> pr
690 | .id("my-pipeline")
691 | .processors(p -> p
692 | .set(s -> s
693 | .field("foo")
694 | .value(JsonData.of("bar"))
695 | .ignoreFailure(true)
696 | )
697 | )
698 | );
699 | assertThat(response.acknowledged()).isTrue();
700 | }
701 | {
702 | final SimulateResponse response = client.ingest().simulate(sir -> sir
703 | .id("my-pipeline")
704 | .docs(d -> d
705 | .source(JsonData.fromJson("{\"foo\":\"baz\"}"))
706 | )
707 | );
708 | assertThat(response.docs())
709 | .hasSize(1)
710 | .allSatisfy(doc -> {
711 | assertThat(doc.doc()).isNotNull();
712 | assertThat(doc.doc().source()).isNotNull();
713 | assertThat(doc.doc().source()).allSatisfy((key, value) -> {
714 | assertThat(key).isEqualTo("foo");
715 | assertThat(value).satisfies(jsonData -> assertThat(jsonData.to(String.class)).isEqualTo("bar"));
716 | });
717 | });
718 | }
719 | }
720 |
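/**
 * Fetches only the _source of a document with the Get Source API,
 * mapping it to a Jackson ObjectNode instead of a dedicated bean.
 */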
721 | @Test
722 | void sourceRequest() throws IOException {
723 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"foo\":\"bar\"}")));
724 | client.indices().refresh(rr -> rr.index(indexName));
725 | final GetSourceResponse<ObjectNode> source = client.getSource(gsr -> gsr.index(indexName).id("1"), ObjectNode.class);
726 | assertThat(source.source())
727 | .isNotNull()
728 | .satisfies(jsonData -> assertThat(jsonData.toString()).isEqualTo("{\"foo\":\"bar\"}"));
729 | }
730 |
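/**
 * Removes documents matching a query with the Delete By Query API
 * and verifies the index is empty afterwards.
 */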
731 | @Test
732 | void deleteByQuery() throws IOException {
733 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"foo\":\"bar\"}")));
734 | client.indices().refresh(rr -> rr.index(indexName));
735 | final SearchResponse<Void> response1 = client.search(sr -> sr.index(indexName), Void.class);
736 | assertThat(response1.hits().total()).isNotNull();
737 | assertThat(response1.hits().total().value()).isEqualTo(1);
738 | final DeleteByQueryResponse deleteByQueryResponse = client.deleteByQuery(dbq -> dbq
739 | .index(indexName)
740 | .query(q -> q
741 | .match(mq -> mq
742 | .field("foo")
743 | .query("bar"))));
744 | assertThat(deleteByQueryResponse.deleted()).isEqualTo(1);
745 | client.indices().refresh(rr -> rr.index(indexName));
746 | final SearchResponse<Void> response2 = client.search(sr -> sr.index(indexName), Void.class);
747 | assertThat(response2.hits().total()).isNotNull();
748 | assertThat(response2.hits().total().value()).isEqualTo(0);
749 | }
750 |
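/**
 * Partially updates a document with a Painless script that increments a counter field.
 */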
751 | @Test
752 | void updateDocument() throws IOException {
753 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"show_count\":0}")));
754 | client.update(ur -> ur.index(indexName).id("1").script(
755 | s -> s
756 | .lang(ScriptLanguage.Painless)
757 | .source(src -> src.scriptString("ctx._source.show_count += 1"))
758 | ), ObjectNode.class);
759 | final GetResponse<ObjectNode> response = client.get(gr -> gr.index(indexName).id("1"), ObjectNode.class);
760 | assertThat(response.source())
761 | .isNotNull()
762 | .satisfies(o -> assertThat(o.toString()).isEqualTo("{\"show_count\":1}"));
763 | }
764 |
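/**
 * Creates a component template with builder lambdas for settings and mappings,
 * then overwrites it under the same name with a mapping for a date field.
 */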
765 | @Test
766 | void createComponentTemplate() throws IOException {
767 | {
768 | final PutComponentTemplateResponse response = client.cluster().putComponentTemplate(pct -> pct
769 | .name("my_component_template")
770 | .template(t -> t
771 | .settings(s -> s.numberOfShards("1").numberOfReplicas("0"))
772 | .mappings(m -> m
773 | .properties("foo", p -> p.text(tp -> tp))
774 | )
775 | )
776 | );
777 | assertThat(response.acknowledged()).isTrue();
778 | }
779 |
780 | {
781 | // Overwrite the same component template with a different mapping
782 | final PutComponentTemplateResponse response = client.cluster().putComponentTemplate(pct -> pct
783 | .name("my_component_template")
784 | .template(t -> t
785 | .mappings(
786 | m -> m.properties("@timestamp", p -> p.date(dp -> dp))
787 | )
788 | )
789 | );
790 | assertThat(response.acknowledged()).isTrue();
791 | }
792 | }
793 |
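/**
 * Creates an index template composed of the component template defined above,
 * adding an alias with index routing plus its own settings and mappings.
 */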
794 | @Test
795 | void createIndexTemplate() throws IOException {
796 | client.cluster().putComponentTemplate(pct -> pct
797 | .name("my_component_template")
798 | .template(t -> t
799 | .settings(s -> s.numberOfShards("1").numberOfReplicas("0"))
800 | .mappings(m -> m
801 | .properties("foo", p -> p.text(tp -> tp))
802 | )
803 | )
804 | );
805 | final PutIndexTemplateResponse response = client.indices().putIndexTemplate(pit -> pit
806 | .name("my_index_template")
807 | .indexPatterns("my-index-*")
808 | .composedOf("my_component_template")
809 | .template(t -> t
810 | .aliases("foo", a -> a
811 | .indexRouting("bar")
812 | )
813 | .settings(s -> s.numberOfShards("1").numberOfReplicas("0"))
814 | .mappings(m -> m
815 | .properties("foo", p -> p.text(tp -> tp))
816 | )
817 | )
818 | );
819 | assertThat(response.acknowledged()).isTrue();
820 | }
821 |
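/**
 * Sets up an index with a sparse_vector field and an inference pipeline for semantic search.
 * Running the sparse_vector query requires the ELSER v2 model and an appropriate license,
 * so on this test cluster the call is expected to fail with a 403 license error.
 */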
822 | @Test
823 | void elser() throws IOException {
824 | // Create the index with sparse vector
825 | client.indices().create(cir -> cir.index(indexName).mappings(m -> m
826 | .properties("content", p -> p.text(tp -> tp))
827 | .properties("content_embedding", p -> p.sparseVector(sp -> sp))
828 | ));
829 |
830 | // Create the pipeline
831 | // This requires the ELSER v2 model to be deployed and started
832 | client.ingest().putPipeline(pr -> pr
833 | .id("elser-v2-test")
834 | .processors(p -> p
835 | .inference(i -> i
836 | .modelId(".elser_model_2")
837 | .fieldMap("content", JsonData.of("content"))
838 | .targetField("content_embedding")
839 | )
840 | )
841 | );
842 |
843 | // We are expecting an exception as the license on this test cluster does not allow the inference API
844 | assertThatThrownBy(() -> {
845 | // Search
846 | client.search(sr -> sr
847 | .index(indexName)
848 | .query(q -> q.sparseVector(sv -> sv
849 | .field("content_embedding")
850 | .inferenceId("elser-v2-test")
851 | .query("How to avoid muscle soreness after running?")
852 | )), ObjectNode.class);
853 | })
854 | .withFailMessage("We are expecting an exception as inference is not allowed by the current license")
855 | .isInstanceOfSatisfying(ElasticsearchException.class, exception -> {
856 | assertThat(exception.error().reason()).isEqualTo("current license is non-compliant for [inference]");
857 | assertThat(exception.status()).isEqualTo(403);
858 | });
859 | }
860 |
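/**
 * Creates (or replaces) an ILM policy whose hot phase rolls the index over
 * after 5 days or 10gb, whichever condition is met first.
 */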
861 | @Test
862 | void testIlm() throws IOException {
863 | try {
864 | client.ilm().deleteLifecycle(dlr -> dlr.name(indexName + "-ilm"));
865 | } catch (IOException | ElasticsearchException ignored) { }
866 | PutLifecycleResponse response = client.ilm().putLifecycle(plr -> plr
867 | .name(indexName + "-ilm")
868 | .policy(p -> p
869 | .phases(ph -> ph
870 | .hot(h -> h
871 | .actions(a -> a
872 | .rollover(r -> r
873 | .maxAge(t -> t.time("5d"))
874 | .maxSize("10gb")
875 | )
876 | )
877 | )
878 | )
879 | )
880 | );
881 | assertThat(response.acknowledged()).isTrue();
882 | }
883 |
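/**
 * Uses an exists query to find only the documents that define the "bar" field.
 */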
884 | @Test
885 | void searchExistField() throws IOException {
886 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("{\"foo\":\"baz\"}")));
887 | client.index(ir -> ir.index(indexName).id("2").withJson(new StringReader("{\"foo\":\"baz\", \"bar\":\"baz\"}")));
888 | client.indices().refresh(rr -> rr.index(indexName));
889 | final SearchResponse<Void> response = client.search(sr -> sr
890 | .index(indexName)
891 | .query(q -> q.exists(eq -> eq.field("bar")))
892 | , Void.class);
893 | assertThat(response.hits().total()).isNotNull();
894 | assertThat(response.hits().total().value()).isEqualTo(1);
895 | assertThat(response.hits().hits()).satisfiesExactly(hit -> assertThat(hit.id()).isEqualTo("2"));
896 | }
897 |
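/**
 * Builds three levels of nested terms aggregations (country > state > city)
 * and navigates the typed buckets of the response.
 */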
898 | @Test
899 | void multipleAggs() throws IOException {
900 | client.index(ir -> ir.index(indexName).withJson(new StringReader("{\"country\":\"france\",\"state\":\"paris\",\"city\":\"paris\"}")));
901 | client.index(ir -> ir.index(indexName).withJson(new StringReader("{\"country\":\"germany\",\"state\":\"berlin\",\"city\":\"berlin\"}")));
902 | client.index(ir -> ir.index(indexName).withJson(new StringReader("{\"country\":\"italy\",\"state\":\"rome\",\"city\":\"rome\"}")));
903 | client.indices().refresh(rr -> rr.index(indexName));
904 | final SearchResponse<Void> response = client.search(sr -> sr
905 | .index(indexName)
906 | .aggregations("country", a -> a.terms(ta -> ta.field("country.keyword"))
907 | .aggregations("state", sa -> sa.terms(ta -> ta.field("state.keyword"))
908 | .aggregations("city", ca -> ca.terms(ta -> ta.field("city.keyword")))
909 | )
910 | )
911 | , Void.class);
912 |
913 | assertThat(response.aggregations())
914 | .isNotNull()
915 | .hasEntrySatisfying("country", countries -> {
916 | assertThat(countries.sterms()).isNotNull();
917 | assertThat(countries.sterms().buckets()).isNotNull();
918 | assertThat(countries.sterms().buckets().array())
919 | .hasSize(3)
920 | .anySatisfy(country -> {
921 | assertThat(country.key()).isNotNull();
922 | assertThat(country.key().stringValue()).isEqualTo("france");
923 | assertThat(country.docCount()).isEqualTo(1);
924 | assertThat(country.aggregations())
925 | .hasEntrySatisfying("state", state -> {
926 | assertThat(state.sterms()).isNotNull();
927 | assertThat(state.sterms().buckets()).isNotNull();
928 | assertThat(state.sterms().buckets().array())
929 | .hasSize(1)
930 | .satisfiesExactly(stateBucket -> {
931 | assertThat(stateBucket.key()).isNotNull();
932 | assertThat(stateBucket.key().stringValue()).isEqualTo("paris");
933 | assertThat(stateBucket.docCount()).isEqualTo(1);
934 | assertThat(stateBucket.aggregations())
935 | .containsKey("city")
936 | .hasEntrySatisfying("city", city -> {
937 | assertThat(city.sterms()).isNotNull();
938 | assertThat(city.sterms().buckets()).isNotNull();
939 | assertThat(city.sterms().buckets().array())
940 | .hasSize(1)
941 | .satisfiesExactly(cityBucket -> {
942 | assertThat(cityBucket.key()).isNotNull();
943 | assertThat(cityBucket.key().stringValue()).isEqualTo("paris");
944 | assertThat(cityBucket.docCount()).isEqualTo(1);
945 | });
946 | });
947 | });
948 | });
949 | });
950 | });
951 | }
952 |
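/**
 * Runs the same ES|QL query through the three available client flavours:
 * the raw API returning JSON, the JDBC-style ResultSet adapter and the object
 * mapping adapter, plus a variant using named query parameters.
 */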
953 | @Test
954 | void esql() throws IOException, SQLException {
955 | final var p1 = new Person();
956 | p1.setId("1");
957 | p1.setName("David");
958 | final var p2 = new Person();
959 | p2.setId("2");
960 | p2.setName("Max");
961 | client.index(ir -> ir.index(indexName).id(p1.getId()).document(p1));
962 | client.index(ir -> ir.index(indexName).id(p2.getId()).document(p2));
963 | client.indices().refresh(rr -> rr.index(indexName));
964 |
965 | String query = """
966 | FROM indexName
967 | | WHERE name == "David"
968 | | KEEP name
969 | | LIMIT 1
970 | """.replaceFirst("indexName", indexName);
971 |
972 | {
973 | // Using the Raw ES|QL API
974 | try (final BinaryResponse response = client.esql().query(q -> q.query(query)); InputStream is = response.content()) {
975 | // The response object is {"took":173,"is_partial":false,"documents_found":1,"values_loaded":1,"columns":[{"name":"name","type":"text"}],"values":[["David"]]}
976 | final ObjectMapper mapper = new ObjectMapper();
977 | final JsonNode jsonNode = mapper.readTree(is);
978 | assertThat(jsonNode).isNotNull().hasSize(6);
979 | assertThat(jsonNode.get("columns")).isNotNull().hasSize(1).first().satisfies(column -> assertThat(column.get("name").asText()).isEqualTo("name"));
980 | assertThat(jsonNode.get("values")).isNotNull().hasSize(1).first().satisfies(value -> assertThat(value).hasSize(1).first().satisfies(singleValue -> assertThat(singleValue.asText()).isEqualTo("David")));
981 | assertThat(jsonNode.get("took").asInt()).isGreaterThan(0);
982 | assertThat(jsonNode.get("is_partial").asBoolean()).isFalse();
983 | assertThat(jsonNode.get("documents_found").asLong()).isEqualTo(1);
984 | assertThat(jsonNode.get("values_loaded").asLong()).isEqualTo(1);
985 | }
986 | }
987 |
988 | {
989 | // Using the JDBC ResultSet ES|QL API
990 | try (final ResultSet resultSet = client.esql().query(ResultSetEsqlAdapter.INSTANCE, query)) {
991 | assertThat(resultSet).isNotNull().satisfies(resultSetResult -> {
992 | assertThat(resultSetResult.next()).isTrue();
993 | assertThat(resultSetResult.getString("name")).isEqualTo("David");
994 | });
995 | }
996 | }
997 |
998 | {
999 | // Using the Object ES|QL API
1000 | final Iterable<Person> persons = client.esql().query(ObjectsEsqlAdapter.of(Person.class), query);
1001 | for (final Person person : persons) {
1002 | assertThat(person.getId()).isNull();
1003 | assertThat(person.getName()).isNotNull();
1004 | }
1005 | }
1006 |
1007 | {
1008 | // Using named parameters
1009 | String parametrizedQuery = """
1010 | FROM indexName
1011 | | WHERE name == ?name
1012 | | KEEP name
1013 | | LIMIT 1
1014 | """.replaceFirst("indexName", indexName);
1015 |
1016 | // Using the Object ES|QL API
1017 | final Iterable<Person> persons = client.esql()
1018 | .query(ObjectsEsqlAdapter.of(Person.class), parametrizedQuery,
1019 | Map.of("name", "David")
1020 | );
1021 | for (final Person person : persons) {
1022 | assertThat(person.getId()).isNull();
1023 | assertThat(person.getName()).isNotNull();
1024 | }
1025 | }
1026 | }
1027 |
1028 | /**
1029 | * This one is failing for now, so we are expecting a failure.
1030 | * Updating to 8.15.1 should fix it. (865)
1031 | */
1032 | @Test
1033 | void callHotThreads() {
1034 | assertThatThrownBy(() -> client.nodes().hotThreads()).isInstanceOf(TransportException.class);
1035 | }
1036 |
1037 | @Test
1038 | void withAliases() throws IOException {
1039 | setAndRemoveIndex(indexName + "-v2");
1040 | assertThat(client.indices().create(cir -> cir.index(indexName)
1041 | .aliases(indexName + "_alias", a -> a)).acknowledged()).isTrue();
1042 | assertThat(client.indices().create(cir -> cir.index(indexName + "-v2")).acknowledged()).isTrue();
1043 |
1044 | // Check the alias existence by its name
1045 | assertThat(client.indices().existsAlias(ga -> ga.name(indexName + "_alias")).value()).isTrue();
1046 |
1047 | // Check we have one alias on indexName
1048 | assertThat(client.indices().getAlias(ga -> ga.index(indexName)).aliases().get(indexName).aliases()).hasSize(1);
1049 | // Check we have no alias on indexName-v2
1050 | assertThat(client.indices().getAlias(ga -> ga.index(indexName + "-v2")).aliases().get(indexName + "-v2").aliases()).hasSize(0);
1051 |
1052 | // Switch the alias indexName_alias from indexName to indexName-v2
1053 | client.indices().updateAliases(ua -> ua
1054 | .actions(a -> a.add(aa -> aa.alias(indexName + "_alias").index(indexName + "-v2")))
1055 | .actions(a -> a.remove(ra -> ra.alias(indexName + "_alias").index(indexName)))
1056 | );
1057 |
1058 | // Check we have no alias on indexName
1059 | assertThat(client.indices().getAlias(ga -> ga.index(indexName)).aliases().get(indexName).aliases()).hasSize(0);
1060 | // Check we have one alias on indexName-v2
1061 | assertThat(client.indices().getAlias(ga -> ga.index(indexName + "-v2")).aliases().get(indexName + "-v2").aliases()).hasSize(1);
1062 |
1063 | // Check the alias existence by its name
1064 | assertThat(client.indices().existsAlias(ga -> ga.name(indexName + "_alias")).value()).isTrue();
1065 |
1066 | // Delete the alias
1067 | client.indices().deleteAlias(da -> da.name(indexName + "_alias").index("*"));
1068 |
1069 | // Check the alias non-existence by its name
1070 | assertThat(client.indices().existsAlias(ga -> ga.name(indexName + "_alias")).value()).isFalse();
1071 | }
1072 |
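/**
 * Wraps a kNN query in a function_score query so the vector similarity score
 * can be combined with a random score seeded on the "country" field.
 * The expected score value is specific to this seed and dataset.
 */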
1073 | @Test
1074 | void kNNWithFunctionScore() throws IOException {
1075 | client.indices().create(cir -> cir.index(indexName).mappings(m -> m
1076 | .properties("vector", p -> p.denseVector(dv -> dv))
1077 | .properties("country", p -> p.keyword(k -> k))
1078 | ));
1079 | client.index(ir -> ir.index(indexName).withJson(new StringReader("{\"country\":\"france\", \"vector\":[1.0, 0.4, 0.8]}")));
1080 | client.indices().refresh(rr -> rr.index(indexName));
1081 | final SearchResponse<Void> response = client.search(sr -> sr
1082 | .index(indexName)
1083 | .query(q -> q.functionScore(
1084 | fsq -> fsq
1085 | .query(qknn -> qknn.knn(
1086 | k -> k.field("vector").queryVector(0.9f, 0.4f, 0.8f)
1087 | ))
1088 | .functions(fs -> fs.randomScore(rs -> rs.field("country").seed("hello")))
1089 | ))
1090 | , Void.class);
1091 |
1092 | assumeNotNull(response.hits().total());
1093 | assertThat(response.hits().total().value()).isEqualTo(1);
1094 | assertThat(response.hits().hits().get(0).score()).isEqualTo(0.4063275);
1095 | }
1096 |
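/**
 * Combines two filter clauses in a bool query: a terms filter on "number"
 * and a date range filter, so only document 3 satisfies both.
 */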
1097 | @Test
1098 | void boolQuery() throws IOException {
1099 | client.index(ir -> ir.index(indexName).id("1").withJson(new StringReader("""
1100 | {
1101 | "number":1,
1102 | "effective_date":"2024-10-01T00:00:00.000Z"
1103 | }""")));
1104 | client.index(ir -> ir.index(indexName).id("2").withJson(new StringReader("""
1105 | {
1106 | "number":2,
1107 | "effective_date":"2024-10-02T00:00:00.000Z"
1108 | }""")));
1109 | client.index(ir -> ir.index(indexName).id("3").withJson(new StringReader("""
1110 | {
1111 | "number":3,
1112 | "effective_date":"2024-10-03T00:00:00.000Z"
1113 | }""")));
1114 | client.index(ir -> ir.index(indexName).id("4").withJson(new StringReader("""
1115 | {
1116 | "number":4,
1117 | "effective_date":"2024-10-04T00:00:00.000Z"
1118 | }""")));
1119 | client.indices().refresh(rr -> rr.index(indexName));
1120 | final SearchResponse<Void> response = client.search(sr -> sr
1121 | .index(indexName)
1122 | .query(q -> q.bool(bq -> bq
1123 | .filter(fq -> fq.terms(tq -> tq.field("number")
1124 | .terms(t -> t.value(List.of(
1125 | FieldValue.of("2"),
1126 | FieldValue.of("3"))))))
1127 | .filter(fq -> fq
1128 | .range(rq -> rq.date(drq -> drq
1129 | .field("effective_date")
1130 | .gte("2024-10-03T00:00:00.000Z"))))
1131 | ))
1132 | , Void.class);
1133 | assertThat(response.hits().total()).isNotNull();
1134 | assertThat(response.hits().total().value()).isEqualTo(1);
1135 | assertThat(response.hits().hits()).hasSize(1);
1136 | assertThat(response.hits().hits().get(0).id()).isEqualTo("3");
1137 | }
1138 |
1139 | /**
1140 | * This method adds the index name we want to use to the list
1141 | * and deletes the index if it exists.
1142 | * @param name the index name
1143 | */
1144 | private void setAndRemoveIndex(final String name) {
1145 | indices.add(name);
1146 | removeIndex(name);
1147 | }
1148 |
1149 | /**
1150 | * This method deletes the index if it exists.
1151 | * @param name the index name
1152 | */
1153 | private void removeIndex(final String name) {
1154 | try {
1155 | client.indices().delete(dir -> dir.index(name));
1156 | logger.debug("Index [{}] has been removed", name);
1157 | } catch (final IOException | ElasticsearchException ignored) { }
1158 | }
1159 | }
1160 |
--------------------------------------------------------------------------------