├── .github
└── workflows
│ ├── codeql-analysis.yml
│ └── maven.yml
├── .gitignore
├── LICENSE
├── README.md
├── pom.xml
└── src
├── main
├── java
│ └── org
│ │ ├── codelibs
│ │ └── elasticsearch
│ │ │ └── runner
│ │ │ ├── ClusterRunnerException.java
│ │ │ ├── ElasticsearchClusterRunner.java
│ │ │ └── net
│ │ │ └── EcrCurl.java
│ │ └── elasticsearch
│ │ ├── node
│ │ └── ClusterRunnerNode.java
│ │ └── plugins
│ │ └── ClusterRunnerPluginsService.java
└── resources
│ └── config
│ ├── elasticsearch.yml
│ └── log4j2.properties
└── test
└── java
└── org
└── codelibs
└── elasticsearch
└── runner
└── ElasticsearchClusterRunnerTest.java
/.github/workflows/codeql-analysis.yml:
--------------------------------------------------------------------------------
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
name: "CodeQL"

on:
  push:
    branches:
      - master
      - "*.x"
  pull_request:
    branches:
      - master
      - "*.x"
  schedule:
    - cron: '28 18 * * 5'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        # Override automatic language detection by changing the below list
        # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python']
        language: [ 'java' ]
        # Learn more...
        # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection

    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
        with:
          # We must fetch at least the immediate parents so that if this is
          # a pull request then we can checkout the head.
          fetch-depth: 2

      # If this run was triggered by a pull request event, then checkout
      # the head of the pull request instead of the merge commit.
      - run: git checkout HEAD^2
        if: ${{ github.event_name == 'pull_request' }}

      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v1
        with:
          languages: ${{ matrix.language }}
          # If you wish to specify custom queries, you can do so here or in a config file.
          # By default, queries listed here will override any specified in a config file.
          # Prefix the list here with "+" to use these queries and those in the config file.
          # queries: ./path/to/local/query, your-org/your-repo/queries@main

      # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
      # If this step fails, then you should remove it and run the build manually (see below)
      #- name: Autobuild
      #  uses: github/codeql-action/autobuild@v1

      # NOTE: a second `actions/checkout@v2` step used to sit here. It was removed
      # because re-checking out the repository after the `git checkout HEAD^2` step
      # above silently reset the working tree back to the merge commit, so pull
      # requests were built and analyzed against the wrong revision.
      - name: Set up JDK 17
        uses: actions/setup-java@v2
        with:
          # Quoted so YAML does not parse the version as a number.
          java-version: '17'
          distribution: 'temurin'
      - name: Cache Maven packages
        uses: actions/cache@v2
        with:
          path: ~/.m2
          key: ${{ runner.os }}-m2-${{ hashFiles('**/pom.xml') }}
          restore-keys: ${{ runner.os }}-m2
      - name: Build with Maven
        run: mvn -B package --file pom.xml

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v1
--------------------------------------------------------------------------------
/.github/workflows/maven.yml:
--------------------------------------------------------------------------------
---
# Continuous integration: compile and package the project with Maven on every
# push or pull request targeting master or a "*.x" release branch.
# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven

name: Java CI with Maven

on:
  push:
    branches:
      - master
      - "*.x"
  pull_request:
    branches:
      - master
      - "*.x"

jobs:
  build:

    # Run the same build on every OS in the matrix below.
    runs-on: ${{ matrix.os }}

    strategy:
      matrix:
        # Exercise the build on both Linux and Windows runners.
        os: [ubuntu-latest, windows-latest]

    steps:
      - uses: actions/checkout@v2
      - name: Set up JDK 17
        uses: actions/setup-java@v2
        with:
          # Quoted so YAML does not parse the version as a number.
          java-version: '17'
          distribution: 'temurin'
      - name: Build with Maven
        run: mvn -B package --file pom.xml
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | target/
2 | es_home/
3 | .settings/
4 | .classpath
5 | .project
6 | .idea
7 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Server Side Public License
2 | VERSION 1, OCTOBER 16, 2018
3 |
4 | Copyright © 2018 MongoDB, Inc.
5 |
6 | Everyone is permitted to copy and distribute verbatim copies of this
7 | license document, but changing it is not allowed.
8 |
9 | TERMS AND CONDITIONS
10 |
11 | 0. Definitions.
12 |
13 | “This License” refers to Server Side Public License.
14 |
15 | “Copyright” also means copyright-like laws that apply to other kinds of
16 | works, such as semiconductor masks.
17 |
18 | “The Program” refers to any copyrightable work licensed under this
19 | License. Each licensee is addressed as “you”. “Licensees” and
20 | “recipients” may be individuals or organizations.
21 |
22 | To “modify” a work means to copy from or adapt all or part of the work in
23 | a fashion requiring copyright permission, other than the making of an
24 | exact copy. The resulting work is called a “modified version” of the
25 | earlier work or a work “based on” the earlier work.
26 |
27 | A “covered work” means either the unmodified Program or a work based on
28 | the Program.
29 |
30 | To “propagate” a work means to do anything with it that, without
31 | permission, would make you directly or secondarily liable for
32 | infringement under applicable copyright law, except executing it on a
33 | computer or modifying a private copy. Propagation includes copying,
34 | distribution (with or without modification), making available to the
35 | public, and in some countries other activities as well.
36 |
37 | To “convey” a work means any kind of propagation that enables other
38 | parties to make or receive copies. Mere interaction with a user through a
39 | computer network, with no transfer of a copy, is not conveying.
40 |
41 | An interactive user interface displays “Appropriate Legal Notices” to the
42 | extent that it includes a convenient and prominently visible feature that
43 | (1) displays an appropriate copyright notice, and (2) tells the user that
44 | there is no warranty for the work (except to the extent that warranties
45 | are provided), that licensees may convey the work under this License, and
46 | how to view a copy of this License. If the interface presents a list of
47 | user commands or options, such as a menu, a prominent item in the list
48 | meets this criterion.
49 |
50 | 1. Source Code.
51 |
52 | The “source code” for a work means the preferred form of the work for
53 | making modifications to it. “Object code” means any non-source form of a
54 | work.
55 |
56 | A “Standard Interface” means an interface that either is an official
57 | standard defined by a recognized standards body, or, in the case of
58 | interfaces specified for a particular programming language, one that is
59 | widely used among developers working in that language. The “System
60 | Libraries” of an executable work include anything, other than the work as
61 | a whole, that (a) is included in the normal form of packaging a Major
62 | Component, but which is not part of that Major Component, and (b) serves
63 | only to enable use of the work with that Major Component, or to implement
64 | a Standard Interface for which an implementation is available to the
65 | public in source code form. A “Major Component”, in this context, means a
66 | major essential component (kernel, window system, and so on) of the
67 | specific operating system (if any) on which the executable work runs, or
68 | a compiler used to produce the work, or an object code interpreter used
69 | to run it.
70 |
71 | The “Corresponding Source” for a work in object code form means all the
72 | source code needed to generate, install, and (for an executable work) run
73 | the object code and to modify the work, including scripts to control
74 | those activities. However, it does not include the work's System
75 | Libraries, or general-purpose tools or generally available free programs
76 | which are used unmodified in performing those activities but which are
77 | not part of the work. For example, Corresponding Source includes
78 | interface definition files associated with source files for the work, and
79 | the source code for shared libraries and dynamically linked subprograms
80 | that the work is specifically designed to require, such as by intimate
81 | data communication or control flow between those subprograms and other
82 | parts of the work.
83 |
84 | The Corresponding Source need not include anything that users can
85 | regenerate automatically from other parts of the Corresponding Source.
86 |
87 | The Corresponding Source for a work in source code form is that same work.
88 |
89 | 2. Basic Permissions.
90 |
91 | All rights granted under this License are granted for the term of
92 | copyright on the Program, and are irrevocable provided the stated
93 | conditions are met. This License explicitly affirms your unlimited
94 | permission to run the unmodified Program, subject to section 13. The
95 | output from running a covered work is covered by this License only if the
96 | output, given its content, constitutes a covered work. This License
97 | acknowledges your rights of fair use or other equivalent, as provided by
98 | copyright law. Subject to section 13, you may make, run and propagate
99 | covered works that you do not convey, without conditions so long as your
100 | license otherwise remains in force. You may convey covered works to
101 | others for the sole purpose of having them make modifications exclusively
102 | for you, or provide you with facilities for running those works, provided
103 | that you comply with the terms of this License in conveying all
104 | material for which you do not control copyright. Those thus making or
105 | running the covered works for you must do so exclusively on your
106 | behalf, under your direction and control, on terms that prohibit them
107 | from making any copies of your copyrighted material outside their
108 | relationship with you.
109 |
110 | Conveying under any other circumstances is permitted solely under the
111 | conditions stated below. Sublicensing is not allowed; section 10 makes it
112 | unnecessary.
113 |
114 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
115 |
116 | No covered work shall be deemed part of an effective technological
117 | measure under any applicable law fulfilling obligations under article 11
118 | of the WIPO copyright treaty adopted on 20 December 1996, or similar laws
119 | prohibiting or restricting circumvention of such measures.
120 |
121 | When you convey a covered work, you waive any legal power to forbid
122 | circumvention of technological measures to the extent such circumvention is
123 | effected by exercising rights under this License with respect to the
124 | covered work, and you disclaim any intention to limit operation or
125 | modification of the work as a means of enforcing, against the work's users,
126 | your or third parties' legal rights to forbid circumvention of
127 | technological measures.
128 |
129 | 4. Conveying Verbatim Copies.
130 |
131 | You may convey verbatim copies of the Program's source code as you
132 | receive it, in any medium, provided that you conspicuously and
133 | appropriately publish on each copy an appropriate copyright notice; keep
134 | intact all notices stating that this License and any non-permissive terms
135 | added in accord with section 7 apply to the code; keep intact all notices
136 | of the absence of any warranty; and give all recipients a copy of this
137 | License along with the Program. You may charge any price or no price for
138 | each copy that you convey, and you may offer support or warranty
139 | protection for a fee.
140 |
141 | 5. Conveying Modified Source Versions.
142 |
143 | You may convey a work based on the Program, or the modifications to
144 | produce it from the Program, in the form of source code under the terms
145 | of section 4, provided that you also meet all of these conditions:
146 |
147 | a) The work must carry prominent notices stating that you modified it,
148 | and giving a relevant date.
149 |
150 | b) The work must carry prominent notices stating that it is released
151 | under this License and any conditions added under section 7. This
152 | requirement modifies the requirement in section 4 to “keep intact all
153 | notices”.
154 |
155 | c) You must license the entire work, as a whole, under this License to
156 | anyone who comes into possession of a copy. This License will therefore
157 | apply, along with any applicable section 7 additional terms, to the
158 | whole of the work, and all its parts, regardless of how they are
159 | packaged. This License gives no permission to license the work in any
160 | other way, but it does not invalidate such permission if you have
161 | separately received it.
162 |
163 | d) If the work has interactive user interfaces, each must display
164 | Appropriate Legal Notices; however, if the Program has interactive
165 | interfaces that do not display Appropriate Legal Notices, your work
166 | need not make them do so.
167 |
168 | A compilation of a covered work with other separate and independent
169 | works, which are not by their nature extensions of the covered work, and
170 | which are not combined with it such as to form a larger program, in or on
171 | a volume of a storage or distribution medium, is called an “aggregate” if
172 | the compilation and its resulting copyright are not used to limit the
173 | access or legal rights of the compilation's users beyond what the
174 | individual works permit. Inclusion of a covered work in an aggregate does
175 | not cause this License to apply to the other parts of the aggregate.
176 |
177 | 6. Conveying Non-Source Forms.
178 |
179 | You may convey a covered work in object code form under the terms of
180 | sections 4 and 5, provided that you also convey the machine-readable
181 | Corresponding Source under the terms of this License, in one of these
182 | ways:
183 |
184 | a) Convey the object code in, or embodied in, a physical product
185 | (including a physical distribution medium), accompanied by the
186 | Corresponding Source fixed on a durable physical medium customarily
187 | used for software interchange.
188 |
189 | b) Convey the object code in, or embodied in, a physical product
190 | (including a physical distribution medium), accompanied by a written
191 | offer, valid for at least three years and valid for as long as you
192 | offer spare parts or customer support for that product model, to give
193 | anyone who possesses the object code either (1) a copy of the
194 | Corresponding Source for all the software in the product that is
195 | covered by this License, on a durable physical medium customarily used
196 | for software interchange, for a price no more than your reasonable cost
197 | of physically performing this conveying of source, or (2) access to
198 | copy the Corresponding Source from a network server at no charge.
199 |
200 | c) Convey individual copies of the object code with a copy of the
201 | written offer to provide the Corresponding Source. This alternative is
202 | allowed only occasionally and noncommercially, and only if you received
203 | the object code with such an offer, in accord with subsection 6b.
204 |
205 | d) Convey the object code by offering access from a designated place
206 | (gratis or for a charge), and offer equivalent access to the
207 | Corresponding Source in the same way through the same place at no
208 | further charge. You need not require recipients to copy the
209 | Corresponding Source along with the object code. If the place to copy
210 | the object code is a network server, the Corresponding Source may be on
211 | a different server (operated by you or a third party) that supports
212 | equivalent copying facilities, provided you maintain clear directions
213 | next to the object code saying where to find the Corresponding Source.
214 | Regardless of what server hosts the Corresponding Source, you remain
215 | obligated to ensure that it is available for as long as needed to
216 | satisfy these requirements.
217 |
218 | e) Convey the object code using peer-to-peer transmission, provided you
219 | inform other peers where the object code and Corresponding Source of
220 | the work are being offered to the general public at no charge under
221 | subsection 6d.
222 |
223 | A separable portion of the object code, whose source code is excluded
224 | from the Corresponding Source as a System Library, need not be included
225 | in conveying the object code work.
226 |
227 | A “User Product” is either (1) a “consumer product”, which means any
228 | tangible personal property which is normally used for personal, family,
229 | or household purposes, or (2) anything designed or sold for incorporation
230 | into a dwelling. In determining whether a product is a consumer product,
231 | doubtful cases shall be resolved in favor of coverage. For a particular
232 | product received by a particular user, “normally used” refers to a
233 | typical or common use of that class of product, regardless of the status
234 | of the particular user or of the way in which the particular user
235 | actually uses, or expects or is expected to use, the product. A product
236 | is a consumer product regardless of whether the product has substantial
237 | commercial, industrial or non-consumer uses, unless such uses represent
238 | the only significant mode of use of the product.
239 |
240 | “Installation Information” for a User Product means any methods,
241 | procedures, authorization keys, or other information required to install
242 | and execute modified versions of a covered work in that User Product from
243 | a modified version of its Corresponding Source. The information must
244 | suffice to ensure that the continued functioning of the modified object
245 | code is in no case prevented or interfered with solely because
246 | modification has been made.
247 |
248 | If you convey an object code work under this section in, or with, or
249 | specifically for use in, a User Product, and the conveying occurs as part
250 | of a transaction in which the right of possession and use of the User
251 | Product is transferred to the recipient in perpetuity or for a fixed term
252 | (regardless of how the transaction is characterized), the Corresponding
253 | Source conveyed under this section must be accompanied by the
254 | Installation Information. But this requirement does not apply if neither
255 | you nor any third party retains the ability to install modified object
256 | code on the User Product (for example, the work has been installed in
257 | ROM).
258 |
259 | The requirement to provide Installation Information does not include a
260 | requirement to continue to provide support service, warranty, or updates
261 | for a work that has been modified or installed by the recipient, or for
262 | the User Product in which it has been modified or installed. Access
263 | to a network may be denied when the modification itself materially
264 | and adversely affects the operation of the network or violates the
265 | rules and protocols for communication across the network.
266 |
267 | Corresponding Source conveyed, and Installation Information provided, in
268 | accord with this section must be in a format that is publicly documented
269 | (and with an implementation available to the public in source code form),
270 | and must require no special password or key for unpacking, reading or
271 | copying.
272 |
273 | 7. Additional Terms.
274 |
275 | “Additional permissions” are terms that supplement the terms of this
276 | License by making exceptions from one or more of its conditions.
277 | Additional permissions that are applicable to the entire Program shall be
278 | treated as though they were included in this License, to the extent that
279 | they are valid under applicable law. If additional permissions apply only
280 | to part of the Program, that part may be used separately under those
281 | permissions, but the entire Program remains governed by this License
282 | without regard to the additional permissions. When you convey a copy of
283 | a covered work, you may at your option remove any additional permissions
284 | from that copy, or from any part of it. (Additional permissions may be
285 | written to require their own removal in certain cases when you modify the
286 | work.) You may place additional permissions on material, added by you to
287 | a covered work, for which you have or can give appropriate copyright
288 | permission.
289 |
290 | Notwithstanding any other provision of this License, for material you add
291 | to a covered work, you may (if authorized by the copyright holders of
292 | that material) supplement the terms of this License with terms:
293 |
294 | a) Disclaiming warranty or limiting liability differently from the
295 | terms of sections 15 and 16 of this License; or
296 |
297 | b) Requiring preservation of specified reasonable legal notices or
298 | author attributions in that material or in the Appropriate Legal
299 | Notices displayed by works containing it; or
300 |
301 | c) Prohibiting misrepresentation of the origin of that material, or
302 | requiring that modified versions of such material be marked in
303 | reasonable ways as different from the original version; or
304 |
305 | d) Limiting the use for publicity purposes of names of licensors or
306 | authors of the material; or
307 |
308 | e) Declining to grant rights under trademark law for use of some trade
309 | names, trademarks, or service marks; or
310 |
311 | f) Requiring indemnification of licensors and authors of that material
312 | by anyone who conveys the material (or modified versions of it) with
313 | contractual assumptions of liability to the recipient, for any
314 | liability that these contractual assumptions directly impose on those
315 | licensors and authors.
316 |
317 | All other non-permissive additional terms are considered “further
318 | restrictions” within the meaning of section 10. If the Program as you
319 | received it, or any part of it, contains a notice stating that it is
320 | governed by this License along with a term that is a further restriction,
321 | you may remove that term. If a license document contains a further
322 | restriction but permits relicensing or conveying under this License, you
323 | may add to a covered work material governed by the terms of that license
324 | document, provided that the further restriction does not survive such
325 | relicensing or conveying.
326 |
327 | If you add terms to a covered work in accord with this section, you must
328 | place, in the relevant source files, a statement of the additional terms
329 | that apply to those files, or a notice indicating where to find the
330 | applicable terms. Additional terms, permissive or non-permissive, may be
331 | stated in the form of a separately written license, or stated as
332 | exceptions; the above requirements apply either way.
333 |
334 | 8. Termination.
335 |
336 | You may not propagate or modify a covered work except as expressly
337 | provided under this License. Any attempt otherwise to propagate or modify
338 | it is void, and will automatically terminate your rights under this
339 | License (including any patent licenses granted under the third paragraph
340 | of section 11).
341 |
342 | However, if you cease all violation of this License, then your license
343 | from a particular copyright holder is reinstated (a) provisionally,
344 | unless and until the copyright holder explicitly and finally terminates
345 | your license, and (b) permanently, if the copyright holder fails to
346 | notify you of the violation by some reasonable means prior to 60 days
347 | after the cessation.
348 |
349 | Moreover, your license from a particular copyright holder is reinstated
350 | permanently if the copyright holder notifies you of the violation by some
351 | reasonable means, this is the first time you have received notice of
352 | violation of this License (for any work) from that copyright holder, and
353 | you cure the violation prior to 30 days after your receipt of the notice.
354 |
355 | Termination of your rights under this section does not terminate the
356 | licenses of parties who have received copies or rights from you under
357 | this License. If your rights have been terminated and not permanently
358 | reinstated, you do not qualify to receive new licenses for the same
359 | material under section 10.
360 |
361 | 9. Acceptance Not Required for Having Copies.
362 |
363 | You are not required to accept this License in order to receive or run a
364 | copy of the Program. Ancillary propagation of a covered work occurring
365 | solely as a consequence of using peer-to-peer transmission to receive a
366 | copy likewise does not require acceptance. However, nothing other than
367 | this License grants you permission to propagate or modify any covered
368 | work. These actions infringe copyright if you do not accept this License.
369 | Therefore, by modifying or propagating a covered work, you indicate your
370 | acceptance of this License to do so.
371 |
372 | 10. Automatic Licensing of Downstream Recipients.
373 |
374 | Each time you convey a covered work, the recipient automatically receives
375 | a license from the original licensors, to run, modify and propagate that
376 | work, subject to this License. You are not responsible for enforcing
377 | compliance by third parties with this License.
378 |
379 | An “entity transaction” is a transaction transferring control of an
380 | organization, or substantially all assets of one, or subdividing an
381 | organization, or merging organizations. If propagation of a covered work
382 | results from an entity transaction, each party to that transaction who
383 | receives a copy of the work also receives whatever licenses to the work
384 | the party's predecessor in interest had or could give under the previous
385 | paragraph, plus a right to possession of the Corresponding Source of the
386 | work from the predecessor in interest, if the predecessor has it or can
387 | get it with reasonable efforts.
388 |
389 | You may not impose any further restrictions on the exercise of the rights
390 | granted or affirmed under this License. For example, you may not impose a
391 | license fee, royalty, or other charge for exercise of rights granted
392 | under this License, and you may not initiate litigation (including a
393 | cross-claim or counterclaim in a lawsuit) alleging that any patent claim
394 | is infringed by making, using, selling, offering for sale, or importing
395 | the Program or any portion of it.
396 |
397 | 11. Patents.
398 |
399 | A “contributor” is a copyright holder who authorizes use under this
400 | License of the Program or a work on which the Program is based. The work
401 | thus licensed is called the contributor's “contributor version”.
402 |
403 | A contributor's “essential patent claims” are all patent claims owned or
404 | controlled by the contributor, whether already acquired or hereafter
405 | acquired, that would be infringed by some manner, permitted by this
406 | License, of making, using, or selling its contributor version, but do not
407 | include claims that would be infringed only as a consequence of further
408 | modification of the contributor version. For purposes of this definition,
409 | “control” includes the right to grant patent sublicenses in a manner
410 | consistent with the requirements of this License.
411 |
412 | Each contributor grants you a non-exclusive, worldwide, royalty-free
413 | patent license under the contributor's essential patent claims, to make,
414 | use, sell, offer for sale, import and otherwise run, modify and propagate
415 | the contents of its contributor version.
416 |
417 | In the following three paragraphs, a “patent license” is any express
418 | agreement or commitment, however denominated, not to enforce a patent
419 | (such as an express permission to practice a patent or covenant not to
420 | sue for patent infringement). To “grant” such a patent license to a party
421 | means to make such an agreement or commitment not to enforce a patent
422 | against the party.
423 |
424 | If you convey a covered work, knowingly relying on a patent license, and
425 | the Corresponding Source of the work is not available for anyone to copy,
426 | free of charge and under the terms of this License, through a publicly
427 | available network server or other readily accessible means, then you must
428 | either (1) cause the Corresponding Source to be so available, or (2)
429 | arrange to deprive yourself of the benefit of the patent license for this
430 | particular work, or (3) arrange, in a manner consistent with the
431 | requirements of this License, to extend the patent license to downstream
432 | recipients. “Knowingly relying” means you have actual knowledge that, but
433 | for the patent license, your conveying the covered work in a country, or
434 | your recipient's use of the covered work in a country, would infringe
435 | one or more identifiable patents in that country that you have reason
436 | to believe are valid.
437 |
438 | If, pursuant to or in connection with a single transaction or
439 | arrangement, you convey, or propagate by procuring conveyance of, a
440 | covered work, and grant a patent license to some of the parties receiving
441 | the covered work authorizing them to use, propagate, modify or convey a
442 | specific copy of the covered work, then the patent license you grant is
443 | automatically extended to all recipients of the covered work and works
444 | based on it.
445 |
446 | A patent license is “discriminatory” if it does not include within the
447 | scope of its coverage, prohibits the exercise of, or is conditioned on
448 | the non-exercise of one or more of the rights that are specifically
449 | granted under this License. You may not convey a covered work if you are
450 | a party to an arrangement with a third party that is in the business of
451 | distributing software, under which you make payment to the third party
452 | based on the extent of your activity of conveying the work, and under
453 | which the third party grants, to any of the parties who would receive the
454 | covered work from you, a discriminatory patent license (a) in connection
455 | with copies of the covered work conveyed by you (or copies made from
456 | those copies), or (b) primarily for and in connection with specific
457 | products or compilations that contain the covered work, unless you
458 | entered into that arrangement, or that patent license was granted, prior
459 | to 28 March 2007.
460 |
461 | Nothing in this License shall be construed as excluding or limiting any
462 | implied license or other defenses to infringement that may otherwise be
463 | available to you under applicable patent law.
464 |
465 | 12. No Surrender of Others' Freedom.
466 |
467 | If conditions are imposed on you (whether by court order, agreement or
468 | otherwise) that contradict the conditions of this License, they do not
469 | excuse you from the conditions of this License. If you cannot use,
470 | propagate or convey a covered work so as to satisfy simultaneously your
471 | obligations under this License and any other pertinent obligations, then
472 | as a consequence you may not use, propagate or convey it at all. For
473 | example, if you agree to terms that obligate you to collect a royalty for
474 | further conveying from those to whom you convey the Program, the only way
475 | you could satisfy both those terms and this License would be to refrain
476 | entirely from conveying the Program.
477 |
478 | 13. Offering the Program as a Service.
479 |
480 | If you make the functionality of the Program or a modified version
481 | available to third parties as a service, you must make the Service Source
482 | Code available via network download to everyone at no charge, under the
483 | terms of this License. Making the functionality of the Program or
484 | modified version available to third parties as a service includes,
485 | without limitation, enabling third parties to interact with the
486 | functionality of the Program or modified version remotely through a
487 | computer network, offering a service the value of which entirely or
488 | primarily derives from the value of the Program or modified version, or
489 | offering a service that accomplishes for users the primary purpose of the
490 | Program or modified version.
491 |
492 | “Service Source Code” means the Corresponding Source for the Program or
493 | the modified version, and the Corresponding Source for all programs that
494 | you use to make the Program or modified version available as a service,
495 | including, without limitation, management software, user interfaces,
496 | application program interfaces, automation software, monitoring software,
497 | backup software, storage software and hosting software, all such that a
498 | user could run an instance of the service using the Service Source Code
499 | you make available.
500 |
501 | 14. Revised Versions of this License.
502 |
503 | MongoDB, Inc. may publish revised and/or new versions of the Server Side
504 | Public License from time to time. Such new versions will be similar in
505 | spirit to the present version, but may differ in detail to address new
506 | problems or concerns.
507 |
508 | Each version is given a distinguishing version number. If the Program
509 | specifies that a certain numbered version of the Server Side Public
510 | License “or any later version” applies to it, you have the option of
511 | following the terms and conditions either of that numbered version or of
512 | any later version published by MongoDB, Inc. If the Program does not
513 | specify a version number of the Server Side Public License, you may
514 | choose any version ever published by MongoDB, Inc.
515 |
516 | If the Program specifies that a proxy can decide which future versions of
517 | the Server Side Public License can be used, that proxy's public statement
518 | of acceptance of a version permanently authorizes you to choose that
519 | version for the Program.
520 |
521 | Later license versions may give you additional or different permissions.
522 | However, no additional obligations are imposed on any author or copyright
523 | holder as a result of your choosing to follow a later version.
524 |
525 | 15. Disclaimer of Warranty.
526 |
527 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
528 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
529 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY
530 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
531 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
532 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
533 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
534 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
535 |
536 | 16. Limitation of Liability.
537 |
538 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
539 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
540 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING
541 | ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF
542 | THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
543 | LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU
544 | OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
545 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
546 | POSSIBILITY OF SUCH DAMAGES.
547 |
548 | 17. Interpretation of Sections 15 and 16.
549 |
550 | If the disclaimer of warranty and limitation of liability provided above
551 | cannot be given local legal effect according to their terms, reviewing
552 | courts shall apply local law that most closely approximates an absolute
553 | waiver of all civil liability in connection with the Program, unless a
554 | warranty or assumption of liability accompanies a copy of the Program in
555 | return for a fee.
556 |
557 | END OF TERMS AND CONDITIONS
558 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | Elasticsearch Cluster Runner
2 | [](https://github.com/codelibs/elasticsearch-cluster-runner/actions/workflows/maven.yml)
3 | ============================
4 |
5 | This project runs Elasticsearch cluster on one JVM instance for your development/testing easily.
6 | You can use Elasticsearch Cluster Runner as Embedded Elasticsearch in your application.
7 |
8 | ## Version
9 |
10 | - [Versions in Maven Repository (7.11-)](https://maven.codelibs.org/org/codelibs/elasticsearch-cluster-runner/)
11 | - [Versions in Maven Repository (-7.10)](https://repo1.maven.org/maven2/org/codelibs/elasticsearch-cluster-runner/)
12 |
13 | ## Run on Your Application
14 |
15 | Put elasticsearch-cluster-runner if using Maven:
16 |
17 |
18 | org.codelibs
19 | elasticsearch-cluster-runner
20 | x.x.x.0
21 |
22 |
23 | and add Maven repository to pom.xml:
24 |
25 |
26 |
27 | central
28 | https://repo1.maven.org/maven2
29 |
30 | true
31 |
32 |
33 | true
34 |
35 |
36 |
37 | codelibs.org
38 | CodeLibs Repository
39 | https://maven.codelibs.org/
40 |
41 |
42 |
43 | ### Start Cluster Runner
44 |
45 | import static org.codelibs.elasticsearch.runner.ElasticsearchClusterRunner.newConfigs;
46 | ...
47 | // create runner instance
48 | ElasticsearchClusterRunner runner = new ElasticsearchClusterRunner();
49 | // create ES nodes
50 | runner.onBuild(new ElasticsearchClusterRunner.Builder() {
51 | @Override
52 | public void build(final int number, final Builder settingsBuilder) {
53 | // put elasticsearch settings
54 | // settingsBuilder.put("index.number_of_replicas", 0);
55 | }
56 | }).build(newConfigs());
57 |
 58 | build(Configs) method configures/starts Cluster Runner.
59 |
60 | ### Stop Cluster Runner
61 |
62 | // close runner
63 | runner.close();
64 |
65 | ### Clean up
66 |
67 | // delete all files(config and index)
68 | runner.clean();
69 |
70 | ## Run on JUnit
71 |
72 | Put elasticsearch-cluster-runner as test scope:
73 |
74 |
75 | org.codelibs
76 | elasticsearch-cluster-runner
77 | x.x.x.0
78 | test
79 |
80 |
81 | and see [ElasticsearchClusterRunnerTest](https://github.com/codelibs/elasticsearch-cluster-runner/blob/master/src/test/java/org/codelibs/elasticsearch/runner/ElasticsearchClusterRunnerTest.java "ElasticsearchClusterRunnerTest").
82 |
83 | ## Run as Standalone
84 |
85 | ### Install Maven
86 |
87 | Download and install Maven 3 from https://maven.apache.org/.
88 |
89 | ### Clone This Project
90 |
91 | git clone https://github.com/codelibs/elasticsearch-cluster-runner.git
92 |
93 | ### Build This Project
94 |
95 | mvn compile
96 |
97 | ## Run/Stop Elasticsearch Cluster
98 |
99 | ### Run Cluster
100 |
101 | Run:
102 |
103 | mvn exec:java
104 |
105 | The default cluster has 3 nodes and the root directory for Elasticsearch is es\_home.
106 | Nodes use ports 9201-9203 for HTTP and ports 9301-9303 for Transport.
107 | If you want to change the number of nodes, Run:
108 |
109 | mvn exec:java -Dexec.args="-basePath es_home -numOfNode 4"
110 |
111 | ### Stop Cluster
112 |
113 | Type Ctrl-c or kill the process.
114 |
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
1 |
2 | 4.0.0
3 | org.codelibs
4 | elasticsearch-cluster-runner
5 | 8.14.1.1-SNAPSHOT
6 | jar
7 | Elasticsearch Cluster Runner
8 | 2011
9 |
10 |
11 | Server Side Public License (SSPL) version 1
12 | https://www.mongodb.com/licensing/server-side-public-license
13 | repo
14 |
15 |
16 |
17 | CodeLibs Project
18 | https://www.codelibs.org/
19 |
20 |
21 | scm:git:git@github.com:codelibs/elasticsearch-cluster-runner.git
22 | scm:git:git@github.com:codelibs/elasticsearch-cluster-runner.git
23 | git@github.com:codelibs/elasticsearch-cluster-runner.git
24 | HEAD
25 |
26 |
27 | UTF-8
28 | 8.14.1
29 | 9.10.0
30 | 2.19.0
31 | 5.10.0
32 | 1.15.0
33 | 0.7
34 |
35 |
36 |
37 |
38 | maven-compiler-plugin
39 | 3.12.1
40 |
41 | UTF-8
42 | 17
43 |
44 |
45 |
46 | maven-surefire-plugin
47 | 3.2.5
48 |
49 |
50 | **/*Test.java
51 |
52 | false
53 |
54 |
55 |
56 | org.jacoco
57 | jacoco-maven-plugin
58 | 0.8.11
59 |
60 |
61 |
62 | prepare-agent
63 |
64 |
65 |
66 | report
67 | prepare-package
68 |
69 | report
70 |
71 |
72 |
73 |
74 |
75 | maven-source-plugin
76 | 3.2.1
77 |
78 |
79 | attach-sources
80 |
81 | jar
82 |
83 |
84 |
85 |
86 |
87 | maven-javadoc-plugin
88 | 3.6.3
89 |
90 | UTF-8
91 | UTF-8
92 | UTF-8
93 |
94 |
95 |
96 | com.mycila
97 | license-maven-plugin
98 | 4.3
99 |
100 | https://www.codelibs.org/assets/license/header-sspl.txt
101 |
102 | 2022
103 |
104 |
105 | src/**/*.java
106 |
107 | UTF-8
108 |
109 | https://www.codelibs.org/assets/license/header-definition-2.xml
110 |
111 |
112 |
113 |
114 | org.codehaus.mojo
115 | exec-maven-plugin
116 | 3.1.0
117 |
118 |
119 |
120 | java
121 |
122 |
123 |
124 |
125 | org.codelibs.elasticsearch.runner.ElasticsearchClusterRunner
126 |
127 | -basePath
128 | es_home
129 | -numOfNode
130 | 3
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 | central
139 | https://repo1.maven.org/maven2
140 |
141 | true
142 |
143 |
144 | true
145 |
146 |
147 |
148 | codelibs.org
149 | CodeLibs Repository
150 | https://maven.codelibs.org/
151 |
152 |
153 |
154 |
155 | org.elasticsearch
156 | elasticsearch
157 | ${elasticsearch.version}
158 |
159 |
160 | org.elasticsearch
161 | elasticsearch-plugin-classloader
162 |
163 |
164 | org.elasticsearch
165 | elasticsearch-lz4
166 |
167 |
168 | org.elasticsearch
169 | elasticsearch-preallocate
170 |
171 |
172 |
173 |
174 | org.codelibs.elasticsearch.lib
175 | lz4
176 | ${elasticsearch.version}
177 |
178 |
179 | org.codelibs.elasticsearch.lib
180 | preallocate
181 | ${elasticsearch.version}
182 |
183 |
184 | org.codelibs.elasticsearch.lib
185 | plugin-classloader
186 | ${elasticsearch.version}
187 |
188 |
189 |
190 | org.codelibs.elasticsearch.module
191 | aggregations
192 | ${elasticsearch.version}
193 |
194 |
195 | org.codelibs.elasticsearch.module
196 | analysis-common
197 | ${elasticsearch.version}
198 |
199 |
200 | org.codelibs.elasticsearch.module
201 | apm
202 | ${elasticsearch.version}
203 |
204 |
205 | org.codelibs.elasticsearch.module
206 | data-streams
207 | ${elasticsearch.version}
208 |
209 |
210 | org.codelibs.elasticsearch.module
211 | ingest-attachment
212 | ${elasticsearch.version}
213 |
214 |
215 | org.codelibs.elasticsearch.module
216 | ingest-common
217 | ${elasticsearch.version}
218 |
219 |
224 |
225 | org.codelibs.elasticsearch.module
226 | ingest-user-agent
227 | ${elasticsearch.version}
228 |
229 |
230 | org.codelibs.elasticsearch.module
231 | kibana
232 | ${elasticsearch.version}
233 |
234 |
235 | org.codelibs.elasticsearch.module
236 | lang-expression
237 | ${elasticsearch.version}
238 |
239 |
240 | org.codelibs.elasticsearch.module
241 | lang-mustache
242 | ${elasticsearch.version}
243 |
244 |
245 | org.codelibs.elasticsearch.module
246 | lang-painless
247 | ${elasticsearch.version}
248 |
249 |
250 | org.codelibs.elasticsearch.module
251 | legacy-geo
252 | ${elasticsearch.version}
253 |
254 |
255 | org.codelibs.elasticsearch.module
256 | mapper-extras
257 | ${elasticsearch.version}
258 |
259 |
260 | org.codelibs.elasticsearch.module
261 | parent-join
262 | ${elasticsearch.version}
263 |
264 |
265 | org.codelibs.elasticsearch.module
266 | percolator
267 | ${elasticsearch.version}
268 |
269 |
270 | org.codelibs.elasticsearch.module
271 | rank-eval
272 | ${elasticsearch.version}
273 |
274 |
275 | org.codelibs.elasticsearch.module
276 | reindex
277 | ${elasticsearch.version}
278 |
279 |
284 |
285 | org.codelibs.elasticsearch.module
286 | repository-gcs
287 | ${elasticsearch.version}
288 |
289 |
290 | org.codelibs.elasticsearch.module
291 | repository-s3
292 | ${elasticsearch.version}
293 |
294 |
295 | org.codelibs.elasticsearch.module
296 | repository-url
297 | ${elasticsearch.version}
298 |
299 |
300 | org.codelibs.elasticsearch.module
301 | rest-root
302 | ${elasticsearch.version}
303 |
304 |
305 | org.codelibs.elasticsearch.module
306 | runtime-fields-common
307 | ${elasticsearch.version}
308 |
309 |
314 |
315 | org.codelibs.elasticsearch.module
316 | transport-netty4
317 | ${elasticsearch.version}
318 |
319 |
320 | args4j
321 | args4j
322 | 2.33
323 |
324 |
325 | org.apache.logging.log4j
326 | log4j-api
327 | ${log4j.version}
328 |
329 |
330 | org.apache.logging.log4j
331 | log4j-core
332 | ${log4j.version}
333 |
334 |
335 | org.apache.logging.log4j
336 | log4j-1.2-api
337 | ${log4j.version}
338 | true
339 |
340 |
341 |
342 | net.java.dev.jna
343 | jna
344 | ${jna.version}
345 | true
346 |
347 |
348 | org.codelibs
349 | curl4j
350 | 1.2.8
351 |
352 |
353 | org.locationtech.jts
354 | jts-core
355 | ${jts-core.version}
356 |
357 |
358 | xerces
359 | xercesImpl
360 |
361 |
362 |
363 |
364 | org.locationtech.spatial4j
365 | spatial4j
366 | ${spatial4j.version}
367 |
368 |
369 | junit
370 | junit
371 | 4.13.2
372 | test
373 |
374 |
375 |
376 |
--------------------------------------------------------------------------------
/src/main/java/org/codelibs/elasticsearch/runner/ClusterRunnerException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012-2022 CodeLibs Project and the Others.
3 | *
4 | * This program is free software: you can redistribute it and/or modify
5 | * it under the terms of the Server Side Public License, version 1,
6 | * as published by MongoDB, Inc.
7 | *
8 | * This program is distributed in the hope that it will be useful,
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | * Server Side Public License for more details.
12 | *
13 | * You should have received a copy of the Server Side Public License
14 | * along with this program. If not, see
15 | * .
16 | */
17 | package org.codelibs.elasticsearch.runner;
18 |
19 | import org.elasticsearch.action.ActionResponse;
20 |
21 | public class ClusterRunnerException extends RuntimeException {
22 |
23 | private static final long serialVersionUID = 1L;
24 |
25 | private final transient ActionResponse response;
26 |
27 | public ClusterRunnerException(final String message, final Throwable cause) {
28 | super(message, cause);
29 | this.response = null;
30 | }
31 |
32 | public ClusterRunnerException(final String message) {
33 | super(message);
34 | this.response = null;
35 | }
36 |
37 | public ClusterRunnerException(final String message,
38 | final ActionResponse response) {
39 | super(message);
40 | this.response = response;
41 | }
42 |
43 | @SuppressWarnings("unchecked")
44 | public T getActionResponse() {
45 | return (T) response;
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
/src/main/java/org/codelibs/elasticsearch/runner/ElasticsearchClusterRunner.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012-2022 CodeLibs Project and the Others.
3 | *
4 | * This program is free software: you can redistribute it and/or modify
5 | * it under the terms of the Server Side Public License, version 1,
6 | * as published by MongoDB, Inc.
7 | *
8 | * This program is distributed in the hope that it will be useful,
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | * Server Side Public License for more details.
12 | *
13 | * You should have received a copy of the Server Side Public License
14 | * along with this program. If not, see
15 | * .
16 | */
17 | package org.codelibs.elasticsearch.runner;
18 |
19 | import static org.elasticsearch.common.settings.Settings.builder;
20 |
21 | import java.io.Closeable;
22 | import java.io.IOException;
23 | import java.io.InputStream;
24 | import java.net.ConnectException;
25 | import java.net.Socket;
26 | import java.nio.file.FileSystems;
27 | import java.nio.file.FileVisitResult;
28 | import java.nio.file.FileVisitor;
29 | import java.nio.file.Files;
30 | import java.nio.file.Path;
31 | import java.nio.file.Paths;
32 | import java.nio.file.SimpleFileVisitor;
33 | import java.nio.file.StandardCopyOption;
34 | import java.nio.file.attribute.BasicFileAttributes;
35 | import java.util.ArrayList;
36 | import java.util.Collection;
37 | import java.util.Collections;
38 | import java.util.List;
39 | import java.util.Locale;
40 | import java.util.concurrent.ExecutionException;
41 | import java.util.concurrent.TimeUnit;
42 | import java.util.concurrent.TimeoutException;
43 | import java.util.stream.Collectors;
44 |
45 | import org.apache.logging.log4j.LogManager;
46 | import org.apache.logging.log4j.Logger;
47 | import org.elasticsearch.action.ActionResponse;
48 | import org.elasticsearch.action.DocWriteResponse;
49 | import org.elasticsearch.action.DocWriteResponse.Result;
50 | import org.elasticsearch.action.ShardOperationFailedException;
51 | import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
52 | import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
53 | import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
54 | import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
55 | import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequestBuilder;
56 | import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequestBuilder;
57 | import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
58 | import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder;
59 | import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
60 | import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
61 | import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequestBuilder;
62 | import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder;
63 | import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeRequestBuilder;
64 | import org.elasticsearch.action.admin.indices.get.GetIndexRequestBuilder;
65 | import org.elasticsearch.action.admin.indices.get.GetIndexResponse;
66 | import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
67 | import org.elasticsearch.action.admin.indices.open.OpenIndexRequestBuilder;
68 | import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
69 | import org.elasticsearch.action.admin.indices.refresh.RefreshRequestBuilder;
70 | import org.elasticsearch.action.delete.DeleteRequestBuilder;
71 | import org.elasticsearch.action.delete.DeleteResponse;
72 | import org.elasticsearch.action.index.IndexRequestBuilder;
73 | import org.elasticsearch.action.search.SearchRequestBuilder;
74 | import org.elasticsearch.action.search.SearchResponse;
75 | import org.elasticsearch.action.support.WriteRequest.RefreshPolicy;
76 | import org.elasticsearch.action.support.broadcast.BroadcastResponse;
77 | import org.elasticsearch.action.support.master.AcknowledgedResponse;
78 | import org.elasticsearch.client.internal.AdminClient;
79 | import org.elasticsearch.client.internal.Client;
80 | import org.elasticsearch.cluster.ClusterState;
81 | import org.elasticsearch.cluster.health.ClusterHealthStatus;
82 | import org.elasticsearch.cluster.service.ClusterService;
83 | import org.elasticsearch.common.Priority;
84 | import org.elasticsearch.common.Strings;
85 | import org.elasticsearch.common.logging.LogConfigurator;
86 | import org.elasticsearch.common.settings.Settings;
87 | import org.elasticsearch.env.Environment;
88 | import org.elasticsearch.index.IndexNotFoundException;
89 | import org.elasticsearch.index.query.QueryBuilder;
90 | import org.elasticsearch.index.query.QueryBuilders;
91 | import org.elasticsearch.node.ClusterRunnerNode;
92 | import org.elasticsearch.node.InternalSettingsPreparer;
93 | import org.elasticsearch.node.Node;
94 | import org.elasticsearch.node.NodeValidationException;
95 | import org.elasticsearch.plugins.ClusterRunnerPluginsService;
96 | import org.elasticsearch.plugins.Plugin;
97 | import org.elasticsearch.search.sort.SortBuilder;
98 | import org.elasticsearch.search.sort.SortBuilders;
99 | import org.elasticsearch.xcontent.XContentBuilder;
100 | import org.elasticsearch.xcontent.XContentType;
101 | import org.elasticsearch.xcontent.json.JsonXContent;
102 | import org.elasticsearch.xcontent.smile.SmileXContent;
103 | import org.elasticsearch.xcontent.yaml.YamlXContent;
104 | import org.kohsuke.args4j.CmdLineException;
105 | import org.kohsuke.args4j.CmdLineParser;
106 | import org.kohsuke.args4j.Option;
107 | import org.kohsuke.args4j.ParserProperties;
108 |
109 | /**
110 | * ElasticsearchClusterRunner manages multiple Elasticsearch instances.
111 | *
112 | * @author shinsuke
113 | *
114 | */
115 | public class ElasticsearchClusterRunner implements Closeable {
116 |
117 | private static final Logger logger = LogManager.getLogger("codelibs.cluster.runner");
118 |
119 | public static final String NODE_NAME = "node.name";
120 |
121 | public static final String HTTP_PORT = "http.port";
122 |
123 | public static final String LOG4J2_PROPERTIES = "log4j2.properties";
124 |
125 | public static final String ELASTICSEARCH_YAML = "elasticsearch.yml";
126 |
127 | public static final String[] MODULE_TYPES = new String[] { //
128 | "org.elasticsearch.aggregations.AggregationsPlugin", //
129 | "org.elasticsearch.analysis.common.CommonAnalysisPlugin", //
130 | "org.elasticsearch.telemetry.apm.APM", //
131 | "org.elasticsearch.datastreams.DataStreamsPlugin", //
132 | "org.elasticsearch.ingest.attachment.IngestAttachmentPlugin", //
133 | "org.elasticsearch.ingest.common.IngestCommonPlugin", //
134 | // "org.elasticsearch.ingest.geoip.IngestGeoIpPlugin", //
135 | "org.elasticsearch.ingest.useragent.IngestUserAgentPlugin", //
136 | "org.elasticsearch.kibana.KibanaPlugin", //
137 | "org.elasticsearch.script.expression.ExpressionPlugin", //
138 | "org.elasticsearch.script.mustache.MustachePlugin", //
139 | "org.elasticsearch.painless.PainlessPlugin", //
140 | "org.elasticsearch.legacygeo.LegacyGeoPlugin", //
141 | "org.elasticsearch.index.mapper.extras.MapperExtrasPlugin", //
142 | "org.elasticsearch.join.ParentJoinPlugin", //
143 | "org.elasticsearch.percolator.PercolatorPlugin", //
144 | "org.elasticsearch.index.rankeval.RankEvalPlugin", //
145 | "org.elasticsearch.reindex.ReindexPlugin", //
146 | // "org.elasticsearch.repositories.azure.AzureRepositoryPlugin", //
147 | "org.elasticsearch.repositories.gcs.GoogleCloudStoragePlugin", //
148 | "org.elasticsearch.repositories.s3.S3RepositoryPlugin", //
149 | "org.elasticsearch.plugin.repository.url.URLRepositoryPlugin", //
150 | "org.elasticsearch.rest.root.MainRestPlugin", //
151 | "org.elasticsearch.runtimefields.RuntimeFieldsCommonPlugin", //
152 | // "org.elasticsearch.systemd.SystemdPlugin", //
153 | "org.elasticsearch.transport.netty4.Netty4Plugin", //
154 |
155 | };
156 |
157 | public static final String DATA_DIR = "data";
158 |
159 | public static final String LOGS_DIR = "logs";
160 |
161 | public static final String CONFIG_DIR = "config";
162 |
163 | protected List nodeList = new ArrayList<>();
164 |
165 | protected List envList = new ArrayList<>();
166 |
167 | protected Collection> pluginList = new ArrayList<>();
168 |
169 | protected int maxHttpPort = 9299;
170 |
171 | @Option(name = "-basePath", usage = "Base path for Elasticsearch.")
172 | protected String basePath;
173 |
174 | @Option(name = "-confPath", usage = "Config path for Elasticsearch.")
175 | protected String confPath;
176 |
177 | @Option(name = "-dataPath", usage = "Data path for Elasticsearch.")
178 | protected String dataPath;
179 |
180 | @Option(name = "-logsPath", usage = "Log path for Elasticsearch.")
181 | protected String logsPath;
182 |
183 | @Option(name = "-numOfNode", usage = "The number of Elasticsearch node.")
184 | protected int numOfNode = 3;
185 |
186 | @Option(name = "-baseHttpPort", usage = "Base http port.")
187 | protected int baseHttpPort = 9200;
188 |
189 | @Option(name = "-clusterName", usage = "Cluster name.")
190 | protected String clusterName = "elasticsearch-cluster-runner";
191 |
192 | @Option(name = "-indexStoreType", usage = "Index store type.")
193 | protected String indexStoreType = "fs";
194 |
195 | @Option(name = "-useLogger", usage = "Print logs to a logger.")
196 | protected boolean useLogger = false;
197 |
198 | @Option(name = "-disableESLogger", usage = "Disable ESLogger.")
199 | protected boolean disableESLogger = false;
200 |
201 | @Option(name = "-useConsoleAppender", usage = "Use a console appender.")
202 | protected boolean useConsoleAppender = true;
203 |
204 | @Option(name = "-printOnFailure", usage = "Print an exception on a failure.")
205 | protected boolean printOnFailure = false;
206 |
207 | @Option(name = "-moduleTypes", usage = "Module types.")
208 | protected String moduleTypes;
209 |
210 | @Option(name = "-pluginTypes", usage = "Plugin types.")
211 | protected String pluginTypes;
212 |
213 | protected Builder settingsBuilder;
214 |
215 | public static void main(final String[] args) {
216 | try (final ElasticsearchClusterRunner runner = new ElasticsearchClusterRunner()) {
217 | Runtime.getRuntime().addShutdownHook(new Thread() {
218 | @Override
219 | public void run() {
220 | try {
221 | runner.close();
222 | } catch (final IOException e) {
223 | runner.print(e.getLocalizedMessage());
224 | }
225 | }
226 | });
227 |
228 | runner.build(args);
229 |
230 | while (true) {
231 | if (runner.isClosed()) {
232 | break;
233 | }
234 | try {
235 | Thread.sleep(5000);
236 | } catch (final InterruptedException e) {
237 | // no-op
238 | }
239 | }
240 | } catch (final IOException e) {
241 | System.exit(1);
242 | }
243 | }
244 |
245 | public ElasticsearchClusterRunner() {
246 | // nothing
247 | }
248 |
249 | /**
250 | * Check if a cluster runner is closed.
251 | *
252 | * @return true if a runner is closed.
253 | */
254 | public boolean isClosed() {
255 | for (final Node node : nodeList) {
256 | if (!node.isClosed()) {
257 | return false;
258 | }
259 | }
260 | return true;
261 | }
262 |
263 | /**
264 | * Close a cluster runner.
265 | * @throws IOException i/o exception
266 | */
267 | @Override
268 | public void close() throws IOException {
269 | final List exceptionList = new ArrayList<>();
270 | for (final Node node : nodeList) {
271 | try {
272 | node.close();
273 | } catch (final IOException e) {
274 | exceptionList.add(e);
275 | }
276 | }
277 | if (exceptionList.isEmpty()) {
278 | print("Closed all nodes.");
279 | } else {
280 | if (useLogger && logger.isDebugEnabled()) {
281 | for (final Exception e : exceptionList) {
282 | logger.debug("Failed to close a node.", e);
283 | }
284 | }
285 | throw new IOException(exceptionList.toString());
286 | }
287 | }
288 |
289 | /**
290 | * Delete all configuration files and directories.
291 | */
292 | public void clean() {
293 | LogManager.shutdown();
294 | final Path bPath = FileSystems.getDefault().getPath(basePath);
295 | final CleanUpFileVisitor visitor = new CleanUpFileVisitor();
296 | try {
297 | Files.walkFileTree(bPath, visitor);
298 | if (visitor.hasErrors()) {
299 | throw new ClusterRunnerException(visitor.getErrors().stream()
300 | .map(e -> e.getLocalizedMessage())
301 | .collect(Collectors.joining("\n")));
302 | }
303 | } catch (IOException e) {
304 | throw new ClusterRunnerException("Failed to delete " + bPath, e);
305 | }
306 | }
307 |
308 | /**
309 | * Configure each Elasticsearch instance by builder.
310 | *
311 | * @param builder builder to create a cluster
312 | * @return this instance
313 | */
314 | public ElasticsearchClusterRunner onBuild(final Builder builder) {
315 | this.settingsBuilder = builder;
316 | return this;
317 | }
318 |
319 | /**
320 | * Create and start Elasticsearch cluster with Configs instance.
321 | *
322 | * @param configs configuration
323 | */
324 | public void build(final Configs configs) {
325 | build(configs.build());
326 | }
327 |
328 | /**
329 | * Create and start Elasticsearch cluster with arguments.
330 | *
331 |      * @param args arguments for starting a cluster
332 | */
333 | public void build(final String... args) {
334 | if (args != null) {
335 | final CmdLineParser parser = new CmdLineParser(this, ParserProperties.defaults().withUsageWidth(80));
336 |
337 | try {
338 | parser.parseArgument(args);
339 | } catch (final CmdLineException e) {
340 | throw new ClusterRunnerException("Failed to parse args: " + Strings.arrayToDelimitedString(args, " "));
341 | }
342 | }
343 |
344 | if (basePath == null) {
345 | try {
346 | basePath = Files.createTempDirectory("es-cluster").toAbsolutePath().toString();
347 | } catch (final IOException e) {
348 | throw new ClusterRunnerException("Could not create $ES_HOME.", e);
349 | }
350 | }
351 |
352 | final Path esBasePath = Paths.get(basePath);
353 | createDir(esBasePath);
354 |
355 | final String[] types = moduleTypes == null ? MODULE_TYPES : moduleTypes.split(",");
356 | for (final String moduleType : types) {
357 | Class extends Plugin> clazz;
358 | try {
359 | clazz = Class.forName(moduleType).asSubclass(Plugin.class);
360 | pluginList.add(clazz);
361 | } catch (final ClassNotFoundException e) {
362 | logger.debug("{} is not found.", moduleType, e);
363 | }
364 | }
365 | if (pluginTypes != null) {
366 | for (final String value : pluginTypes.split(",")) {
367 | final String pluginType = value.trim();
368 | if (pluginType.length() > 0) {
369 | Class extends Plugin> clazz;
370 | try {
371 | clazz = Class.forName(pluginType).asSubclass(Plugin.class);
372 | pluginList.add(clazz);
373 | } catch (final ClassNotFoundException e) {
374 | throw new ClusterRunnerException(pluginType + " is not found.", e);
375 | }
376 | }
377 | }
378 | }
379 |
380 | print("Base Path: " + basePath);
381 | print("Num Of Node: " + numOfNode);
382 |
383 | for (int i = 0; i < numOfNode; i++) {
384 | execute(i + 1);
385 | System.setProperty("es.set.netty.runtime.available.processors", "false");
386 | }
387 | }
388 |
    /**
     * Creates and starts the cluster node with the given number.
     * Sets up per-node home/conf/logs/data directories, seeds default
     * configuration files, applies caller-provided settings, configures
     * logging, and finally starts a {@code ClusterRunnerNode}.
     *
     * @param id a 1-based node number; used for the node name and port offset
     * @throws ClusterRunnerException if any step of the node startup fails
     */
    protected void execute(final int id) {
        final String nodeName = "Node " + id;
        // Each node lives in its own directory under basePath, e.g. "node_1".
        final Path homePath = Paths.get(basePath, nodeName.replace(' ', '_').toLowerCase(Locale.ROOT));
        // Explicit conf/logs/data paths (if set on this runner) override the per-node defaults.
        final Path confPath = this.confPath == null ? homePath.resolve(CONFIG_DIR) : Paths.get(this.confPath);
        final Path logsPath = this.logsPath == null ? homePath.resolve(LOGS_DIR) : Paths.get(this.logsPath);
        final Path dataPath = this.dataPath == null ? homePath.resolve(DATA_DIR) : Paths.get(this.dataPath);

        createDir(homePath);
        createDir(confPath);
        createDir(logsPath);
        createDir(dataPath);

        final Settings.Builder builder = builder();

        // Let the caller-supplied Builder customize settings before defaults are applied.
        if (settingsBuilder != null) {
            settingsBuilder.build(id, builder);
        }

        putIfAbsent(builder, "path.home", homePath.toAbsolutePath().toString());
        putIfAbsent(builder, "path.data", dataPath.toAbsolutePath().toString());
        putIfAbsent(builder, "path.logs", logsPath.toAbsolutePath().toString());

        // Seed a default elasticsearch.yml from the classpath if none exists yet.
        final Path esConfPath = confPath.resolve(ELASTICSEARCH_YAML);
        if (!esConfPath.toFile().exists()) {
            try (InputStream is =
                    Thread.currentThread().getContextClassLoader().getResourceAsStream(CONFIG_DIR + "/" + ELASTICSEARCH_YAML)) {
                Files.copy(is, esConfPath, StandardCopyOption.REPLACE_EXISTING);
            } catch (final IOException e) {
                throw new ClusterRunnerException("Could not create: " + esConfPath, e);
            }
        }

        // Seed a default log4j2.properties unless Elasticsearch logging is disabled.
        if (!disableESLogger) {
            final Path logConfPath = confPath.resolve(LOG4J2_PROPERTIES);
            if (!logConfPath.toFile().exists()) {
                try (InputStream is =
                        Thread.currentThread().getContextClassLoader().getResourceAsStream(CONFIG_DIR + "/" + LOG4J2_PROPERTIES)) {
                    Files.copy(is, logConfPath, StandardCopyOption.REPLACE_EXISTING);
                } catch (final IOException e) {
                    throw new ClusterRunnerException("Could not create: " + logConfPath, e);
                }
            }
        }

        try {
            // If "path.plugins" points at a plugin source tree, copy it into this
            // node's plugins directory, then remove the non-standard setting.
            final String pluginPath = builder.get("path.plugins");
            if (pluginPath != null) {
                final Path sourcePath = Paths.get(pluginPath);
                final Path targetPath = homePath.resolve("plugins");
                Files.walkFileTree(sourcePath, new SimpleFileVisitor() {
                    @Override
                    public FileVisitResult preVisitDirectory(final Path dir, final BasicFileAttributes attrs) throws IOException {
                        Files.createDirectories(targetPath.resolve(sourcePath.relativize(dir)));
                        return FileVisitResult.CONTINUE;
                    }

                    @Override
                    public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                        Files.copy(file, targetPath.resolve(sourcePath.relativize(file)), StandardCopyOption.REPLACE_EXISTING);
                        return FileVisitResult.CONTINUE;
                    }
                });
                builder.remove("path.plugins");
            }

            final int httpPort = getAvailableHttpPort(id);
            putIfAbsent(builder, "cluster.name", clusterName);
            putIfAbsent(builder, NODE_NAME, nodeName);
            putIfAbsent(builder, HTTP_PORT, String.valueOf(httpPort));
            putIfAbsent(builder, "index.store.type", indexStoreType);
            // Default every node to the master+data roles unless already configured.
            if (!builder.keys().contains("node.roles")) {
                builder.putList("node.roles", "master", "data");
            }

            print("Node Name: " + builder.get(NODE_NAME));
            print("HTTP Port: " + builder.get(HTTP_PORT));
            print("Data Directory: " + dataPath);
            print("Log Directory: " + logsPath);

            final Settings settings = builder.build();
            final Environment environment =
                    InternalSettingsPreparer.prepareEnvironment(settings, Collections.emptyMap(), confPath, () -> nodeName);
            if (!disableESLogger) {
                LogConfigurator.registerErrorListener();
                final String envNodeName = Node.NODE_NAME_SETTING
                        .get(environment.settings());
                try {
                    LogConfigurator.setNodeName(envNodeName);
                } catch (final IllegalStateException e) {
                    // The node name may already be set when multiple nodes share a JVM.
                    if (logger.isDebugEnabled()) {
                        logger.debug("Failed to set {} to a log configuration.",
                                envNodeName, e);
                    }
                }
                LogConfigurator.configure(environment, useConsoleAppender);
            }
            createDir(environment.modulesFile());
            createDir(environment.pluginsFile());

            final Node node = new ClusterRunnerNode(environment,
                    s -> new ClusterRunnerPluginsService(s, environment,
                            pluginList));
            node.start();
            // Keep the node and its environment so the node can be restarted later.
            nodeList.add(node);
            envList.add(environment);
        } catch (final Exception e) {
            throw new ClusterRunnerException("Failed to start node " + id, e);
        }
    }
498 |
499 | protected int getAvailableHttpPort(final int number) {
500 | int httpPort = baseHttpPort + number;
501 | if (maxHttpPort < 0) {
502 | return httpPort;
503 | }
504 | while (httpPort <= maxHttpPort) {
505 | try (Socket socket = new Socket("localhost", httpPort)) {
506 | httpPort++;
507 | } catch (final ConnectException e) {
508 | return httpPort;
509 | } catch (final IOException e) {
510 | print(e.getMessage());
511 | httpPort++;
512 | }
513 | }
514 | throw new ClusterRunnerException("The http port " + httpPort + " is unavailable.");
515 | }
516 |
517 | protected void putIfAbsent(final Settings.Builder builder, final String key, final String value) {
518 | if (builder.get(key) == null && value != null) {
519 | builder.put(key, value);
520 | }
521 | }
522 |
    /**
     * Sets the upper bound for HTTP port scanning; a negative value disables scanning.
     *
     * @param maxHttpPort the maximum HTTP port to probe
     */
    public void setMaxHttpPort(final int maxHttpPort) {
        this.maxHttpPort = maxHttpPort;
    }
526 |
527 | /**
528 | * Return a node by the node index.
529 | *
530 | * @param i A node index
531 | * @return null if the node is not found
532 | */
533 | public Node getNode(final int i) {
534 | if (i < 0 || i >= nodeList.size()) {
535 | return null;
536 | }
537 | return nodeList.get(i);
538 | }
539 |
540 | /**
541 | * Start a closed node.
542 | *
543 | * @param i the number of nodes
544 | * @return true if the node is started.
545 | */
546 | public boolean startNode(final int i) {
547 | if (i >= nodeList.size()) {
548 | return false;
549 | }
550 | if (!nodeList.get(i).isClosed()) {
551 | return false;
552 | }
553 | final Environment environment = envList.get(i);
554 | final Node node = new ClusterRunnerNode(environment,
555 | s -> new ClusterRunnerPluginsService(s, environment,
556 | pluginList));
557 | try {
558 | node.start();
559 | nodeList.set(i, node);
560 | return true;
561 | } catch (final NodeValidationException e) {
562 | print(e.getLocalizedMessage());
563 | }
564 | return false;
565 | }
566 |
567 | /**
568 | * Return a node by the name.
569 | *
570 | * @param name A node name
571 | * @return null if the node is not found by the name
572 | */
573 | public Node getNode(final String name) {
574 | if (name == null) {
575 | return null;
576 | }
577 | for (final Node node : nodeList) {
578 | if (name.equals(node.settings().get(NODE_NAME))) {
579 | return node;
580 | }
581 | }
582 | return null;
583 | }
584 |
585 | /**
586 | * Return a node index.
587 | *
588 | * @param node node to check an index
589 | * @return -1 if the node does not exist.
590 | */
591 | public int getNodeIndex(final Node node) {
592 | for (int i = 0; i < nodeList.size(); i++) {
593 | if (nodeList.get(i).equals(node)) {
594 | return i;
595 | }
596 | }
597 | return -1;
598 | }
599 |
    /**
     * Return the number of nodes managed by this runner (including closed ones).
     *
     * @return the number of nodes
     */
    public int getNodeSize() {
        return nodeList.size();
    }
608 |
609 | public void print(final String line) {
610 | if (useLogger) {
611 | logger.info(line);
612 | } else {
613 | System.out.println(line);
614 | }
615 | }
616 |
617 | protected void createDir(final Path path) {
618 | if (!path.toFile().exists()) {
619 | print("Creating " + path);
620 | try {
621 | Files.createDirectories(path);
622 | } catch (final IOException e) {
623 | throw new ClusterRunnerException("Failed to create " + path, e);
624 | }
625 | }
626 | }
627 |
628 | /**
629 | * Return an available node.
630 | *
631 | * @return node
632 | */
633 | public Node node() {
634 | for (final Node node : nodeList) {
635 | if (!node.isClosed()) {
636 | return node;
637 | }
638 | }
639 | throw new ClusterRunnerException("All nodes are closed.");
640 | }
641 |
    /**
     * Return the node that currently holds the elected-master role,
     * as reported by the cluster state.
     *
     * @return master node, or null if no local node matches the master's name
     */
    public synchronized Node masterNode() {
        final ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
        final String name = state.nodes().getMasterNode().getName();
        return getNode(name);
    }
652 |
653 | /**
654 | * Return a non-master node.
655 | *
656 | * @return non-master node
657 | */
658 | public synchronized Node nonMasterNode() {
659 | final ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
660 | final String name = state.nodes().getMasterNode().getName();
661 | for (final Node node : nodeList) {
662 | if (!node.isClosed() && !name.equals(node.settings().get(NODE_NAME))) {
663 | return node;
664 | }
665 | }
666 | return null;
667 | }
668 |
    /**
     * Return an Elasticsearch client bound to the first open node.
     *
     * @return client
     * @throws ClusterRunnerException when every node is closed (via node())
     */
    public Client client() {
        return node().client();
    }
677 |
    /**
     * Return an Elasticsearch admin client bound to the first open node.
     *
     * @return admin client
     */
    public AdminClient admin() {
        return client().admin();
    }
686 |
    /**
     * Wait for green state of a cluster, also waiting for pending LANGUID
     * events and for shard relocation to finish.
     *
     * @param indices indices to check status; empty means the whole cluster
     * @return cluster health status
     */
    public ClusterHealthStatus ensureGreen(final String... indices) {
        final ClusterHealthResponse actionGet = client().admin().cluster()
                .prepareHealth(indices).setWaitForGreenStatus()
                .setWaitForEvents(Priority.LANGUID)
                .setWaitForNoRelocatingShards(true).execute().actionGet();
        if (actionGet.isTimedOut()) {
            // Include the full cluster state and pending tasks to aid debugging.
            onFailure(
                    "ensureGreen timed out, cluster state:\n" + client()
                            .admin().cluster().prepareState().get().getState()
                            + "\n"
                            + getClusterPendingTasks(client()),
                    actionGet);
        }
        return actionGet.getStatus();
    }
708 |
    /**
     * Wait for yellow (or better) state of a cluster, also waiting for pending
     * LANGUID events and for shard relocation to finish.
     *
     * @param indices indices to check status; empty means the whole cluster
     * @return cluster health status
     */
    public ClusterHealthStatus ensureYellow(final String... indices) {
        final ClusterHealthResponse actionGet = client().admin().cluster()
                .prepareHealth(indices).setWaitForYellowStatus()
                .setWaitForNoRelocatingShards(true)
                .setWaitForEvents(Priority.LANGUID).execute().actionGet();
        if (actionGet.isTimedOut()) {
            onFailure(
                    "ensureYellow timed out, cluster state:\n" + "\n" + client()
                            .admin().cluster().prepareState().get().getState()
                            + "\n"
                            + getClusterPendingTasks(client()),
                    actionGet);
        }
        return actionGet.getStatus();
    }
730 |
    /**
     * Block until no shards are relocating, reporting a failure on timeout.
     *
     * @return cluster health status after relocation has settled
     */
    public ClusterHealthStatus waitForRelocation() {
        final ClusterHealthResponse actionGet = client().admin().cluster()
                .prepareHealth().setWaitForNoRelocatingShards(true).execute()
                .actionGet();
        if (actionGet.isTimedOut()) {
            onFailure(
                    "waitForRelocation timed out, cluster state:\n" + "\n"
                            + client().admin().cluster().prepareState().get()
                                    .getState()
                            + "\n"
                            + getClusterPendingTasks(client()),
                    actionGet);
        }
        return actionGet.getStatus();
    }
746 |
    /**
     * Fetch the cluster's pending task list, waiting up to 10 seconds.
     *
     * @param client the client to query through
     * @return the pending-tasks response
     * @throws ClusterRunnerException if the request fails or times out
     */
    public static PendingClusterTasksResponse getClusterPendingTasks(
            Client client) {
        try {
            return client
                    .execute(TransportPendingClusterTasksAction.TYPE,
                            new PendingClusterTasksRequest())
                    .get(10, TimeUnit.SECONDS);
        } catch (final Exception e) {
            throw new ClusterRunnerException("Failed to get the cluster pending tasks.", e);
        }
    }
758 |
    /**
     * Flush all indices, forcing the flush and waiting for any ongoing one.
     *
     * @return the flush response
     */
    public BroadcastResponse flush() {
        return flush(true);
    }
762 |
    /**
     * Flush all indices, waiting for any ongoing flush.
     *
     * @param force whether to force the flush
     * @return the flush response
     */
    public BroadcastResponse flush(final boolean force) {
        return flush(builder -> builder.setWaitIfOngoing(true).setForce(force));
    }
766 |
    /**
     * Flush all indices after relocation settles, applying the callback to the
     * request builder; shard failures are reported through onFailure.
     *
     * @param builder a callback that customizes the flush request
     * @return the flush response
     */
    public BroadcastResponse flush(final BuilderCallback builder) {
        waitForRelocation();
        final BroadcastResponse actionGet = builder.apply(client().admin().indices().prepareFlush()).execute().actionGet();
        final ShardOperationFailedException[] shardFailures = actionGet.getShardFailures();
        if (shardFailures != null && shardFailures.length != 0) {
            // Aggregate every shard failure into a single report.
            final StringBuilder buf = new StringBuilder(100);
            for (final ShardOperationFailedException shardFailure : shardFailures) {
                buf.append(shardFailure.toString()).append('\n');
            }
            onFailure(buf.toString(), actionGet);
        }
        return actionGet;
    }
780 |
    /**
     * Refresh all indices with default request options.
     *
     * @return the refresh response
     */
    public BroadcastResponse refresh() {
        return refresh(builder -> builder);
    }
784 |
    /**
     * Refresh all indices after relocation settles, applying the callback to
     * the request builder; shard failures are reported through onFailure.
     *
     * @param builder a callback that customizes the refresh request
     * @return the refresh response
     */
    public BroadcastResponse refresh(final BuilderCallback builder) {
        waitForRelocation();
        final BroadcastResponse actionGet = builder.apply(client().admin().indices().prepareRefresh()).execute().actionGet();
        final ShardOperationFailedException[] shardFailures = actionGet.getShardFailures();
        if (shardFailures != null && shardFailures.length != 0) {
            // Aggregate every shard failure into a single report.
            final StringBuilder buf = new StringBuilder(100);
            for (final ShardOperationFailedException shardFailure : shardFailures) {
                buf.append(shardFailure.toString()).append('\n');
            }
            onFailure(buf.toString(), actionGet);
        }
        return actionGet;
    }
798 |
    /**
     * Force-merge all indices with default arguments (no segment limit,
     * no expunge-deletes-only, with flush).
     *
     * @return the force-merge response
     */
    public BroadcastResponse forceMerge() {
        return forceMerge(-1, false, true);
    }
802 |
    /**
     * Force-merge all indices with explicit merge options.
     *
     * @param maxNumSegments target segment count, or -1 for no limit
     * @param onlyExpungeDeletes whether to only expunge deleted documents
     * @param flush whether to flush after the merge
     * @return the force-merge response
     */
    public BroadcastResponse forceMerge(final int maxNumSegments, final boolean onlyExpungeDeletes, final boolean flush) {
        return forceMerge(builder -> builder.setMaxNumSegments(maxNumSegments).setOnlyExpungeDeletes(onlyExpungeDeletes).setFlush(flush));
    }
806 |
    /**
     * Force-merge all indices after relocation settles, applying the callback
     * to the request builder; shard failures are reported through onFailure.
     *
     * @param builder a callback that customizes the force-merge request
     * @return the force-merge response
     */
    public BroadcastResponse forceMerge(final BuilderCallback builder) {
        waitForRelocation();
        final BroadcastResponse actionGet = builder.apply(client().admin().indices().prepareForceMerge()).execute().actionGet();
        final ShardOperationFailedException[] shardFailures = actionGet.getShardFailures();
        if (shardFailures != null && shardFailures.length != 0) {
            // Aggregate every shard failure into a single report.
            final StringBuilder buf = new StringBuilder(100);
            for (final ShardOperationFailedException shardFailure : shardFailures) {
                buf.append(shardFailure.toString()).append('\n');
            }
            onFailure(buf.toString(), actionGet);
        }
        return actionGet;
    }
820 |
    /**
     * Open the given index with default request options.
     *
     * @param index an index name
     * @return the open-index response
     */
    public OpenIndexResponse openIndex(final String index) {
        return openIndex(index, builder -> builder);
    }
824 |
    /**
     * Open the given index, applying the callback to the request builder.
     *
     * @param index an index name
     * @param builder a callback that customizes the open-index request
     * @return the open-index response
     */
    public OpenIndexResponse openIndex(final String index, final BuilderCallback builder) {
        final OpenIndexResponse actionGet = builder.apply(client().admin().indices().prepareOpen(index)).execute().actionGet();
        if (!actionGet.isAcknowledged()) {
            onFailure("Failed to open " + index + ".", actionGet);
        }
        return actionGet;
    }
832 |
    /**
     * Close the given index with default request options.
     *
     * @param index an index name
     * @return the close-index response
     */
    public AcknowledgedResponse closeIndex(final String index) {
        return closeIndex(index, builder -> builder);
    }
836 |
    /**
     * Close the given index, applying the callback to the request builder.
     *
     * @param index an index name
     * @param builder a callback that customizes the close-index request
     * @return the close-index response
     */
    public AcknowledgedResponse closeIndex(final String index, final BuilderCallback builder) {
        final AcknowledgedResponse actionGet = builder.apply(client().admin().indices().prepareClose(index)).execute().actionGet();
        if (!actionGet.isAcknowledged()) {
            onFailure("Failed to close " + index + ".", actionGet);
        }
        return actionGet;
    }
844 |
    /**
     * Create the given index with the supplied settings.
     *
     * @param index an index name
     * @param settings index settings; null means empty settings
     * @return the create-index response
     */
    public CreateIndexResponse createIndex(final String index, final Settings settings) {
        return createIndex(index, builder -> builder.setSettings(settings != null ? settings : Settings.EMPTY));
    }
848 |
    /**
     * Create the given index, applying the callback to the request builder.
     *
     * @param index an index name
     * @param builder a callback that customizes the create-index request
     * @return the create-index response
     */
    public CreateIndexResponse createIndex(final String index, final BuilderCallback builder) {
        final CreateIndexResponse actionGet = builder.apply(client().admin().indices().prepareCreate(index)).execute().actionGet();
        if (!actionGet.isAcknowledged()) {
            onFailure("Failed to create " + index + ".", actionGet);
        }
        return actionGet;
    }
856 |
    /**
     * Check whether the given index exists, using default request options.
     *
     * @param index an index name
     * @return true if the index exists
     */
    public boolean indexExists(final String index) {
        return indexExists(index, builder -> builder);
    }
860 |
    /**
     * Check whether the given index exists, applying the callback to the
     * get-index request builder.
     *
     * @param index an index name
     * @param builder a callback that customizes the get-index request
     * @return true if the index exists; false when it is not found
     */
    public boolean indexExists(final String index,
            final BuilderCallback builder) {
        try {
            final GetIndexResponse actionGet = builder.apply(client().admin()
                    .indices().prepareGetIndex().setIndices(index)).execute()
                    .actionGet();
            return actionGet.indices().length > 0;
        } catch (final IndexNotFoundException e) {
            // A missing index is an expected outcome, not an error.
            return false;
        }
    }
872 |
    /**
     * Delete the given index with default request options.
     *
     * @param index an index name
     * @return the delete-index response
     */
    public AcknowledgedResponse deleteIndex(final String index) {
        return deleteIndex(index, builder -> builder);
    }
876 |
877 | public AcknowledgedResponse deleteIndex(final String index, final BuilderCallback builder) {
878 | final AcknowledgedResponse actionGet = builder.apply(client().admin().indices().prepareDelete(index)).execute().actionGet();
879 | if (!actionGet.isAcknowledged()) {
880 | onFailure("Failed to create " + index + ".", actionGet);
881 | }
882 | return actionGet;
883 | }
884 |
    /**
     * Put a mapping on the given index from a raw source string whose content
     * type (JSON/SMILE/YAML) is auto-detected.
     *
     * @param index an index name
     * @param mappingSource the mapping source text
     * @return the put-mapping response
     */
    public AcknowledgedResponse createMapping(final String index, final String mappingSource) {
        return createMapping(index, builder -> builder.setSource(mappingSource, xContentType(mappingSource)));
    }
888 |
    /**
     * Put a mapping on the given index from an XContent builder.
     *
     * @param index an index name
     * @param source the mapping source
     * @return the put-mapping response
     */
    public AcknowledgedResponse createMapping(final String index, final XContentBuilder source) {
        return createMapping(index, builder -> builder.setSource(source));
    }
892 |
    /**
     * Put a mapping on the given index, applying the callback to the request builder.
     *
     * @param index an index name
     * @param builder a callback that customizes the put-mapping request
     * @return the put-mapping response
     */
    public AcknowledgedResponse createMapping(final String index, final BuilderCallback builder) {
        final AcknowledgedResponse actionGet = builder.apply(client().admin().indices().preparePutMapping(index)).execute().actionGet();
        if (!actionGet.isAcknowledged()) {
            onFailure("Failed to create a mapping for " + index + ".", actionGet);
        }
        return actionGet;
    }
900 |
    /**
     * Index a document with an immediate refresh; the source content type is auto-detected.
     *
     * @param index an index name
     * @param id a document id
     * @param source the document source text
     * @return the index response
     */
    public DocWriteResponse insert(final String index, final String id, final String source) {
        return insert(index, id,
                builder -> builder.setSource(source, xContentType(source)).setRefreshPolicy(RefreshPolicy.IMMEDIATE));
    }
905 |
    /**
     * Index a document, applying the callback to the request builder.
     * Note: only Result.CREATED counts as success, so overwriting an existing
     * document triggers onFailure.
     *
     * @param index an index name
     * @param id a document id
     * @param builder a callback that customizes the index request
     * @return the index response
     */
    public DocWriteResponse insert(final String index, final String id,
            final BuilderCallback builder) {
        final DocWriteResponse actionGet = builder.apply(client().prepareIndex().setIndex(index).setId(id)).execute().actionGet();
        if (actionGet.getResult() != Result.CREATED) {
            onFailure("Failed to insert " + id + " into " + index + ".", actionGet);
        }
        return actionGet;
    }
914 |
    /**
     * Delete a document with an immediate refresh.
     *
     * @param index an index name
     * @param id a document id
     * @return the delete response
     */
    public DeleteResponse delete(final String index, final String id) {
        return delete(index, id, builder -> builder.setRefreshPolicy(RefreshPolicy.IMMEDIATE));
    }
918 |
    /**
     * Delete a document, applying the callback to the request builder;
     * anything other than Result.DELETED triggers onFailure.
     *
     * @param index an index name
     * @param id a document id
     * @param builder a callback that customizes the delete request
     * @return the delete response
     */
    public DeleteResponse delete(final String index, final String id,
            final BuilderCallback builder) {
        final DeleteResponse actionGet = builder.apply(client().prepareDelete().setIndex(index).setId(id)).execute().actionGet();
        if (actionGet.getResult() != Result.DELETED) {
            onFailure("Failed to delete " + id + " from " + index + ".", actionGet);
        }
        return actionGet;
    }
927 |
    /**
     * Count documents in the given index via a size-0 search.
     *
     * @param index an index name
     * @return the search response carrying the total hit count
     */
    public SearchResponse count(final String index) {
        return count(index, builder -> builder);
    }
931 |
    /**
     * Count documents in the given index, applying the callback to a size-0 search.
     *
     * @param index an index name
     * @param builder a callback that customizes the search request
     * @return the search response carrying the total hit count
     */
    public SearchResponse count(final String index, final BuilderCallback builder) {
        return builder.apply(client().prepareSearch(index).setSize(0)).execute().actionGet();
    }
935 |
    /**
     * Search the given index with optional query and sort (defaults:
     * match-all query, score sort) and explicit pagination.
     *
     * @param index an index name
     * @param queryBuilder the query; null means match-all
     * @param sort the sort; null means score sort
     * @param from the offset of the first hit
     * @param size the number of hits to return
     * @return the search response
     */
    public SearchResponse search(final String index, final QueryBuilder queryBuilder, final SortBuilder> sort, final int from,
            final int size) {
        return search(index, builder -> builder.setQuery(queryBuilder != null ? queryBuilder : QueryBuilders.matchAllQuery())
                .addSort(sort != null ? sort : SortBuilders.scoreSort()).setFrom(from).setSize(size));
    }
941 |
    /**
     * Search the given index, applying the callback to the search request builder.
     *
     * @param index an index name
     * @param builder a callback that customizes the search request
     * @return the search response
     */
    public SearchResponse search(final String index, final BuilderCallback builder) {
        return builder.apply(client().prepareSearch(index)).execute().actionGet();
    }
945 |
    /**
     * Fetch alias information for the given alias name.
     *
     * @param alias an alias name
     * @return the get-aliases response
     */
    public GetAliasesResponse getAlias(final String alias) {
        return getAlias(alias, builder -> builder);
    }
949 |
    /**
     * Fetch alias information, applying the callback to the request builder.
     *
     * @param alias an alias name
     * @param builder a callback that customizes the get-aliases request
     * @return the get-aliases response
     */
    public GetAliasesResponse getAlias(final String alias, final BuilderCallback builder) {
        return builder.apply(client().admin().indices().prepareGetAliases(alias)).execute().actionGet();
    }
953 |
    /**
     * Add and/or remove indices from the given alias in one request.
     * Null or empty arrays are skipped.
     *
     * @param alias an alias name
     * @param addedIndices indices to add to the alias, or null
     * @param deletedIndices indices to remove from the alias, or null
     * @return the aliases-update response
     */
    public AcknowledgedResponse updateAlias(final String alias, final String[] addedIndices, final String[] deletedIndices) {
        return updateAlias(builder -> {
            if (addedIndices != null && addedIndices.length > 0) {
                builder.addAlias(addedIndices, alias);
            }
            if (deletedIndices != null && deletedIndices.length > 0) {
                builder.removeAlias(deletedIndices, alias);
            }
            return builder;
        });
    }
965 |
    /**
     * Update aliases, applying the callback to the aliases request builder.
     *
     * @param builder a callback that customizes the aliases request
     * @return the aliases-update response
     */
    public AcknowledgedResponse updateAlias(final BuilderCallback builder) {
        final AcknowledgedResponse actionGet = builder.apply(client().admin().indices().prepareAliases()).execute().actionGet();
        if (!actionGet.isAcknowledged()) {
            onFailure("Failed to update aliases.", actionGet);
        }
        return actionGet;
    }
973 |
    /**
     * Return the ClusterService instance of the master node.
     *
     * @return the master node's ClusterService
     */
    public ClusterService clusterService() {
        return getInstance(ClusterService.class);
    }
977 |
    /**
     * Resolve a service instance of the given type from the master node's injector.
     *
     * @param clazz the service type to resolve
     * @return the instance bound to that type on the master node
     */
    public synchronized T getInstance(final Class clazz) {
        final Node node = masterNode();
        return node.injector().getInstance(clazz);
    }
982 |
    /**
     * Return the configured cluster name.
     *
     * @return the cluster name
     */
    public String getClusterName() {
        return clusterName;
    }
986 |
987 | private void onFailure(final String message, final ActionResponse response) {
988 | if (printOnFailure) {
989 | print(message);
990 | } else {
991 | throw new ClusterRunnerException(message, response);
992 | }
993 | }
994 |
    /**
     * File visitor that deletes a directory tree bottom-up, collecting
     * any paths that could not be removed (and scheduling them for
     * deletion on JVM exit) instead of aborting the walk.
     */
    private static final class CleanUpFileVisitor implements FileVisitor {
        // Paths that survived deletion; reported to the caller via getErrors().
        private final List errorList = new ArrayList<>();

        @Override
        public FileVisitResult preVisitDirectory(final Path dir, final BasicFileAttributes attrs) throws IOException {
            return FileVisitResult.CONTINUE;
        }

        // True when at least one file or directory could not be deleted.
        public boolean hasErrors() {
            return !errorList.isEmpty();
        }

        // The collected deletion failures.
        public List getErrors() {
            return errorList;
        }

        @Override
        public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
            Files.delete(file);
            // Verify the delete actually took effect (e.g. Windows file locks).
            return checkIfExist(file);
        }

        @Override
        public FileVisitResult visitFileFailed(final Path file, final IOException exc) throws IOException {
            throw exc;
        }

        @Override
        public FileVisitResult postVisitDirectory(final Path dir, final IOException exc) throws IOException {
            if (exc == null) {
                Files.delete(dir);
                if (dir.toFile().exists()) {
                    // Could not remove the directory now; defer to JVM exit.
                    errorList.add(new IOException("Failed to delete " + dir));
                    dir.toFile().deleteOnExit();
                }
                return FileVisitResult.CONTINUE;
            } else {
                throw exc;
            }
        }

        // Record a failed deletion and schedule the path for removal at JVM exit.
        private FileVisitResult checkIfExist(final Path path) {
            if (path.toFile().exists()) {
                errorList.add(new IOException("Failed to delete " + path));
                path.toFile().deleteOnExit();
            }
            return FileVisitResult.CONTINUE;
        }
    }
1044 |
    /**
     * Callback used to customize the settings of each node before it starts.
     */
    public interface Builder {

        /**
         * Customize the settings for one node.
         *
         * @param index a 1-based node number
         * @param builder a settings builder to populate for that node
         */
        void build(int index, Settings.Builder builder);
    }
1057 |
    /**
     * Create an empty, fluent configuration-argument builder.
     *
     * @return a new Configs instance
     */
    public static Configs newConfigs() {
        return new Configs();
    }
1061 |
    /**
     * Fluent builder that accumulates command-line-style arguments
     * ("-key", "value" pairs and boolean flags) for configuring an
     * ElasticsearchClusterRunner; build() returns them as a String array.
     */
    public static class Configs {
        // Accumulated argument tokens in the order they were added.
        List configList = new ArrayList<>();

        // Base directory under which per-node directories are created.
        public Configs basePath(final String basePath) {
            configList.add("-basePath");
            configList.add(basePath);
            return this;
        }

        // Number of nodes to start.
        public Configs numOfNode(final int numOfNode) {
            configList.add("-numOfNode");
            configList.add(String.valueOf(numOfNode));
            return this;
        }

        // Starting HTTP port; node N uses baseHttpPort + N.
        public Configs baseHttpPort(final int baseHttpPort) {
            configList.add("-baseHttpPort");
            configList.add(String.valueOf(baseHttpPort));
            return this;
        }

        // Cluster name shared by all nodes.
        public Configs clusterName(final String clusterName) {
            configList.add("-clusterName");
            configList.add(clusterName);
            return this;
        }

        // Value for the "index.store.type" setting.
        public Configs indexStoreType(final String indexStoreType) {
            configList.add("-indexStoreType");
            configList.add(indexStoreType);
            return this;
        }

        // Route runner output through the logger instead of stdout.
        public Configs useLogger() {
            configList.add("-useLogger");
            return this;
        }

        // Skip Elasticsearch log4j2 configuration entirely.
        public Configs disableESLogger() {
            configList.add("-disableESLogger");
            return this;
        }

        // Attach a console appender to the Elasticsearch logger.
        public Configs useConsoleAppender() {
            configList.add("-useConsoleAppender");
            return this;
        }

        // Print failures instead of throwing ClusterRunnerException.
        public Configs printOnFailure() {
            configList.add("-printOnFailure");
            return this;
        }

        // Comma-separated module class names to load.
        public Configs moduleTypes(final String moduleTypes) {
            configList.add("-moduleTypes");
            configList.add(moduleTypes);
            return this;
        }

        // Comma-separated plugin class names to load.
        public Configs pluginTypes(final String pluginTypes) {
            configList.add("-pluginTypes");
            configList.add(pluginTypes);
            return this;
        }

        // Materialize the accumulated arguments.
        public String[] build() {
            return configList.toArray(new String[configList.size()]);
        }

    }
1136 |
1137 | private static XContentType xContentType(final CharSequence content) {
1138 | final int length = content.length() < 20 ? content.length() : 20;
1139 | if (length == 0) {
1140 | return null;
1141 | }
1142 | final char first = content.charAt(0);
1143 | if (first == '{' || JsonXContent.jsonXContent.detectContent(content)) {
1144 | return XContentType.JSON;
1145 | }
1146 | // Should we throw a failure here? Smile idea is to use it in bytes....
1147 | if (SmileXContent.smileXContent.detectContent(content)) {
1148 | return XContentType.SMILE;
1149 | }
1150 | if (YamlXContent.yamlXContent.detectContent(content)) {
1151 | return XContentType.YAML;
1152 | }
1153 |
1154 | // CBOR is not supported
1155 |
1156 | for (int i = 0; i < length; i++) {
1157 | final char c = content.charAt(i);
1158 | if (c == '{') {
1159 | return XContentType.JSON;
1160 | }
1161 | if (!Character.isWhitespace(c)) {
1162 | break;
1163 | }
1164 | }
1165 | return null;
1166 | }
1167 |
    /**
     * Callback that customizes a request builder before it is executed.
     */
    public interface BuilderCallback {
        // Receives the prepared request builder and returns it (possibly modified).
        T apply(T builder);
    }
1174 | }
1175 |
--------------------------------------------------------------------------------
/src/main/java/org/codelibs/elasticsearch/runner/net/EcrCurl.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012-2022 CodeLibs Project and the Others.
3 | *
4 | * This program is free software: you can redistribute it and/or modify
5 | * it under the terms of the Server Side Public License, version 1,
6 | * as published by MongoDB, Inc.
7 | *
8 | * This program is distributed in the hope that it will be useful,
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | * Server Side Public License for more details.
12 | *
13 | * You should have received a copy of the Server Side Public License
14 | * along with this program. If not, see
15 | * .
16 | */
17 | package org.codelibs.elasticsearch.runner.net;
18 |
19 | import java.io.InputStream;
20 | import java.util.Map;
21 | import java.util.function.Function;
22 |
23 | import org.codelibs.curl.Curl.Method;
24 | import org.codelibs.curl.CurlException;
25 | import org.codelibs.curl.CurlRequest;
26 | import org.codelibs.curl.CurlResponse;
27 | import org.elasticsearch.common.xcontent.LoggingDeprecationHandler;
28 | import org.elasticsearch.node.Node;
29 | import org.elasticsearch.xcontent.NamedXContentRegistry;
30 | import org.elasticsearch.xcontent.json.JsonXContent;
31 |
/**
 * Thin convenience wrapper around the codelibs curl client for talking to a
 * cluster-runner node over HTTP. Provides request factories keyed either by a
 * {@link Node} (URL derived from its "http.port" setting) or by a raw URL,
 * plus a JSON response parser.
 */
public class EcrCurl {

    // Static-utility class; not meant to be instantiated.
    protected EcrCurl() {
        // nothing
    }

    // GET request against the given node's HTTP endpoint.
    public static CurlRequest get(final Node node, final String path) {
        return new CurlRequest(Method.GET, getUrl(node, path));
    }

    // POST request against the given node's HTTP endpoint.
    public static CurlRequest post(final Node node, final String path) {
        return new CurlRequest(Method.POST, getUrl(node, path));
    }

    // PUT request against the given node's HTTP endpoint.
    public static CurlRequest put(final Node node, final String path) {
        return new CurlRequest(Method.PUT, getUrl(node, path));
    }

    // DELETE request against the given node's HTTP endpoint.
    public static CurlRequest delete(final Node node, final String path) {
        return new CurlRequest(Method.DELETE, getUrl(node, path));
    }

    /**
     * Builds "http://localhost:PORT/path" from the node's "http.port" setting,
     * inserting a leading slash when the path lacks one.
     */
    protected static String getUrl(final Node node, final String path) {
        final StringBuilder urlBuf = new StringBuilder(200);
        urlBuf.append("http://localhost:").append(node.settings().get("http.port"));
        if (path.startsWith("/")) {
            urlBuf.append(path);
        } else {
            urlBuf.append('/').append(path);
        }
        return urlBuf.toString();
    }

    // GET request for a raw URL.
    public static CurlRequest get(final String url) {
        return new CurlRequest(Method.GET, url);
    }

    // POST request for a raw URL.
    public static CurlRequest post(final String url) {
        return new CurlRequest(Method.POST, url);
    }

    // PUT request for a raw URL.
    public static CurlRequest put(final String url) {
        return new CurlRequest(Method.PUT, url);
    }

    // DELETE request for a raw URL.
    public static CurlRequest delete(final String url) {
        return new CurlRequest(Method.DELETE, url);
    }

    // Parser function that reads a response body as a JSON map.
    public static Function> jsonParser() {
        return PARSER;
    }

    // Shared parser: streams the response body through the JSON XContent
    // parser; any failure is wrapped in a CurlException.
    protected static final Function> PARSER = response -> {
        try (InputStream is = response.getContentAsStream()) {
            return JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, is).map();
        } catch (final Exception e) {
            throw new CurlException("Failed to access the content.", e);
        }
    };

}
94 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/node/ClusterRunnerNode.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012-2020 CodeLibs Project and the Others.
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
13 | * either express or implied. See the License for the specific language
14 | * governing permissions and limitations under the License.
15 | */
16 | package org.elasticsearch.node;
17 |
18 | import java.util.function.Function;
19 |
20 | import org.elasticsearch.cluster.service.ClusterService;
21 | import org.elasticsearch.common.settings.Settings;
22 | import org.elasticsearch.env.Environment;
23 | import org.elasticsearch.plugins.PluginsService;
24 |
/**
 * Node subclass used by the cluster runner. It injects a custom
 * PluginsService factory into node construction and disables the
 * node/cluster-id state listener (not needed for embedded test clusters).
 */
public class ClusterRunnerNode extends Node {

    /**
     * @param initialEnvironment the prepared node environment
     * @param pluginServiceCtor factory creating the PluginsService from settings
     */
    public ClusterRunnerNode(final Environment initialEnvironment,
            final Function pluginServiceCtor) {
        super(NodeConstruction.prepareConstruction(initialEnvironment,
                new NodeServiceProvider() {
                    @Override
                    PluginsService newPluginService(Environment environment,
                            Settings settings) {
                        // Delegate plugin-service creation to the supplied factory.
                        return pluginServiceCtor.apply(settings);
                    }
                }, true));
    }

    @Override
    protected void configureNodeAndClusterIdStateListener(
            final ClusterService clusterService) {
        // nothing
    }
}
45 |
--------------------------------------------------------------------------------
/src/main/java/org/elasticsearch/plugins/ClusterRunnerPluginsService.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012-2022 CodeLibs Project and the Others.
3 | *
4 | * This program is free software: you can redistribute it and/or modify
5 | * it under the terms of the Server Side Public License, version 1,
6 | * as published by MongoDB, Inc.
7 | *
8 | * This program is distributed in the hope that it will be useful,
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | * Server Side Public License for more details.
12 | *
13 | * You should have received a copy of the Server Side Public License
14 | * along with this program. If not, see
15 | * .
16 | */
17 | package org.elasticsearch.plugins;
18 |
19 | import java.nio.file.Path;
20 | import java.util.ArrayList;
21 | import java.util.Collection;
22 | import java.util.Collections;
23 | import java.util.List;
24 | import java.util.Map;
25 |
26 | import org.apache.logging.log4j.LogManager;
27 | import org.apache.logging.log4j.Logger;
28 | import org.elasticsearch.Version;
29 | import org.elasticsearch.action.admin.cluster.node.info.PluginsAndModules;
30 | import org.elasticsearch.common.settings.Settings;
31 | import org.elasticsearch.env.Environment;
32 | import org.elasticsearch.jdk.ModuleQualifiedExportsService;
33 |
34 | public class ClusterRunnerPluginsService extends PluginsService {
35 |
36 | private static final Logger logger = LogManager
37 | .getLogger(ClusterRunnerPluginsService.class);
38 |
39 | private final List overridePlugins;
40 |
41 | private final PluginsAndModules overrideInfo;
42 |
43 | public ClusterRunnerPluginsService(Settings settings,
44 | Environment environment,
45 | Collection> classpathPlugins) {
46 | super(settings, environment.configFile(), environment.modulesFile(),
47 | environment.pluginsFile());
48 |
49 | final Path configPath = environment.configFile();
50 |
51 | List pluginsLoaded = new ArrayList<>();
52 |
53 | for (Class extends Plugin> pluginClass : classpathPlugins) {
54 | Plugin plugin = loadPlugin(pluginClass, settings, configPath);
55 | PluginDescriptor pluginInfo = new PluginDescriptor(
56 | pluginClass.getName(), "classpath plugin", "NA",
57 | Version.CURRENT.toString(),
58 | Integer.toString(Runtime.version().feature()),
59 | pluginClass.getName(), null, Collections.emptyList(), false,
60 | false, false, false);
61 | if (logger.isTraceEnabled()) {
62 | logger.trace("plugin loaded from classpath [{}]", pluginInfo);
63 | }
64 | pluginsLoaded.add(new LoadedPlugin(pluginInfo, plugin));
65 | }
66 |
67 | List pluginInfos = new ArrayList<>();
68 | pluginInfos.addAll(super.info().getPluginInfos());
69 | pluginsLoaded.stream().map(LoadedPlugin::descriptor)
70 | .map(PluginRuntimeInfo::new).forEach(pluginInfos::add);
71 |
72 | loadExtensions(pluginsLoaded);
73 |
74 | pluginsLoaded.addAll(super.plugins());
75 | this.overridePlugins = List.copyOf(pluginsLoaded);
76 | this.overrideInfo = new PluginsAndModules(pluginInfos,
77 | super.info().getModuleInfos());
78 |
79 | }
80 |
81 | @Override
82 | protected final List plugins() {
83 | return this.overridePlugins;
84 | }
85 |
86 | @Override
87 | public PluginsAndModules info() {
88 | return this.overrideInfo;
89 | }
90 |
91 | @Override
92 | protected void addServerExportsService(
93 | Map> qualifiedExports) {
94 | // no-op
95 | }
96 | }
97 |
--------------------------------------------------------------------------------
/src/main/resources/config/elasticsearch.yml:
--------------------------------------------------------------------------------
1 | # ======================== Elasticsearch Configuration =========================
2 | #
3 | # NOTE: Elasticsearch comes with reasonable defaults for most settings.
4 | # Before you set out to tweak and tune the configuration, make sure you
5 | # understand what are you trying to accomplish and the consequences.
6 | #
7 | # The primary way of configuring a node is via this file. This template lists
8 | # the most important settings you may want to configure for a production cluster.
9 | #
10 | # Please consult the documentation for further information on configuration options:
11 | # https://www.elastic.co/guide/en/elasticsearch/reference/index.html
12 | #
13 | # ---------------------------------- Cluster -----------------------------------
14 | #
15 | # Use a descriptive name for your cluster:
16 | #
17 | #cluster.name: my-application
18 | #
19 | # ------------------------------------ Node ------------------------------------
20 | #
21 | # Use a descriptive name for the node:
22 | #
23 | #node.name: node-1
24 | #
25 | # Add custom attributes to the node:
26 | #
27 | #node.attr.rack: r1
28 | #
29 | # ----------------------------------- Paths ------------------------------------
30 | #
31 | # Path to directory where to store the data (separate multiple locations by comma):
32 | #
33 | #path.data: /path/to/data
34 | #
35 | # Path to log files:
36 | #
37 | #path.logs: /path/to/logs
38 | #
39 | # ----------------------------------- Memory -----------------------------------
40 | #
41 | # Lock the memory on startup:
42 | #
43 | #bootstrap.memory_lock: true
44 | #
45 | # Make sure that the heap size is set to about half the memory available
46 | # on the system and that the owner of the process is allowed to use this
47 | # limit.
48 | #
49 | # Elasticsearch performs poorly when the system is swapping the memory.
50 | #
51 | # ---------------------------------- Network -----------------------------------
52 | #
53 | # By default Elasticsearch is only accessible on localhost. Set a different
54 | # address here to expose this node on the network:
55 | #
56 | #network.host: 192.168.0.1
57 | #
58 | # By default Elasticsearch listens for HTTP traffic on the first free port it
59 | # finds starting at 9200. Set a specific HTTP port here:
60 | #
61 | #http.port: 9200
62 | #
63 | # For more information, consult the network module documentation.
64 | #
65 | # --------------------------------- Discovery ----------------------------------
66 | #
67 | # Pass an initial list of hosts to perform discovery when this node is started:
68 | # The default list of hosts is ["127.0.0.1", "[::1]"]
69 | #
70 | #discovery.seed_hosts: ["host1", "host2"]
71 | #
72 | # Bootstrap the cluster using an initial set of master-eligible nodes:
73 | #
74 | #cluster.initial_master_nodes: ["node-1", "node-2"]
75 | #
76 | # For more information, consult the discovery and cluster formation module documentation.
77 | #
78 | # ---------------------------------- Various -----------------------------------
79 | #
80 | # Allow wildcard deletion of indices:
81 | #
82 | #action.destructive_requires_name: false
83 |
--------------------------------------------------------------------------------
/src/main/resources/config/log4j2.properties:
--------------------------------------------------------------------------------
1 | status = error
2 |
3 | appender.console.type = Console
4 | appender.console.name = console
5 | appender.console.layout.type = PatternLayout
6 | appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker %m%n
7 |
8 | ######## Server JSON ############################
9 | appender.rolling.type = RollingFile
10 | appender.rolling.name = rolling
11 | appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_server.json
12 | appender.rolling.layout.type = ECSJsonLayout
13 | appender.rolling.layout.dataset = elasticsearch.server
14 |
15 | appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.json.gz
16 | appender.rolling.policies.type = Policies
17 | appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
18 | appender.rolling.policies.time.interval = 1
19 | appender.rolling.policies.time.modulate = true
20 | appender.rolling.policies.size.type = SizeBasedTriggeringPolicy
21 | appender.rolling.policies.size.size = 128MB
22 | appender.rolling.strategy.type = DefaultRolloverStrategy
23 | appender.rolling.strategy.fileIndex = nomax
24 | appender.rolling.strategy.action.type = Delete
25 | appender.rolling.strategy.action.basepath = ${sys:es.logs.base_path}
26 | appender.rolling.strategy.action.condition.type = IfFileName
27 | appender.rolling.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
28 | appender.rolling.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
29 | appender.rolling.strategy.action.condition.nested_condition.exceeds = 2GB
30 | ################################################
31 | ######## Server - old style pattern ###########
32 | appender.rolling_old.type = RollingFile
33 | appender.rolling_old.name = rolling_old
34 | appender.rolling_old.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
35 | appender.rolling_old.layout.type = PatternLayout
36 | appender.rolling_old.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker %m%n
37 |
38 | appender.rolling_old.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}-%i.log.gz
39 | appender.rolling_old.policies.type = Policies
40 | appender.rolling_old.policies.time.type = TimeBasedTriggeringPolicy
41 | appender.rolling_old.policies.time.interval = 1
42 | appender.rolling_old.policies.time.modulate = true
43 | appender.rolling_old.policies.size.type = SizeBasedTriggeringPolicy
44 | appender.rolling_old.policies.size.size = 128MB
45 | appender.rolling_old.strategy.type = DefaultRolloverStrategy
46 | appender.rolling_old.strategy.fileIndex = nomax
47 | appender.rolling_old.strategy.action.type = Delete
48 | appender.rolling_old.strategy.action.basepath = ${sys:es.logs.base_path}
49 | appender.rolling_old.strategy.action.condition.type = IfFileName
50 | appender.rolling_old.strategy.action.condition.glob = ${sys:es.logs.cluster_name}-*
51 | appender.rolling_old.strategy.action.condition.nested_condition.type = IfAccumulatedFileSize
52 | appender.rolling_old.strategy.action.condition.nested_condition.exceeds = 2GB
53 | ################################################
54 |
55 | rootLogger.level = info
56 | rootLogger.appenderRef.console.ref = console
57 | rootLogger.appenderRef.rolling.ref = rolling
58 | rootLogger.appenderRef.rolling_old.ref = rolling_old
59 |
60 | ######## Deprecation JSON #######################
61 | appender.deprecation_rolling.type = RollingFile
62 | appender.deprecation_rolling.name = deprecation_rolling
63 | appender.deprecation_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation.json
64 | appender.deprecation_rolling.layout.type = ECSJsonLayout
65 | # Intentionally follows a different pattern to above
66 | appender.deprecation_rolling.layout.dataset = deprecation.elasticsearch
67 | appender.deprecation_rolling.filter.rate_limit.type = RateLimitingFilter
68 |
69 | appender.deprecation_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_deprecation-%i.json.gz
70 | appender.deprecation_rolling.policies.type = Policies
71 | appender.deprecation_rolling.policies.size.type = SizeBasedTriggeringPolicy
72 | appender.deprecation_rolling.policies.size.size = 1GB
73 | appender.deprecation_rolling.strategy.type = DefaultRolloverStrategy
74 | appender.deprecation_rolling.strategy.max = 4
75 |
76 | appender.header_warning.type = HeaderWarningAppender
77 | appender.header_warning.name = header_warning
78 | #################################################
79 |
80 | logger.deprecation.name = org.elasticsearch.deprecation
81 | logger.deprecation.level = WARN
82 | logger.deprecation.appenderRef.deprecation_rolling.ref = deprecation_rolling
83 | logger.deprecation.appenderRef.header_warning.ref = header_warning
84 | logger.deprecation.additivity = false
85 |
86 | ######## Search slowlog JSON ####################
87 | appender.index_search_slowlog_rolling.type = RollingFile
88 | appender.index_search_slowlog_rolling.name = index_search_slowlog_rolling
89 | appender.index_search_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
90 | .cluster_name}_index_search_slowlog.json
91 | appender.index_search_slowlog_rolling.layout.type = ECSJsonLayout
92 | appender.index_search_slowlog_rolling.layout.dataset = elasticsearch.index_search_slowlog
93 |
94 | appender.index_search_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs\
95 | .cluster_name}_index_search_slowlog-%i.json.gz
96 | appender.index_search_slowlog_rolling.policies.type = Policies
97 | appender.index_search_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
98 | appender.index_search_slowlog_rolling.policies.size.size = 1GB
99 | appender.index_search_slowlog_rolling.strategy.type = DefaultRolloverStrategy
100 | appender.index_search_slowlog_rolling.strategy.max = 4
101 | #################################################
102 |
103 | #################################################
104 | logger.index_search_slowlog_rolling.name = index.search.slowlog
105 | logger.index_search_slowlog_rolling.level = trace
106 | logger.index_search_slowlog_rolling.appenderRef.index_search_slowlog_rolling.ref = index_search_slowlog_rolling
107 | logger.index_search_slowlog_rolling.additivity = false
108 |
109 | ######## Indexing slowlog JSON ##################
110 | appender.index_indexing_slowlog_rolling.type = RollingFile
111 | appender.index_indexing_slowlog_rolling.name = index_indexing_slowlog_rolling
112 | appender.index_indexing_slowlog_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
113 | _index_indexing_slowlog.json
114 | appender.index_indexing_slowlog_rolling.layout.type = ECSJsonLayout
115 | appender.index_indexing_slowlog_rolling.layout.dataset = elasticsearch.index_indexing_slowlog
116 |
117 |
118 | appender.index_indexing_slowlog_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}\
119 | _index_indexing_slowlog-%i.json.gz
120 | appender.index_indexing_slowlog_rolling.policies.type = Policies
121 | appender.index_indexing_slowlog_rolling.policies.size.type = SizeBasedTriggeringPolicy
122 | appender.index_indexing_slowlog_rolling.policies.size.size = 1GB
123 | appender.index_indexing_slowlog_rolling.strategy.type = DefaultRolloverStrategy
124 | appender.index_indexing_slowlog_rolling.strategy.max = 4
125 | #################################################
126 |
127 |
128 | logger.index_indexing_slowlog.name = index.indexing.slowlog.index
129 | logger.index_indexing_slowlog.level = trace
130 | logger.index_indexing_slowlog.appenderRef.index_indexing_slowlog_rolling.ref = index_indexing_slowlog_rolling
131 | logger.index_indexing_slowlog.additivity = false
132 |
133 |
134 | appender.audit_rolling.type = RollingFile
135 | appender.audit_rolling.name = audit_rolling
136 | appender.audit_rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit.json
137 | appender.audit_rolling.layout.type = PatternLayout
138 | appender.audit_rolling.layout.pattern = {\
139 | "type":"audit", \
140 | "timestamp":"%d{yyyy-MM-dd'T'HH:mm:ss,SSSZ}"\
141 | %varsNotEmpty{, "node.name":"%enc{%map{node.name}}{JSON}"}\
142 | %varsNotEmpty{, "node.id":"%enc{%map{node.id}}{JSON}"}\
143 | %varsNotEmpty{, "host.name":"%enc{%map{host.name}}{JSON}"}\
144 | %varsNotEmpty{, "host.ip":"%enc{%map{host.ip}}{JSON}"}\
145 | %varsNotEmpty{, "event.type":"%enc{%map{event.type}}{JSON}"}\
146 | %varsNotEmpty{, "event.action":"%enc{%map{event.action}}{JSON}"}\
147 | %varsNotEmpty{, "authentication.type":"%enc{%map{authentication.type}}{JSON}"}\
148 | %varsNotEmpty{, "user.name":"%enc{%map{user.name}}{JSON}"}\
149 | %varsNotEmpty{, "user.run_by.name":"%enc{%map{user.run_by.name}}{JSON}"}\
150 | %varsNotEmpty{, "user.run_as.name":"%enc{%map{user.run_as.name}}{JSON}"}\
151 | %varsNotEmpty{, "user.realm":"%enc{%map{user.realm}}{JSON}"}\
152 | %varsNotEmpty{, "user.run_by.realm":"%enc{%map{user.run_by.realm}}{JSON}"}\
153 | %varsNotEmpty{, "user.run_as.realm":"%enc{%map{user.run_as.realm}}{JSON}"}\
154 | %varsNotEmpty{, "user.roles":%map{user.roles}}\
155 | %varsNotEmpty{, "apikey.id":"%enc{%map{apikey.id}}{JSON}"}\
156 | %varsNotEmpty{, "apikey.name":"%enc{%map{apikey.name}}{JSON}"}\
157 | %varsNotEmpty{, "authentication.token.name":"%enc{%map{authentication.token.name}}{JSON}"}\
158 | %varsNotEmpty{, "authentication.token.type":"%enc{%map{authentication.token.type}}{JSON}"}\
159 | %varsNotEmpty{, "origin.type":"%enc{%map{origin.type}}{JSON}"}\
160 | %varsNotEmpty{, "origin.address":"%enc{%map{origin.address}}{JSON}"}\
161 | %varsNotEmpty{, "realm":"%enc{%map{realm}}{JSON}"}\
162 | %varsNotEmpty{, "url.path":"%enc{%map{url.path}}{JSON}"}\
163 | %varsNotEmpty{, "url.query":"%enc{%map{url.query}}{JSON}"}\
164 | %varsNotEmpty{, "request.method":"%enc{%map{request.method}}{JSON}"}\
165 | %varsNotEmpty{, "request.body":"%enc{%map{request.body}}{JSON}"}\
166 | %varsNotEmpty{, "request.id":"%enc{%map{request.id}}{JSON}"}\
167 | %varsNotEmpty{, "action":"%enc{%map{action}}{JSON}"}\
168 | %varsNotEmpty{, "request.name":"%enc{%map{request.name}}{JSON}"}\
169 | %varsNotEmpty{, "indices":%map{indices}}\
170 | %varsNotEmpty{, "opaque_id":"%enc{%map{opaque_id}}{JSON}"}\
171 | %varsNotEmpty{, "x_forwarded_for":"%enc{%map{x_forwarded_for}}{JSON}"}\
172 | %varsNotEmpty{, "transport.profile":"%enc{%map{transport.profile}}{JSON}"}\
173 | %varsNotEmpty{, "rule":"%enc{%map{rule}}{JSON}"}\
174 | %varsNotEmpty{, "put":%map{put}}\
175 | %varsNotEmpty{, "delete":%map{delete}}\
176 | %varsNotEmpty{, "change":%map{change}}\
177 | %varsNotEmpty{, "create":%map{create}}\
178 | %varsNotEmpty{, "invalidate":%map{invalidate}}\
179 | }%n
180 | # "node.name" node name from the `elasticsearch.yml` settings
181 | # "node.id" node id which should not change between cluster restarts
182 | # "host.name" unresolved hostname of the local node
183 | # "host.ip" the local bound ip (i.e. the ip listening for connections)
184 | # "origin.type" a received REST request is translated into one or more transport requests. This indicates which processing layer generated the event "rest" or "transport" (internal)
185 | # "event.action" the name of the audited event, eg. "authentication_failed", "access_granted", "run_as_granted", etc.
186 | # "authentication.type" one of "realm", "api_key", "token", "anonymous" or "internal"
187 | # "user.name" the subject name as authenticated by a realm
188 | # "user.run_by.name" the original authenticated subject name that is impersonating another one.
189 | # "user.run_as.name" if this "event.action" is of a run_as type, this is the subject name to be impersonated as.
190 | # "user.realm" the name of the realm that authenticated "user.name"
191 | # "user.run_by.realm" the realm name of the impersonating subject ("user.run_by.name")
192 | # "user.run_as.realm" if this "event.action" is of a run_as type, this is the realm name the impersonated user is looked up from
193 | # "user.roles" the roles array of the user; these are the roles that are granting privileges
194 | # "apikey.id" this field is present if and only if the "authentication.type" is "api_key"
195 | # "apikey.name" this field is present if and only if the "authentication.type" is "api_key"
196 | # "authentication.token.name" this field is present if and only if the authenticating credential is a service account token
197 | # "authentication.token.type" this field is present if and only if the authenticating credential is a service account token
198 | # "event.type" informs about what internal system generated the event; possible values are "rest", "transport", "ip_filter" and "security_config_change"
199 | # "origin.address" the remote address and port of the first network hop, i.e. a REST proxy or another cluster node
200 | # "realm" name of a realm that has generated an "authentication_failed" or an "authentication_successful"; the subject is not yet authenticated
201 | # "url.path" the URI component between the port and the query string; it is percent (URL) encoded
202 | # "url.query" the URI component after the path and before the fragment; it is percent (URL) encoded
203 | # "request.method" the method of the HTTP request, i.e. one of GET, POST, PUT, DELETE, OPTIONS, HEAD, PATCH, TRACE, CONNECT
204 | # "request.body" the content of the request body entity, JSON escaped
205 | # "request.id" a synthetic identifier for the incoming request, this is unique per incoming request, and consistent across all audit events generated by that request
206 | # "action" an action is the most granular operation that is authorized and this identifies it in a namespaced way (internal)
207 | # "request.name" if the event is in connection to a transport message this is the name of the request class, similar to how rest requests are identified by the url path (internal)
208 | # "indices" the array of indices that the "action" is acting upon
209 | # "opaque_id" opaque value conveyed by the "X-Opaque-Id" request header
210 | # "x_forwarded_for" the addresses from the "X-Forwarded-For" request header, as a verbatim string value (not an array)
211 | # "transport.profile" name of the transport profile in case this is a "connection_granted" or "connection_denied" event
212 | # "rule" name of the applied rule if the "origin.type" is "ip_filter"
213 | # the "put", "delete", "change", "create", "invalidate" fields are only present
214 | # when the "event.type" is "security_config_change" and contain the security config change (as an object) taking effect
215 |
216 | appender.audit_rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}_audit-%d{yyyy-MM-dd}-%i.json.gz
217 | appender.audit_rolling.policies.type = Policies
218 | appender.audit_rolling.policies.time.type = TimeBasedTriggeringPolicy
219 | appender.audit_rolling.policies.time.interval = 1
220 | appender.audit_rolling.policies.time.modulate = true
221 | appender.audit_rolling.policies.size.type = SizeBasedTriggeringPolicy
222 | appender.audit_rolling.policies.size.size = 1GB
223 | appender.audit_rolling.strategy.type = DefaultRolloverStrategy
224 | appender.audit_rolling.strategy.fileIndex = nomax
225 |
226 | logger.xpack_security_audit_logfile.name = org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail
227 | logger.xpack_security_audit_logfile.level = info
228 | logger.xpack_security_audit_logfile.appenderRef.audit_rolling.ref = audit_rolling
229 | logger.xpack_security_audit_logfile.additivity = false
230 |
231 | logger.xmlsig.name = org.apache.xml.security.signature.XMLSignature
232 | logger.xmlsig.level = error
233 | logger.samlxml_decrypt.name = org.opensaml.xmlsec.encryption.support.Decrypter
234 | logger.samlxml_decrypt.level = fatal
235 | logger.saml2_decrypt.name = org.opensaml.saml.saml2.encryption.Decrypter
236 | logger.saml2_decrypt.level = fatal
237 |
--------------------------------------------------------------------------------
/src/test/java/org/codelibs/elasticsearch/runner/ElasticsearchClusterRunnerTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright 2012-2022 CodeLibs Project and the Others.
3 | *
4 | * This program is free software: you can redistribute it and/or modify
5 | * it under the terms of the Server Side Public License, version 1,
6 | * as published by MongoDB, Inc.
7 | *
8 | * This program is distributed in the hope that it will be useful,
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | * Server Side Public License for more details.
12 | *
13 | * You should have received a copy of the Server Side Public License
14 | * along with this program. If not, see
15 | * .
16 | */
17 | package org.codelibs.elasticsearch.runner;
18 |
19 | import static org.codelibs.elasticsearch.runner.ElasticsearchClusterRunner.newConfigs;
20 |
21 | import java.io.BufferedWriter;
22 | import java.io.IOException;
23 | import java.io.OutputStreamWriter;
24 | import java.nio.file.FileSystems;
25 | import java.nio.file.Files;
26 | import java.util.Map;
27 |
28 | import org.codelibs.curl.CurlException;
29 | import org.codelibs.curl.CurlResponse;
30 | import org.codelibs.elasticsearch.runner.net.EcrCurl;
31 | import org.elasticsearch.action.DocWriteResponse;
32 | import org.elasticsearch.action.DocWriteResponse.Result;
33 | import org.elasticsearch.action.admin.indices.alias.get.GetAliasesResponse;
34 | import org.elasticsearch.action.search.SearchResponse;
35 | import org.elasticsearch.common.settings.Settings;
36 | import org.elasticsearch.common.settings.Settings.Builder;
37 | import org.elasticsearch.index.query.QueryBuilders;
38 | import org.elasticsearch.node.Node;
39 | import org.elasticsearch.search.sort.SortBuilders;
40 | import org.elasticsearch.xcontent.XContentBuilder;
41 | import org.elasticsearch.xcontent.XContentFactory;
42 |
43 | import junit.framework.TestCase;
44 |
45 | public class ElasticsearchClusterRunnerTest extends TestCase {
46 |
47 | private ElasticsearchClusterRunner runner;
48 |
49 | private String clusterName;
50 |
51 | private static final int NUM_OF_NODES = 3;
52 |
53 | @Override
54 | protected void setUp() throws Exception {
55 | clusterName = "es-cl-run-" + System.currentTimeMillis();
56 | // create runner instance
57 | runner = new ElasticsearchClusterRunner();
58 | // create ES nodes
59 | runner.onBuild(new ElasticsearchClusterRunner.Builder() {
60 | @Override
61 | public void build(final int number, final Builder settingsBuilder) {
62 | settingsBuilder.put("http.cors.enabled", true);
63 | settingsBuilder.put("http.cors.allow-origin", "*");
64 | settingsBuilder.putList("discovery.seed_hosts", "127.0.0.1:9301", "127.0.0.1:9302");
65 | settingsBuilder.putList("cluster.initial_master_nodes", "127.0.0.1:9301");
66 | }
67 | }).build(newConfigs().clusterName(clusterName).numOfNode(NUM_OF_NODES));
68 |
69 | // wait for yellow status
70 | runner.ensureYellow();
71 | }
72 |
73 | @Override
74 | protected void tearDown() throws Exception {
75 | // close runner
76 | runner.close();
77 | // delete all files
78 | runner.clean();
79 | assertFalse("Check if " + runner.basePath + " is deleted", Files
80 | .exists(FileSystems.getDefault().getPath(runner.basePath)));
81 | }
82 |
83 | public void test_runCluster() throws Exception {
84 |
85 | // check if runner has nodes
86 | assertEquals(NUM_OF_NODES, runner.getNodeSize());
87 | assertNotNull(runner.getNode(0));
88 | assertNotNull(runner.getNode(1));
89 | assertNotNull(runner.getNode(2));
90 | assertNotNull(runner.getNode("Node 1"));
91 | assertNotNull(runner.getNode("Node 2"));
92 | assertNotNull(runner.getNode("Node 3"));
93 | assertNull(runner.getNode(NUM_OF_NODES));
94 | assertNotNull(runner.node());
95 |
96 | assertNotNull(runner.client());
97 |
98 | // check if a master node exists
99 | assertNotNull(runner.masterNode());
100 | assertNotNull(runner.nonMasterNode());
101 | assertFalse(runner.masterNode() == runner.nonMasterNode());
102 |
103 | // check if a cluster service exists
104 | assertNotNull(runner.clusterService());
105 |
106 | final String index = "test_index";
107 |
108 | // create an index
109 | runner.createIndex(index, (Settings) null);
110 | runner.ensureYellow(index);
111 |
112 | // create a mapping
113 | final XContentBuilder mappingBuilder = XContentFactory.jsonBuilder()//
114 | .startObject()//
115 | .startObject("properties")//
116 |
117 | // id
118 | .startObject("id")//
119 | .field("type", "keyword")//
120 | .endObject()//
121 |
122 | // msg
123 | .startObject("msg")//
124 | .field("type", "text")//
125 | .endObject()//
126 |
127 | // order
128 | .startObject("order")//
129 | .field("type", "long")//
130 | .endObject()//
131 |
132 | // @timestamp
133 | .startObject("@timestamp")//
134 | .field("type", "date")//
135 | .endObject()//
136 |
137 | .endObject()//
138 | .endObject();
139 | runner.createMapping(index, mappingBuilder);
140 |
141 | if (!runner.indexExists(index)) {
142 | fail();
143 | }
144 |
145 | // create 1000 documents
146 | for (int i = 1; i <= 1000; i++) {
147 | final DocWriteResponse indexResponse1 = runner.insert(index, String.valueOf(i),
148 | "{\"id\":\"" + i + "\",\"msg\":\"test " + i + "\",\"order\":" + i + ",\"@timestamp\":\"2000-01-01T00:00:00\"}");
149 | assertEquals(Result.CREATED, indexResponse1.getResult());
150 | }
151 | runner.refresh();
152 |
153 | // update alias
154 | final String alias = index + "_alias";
155 | {
156 | final GetAliasesResponse aliasesResponse = runner.getAlias(alias);
157 | assertNull(aliasesResponse.getAliases().get(alias));
158 | }
159 |
160 | {
161 | runner.updateAlias(alias, new String[] { index }, null);
162 | runner.flush();
163 | final GetAliasesResponse aliasesResponse = runner.getAlias(alias);
164 | assertEquals(1, aliasesResponse.getAliases().size());
165 | assertEquals(1, aliasesResponse.getAliases().get(index).size());
166 | assertEquals(alias, aliasesResponse.getAliases().get(index).get(0).alias());
167 | }
168 |
169 | {
170 | runner.updateAlias(alias, null, new String[] { index });
171 | final GetAliasesResponse aliasesResponse = runner.getAlias(alias);
172 | assertNull(aliasesResponse.getAliases().get(alias));
173 | }
174 |
175 | // search 1000 documents
176 | {
177 | final SearchResponse searchResponse = runner.search(index, null, null, 0, 10);
178 | assertEquals(1000, searchResponse.getHits().getTotalHits().value);
179 | assertEquals(10, searchResponse.getHits().getHits().length);
180 | }
181 |
182 | {
183 | final SearchResponse searchResponse = runner.search(index, QueryBuilders.matchAllQuery(), SortBuilders.fieldSort("id"), 0, 10);
184 | assertEquals(1000, searchResponse.getHits().getTotalHits().value);
185 | assertEquals(10, searchResponse.getHits().getHits().length);
186 | }
187 |
188 | {
189 | final SearchResponse searchResponse = runner.count(index);
190 | assertEquals(1000, searchResponse.getHits().getTotalHits().value);
191 | }
192 |
193 | // delete 1 document
194 | runner.delete(index, String.valueOf(1));
195 | runner.flush();
196 |
197 | {
198 | final SearchResponse searchResponse = runner.search(index, null, null, 0, 10);
199 | assertEquals(999, searchResponse.getHits().getTotalHits().value);
200 | assertEquals(10, searchResponse.getHits().getHits().length);
201 | }
202 |
203 | // optimize
204 | runner.forceMerge();
205 |
206 | final Node node = runner.node();
207 |
208 | // http access
209 | // root
210 | try (CurlResponse curlResponse =
211 | EcrCurl.get(node, "/").execute()) {
212 | final String content = curlResponse.getContentAsString();
213 | assertNotNull(content);
214 | assertTrue(content.contains("cluster_name"));
215 | final Map map = curlResponse.getContent(EcrCurl.jsonParser());
216 | assertNotNull(map);
217 | assertEquals(clusterName, map.get("cluster_name").toString());
218 | }
219 |
220 | // get
221 | try (CurlResponse curlResponse =
222 | EcrCurl.get(node, "/_search").header("Content-Type", "application/json").param("q", "*:*").execute()) {
223 | final String content = curlResponse.getContentAsString();
224 | assertNotNull(content);
225 | assertTrue(content.contains("total"));
226 | final Map map = curlResponse.getContent(EcrCurl.jsonParser());
227 | assertNotNull(map);
228 | assertEquals("false", map.get("timed_out").toString());
229 | }
230 |
231 | // post
232 | try (CurlResponse curlResponse = EcrCurl.post(node, "/" + index + "/_doc/").header("Content-Type", "application/json")
233 | .body("{\"id\":\"2000\",\"msg\":\"test 2000\"}").execute()) {
234 | final Map map = curlResponse.getContent(EcrCurl.jsonParser());
235 | assertNotNull(map);
236 | assertEquals("created", map.get("result"));
237 | }
238 |
239 | // put
240 | try (CurlResponse curlResponse = EcrCurl.put(node, "/" + index + "/_doc/2001").header("Content-Type", "application/json")
241 | .body("{\"id\":\"2001\",\"msg\":\"test 2001\"}").execute()) {
242 | final Map map = curlResponse.getContent(EcrCurl.jsonParser());
243 | assertNotNull(map);
244 | assertEquals("created", map.get("result"));
245 | }
246 |
247 | // delete
248 | try (CurlResponse curlResponse =
249 | EcrCurl.delete(node, "/" + index + "/_doc/2001").header("Content-Type", "application/json").execute()) {
250 | final Map map = curlResponse.getContent(EcrCurl.jsonParser());
251 | assertNotNull(map);
252 | assertEquals("deleted", map.get("result"));
253 | }
254 |
255 | // post
256 | try (CurlResponse curlResponse = EcrCurl.post(node, "/" + index + "/_doc/").header("Content-Type", "application/json")
257 | .onConnect((curlRequest, connection) -> {
258 | connection.setDoOutput(true);
259 | try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(connection.getOutputStream(), "UTF-8"))) {
260 | writer.write("{\"id\":\"2002\",\"msg\":\"test 2002\"}");
261 | writer.flush();
262 | } catch (IOException e) {
263 | throw new CurlException("Failed to write data.", e);
264 | }
265 | }).execute()) {
266 | final Map map = curlResponse.getContent(EcrCurl.jsonParser());
267 | assertNotNull(map);
268 | assertEquals("created", map.get("result"));
269 | }
270 |
271 | // close 1 node
272 | final Node node1 = runner.node();
273 | node1.close();
274 | final Node node2 = runner.node();
275 | assertTrue(node1 != node2);
276 | assertTrue(runner.getNode(0).isClosed());
277 | assertFalse(runner.getNode(1).isClosed());
278 | assertFalse(runner.getNode(2).isClosed());
279 |
280 | // restart a node
281 | assertTrue(runner.startNode(0));
282 | assertFalse(runner.startNode(1));
283 | assertFalse(runner.startNode(2));
284 |
285 | runner.ensureGreen();
286 | }
287 | }
288 |
--------------------------------------------------------------------------------