├── .circleci
│   └── config.yml
├── .github
│   └── ISSUE_TEMPLATE
│       └── bug_report.md
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── build.gradle
├── gradle
│   └── wrapper
│       ├── gradle-wrapper.jar
│       └── gradle-wrapper.properties
├── gradlew
├── gradlew.bat
├── settings.gradle
└── src
    ├── main
    │   ├── java
    │   │   └── co
    │   │       └── navdeep
    │   │           └── kafkaer
    │   │               ├── App.java
    │   │               ├── Args.java
    │   │               ├── Configurator.java
    │   │               ├── model
    │   │               │   ├── Acl.java
    │   │               │   ├── Broker.java
    │   │               │   ├── Config.java
    │   │               │   └── Topic.java
    │   │               └── utils
    │   │                   └── Utils.java
    │   └── resources
    │       ├── kafka-config.json
    │       └── local.properties
    └── test
        ├── java
        │   ├── ConfigJsonGenerate.java
        │   ├── ConfiguratorTest.java
        │   └── UtilsTest.java
        └── resources
            ├── kafka-config-with-description.json
            ├── kafka-config.json
            └── test.properties
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Java Gradle CircleCI 2.0 configuration file
2 | #
3 | # Check https://circleci.com/docs/2.0/language-java/ for more details
4 | #
5 | version: 2
6 | jobs:
7 | build:
8 | docker:
9 | - image: cimg/openjdk:8.0
10 |
11 | # Specify service dependencies here if necessary
12 | # CircleCI maintains a library of pre-built images
13 | # documented at https://circleci.com/docs/2.0/circleci-images/
14 | # - image: circleci/postgres:9.4
15 | - image: wurstmeister/zookeeper
16 | - image: wurstmeister/kafka:2.12-2.2.0
17 | environment:
18 | KAFKA_ADVERTISED_HOST_NAME: localhost
19 | KAFKA_ADVERTISED_PORT: 9092
20 | KAFKA_PORT: 9092
21 | KAFKA_ZOOKEEPER_CONNECT: localhost:2181
22 | KAFKA_DELETE_TOPIC_ENABLE: true
23 | KAFKA_SUPER_USERS: User:ANONYMOUS
24 | KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
25 | - image: wurstmeister/kafka:2.12-2.2.0
26 | environment:
27 | KAFKA_ADVERTISED_HOST_NAME: localhost
28 | KAFKA_ADVERTISED_PORT: 9093
29 | KAFKA_PORT: 9093
30 | KAFKA_ZOOKEEPER_CONNECT: localhost:2181
31 | KAFKA_DELETE_TOPIC_ENABLE: true
32 | KAFKA_SUPER_USERS: User:ANONYMOUS
33 | KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.auth.SimpleAclAuthorizer
34 |
35 | working_directory: ~/repo
36 |
37 | environment:
38 | # Customize the JVM maximum heap limit
39 | JVM_OPTS: -Xmx3200m
40 | TERM: dumb
41 |
42 | steps:
43 | - checkout
44 |
45 | # Download and cache dependencies
46 | - restore_cache:
47 | keys:
48 | - v1-dependencies-{{ checksum "build.gradle" }}
49 | # fallback to using the latest cache if no exact match is found
50 | - v1-dependencies-
51 |
52 | - run: ./gradlew dependencies
53 |
54 | - save_cache:
55 | paths:
56 | - ~/.gradle
57 | key: v1-dependencies-{{ checksum "build.gradle" }}
58 |
59 | # run tests!
60 | - run: ./gradlew test
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 |
16 |
17 | **Expected behavior**
18 | A clear and concise description of what you expected to happen.
19 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | .idea
2 | .gradle
3 | /build/
4 | /out/
5 | gradle.properties
6 | *.gpg
7 | # Ignore Gradle GUI config
8 | gradle-app.setting
9 |
10 | # Avoid ignoring Gradle wrapper jar file (.jar files are usually ignored)
11 | !gradle-wrapper.jar
12 |
13 | # Cache of project
14 | .gradletasknamecache
15 |
16 | # # Work around https://youtrack.jetbrains.com/issue/IDEA-116898
17 | # gradle/wrapper/gradle-wrapper.properties
18 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | Pull requests are welcome.
2 |
3 | 1. Create an issue detailing the changes and assign it to yourself
4 | 2. Create a pull request mentioning the issue #.
5 | 3. Make sure to add test cases
6 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # kafkaer
2 | 
3 | 
4 |
5 | ## Table of Contents
6 | - [Overview](#overview)
7 | - [Integrate with your project](#two-ways-to-use)
8 | - [Define configurations](#kafka-configjson)
9 | - [Topic configurations](#topics)
10 | - [Broker configurations](#brokers)
11 | - [ACL configurations](#acls)
12 | - [Properties file](#properties-file)
13 | - [Kafka connection configurations](#admin-client-configs)
14 | - [Delete created topics (--wipe)](#delete-created-topics)
15 | - [Delete schemas from schema registry (--wipe-schemas)](#delete-schemas)
16 | - [Debug (--debug)](#debug)
17 | - [Preserve Partition Count (--preserve-partition-count)](#preserve-partition-count)
18 | - [Contributions](#contributions)
19 |
20 |
21 | # Overview
22 | Kafkaer is a deployment and configuration tool for Apache Kafka. It allows you to automate the creation and update of topics and broker configurations across multiple environments. Create one template configuration file and control each environment with its own properties file.
23 |
24 |
25 |
26 | Current features:
27 | * Create topics
28 | * Update configurations and partitions for existing topics
29 | * Update configs for a specific broker
30 | * Update configs for entire kafka cluster
31 | * Create/update Access control lists (ACLs)
32 | * Delete all topics created by tool
33 | * Delete all schemas from schema registry when deleting topics
34 |
35 |
36 | # Two ways to use:
37 | ## Executable jar
38 | Get the jar from [releases](https://github.com/navdeepsekhon/kafkaer/releases)
39 | ```
40 | java -jar kafkaer.jar --properties propertiesLocation --config configLocation
41 | ```
42 |
43 | ## Include the jar as a dependency from Maven Central
44 | Gradle:
45 | ```groovy
46 | compile "co.navdeep:kafkaer:1.1"
47 | ```
48 | Maven:
49 | ```xml
50 | <dependency>
51 |     <groupId>co.navdeep</groupId>
52 |     <artifactId>kafkaer</artifactId>
53 |     <version>1.1</version>
54 | </dependency>
55 | ```
56 |
57 | And use it:
58 | ```java
59 | Configurator configurator = new Configurator("src/main/resources/your.properties", "src/main/resources/kafka-config.json");
60 | configurator.applyConfig();
61 | ```
62 | ###### The jar published to maven is also an executable.
63 |
64 | # kafka-config.json
65 | ## Example:
66 | ```json
67 | {
68 | "topics": [
69 | {
70 | "name": "withSuffix-${topic.suffix}",
71 | "partitions": 3,
72 | "replicationFactor": 3,
73 | "description": "This description is just for documentation. It does not affect the kafka cluster.",
74 | "configs": {
75 | "compression.type": "gzip",
76 | "cleanup.policy": "delete",
77 | "delete.retention.ms": "86400000"
78 | }
79 | },
80 | {
81 | "name": "test",
82 | "partitions": 1,
83 | "replicationFactor": 1,
84 | "configs": {
85 | "compression.type": "gzip",
86 | "cleanup.policy": "compact"
87 | }
88 | }
89 | ],
90 | "brokers": [
91 | {
92 | "id": "1",
93 | "config": {
94 | "sasl.login.refresh.window.jitter": "0.05"
95 | }
96 | }
97 | ],
98 | "aclStrings": [
99 | "User:joe,Topic,LITERAL,test,Read,Allow,*",
100 | "User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"
101 | ]
102 | }
103 |
104 | ```
105 |
106 | ## Topics:
107 | A list of topics. Required for each topic:
108 | ```
109 | name,
110 | partitions,
111 | replicationFactor
112 | ```
113 |
114 | Rest of all the configs go inside the `configs` map. You can specify any/all of the [topic configurations listed in the kafka documentation](https://kafka.apache.org/documentation/#topicconfigs)
115 |
116 | `description` is optional. It is just for documentation purpose and is ignored by kafkaer.
117 |
118 | ## What if the topic already exists:
119 | ### Partitions:
120 | If the partition count in the config is greater than the existing count, the topic's partitions will be increased to match.
121 |
122 | If the partition count in the config is less than the existing count, an exception will be thrown.
123 |
124 | If the counts are the same, nothing happens.
125 |
126 | If flag `--preserve-partition-count` is used, partitions will not be updated.
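
Conceptually, the update rule is the following (a simplified sketch of `Configurator.updatePartitions` from the source; `current` is the existing `TopicDescription`, `topic` is the topic definition from the config):

```java
if (current.partitions().size() < topic.getPartitions()) {
    // grow the topic to the partition count from the config
    adminClient.createPartitions(Collections.singletonMap(
            topic.getName(), NewPartitions.increaseTo(topic.getPartitions()))).all().get();
} else if (current.partitions().size() > topic.getPartitions()) {
    // partitions can never be removed from an existing topic
    throw new RuntimeException("Can not reduce number of partitions for topic " + topic.getName());
}
// equal counts: nothing to do
```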
127 |
128 | ### All other configs:
129 | All other configs will be updated to the new values from config.
130 |
131 | ## Brokers
132 | A list of broker configs.
133 |
134 | NOTE: If a broker id is provided, the update is applied only to that broker. If no broker id is provided, the update is sent to every broker in the cluster. [See kafka documentation for all broker configs](https://kafka.apache.org/documentation/#brokerconfigs)
135 |
136 | Cluster-wide configs must be specified without an id.
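
For example, a cluster-wide update would look like this (same config key as the broker example above, just without an `id`):

```json
"brokers": [
  {
    "config": {
      "sasl.login.refresh.window.jitter": "0.05"
    }
  }
]
```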
137 |
138 | ## ACLs
139 | You can provide the ACLs to create in one of two formats:
140 |
141 | Structured list:
142 | ```json
143 | "acls" : [
144 | {
145 | "principal": "User:joe",
146 | "resourceType": "Topic",
147 | "patternType": "LITERAL",
148 | "resourceName": "test",
149 | "operation": "Read",
150 | "permissionType": "Allow",
151 | "host": "*"
152 | },
153 | {
154 | "principal": "User:jon",
155 | "resourceType": "Cluster",
156 | "patternType": "LITERAL",
157 | "resourceName": "kafka-cluster",
158 | "operation": "Create",
159 | "permissionType": "Allow",
160 | "host": "*"
161 | }
162 | ]
163 | ```
164 |
165 | As a list of strings, in the format `principal,resourceType,patternType,resourceName,operation,permissionType,host`:
166 | ```json
169 | "aclStrings": [
170 | "User:joe,Topic,LITERAL,test,Read,Allow,*",
171 | "User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"
172 | ]
173 | ```
174 |
175 | All the values are case insensitive.
176 |
177 | ## Variables in kafka-config.json
178 | To allow deployments across different environments, kafka-config.json lets you specify variables for values that will be replaced with values from the properties file. In the example above, the topic name `withSuffix-${topic.suffix}` will be replaced with `withSuffix-iamasuffix`, using the value of `topic.suffix` from the properties file.
179 |
180 | Why is it useful?
181 |
182 | Use case 1: You want to set up multiple instances of your application on the same Kafka cluster. You can suffix all your topic names with `${topic.suffix}` and use a different value for each instance: `john`, `jane`, etc.
183 |
184 | Use case 2: You might need 50 partitions for your topics in production but only 3 for dev. You create two properties files with different values and use the same `kafka-config.json`.
185 |
186 | # Properties file
187 | Standard java properties file.
188 | ```properties
189 | #admin client configs
190 | kafkaer.bootstrap.servers=localhost:29092
191 | kafkaer.client.id=kafkaer
192 |
193 | #variables
194 | topic.suffix=iamasuffix
195 | ```
196 |
197 | # Admin Client configs
198 | Kafkaer uses the `AdminClient` API to connect to Kafka.
199 | All the admin client configs can be provided in the same properties file. The property name must have the prefix `kafkaer.` followed by one of the `AdminClientConfig` names. For example, to specify `bootstrap.servers`, add a property called `kafkaer.bootstrap.servers`. All the admin client configs are supported. [See the list of configs here](https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java)
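
For example, a hypothetical SSL setup (the property names come from `AdminClientConfig`; the values are illustrative):

```properties
kafkaer.security.protocol=SSL
kafkaer.ssl.truststore.location=/path/to/truststore.jks
kafkaer.ssl.truststore.password=changeit
```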
200 |
201 | # Delete created topics
202 |
203 | Provide the `--wipe` flag to delete all the topics listed in the kafka-config.json.
204 |
205 | # Delete Schemas
206 |
207 | If you're using the [confluent schema registry](https://docs.confluent.io/current/schema-registry/develop/api.html) or another compatible schema registry to store topic schemas, kafkaer can delete the associated schemas when deleting the topics.
208 |
209 | Use flag `--wipe-schemas` with `--wipe` to delete schemas.
210 |
211 | Provide the schema registry url with the property `kafkaer.schema.registry.url`. Other schema registry properties can be provided with the `kafkaer.schema.registry.` prefix:
212 | ```
213 | kafkaer.schema.registry.security.protocol=SSL
214 | kafkaer.schema.registry.ssl.truststore.location=...
215 | ...
216 | ```
217 |
218 | # Debug
219 |
220 | Use the `--debug` flag for detailed logging.
221 |
222 | # Preserve Partition Count
223 |
224 | If a topic already exists and its partition count differs from what is defined in the config, kafkaer will try to update the partitions as described above. To ignore the partition count and keep the existing partitions, use the `--preserve-partition-count` flag. When it is used, a difference in partition count will only be logged.
225 |
226 | # Contributions
227 | Pull requests are welcome. Please create an issue with change details and link it to your pull request.
228 |
229 | Note: This project uses [lombok](https://projectlombok.org/). Please install the plugin for your IDE to avoid compilation errors.
230 |
--------------------------------------------------------------------------------
/build.gradle:
--------------------------------------------------------------------------------
1 | plugins {
2 | id 'java'
3 | id 'maven-publish'
4 | id 'signing'
5 | }
6 |
7 | group 'co.navdeep'
8 | version '2.0.0'
9 | archivesBaseName = "kafkaer"
10 |
11 | sourceCompatibility = 1.8
12 |
13 | repositories {
14 | mavenCentral()
15 | maven{ url "https://packages.confluent.io/maven/"}
16 | }
17 |
18 | dependencies {
19 | implementation "org.apache.kafka:kafka-clients:3.6.1"
20 | implementation "com.fasterxml.jackson.core:jackson-databind:2.14.0-rc1"
21 | implementation "org.apache.commons:commons-text:1.10.0"
22 | implementation "commons-io:commons-io:2.11.0"
23 | implementation "org.apache.commons:commons-configuration2:2.8.0"
24 | implementation "commons-beanutils:commons-beanutils:1.9.4"
25 | implementation "args4j:args4j:2.33"
26 | implementation "org.slf4j:slf4j-simple:1.7.30"
27 | implementation "io.confluent:kafka-schema-registry-client:7.5.3"
28 |
29 | compileOnly "org.projectlombok:lombok:1.18.20"
30 | annotationProcessor "org.projectlombok:lombok:1.18.20"
31 |
32 | testImplementation "junit:junit:4.12"
33 | testImplementation "org.mockito:mockito-core:3.4.0"
34 | }
35 |
36 | task execJar(type: Jar) {
37 | manifest {
38 | attributes 'Implementation-Title': 'kafkaer',
39 | 'Implementation-Version': version,
40 | 'Main-Class': 'co.navdeep.kafkaer.App'
41 | }
42 | from { configurations.runtimeClasspath.collect { it.isDirectory() ? it : zipTree(it) } }
43 | with jar
44 | }
45 |
46 | task javadocJar(type: Jar) {
47 | classifier = 'javadoc'
48 | from javadoc
49 | }
50 |
51 | task sourcesJar(type: Jar) {
52 | classifier = 'sources'
53 | from sourceSets.main.allSource
54 | }
55 |
56 | artifacts {
57 | archives javadocJar, sourcesJar
58 | }
59 |
60 | jar {
61 | duplicatesStrategy = DuplicatesStrategy.EXCLUDE
62 | manifest {
63 | attributes 'Implementation-Title': 'kafkaer',
64 | 'Implementation-Version': version,
65 | 'Main-Class': 'co.navdeep.kafkaer.App'
66 | }
67 | from { configurations.runtimeClasspath.collect { it.isDirectory() ? it : zipTree(it) } }
68 | }
69 |
70 | signing {
71 | sign configurations.archives
72 | }
73 |
74 | def usr = ''
75 | def pwd = ''
76 | if(project.hasProperty("nexusUsername")){
77 | usr = nexusUsername
78 | pwd = nexusPassword
79 | }
80 |
81 | publishing {
82 | publications {
83 | mavenJava(MavenPublication) {
84 | from components.java
85 |
86 | pom {
87 | name = 'kafkaer'
88 | packaging = 'jar'
89 | description = 'Deployment automation utility for Apache Kafka. Automate kafka cluster configurations for topics, brokers, ACLs. This jar can be used as an executable as well as a maven dependency'
90 | url = 'https://github.com/navdeepsekhon/kafkaer'
91 |
92 | scm {
93 | connection = 'git@github.com:navdeepsekhon/kafkaer.git'
94 | developerConnection = 'git@github.com:navdeepsekhon/kafkaer.git'
95 | url = 'https://github.com/navdeepsekhon/kafkaer'
96 | }
97 |
98 | licenses {
99 | license {
100 | name = 'The Apache License, Version 2.0'
101 | url = 'http://www.apache.org/licenses/LICENSE-2.0.txt'
102 | }
103 | }
104 |
105 | developers {
106 | developer {
107 | id = 'navdeep'
108 | name = 'Navdeep Sekhon'
109 | email = 'hi@navdeep.co'
110 | }
111 | }
112 | }
113 | }
114 | }
115 |
116 |
117 | repositories {
118 | maven {
119 | // change URLs to point to your repos, e.g. http://my.org/repo
120 | url = "https://oss.sonatype.org/service/local/staging/deploy/maven2/"
121 | credentials{
122 | username = usr
123 | password= pwd
124 | }
125 | }
126 | }
127 | }
128 |
129 | signing {
130 | sign publishing.publications.mavenJava
131 | }
132 |
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/navdeepsekhon/kafkaer/0d8f8e4b8c633088082c61dfd20b716106e30962/gradle/wrapper/gradle-wrapper.jar
--------------------------------------------------------------------------------
/gradle/wrapper/gradle-wrapper.properties:
--------------------------------------------------------------------------------
1 | distributionBase=GRADLE_USER_HOME
2 | distributionPath=wrapper/dists
3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.5-bin.zip
4 | zipStoreBase=GRADLE_USER_HOME
5 | zipStorePath=wrapper/dists
6 |
--------------------------------------------------------------------------------
/gradlew:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | #
4 | # Copyright © 2015-2021 the original authors.
5 | #
6 | # Licensed under the Apache License, Version 2.0 (the "License");
7 | # you may not use this file except in compliance with the License.
8 | # You may obtain a copy of the License at
9 | #
10 | # https://www.apache.org/licenses/LICENSE-2.0
11 | #
12 | # Unless required by applicable law or agreed to in writing, software
13 | # distributed under the License is distributed on an "AS IS" BASIS,
14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 | # See the License for the specific language governing permissions and
16 | # limitations under the License.
17 | #
18 |
19 | ##############################################################################
20 | #
21 | # Gradle start up script for POSIX generated by Gradle.
22 | #
23 | # Important for running:
24 | #
25 | # (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is
26 | # noncompliant, but you have some other compliant shell such as ksh or
27 | # bash, then to run this script, type that shell name before the whole
28 | # command line, like:
29 | #
30 | # ksh Gradle
31 | #
32 | # Busybox and similar reduced shells will NOT work, because this script
33 | # requires all of these POSIX shell features:
34 | # * functions;
35 | # * expansions «$var», «${var}», «${var:-default}», «${var+SET}»,
36 | # «${var#prefix}», «${var%suffix}», and «$( cmd )»;
37 | # * compound commands having a testable exit status, especially «case»;
38 | # * various built-in commands including «command», «set», and «ulimit».
39 | #
40 | # Important for patching:
41 | #
42 | # (2) This script targets any POSIX shell, so it avoids extensions provided
43 | # by Bash, Ksh, etc; in particular arrays are avoided.
44 | #
45 | # The "traditional" practice of packing multiple parameters into a
46 | # space-separated string is a well documented source of bugs and security
47 | # problems, so this is (mostly) avoided, by progressively accumulating
48 | # options in "$@", and eventually passing that to Java.
49 | #
50 | # Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS,
51 | # and GRADLE_OPTS) rely on word-splitting, this is performed explicitly;
52 | # see the in-line comments for details.
53 | #
54 | # There are tweaks for specific operating systems such as AIX, CygWin,
55 | # Darwin, MinGW, and NonStop.
56 | #
57 | # (3) This script is generated from the Groovy template
58 | # https://github.com/gradle/gradle/blob/master/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt
59 | # within the Gradle project.
60 | #
61 | # You can find Gradle at https://github.com/gradle/gradle/.
62 | #
63 | ##############################################################################
64 |
65 | # Attempt to set APP_HOME
66 |
67 | # Resolve links: $0 may be a link
68 | app_path=$0
69 |
70 | # Need this for daisy-chained symlinks.
71 | while
72 | APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path
73 | [ -h "$app_path" ]
74 | do
75 | ls=$( ls -ld "$app_path" )
76 | link=${ls#*' -> '}
77 | case $link in #(
78 | /*) app_path=$link ;; #(
79 | *) app_path=$APP_HOME$link ;;
80 | esac
81 | done
82 |
83 | APP_HOME=$( cd "${APP_HOME:-./}" && pwd -P ) || exit
84 |
85 | APP_NAME="Gradle"
86 | APP_BASE_NAME=${0##*/}
87 |
88 | # Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
89 | DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"'
90 |
91 | # Use the maximum available, or set MAX_FD != -1 to use that value.
92 | MAX_FD=maximum
93 |
94 | warn () {
95 | echo "$*"
96 | } >&2
97 |
98 | die () {
99 | echo
100 | echo "$*"
101 | echo
102 | exit 1
103 | } >&2
104 |
105 | # OS specific support (must be 'true' or 'false').
106 | cygwin=false
107 | msys=false
108 | darwin=false
109 | nonstop=false
110 | case "$( uname )" in #(
111 | CYGWIN* ) cygwin=true ;; #(
112 | Darwin* ) darwin=true ;; #(
113 | MSYS* | MINGW* ) msys=true ;; #(
114 | NONSTOP* ) nonstop=true ;;
115 | esac
116 |
117 | CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar
118 |
119 |
120 | # Determine the Java command to use to start the JVM.
121 | if [ -n "$JAVA_HOME" ] ; then
122 | if [ -x "$JAVA_HOME/jre/sh/java" ] ; then
123 | # IBM's JDK on AIX uses strange locations for the executables
124 | JAVACMD=$JAVA_HOME/jre/sh/java
125 | else
126 | JAVACMD=$JAVA_HOME/bin/java
127 | fi
128 | if [ ! -x "$JAVACMD" ] ; then
129 | die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME
130 |
131 | Please set the JAVA_HOME variable in your environment to match the
132 | location of your Java installation."
133 | fi
134 | else
135 | JAVACMD=java
136 | which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
137 |
138 | Please set the JAVA_HOME variable in your environment to match the
139 | location of your Java installation."
140 | fi
141 |
142 | # Increase the maximum file descriptors if we can.
143 | if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then
144 | case $MAX_FD in #(
145 | max*)
146 | MAX_FD=$( ulimit -H -n ) ||
147 | warn "Could not query maximum file descriptor limit"
148 | esac
149 | case $MAX_FD in #(
150 | '' | soft) :;; #(
151 | *)
152 | ulimit -n "$MAX_FD" ||
153 | warn "Could not set maximum file descriptor limit to $MAX_FD"
154 | esac
155 | fi
156 |
157 | # Collect all arguments for the java command, stacking in reverse order:
158 | # * args from the command line
159 | # * the main class name
160 | # * -classpath
161 | # * -D...appname settings
162 | # * --module-path (only if needed)
163 | # * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables.
164 |
165 | # For Cygwin or MSYS, switch paths to Windows format before running java
166 | if "$cygwin" || "$msys" ; then
167 | APP_HOME=$( cygpath --path --mixed "$APP_HOME" )
168 | CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" )
169 |
170 | JAVACMD=$( cygpath --unix "$JAVACMD" )
171 |
172 | # Now convert the arguments - kludge to limit ourselves to /bin/sh
173 | for arg do
174 | if
175 | case $arg in #(
176 | -*) false ;; # don't mess with options #(
177 | /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath
178 | [ -e "$t" ] ;; #(
179 | *) false ;;
180 | esac
181 | then
182 | arg=$( cygpath --path --ignore --mixed "$arg" )
183 | fi
184 | # Roll the args list around exactly as many times as the number of
185 | # args, so each arg winds up back in the position where it started, but
186 | # possibly modified.
187 | #
188 | # NB: a `for` loop captures its iteration list before it begins, so
189 | # changing the positional parameters here affects neither the number of
190 | # iterations, nor the values presented in `arg`.
191 | shift # remove old arg
192 | set -- "$@" "$arg" # push replacement arg
193 | done
194 | fi
195 |
196 | # Collect all arguments for the java command;
197 | # * $DEFAULT_JVM_OPTS, $JAVA_OPTS, and $GRADLE_OPTS can contain fragments of
198 | # shell script including quotes and variable substitutions, so put them in
199 | # double quotes to make sure that they get re-expanded; and
200 | # * put everything else in single quotes, so that it's not re-expanded.
201 |
202 | set -- \
203 | "-Dorg.gradle.appname=$APP_BASE_NAME" \
204 | -classpath "$CLASSPATH" \
205 | org.gradle.wrapper.GradleWrapperMain \
206 | "$@"
207 |
208 | # Stop when "xargs" is not available.
209 | if ! command -v xargs >/dev/null 2>&1
210 | then
211 | die "xargs is not available"
212 | fi
213 |
214 | # Use "xargs" to parse quoted args.
215 | #
216 | # With -n1 it outputs one arg per line, with the quotes and backslashes removed.
217 | #
218 | # In Bash we could simply go:
219 | #
220 | # readarray ARGS < <( xargs -n1 <<<"$var" ) &&
221 | # set -- "${ARGS[@]}" "$@"
222 | #
223 | # but POSIX shell has neither arrays nor command substitution, so instead we
224 | # post-process each arg (as a line of input to sed) to backslash-escape any
225 | # character that might be a shell metacharacter, then use eval to reverse
226 | # that process (while maintaining the separation between arguments), and wrap
227 | # the whole thing up as a single "set" statement.
228 | #
229 | # This will of course break if any of these variables contains a newline or
230 | # an unmatched quote.
231 | #
232 |
233 | eval "set -- $(
234 | printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
235 | xargs -n1 |
236 | sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
237 | tr '\n' ' '
238 | )" '"$@"'
239 |
240 | exec "$JAVACMD" "$@"
241 |
--------------------------------------------------------------------------------
/gradlew.bat:
--------------------------------------------------------------------------------
1 | @rem
2 | @rem Copyright 2015 the original author or authors.
3 | @rem
4 | @rem Licensed under the Apache License, Version 2.0 (the "License");
5 | @rem you may not use this file except in compliance with the License.
6 | @rem You may obtain a copy of the License at
7 | @rem
8 | @rem https://www.apache.org/licenses/LICENSE-2.0
9 | @rem
10 | @rem Unless required by applicable law or agreed to in writing, software
11 | @rem distributed under the License is distributed on an "AS IS" BASIS,
12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | @rem See the License for the specific language governing permissions and
14 | @rem limitations under the License.
15 | @rem
16 |
17 | @if "%DEBUG%"=="" @echo off
18 | @rem ##########################################################################
19 | @rem
20 | @rem Gradle startup script for Windows
21 | @rem
22 | @rem ##########################################################################
23 |
24 | @rem Set local scope for the variables with windows NT shell
25 | if "%OS%"=="Windows_NT" setlocal
26 |
27 | set DIRNAME=%~dp0
28 | if "%DIRNAME%"=="" set DIRNAME=.
29 | set APP_BASE_NAME=%~n0
30 | set APP_HOME=%DIRNAME%
31 |
32 | @rem Resolve any "." and ".." in APP_HOME to make it shorter.
33 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
34 |
35 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
36 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
37 |
38 | @rem Find java.exe
39 | if defined JAVA_HOME goto findJavaFromJavaHome
40 |
41 | set JAVA_EXE=java.exe
42 | %JAVA_EXE% -version >NUL 2>&1
43 | if %ERRORLEVEL% equ 0 goto execute
44 |
45 | echo.
46 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
47 | echo.
48 | echo Please set the JAVA_HOME variable in your environment to match the
49 | echo location of your Java installation.
50 |
51 | goto fail
52 |
53 | :findJavaFromJavaHome
54 | set JAVA_HOME=%JAVA_HOME:"=%
55 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe
56 |
57 | if exist "%JAVA_EXE%" goto execute
58 |
59 | echo.
60 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
61 | echo.
62 | echo Please set the JAVA_HOME variable in your environment to match the
63 | echo location of your Java installation.
64 |
65 | goto fail
66 |
67 | :execute
68 | @rem Setup the command line
69 |
70 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
71 |
72 |
73 | @rem Execute Gradle
74 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %*
75 |
76 | :end
77 | @rem End local scope for the variables with windows NT shell
78 | if %ERRORLEVEL% equ 0 goto mainEnd
79 |
80 | :fail
81 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
82 | rem the _cmd.exe /c_ return code!
83 | set EXIT_CODE=%ERRORLEVEL%
84 | if %EXIT_CODE% equ 0 set EXIT_CODE=1
85 | if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE%
86 | exit /b %EXIT_CODE%
87 |
88 | :mainEnd
89 | if "%OS%"=="Windows_NT" endlocal
90 |
91 | :omega
92 |
--------------------------------------------------------------------------------
/settings.gradle:
--------------------------------------------------------------------------------
1 | rootProject.name = 'kafkaer'
2 |
3 |
--------------------------------------------------------------------------------
/src/main/java/co/navdeep/kafkaer/App.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer;
2 |
3 | import org.kohsuke.args4j.CmdLineException;
4 | import org.kohsuke.args4j.CmdLineParser;
5 | import org.slf4j.Logger;
6 | import org.slf4j.LoggerFactory;
7 |
8 | public class App {
9 | public static void main(String[] a) throws Exception {
10 | Args args = new Args();
11 | CmdLineParser parser = new CmdLineParser(args);
12 | try {
13 | parser.parseArgument(a);
14 | } catch(CmdLineException e){
15 | throw new Exception("Invalid command line arguments", e);
16 | }
17 | if(args.isHelp()){
18 | parser.printUsage(System.out);
19 | return;
20 | }
21 |
22 | if(args.isDebug()){
23 | System.setProperty("org.slf4j.simpleLogger.log.co.navdeep", "debug");
24 | }
25 | Logger logger = LoggerFactory.getLogger(App.class);
26 |
27 | if(args.getProperties() == null || args.getConfig() == null) {
28 | throw new RuntimeException("Missing required arguments - propertiesLocation, configLocation");
29 | }
30 |
31 | logger.debug("Input args: config: [{}] properties: [{}] wipe:[{}] confirm-delete: [{}], wipe-schema: [{}], preserve-partition-count: [{}]", args.getConfig(), args.getProperties(), args.isWipe(), args.isConfirmDelete(), args.isWipeSchemas(), args.isPreservePartitionCount());
32 | Configurator configurator = new Configurator(args.getProperties(), args.getConfig());
33 | configurator.setPreservePartitionCount(args.isPreservePartitionCount());
34 | if(args.isWipe())
35 | configurator.wipeTopics(args.isConfirmDelete(), args.isWipeSchemas());
36 | else
37 | configurator.applyConfig();
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/src/main/java/co/navdeep/kafkaer/Args.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer;
2 |
3 | import co.navdeep.kafkaer.utils.Utils;
4 | import lombok.Data;
5 | import org.apache.commons.lang3.StringUtils;
6 | import org.kohsuke.args4j.Argument;
7 | import org.kohsuke.args4j.Option;
8 | import org.kohsuke.args4j.spi.BooleanOptionHandler;
9 |
10 | import java.util.ArrayList;
11 | import java.util.List;
12 |
13 | @Data
14 | public class Args {
15 | @Option(name="--config",aliases = "-c", usage="Location of config file")
16 | String config;
17 |
18 | @Option(name="--properties", aliases = "-p", usage="Location of properties file")
19 | String properties;
20 |
21 | @Option(name="--wipe", usage="Wipe all topics", handler = BooleanOptionHandler.class)
22 | boolean wipe;
23 |
24 | @Option(name="--wipe-schemas", usage="Used with --wipe. Will delete corresponding schemas from schema registry. Will use properties kafkaer.schema.registry.* to connect to schema registry", handler = BooleanOptionHandler.class)
25 | boolean wipeSchemas;
26 |
27 | @Option(name="--confirm-delete", usage="Used with --wipe. Will wait for all brokers to sync up to ensure topic is deleted from all. Default max wait 60s. Configure using " + Utils.MAX_DELETE_CONFIRM_WAIT_CONFIG, handler = BooleanOptionHandler.class)
28 | boolean confirmDelete;
29 |
30 | @Option(name="--preserve-partition-count", usage="If a topic already exists and it's partition count is different from config, the partition count will not be changed.", handler = BooleanOptionHandler.class)
31 | boolean preservePartitionCount;
32 |
33 | @Option(name="--help", aliases= "-h", help = true, usage="list usage", handler = BooleanOptionHandler.class)
34 | boolean help;
35 |
36 | @Option(name="--debug", aliases = "-d", usage = "debug mode", handler = BooleanOptionHandler.class)
37 | boolean debug;
38 |
39 | @Argument
40 | private List<String> arguments = new ArrayList<>();
41 |
42 | //Maintain backward compatibility for use without flags
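//Positional usage: first argument is the properties location, second is the config location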
43 | public String getConfig(){
44 | return StringUtils.isBlank(config) ? (arguments.size() < 2 ? null : arguments.get(1)) : config;
45 | }
46 |
47 | public String getProperties(){
48 | return StringUtils.isBlank(properties) ? (arguments.size() < 1 ? null : arguments.get(0)) : properties;
49 | }
50 | }
51 |
--------------------------------------------------------------------------------
/src/main/java/co/navdeep/kafkaer/Configurator.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer;
2 |
3 | import co.navdeep.kafkaer.model.Broker;
4 | import co.navdeep.kafkaer.model.Config;
5 | import co.navdeep.kafkaer.model.Topic;
6 | import co.navdeep.kafkaer.utils.Utils;
7 | import io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient;
8 | import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
9 | import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
10 | import lombok.Data;
11 | import org.apache.commons.configuration2.Configuration;
12 | import org.apache.commons.configuration2.ex.ConfigurationException;
13 | import org.apache.commons.lang3.StringUtils;
14 | import org.apache.kafka.clients.admin.*;
15 | import org.apache.kafka.common.KafkaFuture;
16 | import org.apache.kafka.common.acl.AclBinding;
17 | import org.apache.kafka.common.config.ConfigResource;
18 | import org.apache.kafka.common.errors.UnknownTopicOrPartitionException;
19 | import org.slf4j.Logger;
20 | import org.slf4j.LoggerFactory;
21 |
22 | import java.io.IOException;
23 | import java.util.*;
24 | import java.util.concurrent.ExecutionException;
25 |
26 | @Data
27 | public class Configurator {
28 | private Configuration properties;
29 | private Config config;
30 | private AdminClient adminClient;
31 | private SchemaRegistryClient schemaRegistryClient;
32 | private boolean preservePartitionCount;
33 |
34 | private static Logger logger = LoggerFactory.getLogger(Configurator.class);
35 |
36 | public Configurator(String propertiesLocation, String configLocation) throws ConfigurationException, IOException {
37 | properties = Utils.readProperties(propertiesLocation);
38 | config = Utils.readConfig(configLocation, Utils.propertiesToMap(properties));
39 | adminClient = AdminClient.create(Utils.getClientConfig(properties));
40 | initializeSchemaRegistryClient();
41 | }
42 |
43 | public Configurator(Configuration p, Config c){
44 | properties = p;
45 | config = c;
46 | adminClient = AdminClient.create(Utils.getClientConfig(properties));
47 | initializeSchemaRegistryClient();
48 | }
49 |
50 | private void initializeSchemaRegistryClient(){
51 | String url = Utils.getSchemaRegistryUrl(properties);
52 | if(StringUtils.isNotBlank(url)){
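//12384 is the identityMapCapacity: the maximum number of schemas this client will cache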
53 | schemaRegistryClient = new CachedSchemaRegistryClient(url, 12384, Utils.getSchemaRegistryConfigs(properties));
54 | }
55 | }
56 |
57 | @Deprecated
58 | public void wipeTopics() throws ExecutionException, InterruptedException {
59 | wipeTopics(false, false);
60 | }
61 |
62 | public void wipeTopics(boolean confirmDelete, boolean wipeSchema) throws ExecutionException, InterruptedException {
63 | logger.debug("Deleting topics");
64 | DeleteTopicsResult result = adminClient.deleteTopics(config.getAllTopicNames());
65 | for(String topic : result.values().keySet()){
66 | try {
67 | logger.debug("Deleting topic: {}", topic);
68 | result.values().get(topic).get();
69 | if(confirmDelete) waitForDelete(topic);
70 | } catch(ExecutionException e){
71 | if(e.getCause() instanceof UnknownTopicOrPartitionException){
72 | logger.debug("Unable to delete topic {} because it does not exist.", topic);
73 | } else {
74 | throw new ExecutionException(e);
75 | }
76 | } finally {
77 | if(wipeSchema) {
78 | try {
79 | wipeSchema(topic);
80 | } catch (IOException | RestClientException e) {
81 | logger.error("Error deleting schema for [{}]", topic);
82 | throw new ExecutionException(e);
83 | }
84 | }
85 | }
86 |
87 | }
88 | }
89 |
90 | public void wipeSchema(String topicName) throws IOException, RestClientException {
91 | if(schemaRegistryClient == null){
92 | logger.warn("No schema registry configured. Set property [{}]", Utils.SCHEMA_REGISTRY_URL_CONFIG);
93 | return;
94 | }
95 |
96 | Collection<String> currentSubjects = schemaRegistryClient.getAllSubjects();
97 | for(String subject : currentSubjects){
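//substring match catches both <topic>-key and <topic>-value subjects (default TopicNameStrategy naming)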
98 | if(StringUtils.contains(subject, topicName)){
99 | logger.debug("Deleting subject [{}] from schema registry", subject);
100 | schemaRegistryClient.deleteSubject(subject);
101 | }
102 | }
103 | }
104 | private void waitForDelete(String topicName) throws ExecutionException, InterruptedException {
105 | int maxWaitTime = Utils.getMaxDeleteConfirmWaitTime(properties);
106 | int maxTries = Math.floorDiv(maxWaitTime, 5);
107 | int tries = 0;
108 | logger.debug("Confirming topic [{}] was deleted from all brokers. Will wait for max [{}]s", topicName, maxWaitTime);
109 | while(tries < maxTries){
110 | DescribeTopicsResult result = adminClient.describeTopics(Collections.singletonList(topicName));
111 | try{
112 | result.values().get(topicName).get();
113 | } catch(Exception e){
114 | if(e.getCause() instanceof UnknownTopicOrPartitionException){
115 | logger.debug("Confirmed: topic [{}] was deleted.", topicName);
116 | return;
117 | }
118 | throw e;
119 | }
120 | Thread.sleep(5000);
121 | tries++;
122 | }
123 | }
124 | public void applyConfig() throws ExecutionException, InterruptedException {
125 | validate();
126 | configureTopics();
127 | configureBrokers();
128 | configureAcls();
129 | }
130 |
131 | public void validate(){
132 | validateDuplicateTopics();
133 | }
134 |
135 | public void validateDuplicateTopics(){
136 | Set<String> seen = new HashSet<>();
137 | Set<String> duplicates = new HashSet<>();
138 | for(String t : config.getAllTopicNames()){
139 | if(seen.contains(t)){
140 | duplicates.add(t);
141 | }
142 | seen.add(t);
143 | }
144 |
145 | if(!duplicates.isEmpty()){
146 | logger.error("These topics are defined multiple times: {}", duplicates);
147 | throw new RuntimeException("Duplicate topic definitions " + duplicates);
148 | }
149 | }
150 |
151 | public void configureAcls() throws ExecutionException, InterruptedException {
152 | logger.debug("Configuring ACLs");
153 | List<AclBinding> bindings = config.getAclBindings();
154 | if(bindings.isEmpty()){
155 | logger.debug("No ACLs defined in config. Nothing done.");
156 | return;
157 | }
158 |
159 | CreateAclsResult result = adminClient.createAcls(bindings);
160 | for(AclBinding binding : result.values().keySet()){
161 | logger.debug("Creating ACL {}", binding);
162 | result.values().get(binding).get();
163 | }
164 | }
165 |
166 | public void configureBrokers() throws ExecutionException, InterruptedException {
167 | logger.debug("Configuring brokers");
168 | if(!config.hasBrokerConfig()){
169 | logger.debug("No broker configs defined. Nothing done.");
170 | return;
171 | }
172 |
173 | Map<ConfigResource, org.apache.kafka.clients.admin.Config> updateConfig = new HashMap<>();
174 | for(Broker broker : config.getBrokers()){
175 | logger.debug("Applying broker config {}", broker);
176 | ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, broker.getId());
177 | updateConfig.put(configResource, broker.configsAsKafkaConfig());
178 | }
179 |
180 | AlterConfigsResult result = adminClient.alterConfigs(updateConfig);
181 | result.all().get();
182 |
183 | }
184 |
185 | public void configureTopics() throws ExecutionException, InterruptedException {
186 | logger.debug("Configuring topics");
187 | Map<String, KafkaFuture<TopicDescription>> topicResults = adminClient.describeTopics(config.getAllTopicNames()).values();
188 | for(Topic topic : config.getTopics()){
189 | logger.debug("Topic config: {}", topic);
190 | try {
191 | TopicDescription td = topicResults.get(topic.getName()).get();
192 | logger.debug("Updating existing topic {}", topic.getName());
193 | handleTopicPartitionsUpdate(td, topic);
194 | handleTopicConfigUpdate(topic);
195 | } catch(ExecutionException e){
196 | if(e.getCause() instanceof UnknownTopicOrPartitionException) {
197 | logger.debug("Creating new topic {}", topic.getName());
198 | CreateTopicsResult result = adminClient.createTopics(Collections.singleton(topic.toNewTopic()));
199 | result.all().get();
200 | } else {
201 | throw(e);
202 | }
203 | }
204 | }
205 | }
206 |
207 | private void handleTopicConfigUpdate(Topic topic) throws InterruptedException {
208 | if(!topic.hasConfigs()) return;
209 | ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic.getName());
210 | Map<ConfigResource, org.apache.kafka.clients.admin.Config> updateConfig = new HashMap<>();
211 | updateConfig.put(configResource, topic.configsAsKafkaConfig());
212 | AlterConfigsResult alterConfigsResult = adminClient.alterConfigs(updateConfig);
213 | try {
214 | alterConfigsResult.all().get();
215 | } catch (ExecutionException e) {
216 | throw new RuntimeException(e);
217 | }
218 | }
219 |
220 | private void handleTopicPartitionsUpdate(TopicDescription current, Topic topic) throws InterruptedException {
221 | if(preservePartitionCount) logPartitionDiff(current, topic);
222 | else updatePartitions(current, topic);
223 | }
224 |
225 | private void logPartitionDiff(TopicDescription current, Topic topic){
226 | if(current.partitions().size() < topic.getPartitions()){
227 | logger.warn("Current partition count for topic {} is [{}], partition count in config is [{}]. Execute without --preserve-partition-count to make this partition update.", topic.getName(), current.partitions().size(), topic.getPartitions());
228 |
229 | } else if(current.partitions().size() > topic.getPartitions()){
230 | logger.warn("Current partition count for topic {} is [{}], partition count in config is [{}].", topic.getName(), current.partitions().size(), topic.getPartitions());
231 | }
232 | }
233 | private void updatePartitions(TopicDescription current, Topic topic){
234 | try {
235 | if(current.partitions().size() < topic.getPartitions()){
236 | logger.debug("Updating partition count for topic {} from [{}] to [{}]", topic.getName(), current.partitions().size(), topic.getPartitions());
237 | CreatePartitionsResult result = adminClient.createPartitions(Collections.singletonMap(topic.getName(), NewPartitions.increaseTo(topic.getPartitions())));
238 | result.all().get();
239 | } else if(current.partitions().size() > topic.getPartitions()){
240 | throw new RuntimeException("Can not reduce number of partitions for topic [" + topic.getName() + "] from current:" + current.partitions().size() + " to " + topic.getPartitions());
241 | }
242 | } catch(ExecutionException | InterruptedException e){
243 | throw new RuntimeException(e);
244 | }
245 | }
246 | }
247 |
--------------------------------------------------------------------------------
/src/main/java/co/navdeep/kafkaer/model/Acl.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer.model;
2 |
3 | import lombok.Data;
4 | import org.apache.kafka.common.acl.AccessControlEntry;
5 | import org.apache.kafka.common.acl.AclBinding;
6 | import org.apache.kafka.common.acl.AclOperation;
7 | import org.apache.kafka.common.acl.AclPermissionType;
8 | import org.apache.kafka.common.resource.PatternType;
9 | import org.apache.kafka.common.resource.ResourcePattern;
10 | import org.apache.kafka.common.resource.ResourceType;
11 |
12 | @Data
13 | public class Acl {
14 | private String resourceType;
15 | private String resourceName;
16 | private String principal;
17 | private String patternType;
18 | private String host;
19 | private String operation;
20 | private String permissionType;
21 |
22 | public Acl(){
23 | super();
24 | }
25 |
26 | public Acl(String s){
27 | //principal,resourceType,patternType,resourceName,operation,permissionType,host
28 | String[] splits = s.split(",");
29 | if(splits.length < 7) throw new RuntimeException("Invalid ACL:" + s);
30 | principal = splits[0];
31 | resourceType = splits[1];
32 | patternType = splits[2];
33 | resourceName = splits[3];
34 | operation = splits[4];
35 | permissionType = splits[5];
36 | host = splits[6];
37 | }
38 |
39 | public AclBinding toKafkaAclBinding(){
40 | return new AclBinding(getKafkaResourcePattern(), getKafkaAccessControlEntry());
41 | }
42 | public ResourcePattern getKafkaResourcePattern(){
43 | return new ResourcePattern(ResourceType.fromString(resourceType.toUpperCase()), resourceName, PatternType.fromString(patternType.toUpperCase()));
44 | }
45 |
46 | public AccessControlEntry getKafkaAccessControlEntry(){
47 | return new AccessControlEntry(principal, host, AclOperation.fromString(operation.toUpperCase()), AclPermissionType.fromString(permissionType.toUpperCase()));
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
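For reference, a small sketch of the round trip from the comma-separated form to a Kafka AclBinding; the printed shape is indicative only:

```java
import co.navdeep.kafkaer.model.Acl;
import org.apache.kafka.common.acl.AclBinding;

public class AclSketch {
    public static void main(String[] args) {
        // Field order: principal,resourceType,patternType,resourceName,operation,permissionType,host
        Acl acl = new Acl("User:joe,Topic,LITERAL,test,Read,Allow,*");
        AclBinding binding = acl.toKafkaAclBinding();
        System.out.println(binding);
    }
}
```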
/src/main/java/co/navdeep/kafkaer/model/Broker.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer.model;
2 |
3 | import co.navdeep.kafkaer.utils.Utils;
4 | import lombok.Data;
5 | import org.apache.kafka.clients.admin.Config;
6 |
7 | import java.util.Map;
8 |
9 | @Data
10 | public class Broker {
11 | private String id;
12 | private Map<String, String> config;
13 |
14 | public Config configsAsKafkaConfig(){
15 | return Utils.configsAsKafkaConfig(config);
16 | }
17 |
18 | public String getId(){
19 | return id == null ? "" : id;
20 | }
21 | }
22 |
--------------------------------------------------------------------------------
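The empty-string fallback in getId() is what enables cluster-wide updates: a BROKER ConfigResource with an empty name addresses the cluster-wide default rather than a single broker (the cluster-wide test further below relies on this). A sketch, with an illustrative config key:

```java
import co.navdeep.kafkaer.model.Broker;
import org.apache.kafka.common.config.ConfigResource;

import java.util.Collections;

public class BrokerResourceSketch {
    public static void main(String[] args) {
        Broker broker = new Broker();
        broker.setConfig(Collections.singletonMap("max.connections.per.ip", "10000"));
        // No id set: getId() yields "", and ConfigResource(BROKER, "") targets the
        // cluster-wide default config instead of one broker.
        ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, broker.getId());
        System.out.println(resource);
    }
}
```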
/src/main/java/co/navdeep/kafkaer/model/Config.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer.model;
2 |
3 | import lombok.Data;
4 | import org.apache.kafka.common.acl.AclBinding;
5 |
6 | import java.util.ArrayList;
7 | import java.util.List;
8 |
9 | @Data
10 | public class Config {
11 | private List<Topic> topics;
12 | private List<Broker> brokers;
13 | private List<Acl> acls;
14 | private List<String> aclStrings;
15 |
16 | public Config(){
17 | topics = new ArrayList<>();
18 | brokers = new ArrayList<>();
19 | acls = new ArrayList<>();
20 | aclStrings = new ArrayList<>();
21 | }
22 |
23 | public List<AclBinding> getAclBindings(){
24 | List<AclBinding> bindings = new ArrayList<>();
25 | for(Acl acl : acls){
26 | bindings.add(acl.toKafkaAclBinding());
27 | }
28 |
29 | for(String acl : aclStrings){
30 | bindings.add(new Acl(acl).toKafkaAclBinding());
31 | }
32 |
33 | return bindings;
34 | }
35 | public List<String> getAllTopicNames(){
36 | List<String> names = new ArrayList<>();
37 | for(Topic t : topics){
38 | names.add(t.getName());
39 | }
40 | return names;
41 | }
42 |
43 | public boolean hasBrokerConfig(){
44 | return brokers != null && !brokers.isEmpty();
45 | }
46 |
47 | }
48 |
--------------------------------------------------------------------------------
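Structured acls and aclStrings can be mixed; getAclBindings() flattens both into one list. A short sketch:

```java
import co.navdeep.kafkaer.model.Acl;
import co.navdeep.kafkaer.model.Config;
import org.apache.kafka.common.acl.AclBinding;

import java.util.List;

public class ConfigSketch {
    public static void main(String[] args) {
        Config config = new Config();
        // One structured ACL plus one string ACL ...
        config.getAcls().add(new Acl("User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"));
        config.getAclStrings().add("User:joe,Topic,LITERAL,test,Read,Allow,*");
        // ... both surface as AclBindings
        List<AclBinding> bindings = config.getAclBindings();
        System.out.println(bindings.size()); // 2
    }
}
```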
/src/main/java/co/navdeep/kafkaer/model/Topic.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer.model;
2 |
3 | import co.navdeep.kafkaer.utils.Utils;
4 | import lombok.Data;
5 | import lombok.NoArgsConstructor;
6 | import lombok.NonNull;
7 | import lombok.RequiredArgsConstructor;
8 | import org.apache.kafka.clients.admin.Config;
9 | import org.apache.kafka.clients.admin.NewTopic;
10 |
11 | import java.util.Map;
12 |
13 | @Data
14 | @RequiredArgsConstructor
15 | @NoArgsConstructor
16 | public class Topic {
17 | @NonNull private String name;
18 | @NonNull private int partitions;
19 | @NonNull private short replicationFactor;
20 | private Map<String, String> configs;
21 | private String description;
22 |
23 | public NewTopic toNewTopic(){
24 | NewTopic newTopic = new NewTopic(name, partitions, replicationFactor);
25 | if(configs != null)
26 | newTopic.configs(configs);
27 | return newTopic;
28 | }
29 |
30 | public boolean hasConfigs(){
31 | return configs != null && !configs.isEmpty();
32 | }
33 | public Config configsAsKafkaConfig(){
34 | return Utils.configsAsKafkaConfig(configs);
35 | }
36 | }
37 |
--------------------------------------------------------------------------------
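A sketch of how toNewTopic() feeds topic creation, assuming a local broker; the name, counts, and config entry are illustrative:

```java
import co.navdeep.kafkaer.model.Topic;
import org.apache.kafka.clients.admin.AdminClient;

import java.util.Collections;
import java.util.Properties;

public class CreateTopicSketch {
    public static void main(String[] args) throws Exception {
        Topic topic = new Topic("example-topic", 3, (short) 1); // illustrative values
        topic.setConfigs(Collections.singletonMap("cleanup.policy", "compact"));

        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumption: local broker
        try (AdminClient admin = AdminClient.create(props)) {
            // toNewTopic() carries name, partitions, replication factor, and per-topic configs
            admin.createTopics(Collections.singletonList(topic.toNewTopic())).all().get();
        }
    }
}
```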
/src/main/java/co/navdeep/kafkaer/utils/Utils.java:
--------------------------------------------------------------------------------
1 | package co.navdeep.kafkaer.utils;
2 |
3 | import com.fasterxml.jackson.databind.ObjectMapper;
4 | import org.apache.commons.configuration2.Configuration;
5 | import org.apache.commons.configuration2.builder.fluent.Configurations;
6 | import org.apache.commons.configuration2.ex.ConfigurationException;
7 | import org.apache.commons.io.FileUtils;
8 | import org.apache.commons.lang3.StringUtils;
9 | import org.apache.commons.text.StringSubstitutor;
10 | import org.apache.kafka.clients.admin.Config;
11 | import org.apache.kafka.clients.admin.ConfigEntry;
12 |
13 | import java.io.File;
14 | import java.io.IOException;
15 | import java.util.*;
16 |
17 | import static java.nio.charset.StandardCharsets.UTF_8;
18 |
19 | public class Utils {
20 | private static final String KAFKAER = "kafkaer";
21 | private static final String KAFKAER_DOT = KAFKAER + ".";
22 |
23 | public static final String MAX_DELETE_CONFIRM_WAIT_CONFIG = "kafkaer.max.delete.confirm.wait";
24 | public static final String SCHEMA_REGISTRY_URL_CONFIG = KAFKAER_DOT + "schema.registry.url";
25 |
26 | private static final String SCHEMA_REGISTRY_CONFIG_PREFIX = "schema.registry";
27 | private static final String SCHEMA_REGISTRY_CONFIG_PREFIX_DOT = SCHEMA_REGISTRY_CONFIG_PREFIX + ".";
28 | private static final String KAFKAER_SCHEMA_REGISTRY_CONFIG_PREFIX = KAFKAER_DOT + SCHEMA_REGISTRY_CONFIG_PREFIX;
29 | private static final String KAFKAER_SCHEMA_REGISTRY_CONFIG_PREFIX_DOT = KAFKAER_SCHEMA_REGISTRY_CONFIG_PREFIX + ".";
30 |
31 | public static Configuration readProperties(String location) throws ConfigurationException {
32 | return new Configurations().properties(location);
33 | }
34 |
35 | public static Map<String, String> readPropertiesAsMap(String location) throws ConfigurationException {
36 | return propertiesToMap(readProperties(location));
37 | }
38 |
39 | public static Map<String, String> propertiesToMap(Configuration properties){
40 | Map<String, String> map = new HashMap<>();
41 | properties.getKeys().forEachRemaining(s -> map.put(s, properties.getString(s)));
42 | return map;
43 | }
44 |
45 | public static Properties getClientConfig(Configuration properties){
46 | Properties config = new Properties();
47 | properties.getKeys(KAFKAER).forEachRemaining(key -> config.put(replacePrefix(key, KAFKAER_DOT, null), properties.getString(key)));
48 | return config;
49 | }
50 |
51 | public static Map<String, String> getSchemaRegistryConfigs(Configuration properties){
52 | Map<String, String> config = new HashMap<>();
53 | properties.getKeys(KAFKAER_SCHEMA_REGISTRY_CONFIG_PREFIX).forEachRemaining(key -> config.put(replacePrefix(key, KAFKAER_SCHEMA_REGISTRY_CONFIG_PREFIX_DOT, SCHEMA_REGISTRY_CONFIG_PREFIX_DOT), properties.getString(key)));
54 | return config;
55 | }
56 |
57 | public static int getMaxDeleteConfirmWaitTime(Configuration properties){
58 | return properties.getInt(MAX_DELETE_CONFIRM_WAIT_CONFIG, 60);
59 | }
60 |
61 | private static String replacePrefix(String key, String prefix, String replaceWith){
62 | String stripped = key.substring(key.indexOf(prefix) + prefix.length());
63 | return StringUtils.isBlank(replaceWith) ? stripped : (replaceWith + stripped);
64 | }
65 |
66 | public static org.apache.kafka.clients.admin.Config configsAsKafkaConfig(Map<String, String> config){
67 | List<ConfigEntry> configEntries = new ArrayList<>();
68 | for(String key : config.keySet()){
69 | configEntries.add(new ConfigEntry(key, config.get(key)));
70 | }
71 | return new Config(configEntries);
72 | }
73 |
74 | public static co.navdeep.kafkaer.model.Config readConfig(String location, Map<String, String> valueMap) throws IOException {
75 | String configString = FileUtils.readFileToString(new File(location), UTF_8);
76 | StringSubstitutor substitutor = new StringSubstitutor(valueMap);
77 | configString = substitutor.replace(configString);
78 | ObjectMapper mapper = new ObjectMapper();
79 | return mapper.readValue(configString, co.navdeep.kafkaer.model.Config.class);
80 | }
81 |
82 | public static String getSchemaRegistryUrl(Configuration properties){
83 | return properties.getString(SCHEMA_REGISTRY_URL_CONFIG);
84 | }
85 | }
86 |
--------------------------------------------------------------------------------
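The two prefix helpers behave differently: getClientConfig strips the kafkaer. prefix outright, while getSchemaRegistryConfigs remaps kafkaer.schema.registry. to schema.registry.. A sketch with illustrative values:

```java
import co.navdeep.kafkaer.utils.Utils;
import org.apache.commons.configuration2.Configuration;
import org.apache.commons.configuration2.PropertiesConfiguration;

import java.util.Map;
import java.util.Properties;

public class PrefixSketch {
    public static void main(String[] args) {
        Configuration p = new PropertiesConfiguration();
        p.addProperty("kafkaer.bootstrap.servers", "localhost:9092");
        p.addProperty("kafkaer.schema.registry.url", "http://localhost:8081"); // illustrative URL
        p.addProperty("unrelated.key", "ignored"); // no kafkaer prefix, never picked up

        // "kafkaer." is stripped for admin-client configs ...
        Properties clientConfig = Utils.getClientConfig(p);
        System.out.println(clientConfig.getProperty("bootstrap.servers")); // localhost:9092

        // ... while "kafkaer.schema.registry." is remapped to "schema.registry."
        Map<String, String> srConfigs = Utils.getSchemaRegistryConfigs(p);
        System.out.println(srConfigs.get("schema.registry.url")); // http://localhost:8081
    }
}
```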
/src/main/resources/kafka-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "topics": [
3 | {
4 | "name": "withSuffix-${topic.suffix}",
5 | "partitions": 1,
6 | "replicationFactor": 1,
7 | "description": "This is a description for a topic with a suffix.",
8 | "configs": {
9 | "compression.type": "gzip",
10 | "cleanup.policy": "delete",
11 | "delete.retention.ms": "86400000"
12 | }
13 | },
14 | {
15 | "name": "test",
16 | "partitions": 1,
17 | "replicationFactor": 1,
18 | "description": "This description is just for documentation. It does not affect the kafka cluster",
19 | "configs": {
20 | "compression.type": "gzip",
21 | "cleanup.policy": "compact"
22 | }
23 | }
24 | ],
25 | "brokers": [
26 | {
27 | "id": "1",
28 | "config": {
29 | "sasl.login.refresh.window.jitter": "0.05"
30 | }
31 | }
32 | ],
33 | "acls" : [
34 | {
35 | "principal": "User:joe",
36 | "resourceType": "Topic",
37 | "patternType": "LITERAL",
38 | "resourceName": "test",
39 | "operation": "Read",
40 | "permissionType": "Allow",
41 | "host": "*"
42 | }
43 | ],
44 | "aclStrings": [
45 | "User:joe,Topic,LITERAL,test,Read,Allow,*",
46 | "User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"
47 | ]
48 | }
49 |
--------------------------------------------------------------------------------
/src/main/resources/local.properties:
--------------------------------------------------------------------------------
1 | #admin client configs
2 | kafkaer.bootstrap.servers=localhost:29092
3 | kafkaer.client.id=kafkaer
4 |
5 | #variables
6 | topic.suffix=iamasuffix
--------------------------------------------------------------------------------
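These two resources work together: values from the properties file fill the ${...} placeholders in kafka-config.json via StringSubstitutor. A sketch, assuming it runs from the repository root:

```java
import co.navdeep.kafkaer.model.Config;
import co.navdeep.kafkaer.utils.Utils;

import java.util.Map;

public class SubstitutionSketch {
    public static void main(String[] args) throws Exception {
        // topic.suffix=iamasuffix replaces ${topic.suffix} in the first topic's name
        Map<String, String> values = Utils.readPropertiesAsMap("src/main/resources/local.properties");
        Config config = Utils.readConfig("src/main/resources/kafka-config.json", values);
        System.out.println(config.getTopics().get(0).getName()); // withSuffix-iamasuffix
    }
}
```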
/src/test/java/ConfigJsonGenerate.java:
--------------------------------------------------------------------------------
1 | import co.navdeep.kafkaer.model.Broker;
2 | import co.navdeep.kafkaer.model.Config;
3 | import co.navdeep.kafkaer.model.Topic;
4 | import com.fasterxml.jackson.core.JsonProcessingException;
5 | import com.fasterxml.jackson.databind.ObjectMapper;
6 | import org.junit.Test;
7 |
8 | import java.util.ArrayList;
9 | import java.util.HashMap;
10 |
11 | public class ConfigJsonGenerate {
12 | @Test
13 | public void generateConfigJson() throws JsonProcessingException {
14 | Config config = new Config();
15 | Topic topic = new Topic();
16 | topic.setName("test");
17 | topic.setConfigs(new HashMap<>());
18 | topic.getConfigs().put("cleanup.policy", "compact");
19 | topic.getConfigs().put("compression.type", "gzip");
20 | config.getTopics().add(topic);
21 |
22 | Broker broker = new Broker();
23 | broker.setId("1");
24 | broker.setConfig(new HashMap<>());
25 | broker.getConfig().put("sasl.login.refresh.window.jitter", "0.05");
26 | config.setBrokers(new ArrayList<>());
27 | config.getBrokers().add(broker);
28 | ObjectMapper mapper = new ObjectMapper();
29 | System.out.println(mapper.writeValueAsString(config));
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/src/test/java/ConfiguratorTest.java:
--------------------------------------------------------------------------------
1 | import co.navdeep.kafkaer.Configurator;
2 | import co.navdeep.kafkaer.model.Acl;
3 | import co.navdeep.kafkaer.model.Broker;
4 | import co.navdeep.kafkaer.model.Topic;
5 | import co.navdeep.kafkaer.utils.Utils;
6 | import co.navdeep.kafkaer.model.Config;
7 | import io.confluent.kafka.schemaregistry.client.SchemaRegistryClient;
8 | import io.confluent.kafka.schemaregistry.client.rest.exceptions.RestClientException;
9 | import org.apache.commons.configuration2.Configuration;
10 | import org.apache.commons.configuration2.ex.ConfigurationException;
11 | import org.apache.kafka.clients.admin.*;
12 | import org.apache.kafka.common.Node;
13 | import org.apache.kafka.common.acl.AccessControlEntryFilter;
14 | import org.apache.kafka.common.acl.AclBindingFilter;
15 | import org.apache.kafka.common.config.ConfigResource;
16 | import org.apache.kafka.common.resource.ResourcePatternFilter;
17 | import org.junit.*;
18 | import org.mockito.ArgumentMatchers;
19 | import org.mockito.Mock;
20 | import org.mockito.Mockito;
21 |
22 | import java.io.IOException;
23 | import java.util.*;
24 | import java.util.concurrent.ExecutionException;
25 | import java.util.concurrent.TimeUnit;
26 |
27 | public class ConfiguratorTest {
28 |
29 | final static String PROPERTIES_LOCATION="src/test/resources/test.properties";
30 | final static String CONFIG_LOCATION="src/test/resources/kafka-config.json";
31 | static AdminClient adminClient;
32 |
33 | @BeforeClass
34 | public static void setup() throws ConfigurationException {
35 | Configuration properties = Utils.readProperties(PROPERTIES_LOCATION);
36 | adminClient = AdminClient.create(Utils.getClientConfig(properties));
37 | }
38 |
39 | @Before
40 | @After
41 | public void cleanup() throws ExecutionException, InterruptedException {
42 | deleteAllAcls();
43 | }
44 |
45 | @Test
46 | public void testReadConfig() throws IOException, ConfigurationException {
47 | Configurator configurator = new Configurator(PROPERTIES_LOCATION, CONFIG_LOCATION);
48 | Config config = configurator.getConfig();
49 | Assert.assertFalse(config.getTopics().isEmpty());
50 | Assert.assertEquals(config.getTopics().size(), 2);
51 | Assert.assertEquals(config.getTopics().get(0).getName(), "withSuffix-iamasuffix");
52 | Assert.assertEquals(config.getTopics().get(0).getPartitions(), 1);
53 | Assert.assertEquals(config.getTopics().get(0).getReplicationFactor(), 1);
54 | Assert.assertEquals(config.getTopics().get(0).getConfigs().get("compression.type"), "gzip");
55 |
56 | Assert.assertEquals(config.getBrokers().size(), 1);
57 | Assert.assertEquals(config.getBrokers().get(0).getConfig().get("sasl.login.refresh.window.jitter"), "0.05");
58 |
59 |
60 | Assert.assertEquals(config.getAcls().size(), 1);
61 | Assert.assertEquals(config.getAcls().get(0).getPrincipal(), "User:joe");
62 | Assert.assertEquals(config.getAcls().get(0).getResourceType(), "Topic");
63 | Assert.assertEquals(config.getAcls().get(0).getPatternType(), "LITERAL");
64 | Assert.assertEquals(config.getAcls().get(0).getResourceName(), "test");
65 | Assert.assertEquals(config.getAcls().get(0).getOperation(), "Read");
66 | Assert.assertEquals(config.getAcls().get(0).getPermissionType(), "Allow");
67 | Assert.assertEquals(config.getAcls().get(0).getHost(), "*");
68 |
69 | Assert.assertEquals(config.getAclStrings().size(), 2);
70 | Assert.assertTrue(config.getAclStrings().containsAll(Arrays.asList("User:joe,Topic,LITERAL,test,Read,Allow,*", "User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*")));
71 | }
72 |
73 | @Test
74 | public void testTopicCreation() throws ExecutionException, InterruptedException, ConfigurationException {
75 | Config config = new Config();
76 | String topicName = UUID.randomUUID().toString();
77 | Topic topic = new Topic(topicName, 1, (short)1);
78 | config.getTopics().add(topic);
79 |
80 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
81 | configurator.applyConfig();
82 |
83 | sleep();
84 | compareWithKafkaTopic(topic);
85 | }
86 |
87 | @Test
88 | public void testMultipleTopicCreation() throws ExecutionException, InterruptedException, ConfigurationException {
89 | Config config = new Config();
90 | String topicName = UUID.randomUUID().toString();
91 | String topicName2 = UUID.randomUUID().toString();
92 | Topic topic = new Topic(topicName, 1, (short)1);
93 | Topic topic2 = new Topic(topicName2, 2, (short)1);
94 | config.getTopics().add(topic);
95 | config.getTopics().add(topic2);
96 |
97 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
98 | configurator.applyConfig();
99 |
100 | sleep();
101 | compareWithKafkaTopic(topic);
102 | compareWithKafkaTopic(topic2);
103 | }
104 |
105 | @Test
106 | public void testTopicCreationWithConfigs() throws ExecutionException, InterruptedException, ConfigurationException {
107 | Config config = new Config();
108 | Topic topic = new Topic(UUID.randomUUID().toString(), 1, (short)1);
109 | topic.setConfigs(Collections.singletonMap("delete.retention.ms", "123"));
110 | config.getTopics().add(topic);
111 |
112 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
113 | configurator.applyConfig();
114 |
115 | sleep();
116 | ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic.getName());
117 | DescribeConfigsResult result = adminClient.describeConfigs(Collections.singletonList(configResource));
118 |
119 | org.apache.kafka.clients.admin.Config topicConfig = result.all().get().get(configResource);
120 |
121 | Assert.assertEquals(topicConfig.get("delete.retention.ms").value(), "123");
122 | }
123 |
124 | @Test
125 | public void testIncreasePartitions() throws ExecutionException, InterruptedException, ConfigurationException {
126 | Config config = new Config();
127 | String topicName = UUID.randomUUID().toString();
128 | Topic topic = new Topic(topicName, 1, (short)1);
129 | config.getTopics().add(topic);
130 |
131 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
132 | configurator.applyConfig();
133 |
134 | sleep();
135 | compareWithKafkaTopic(topic);
136 |
137 | topic.setPartitions(2);
138 | configurator.applyConfig();
139 |
140 | sleep();
141 | compareWithKafkaTopic(topic);
142 | }
143 |
144 | @Test
145 | public void testPreservePartitions() throws ExecutionException, InterruptedException, ConfigurationException {
146 | Config config = new Config();
147 | String topicName = UUID.randomUUID().toString();
148 | Topic topic = new Topic(topicName, 1, (short)1);
149 | config.getTopics().add(topic);
150 |
151 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
152 | configurator.setPreservePartitionCount(true);
153 | configurator.applyConfig();
154 |
155 | sleep();
156 | compareWithKafkaTopic(topic);
157 |
158 | //Increase the partitions and apply config
159 | topic.setPartitions(2);
160 | configurator.applyConfig();
161 |
162 | //Still expect 1 partition
163 | topic.setPartitions(1);
164 | compareWithKafkaTopic(topic);
165 | }
166 |
167 | @Test
168 | public void testUpdateExistingTopicConfig() throws ConfigurationException, ExecutionException, InterruptedException {
169 | Config config = new Config();
170 | Topic topic = new Topic(UUID.randomUUID().toString(), 1, (short)1);
171 | topic.setConfigs(Collections.singletonMap("delete.retention.ms", "123"));
172 | config.getTopics().add(topic);
173 |
174 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
175 | configurator.applyConfig();
176 |
177 | sleep();
178 | ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic.getName());
179 | DescribeConfigsResult result = adminClient.describeConfigs(Collections.singletonList(configResource));
180 |
181 | org.apache.kafka.clients.admin.Config topicConfig = result.all().get().get(configResource);
182 |
183 | Assert.assertEquals(topicConfig.get("delete.retention.ms").value(), "123");
184 |
185 | //update the same topic
186 | topic.setConfigs(Collections.singletonMap("delete.retention.ms", "321"));
187 | configurator.applyConfig();
188 | result = adminClient.describeConfigs(Collections.singletonList(configResource));
189 | topicConfig = result.all().get().get(configResource);
190 | Assert.assertEquals(topicConfig.get("delete.retention.ms").value(), "321");
191 | }
192 |
193 |
194 | @Test
195 | public void testSpecificBrokerConfigUpdate() throws ExecutionException, InterruptedException, ConfigurationException {
196 | Node brokerNode = new ArrayList<>(adminClient.describeCluster().nodes().get()).get(0);
197 | Config config = new Config();
198 | Broker broker = new Broker();
199 | broker.setId(String.valueOf(brokerNode.id()));
200 | broker.setConfig(Collections.singletonMap("sasl.kerberos.min.time.before.relogin", "60001"));
201 | config.getBrokers().add(broker);
202 |
203 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
204 | configurator.applyConfig();
205 |
206 | sleep();
207 | ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(brokerNode.id()));
208 | DescribeConfigsResult result = adminClient.describeConfigs(Collections.singletonList(configResource));
209 | org.apache.kafka.clients.admin.Config brokerConfig = result.all().get().get(configResource);
210 |
211 | //Default is 60000
212 | Assert.assertEquals(brokerConfig.get("sasl.kerberos.min.time.before.relogin").value(), "60001");
213 | }
214 |
215 | @Test
216 | public void testClusterwideConfigUpdate() throws ExecutionException, InterruptedException, ConfigurationException {
217 | List<Node> nodes = new ArrayList<>(adminClient.describeCluster().nodes().get());
218 | Broker broker = new Broker();
219 | Config config = new Config();
220 | //Default is 2147483647
221 | broker.setConfig(Collections.singletonMap("max.connections.per.ip", "10000"));
222 | config.getBrokers().add(broker);
223 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
224 | configurator.applyConfig();
225 |
226 | sleep();
227 | for(Node node : nodes){
228 | ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(node.id()));
229 | DescribeConfigsResult result = adminClient.describeConfigs(Collections.singletonList(configResource));
230 | org.apache.kafka.clients.admin.Config brokerConfig = result.all().get().get(configResource);
231 | Assert.assertEquals(brokerConfig.get("max.connections.per.ip").value(), "10000");
232 | }
233 | }
234 |
235 | @Test
236 | public void testCreateAclsStructured() throws ConfigurationException, ExecutionException, InterruptedException {
237 | Config config = new Config();
238 | config.getAcls().add(new Acl("User:joe,Topic,LITERAL,test,Read,Allow,*"));
239 | config.getAcls().add(new Acl("User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"));
240 |
241 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
242 | configurator.applyConfig();
243 |
244 | sleep();
245 | DescribeAclsResult describeAclsResult = adminClient.describeAcls(new AclBindingFilter(ResourcePatternFilter.ANY, AccessControlEntryFilter.ANY));
246 |
247 | Assert.assertEquals(describeAclsResult.values().get().size(), 2);
248 | Assert.assertTrue(describeAclsResult.values().get().containsAll(config.getAclBindings()));
249 | }
250 |
251 | @Test
252 | public void testCreateAclsFromStrings() throws ExecutionException, InterruptedException, ConfigurationException {
253 | Config config = new Config();
254 | config.getAclStrings().add("User:joe,Topic,LITERAL,test,Read,Allow,*");
255 | config.getAclStrings().add("User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*");
256 |
257 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
258 | configurator.applyConfig();
259 |
260 | sleep();
261 | DescribeAclsResult describeAclsResult = adminClient.describeAcls(new AclBindingFilter(ResourcePatternFilter.ANY, AccessControlEntryFilter.ANY));
262 |
263 | Assert.assertEquals(describeAclsResult.values().get().size(), 2);
264 | Assert.assertTrue(describeAclsResult.values().get().containsAll(config.getAclBindings()));
265 | }
266 |
267 |
268 | @Test
269 | public void testCreateAclsMix() throws ConfigurationException, ExecutionException, InterruptedException {
270 | Config config = new Config();
271 | config.getAclStrings().add("User:joe,Topic,LITERAL,test,Read,Allow,*");
272 | config.getAcls().add(new Acl("User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"));
273 |
274 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
275 | configurator.applyConfig();
276 |
277 | sleep();
278 | DescribeAclsResult describeAclsResult = adminClient.describeAcls(new AclBindingFilter(ResourcePatternFilter.ANY, AccessControlEntryFilter.ANY));
279 |
280 | Assert.assertEquals(describeAclsResult.values().get().size(), 2);
281 | Assert.assertTrue(describeAclsResult.values().get().containsAll(config.getAclBindings()));
282 | }
283 |
284 | @Test
285 | public void testWipe() throws ConfigurationException, ExecutionException, InterruptedException {
286 | Config config = new Config();
287 | String topicName = UUID.randomUUID().toString();
288 | Topic topic = new Topic(topicName, 1, (short)1);
289 | config.getTopics().add(topic);
290 |
291 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
292 | configurator.applyConfig();
293 | sleep();
294 | compareWithKafkaTopic(topic);
295 |
296 | configurator.wipeTopics(true, false);
297 |
298 | Assert.assertFalse(adminClient.listTopics().names().get().contains(topic.getName()));
299 | }
300 |
301 | @Test
302 | public void testWipeWithSchemaWipe() throws ConfigurationException, ExecutionException, InterruptedException, IOException, RestClientException {
303 | Config config = new Config();
304 | String topicName = UUID.randomUUID().toString();
305 | Topic topic = new Topic(topicName, 1, (short)1);
306 | config.getTopics().add(topic);
307 |
308 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
309 | configurator.applyConfig();
310 | sleep();
311 | compareWithKafkaTopic(topic);
312 |
313 | SchemaRegistryClient mock = Mockito.mock(SchemaRegistryClient.class);
314 | configurator.setSchemaRegistryClient(mock);
315 |
316 | String subjectName = topicName + "-value";
317 | Mockito.when(mock.getAllSubjects()).thenReturn(Collections.singletonList(subjectName));
318 | Mockito.when(mock.deleteSubject(subjectName)).thenReturn(Collections.singletonList(1));
319 |
320 | configurator.wipeTopics(true, true);
321 |
322 | Mockito.verify(mock).getAllSubjects();
323 | Mockito.verify(mock).deleteSubject(ArgumentMatchers.eq(subjectName));
324 |
325 | Assert.assertFalse(adminClient.listTopics().names().get().contains(topic.getName()));
326 | }
327 |
328 | @Test
329 | public void testWipeWithSchemaWipeTopicDoesNotExist() throws ConfigurationException, ExecutionException, InterruptedException, IOException, RestClientException {
330 | Config config = new Config();
331 | String topicName = UUID.randomUUID().toString();
332 | Topic topic = new Topic(topicName, 1, (short)1);
333 | config.getTopics().add(topic);
334 |
335 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
336 |
337 | SchemaRegistryClient mock = Mockito.mock(SchemaRegistryClient.class);
338 | configurator.setSchemaRegistryClient(mock);
339 |
340 | String subjectName = topicName + "-value";
341 | Mockito.when(mock.getAllSubjects()).thenReturn(Collections.singletonList(subjectName));
342 | Mockito.when(mock.deleteSubject(subjectName)).thenReturn(Collections.singletonList(1));
343 |
344 |
345 | configurator.wipeTopics(true, true);
346 |
347 | //Topic did not exist, it should still delete schema
348 | Mockito.verify(mock).getAllSubjects();
349 | Mockito.verify(mock).deleteSubject(ArgumentMatchers.eq(subjectName));
350 |
351 | Assert.assertFalse(adminClient.listTopics().names().get().contains(topic.getName()));
352 | }
353 |
354 | @Test
355 | public void testNonExistingTopicWipeNoException() throws ConfigurationException {
356 | Config config = new Config();
357 | String topicName = UUID.randomUUID().toString();
358 | Topic topic = new Topic(topicName, 1, (short)1);
359 | config.getTopics().add(topic);
360 |
361 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
362 |
363 | try {
364 | configurator.wipeTopics(true, false);
365 | } catch(Exception e){
366 | e.printStackTrace();
367 | Assert.fail("wipeTopics should not throw for a non-existent topic");
368 | }
369 | }
370 |
371 | @Test
372 | public void testWipeSchema() throws ConfigurationException, IOException, RestClientException {
373 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), new Config());
374 | SchemaRegistryClient mock = Mockito.mock(SchemaRegistryClient.class);
375 | configurator.setSchemaRegistryClient(mock);
376 |
377 | Mockito.when(mock.getAllSubjects()).thenReturn(Collections.singletonList("x-value"));
378 | Mockito.when(mock.deleteSubject("x-value")).thenReturn(Collections.singletonList(1));
379 |
380 | configurator.wipeSchema("x");
381 |
382 | Mockito.verify(mock).getAllSubjects();
383 | Mockito.verify(mock).deleteSubject(ArgumentMatchers.eq("x-value"));
384 | }
385 |
386 | @Test(expected = RuntimeException.class)
387 | public void testDuplicateTopicValidation() throws ExecutionException, InterruptedException, ConfigurationException {
388 | Config config = new Config();
389 | String topicName = UUID.randomUUID().toString();
390 | Topic topic = new Topic(topicName, 1, (short)1);
391 | Topic topic2 = new Topic(topicName, 2, (short)1);
392 | config.getTopics().add(topic);
393 | config.getTopics().add(topic2);
394 |
395 | Configurator configurator = new Configurator(Utils.readProperties(PROPERTIES_LOCATION), config);
396 | configurator.applyConfig();
397 |
398 | }
399 |
400 | private void compareWithKafkaTopic(Topic topic) throws ExecutionException, InterruptedException {
401 | DescribeTopicsResult result = adminClient.describeTopics(Collections.singletonList(topic.getName()));
402 | TopicDescription kafkaTopic = result.all().get().get(topic.getName());
403 | Assert.assertNotNull(kafkaTopic);
404 | Assert.assertEquals(kafkaTopic.partitions().size(), topic.getPartitions());
405 | Assert.assertEquals(kafkaTopic.partitions().get(0).replicas().size(), topic.getReplicationFactor());
406 | }
407 |
408 | private void deleteAllAcls() throws ExecutionException, InterruptedException {
409 | AclBindingFilter all = new AclBindingFilter(ResourcePatternFilter.ANY, AccessControlEntryFilter.ANY);
410 | DeleteAclsResult result = adminClient.deleteAcls(Collections.singleton(all));
411 | result.all().get();
412 | TimeUnit.SECONDS.sleep(1);
413 | }
414 |
415 | private void sleep() throws InterruptedException {
416 | TimeUnit.SECONDS.sleep(3);
417 | }
418 |
419 | }
420 |
--------------------------------------------------------------------------------
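The tests above settle cluster state with a fixed three-second sleep, which trades speed for flakiness in both directions. A hypothetical polling helper along these lines is one common alternative; the timeout and poll interval are arbitrary:

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

public class AwaitSketch {
    // Hypothetical replacement for the fixed sleep() in the tests above:
    // polls a condition until it holds or the timeout elapses.
    static boolean await(BooleanSupplier condition, long timeoutSeconds) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
        while (System.nanoTime() < deadline) {
            if (condition.getAsBoolean()) return true;
            TimeUnit.MILLISECONDS.sleep(200);
        }
        return condition.getAsBoolean();
    }
}
```

A test would then wait with, for example, await(() -> topicExists(name), 10) instead of a fixed pause, where topicExists is a hypothetical wrapper around adminClient.listTopics().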
/src/test/java/UtilsTest.java:
--------------------------------------------------------------------------------
1 | import co.navdeep.kafkaer.utils.Utils;
2 | import org.apache.commons.configuration2.Configuration;
3 | import org.apache.commons.configuration2.PropertiesConfiguration;
4 | import org.apache.commons.configuration2.ex.ConfigurationException;
5 | import org.apache.kafka.clients.admin.AdminClientConfig;
6 | import org.apache.kafka.clients.admin.Config;
7 | import org.junit.Assert;
8 | import org.junit.Test;
9 |
10 | import java.io.IOException;
11 | import java.util.Collections;
12 | import java.util.HashMap;
13 | import java.util.Map;
14 | import java.util.Properties;
15 |
16 | public class UtilsTest {
17 | @Test
18 | public void getClientConfigsTest() throws ConfigurationException {
19 | Configuration properties = Utils.readProperties("src/test/resources/test.properties");
20 | Properties config = Utils.getClientConfig(properties);
21 | Assert.assertEquals(config.size(), 2);
22 | Assert.assertEquals(config.getProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG), "localhost:9092");
23 | Assert.assertEquals(config.getProperty(AdminClientConfig.CLIENT_ID_CONFIG), "kafkaer");
24 | }
25 |
26 | @Test
27 | public void readPropertiesTest() throws ConfigurationException {
28 | Configuration properties = Utils.readProperties("src/test/resources/test.properties");
29 | Assert.assertEquals(properties.getString("kafkaer.bootstrap.servers"), "localhost:9092");
30 | Assert.assertEquals(properties.getString("topic.suffix"), "iamasuffix");
31 | Assert.assertEquals(properties.getString("kafkaer.client.id"), "kafkaer");
32 | }
33 |
34 | @Test
35 | public void propertiesToMapTest() throws ConfigurationException {
36 | Configuration properties = Utils.readProperties("src/test/resources/test.properties");
37 | Map<String, String> map = Utils.propertiesToMap(properties);
38 | Assert.assertEquals(map.get("kafkaer.bootstrap.servers"), "localhost:9092");
39 | Assert.assertEquals(map.get("topic.suffix"), "iamasuffix");
40 | Assert.assertEquals(map.get("kafkaer.client.id"), "kafkaer");
41 | }
42 |
43 | @Test
44 | public void readPropertiesAsMapTest() throws ConfigurationException {
45 | Map<String, String> map = Utils.readPropertiesAsMap("src/test/resources/test.properties");
46 | Assert.assertEquals(map.get("kafkaer.bootstrap.servers"), "localhost:9092");
47 | Assert.assertEquals(map.get("topic.suffix"), "iamasuffix");
48 | Assert.assertEquals(map.get("kafkaer.client.id"), "kafkaer");
49 | }
50 |
51 | @Test
52 | public void configsAsKafkaConfigTest(){
53 | Map<String, String> config = new HashMap<>();
54 | config.put("config1", "val1");
55 | config.put("config2", "val2");
56 |
57 | Config kafkaConfig = Utils.configsAsKafkaConfig(config);
58 | for(String key : config.keySet()){
59 | Assert.assertEquals(kafkaConfig.get(key).value(), config.get(key));
60 | }
61 | }
62 |
63 | @Test
64 | public void readConfigTest() throws IOException {
65 | co.navdeep.kafkaer.model.Config config = Utils.readConfig("src/test/resources/kafka-config.json", Collections.singletonMap("topic.suffix", "t"));
66 | co.navdeep.kafkaer.model.Config config2 = Utils.readConfig("src/test/resources/kafka-config.json", Collections.singletonMap("topic.suffix", "t2"));
67 |
68 | Assert.assertEquals(config.getTopics().get(0).getName(), "withSuffix-t");
69 | Assert.assertEquals(config2.getTopics().get(0).getName(), "withSuffix-t2");
70 | }
71 |
72 | @Test
73 | public void readConfigWithDescriptionTest() throws IOException {
74 | co.navdeep.kafkaer.model.Config config = Utils.readConfig("src/test/resources/kafka-config-with-description.json", Collections.singletonMap("topic.suffix", "t"));
75 | Assert.assertNotNull(config.getTopics().get(0).getDescription());
76 | }
77 |
78 | @Test
79 | public void readConfigWithoutDescriptionTest() throws IOException {
80 | co.navdeep.kafkaer.model.Config config = Utils.readConfig("src/test/resources/kafka-config.json", Collections.singletonMap("topic.suffix", "t"));
81 | Assert.assertNull(config.getTopics().get(0).getDescription());
82 | }
83 |
84 | @Test
85 | public void getSchemaRegistryUrlConfigTest() {
86 | Configuration p = new PropertiesConfiguration();
87 | p.addProperty(Utils.SCHEMA_REGISTRY_URL_CONFIG, "url");
88 | Assert.assertEquals("url", Utils.getSchemaRegistryUrl(p));
89 | }
90 |
91 | @Test
92 | public void getSchemaRegistryConfigsTest(){
93 | Configuration p = new PropertiesConfiguration();
94 | p.addProperty("kafkaer.schema.registry.url", "u");
95 | p.addProperty("x.y", "x");
96 | Map<String, String> configs = Utils.getSchemaRegistryConfigs(p);
97 | Assert.assertEquals(1, configs.size());
98 | Assert.assertEquals("u", configs.get("schema.registry.url"));
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/src/test/resources/kafka-config-with-description.json:
--------------------------------------------------------------------------------
1 | {
2 | "topics": [
3 | {
4 | "name": "withSuffix-${topic.suffix}",
5 | "partitions": 1,
6 | "replicationFactor": 1,
7 | "description": "This is a description",
8 | "configs": {
9 | "compression.type": "gzip",
10 | "cleanup.policy": "delete",
11 | "delete.retention.ms": "86400000"
12 | }
13 | },
14 | {
15 | "name": "test",
16 | "partitions": 1,
17 | "replicationFactor": 1,
18 | "description": "Also description",
19 | "configs": {
20 | "compression.type": "gzip",
21 | "cleanup.policy": "compact"
22 | }
23 | }
24 | ],
25 | "brokers": [
26 | {
27 | "id": "1",
28 | "config": {
29 | "sasl.login.refresh.window.jitter": "0.05"
30 | }
31 | }
32 | ],
33 | "acls" : [
34 | {
35 | "principal": "User:joe",
36 | "resourceType": "Topic",
37 | "patternType": "LITERAL",
38 | "resourceName": "test",
39 | "operation": "Read",
40 | "permissionType": "Allow",
41 | "host": "*"
42 | }
43 | ],
44 | "aclStrings": [
45 | "User:joe,Topic,LITERAL,test,Read,Allow,*",
46 | "User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"
47 | ]
48 | }
49 |
--------------------------------------------------------------------------------
/src/test/resources/kafka-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "topics": [
3 | {
4 | "name": "withSuffix-${topic.suffix}",
5 | "partitions": 1,
6 | "replicationFactor": 1,
7 | "configs": {
8 | "compression.type": "gzip",
9 | "cleanup.policy": "delete",
10 | "delete.retention.ms": "86400000"
11 | }
12 | },
13 | {
14 | "name": "test",
15 | "partitions": 1,
16 | "replicationFactor": 1,
17 | "configs": {
18 | "compression.type": "gzip",
19 | "cleanup.policy": "compact"
20 | }
21 | }
22 | ],
23 | "brokers": [
24 | {
25 | "id": "1",
26 | "config": {
27 | "sasl.login.refresh.window.jitter": "0.05"
28 | }
29 | }
30 | ],
31 | "acls" : [
32 | {
33 | "principal": "User:joe",
34 | "resourceType": "Topic",
35 | "patternType": "LITERAL",
36 | "resourceName": "test",
37 | "operation": "Read",
38 | "permissionType": "Allow",
39 | "host": "*"
40 | }
41 | ],
42 | "aclStrings": [
43 | "User:joe,Topic,LITERAL,test,Read,Allow,*",
44 | "User:jon,Cluster,LITERAL,kafka-cluster,Create,Allow,*"
45 | ]
46 | }
47 |
--------------------------------------------------------------------------------
/src/test/resources/test.properties:
--------------------------------------------------------------------------------
1 | #admin client configs
2 | kafkaer.bootstrap.servers=localhost:9092
3 | kafkaer.client.id=kafkaer
4 |
5 | #variables
6 | topic.suffix=iamasuffix
--------------------------------------------------------------------------------