├── .gitignore
├── LICENSE
├── README.md
├── docker
│   ├── Dockerfile
│   ├── compose
│   │   ├── kafka-cluster.yml
│   │   ├── kafka-schema-registry.yml
│   │   ├── kafka-single-node.yml
│   │   └── pg_compose.yml
│   └── docker_commands.txt
├── pom.xml
└── src
    ├── main
    │   ├── java
    │   │   └── org
    │   │       └── jsmart
    │   │           └── zerocode
    │   │               └── zerocodejavaexec
    │   │                   └── utils
    │   │                       └── TokenGenerator.java
    │   └── resources
    │       ├── META-INF
    │       │   └── package.properties
    │       └── logback.xml
    └── test
        ├── java
        │   └── org
        │       └── jsmart
        │           └── zerocode
        │               └── integration
        │                   └── tests
        │                       ├── kafka
        │                       │   ├── KafkaSuite.java
        │                       │   ├── consume
        │                       │   │   ├── KafkaConsumeAvroTest.java
        │                       │   │   ├── KafkaConsumeIntKeyTest.java
        │                       │   │   ├── KafkaConsumeJsonTest.java
        │                       │   │   ├── KafkaConsumeRawTest.java
        │                       │   │   ├── KafkaConsumeSeekOffsetTest.java
        │                       │   │   ├── KafkaConsumeTest.java
        │                       │   │   ├── KafkaConsumeUniqueGroupIdTest.java
        │                       │   │   ├── file
        │                       │   │   │   └── KafkaConsumeDumpToFileTest.java
        │                       │   │   └── negative
        │                       │   │       └── KafkaConsumeAvroNegativeTest.java
        │                       │   └── produce
        │                       │       ├── KafkaProduceAsyncTest.java
        │                       │       ├── KafkaProduceIntKeyTest.java
        │                       │       ├── KafkaProduceJsonButNotReallyJsonTest.java
        │                       │       ├── KafkaProduceJsonTest.java
        │                       │       ├── KafkaProduceRawTest.java
        │                       │       ├── KafkaProduceTest.java
        │                       │       ├── KafkaProduceToPartitionTest.java
        │                       │       ├── KafkaProduceTwoRecordsTest.java
        │                       │       ├── KafkaProduceUniqueClientIdTest.java
        │                       │       ├── KafkaProduceWithTimeStampTest.java
        │                       │       ├── KafkaPublishFailureTest.java
        │                       │       ├── file
        │                       │       │   ├── KafkaProduceAsyncFromFileRawTest.java
        │                       │       │   ├── KafkaProduceSyncFromFileJsonTest.java
        │                       │       │   └── KafkaProduceSyncFromFileRawTest.java
        │                       │       └── negative
        │                       │           └── KafkaProduceSyncWrongFileNameTest.java
        │                       └── more
        │                           ├── customclient
        │                           │   └── KafkaProduceCustomClientTest.java
        │                           └── ksql
        │                               └── KafkaKsqlTest.java
        └── resources
            ├── kafka
            │   ├── consume
            │   │   ├── file_dump
            │   │   │   ├── test_kafka_consume_record_dump_json_json.json
            │   │   │   ├── test_kafka_consume_record_dump_raw_json.json
            │   │   │   └── test_kafka_consume_record_dump_raw_raw.json
            │   │   ├── ksql
            │   │   │   ├── WIP_ISSUE_test_ksql_print_records.json
            │   │   │   └── test_ksql_query.json
            │   │   ├── negative
            │   │   │   ├── test_kafka_rest_proxy_avro_msg_wrong_value.json
            │   │   │   └── test_produce_step_direct_invalid_avro_msg.json
            │   │   ├── test_kafka_consume.json
            │   │   ├── test_kafka_consume_avro_msg_json.json
            │   │   ├── test_kafka_consume_avro_msg_raw_int.json
            │   │   ├── test_kafka_consume_avro_msg_raw_json.json
            │   │   ├── test_kafka_consume_int_key.json
            │   │   ├── test_kafka_consume_json_msg.json
            │   │   ├── test_kafka_consume_raw_msg.json
            │   │   ├── test_kafka_consume_seek_offset.json
            │   │   └── test_kafka_consume_via_unique_group_id.json
            │   ├── produce
            │   │   ├── file_produce
            │   │   │   ├── test_kafka_produce_async_from_file.json
            │   │   │   ├── test_kafka_produce_sync_from_file.json
            │   │   │   └── test_kafka_produce_sync_from_file_json.json
            │   │   ├── negative
            │   │   │   └── test_kafka_produce_from_worng_filename.json
            │   │   ├── test_kafka_produce.json
            │   │   ├── test_kafka_produce_2_records.json
            │   │   ├── test_kafka_produce_ack_metadata.json
            │   │   ├── test_kafka_produce_async.json
            │   │   ├── test_kafka_produce_int_key.json
            │   │   ├── test_kafka_produce_json_record.json
            │   │   ├── test_kafka_produce_raw.json
            │   │   ├── test_kafka_produce_to_partition.json
            │   │   ├── test_kafka_produce_with_timestamp.json
            │   │   └── test_kafka_publish_failed.json
            │   └── produce_data
            │       ├── test_data_json.json
            │       └── test_data_raw.json
            └── kafka_servers
                ├── kafka_consumer.properties
                ├── kafka_consumer_avro.properties
                ├── kafka_consumer_double_key.properties
                ├── kafka_consumer_int_key.properties
                ├── kafka_consumer_unique.properties
                ├── kafka_producer.properties
                ├── kafka_producer_avro.properties
                ├── kafka_producer_double_key.properties
                ├── kafka_producer_int_key.properties
                ├── kafka_producer_unique.properties
                ├── kafka_test_bad_server.properties
                ├── kafka_test_server.properties
                ├── kafka_test_server_avro.properties
                ├── kafka_test_server_double_key.properties
                ├── kafka_test_server_int_key.properties
                └── kafka_test_server_unique.properties
/.gitignore:
--------------------------------------------------------------------------------
1 | *.class
2 | *.log
3 |
4 | # Mobile Tools for Java (J2ME)
5 | .mtj.tmp/
6 |
7 | # Package Files #
8 | *.jar
9 | *.war
10 | *.ear
11 |
12 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
13 | hs_err_pid*
14 |
15 | *iml
16 | *.idea
17 | /target
18 | .DS_Store
19 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2018
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Kafka Testing Hello World examples
2 |
 3 | This repo uses the open-source lib [zerocode-tdd](https://github.com/authorjapps/zerocode) for declarative-style testing. Many flavours of [HelloWorld samples](https://github.com/authorjapps/zerocode/blob/master/README.md#hello-world-) are available to clone and run.
4 |
 5 | > _Please make sure you bring up [Kafka in Docker](https://github.com/authorjapps/zerocode-docker-factory/wiki/Docker-container-for-Kafka-and-Schema-Registry) prior to running the tests (see the Quick start section at the bottom of this README)._
6 |
 7 | Let's learn the simplest and most efficient way of automated testing of Kafka applications.
 8 | This is particularly useful for:
 9 | 
 10 | + Micro-services involving REST and Kafka
 11 | + Reads/writes against Big Data stores, e.g. HBase/Hadoop
 12 | + Any other data pipelines
13 |
14 |
 15 | Try-at-home examples and much more:
16 |
17 | + [Kafka testing - Examples to run at home](https://github.com/authorjapps/hello-kafka-stream-testing/tree/master/src/test/resources/kafka)
18 |
19 | + [Kafka testing - An Intro](https://github.com/authorjapps/zerocode/wiki/Kafka-Testing-Introduction)
20 |
21 | + [Other HelloWorld examples](https://github.com/authorjapps/zerocode/blob/master/README.md#hello-world-), such as Spring boot app testing, Performance testing, Kotlin app testing etc.
22 |
23 |
24 |
25 |
26 |
 27 | To run the test below, jump to the corresponding JUnit @Test.
28 |
29 | ```java
30 | @TargetEnv("kafka_servers/kafka_test_server.properties")
31 | @RunWith(ZeroCodeUnitRunner.class)
32 | public class KafkaProduceTest {
33 |
34 | @Test
35 | @Scenario("kafka/produce/test_kafka_produce.json")
36 | public void testProduce() throws Exception {
37 | // No code needed here.
38 | }
39 |
40 | }
41 | ```
42 |
 43 | In the above code:
 44 | 
 45 | - `test_kafka_produce.json` is the test case containing the JSON step(s); see the sample below.
 46 | - `kafka_test_server.properties` contains the "Broker" details and the Producer/Consumer configs
 47 | - `@RunWith(ZeroCodeUnitRunner.class)` is a custom JUnit runner which runs the test
48 |
49 |
 50 | e.g. the `test_kafka_produce.json` scenario:
51 | ```json
52 | {
53 | "scenarioName": "Simple Produce and Consume a record to-from a kafka topic",
54 | "steps": [
55 | {
56 | "name": "produce_step",
57 | "url": "kafka-topic:demo-topic1",
58 | "operation": "produce",
59 | "request": {
60 | "records":[
61 | {
62 | "key": "${RANDOM.NUMBER}",
63 | "value": "Hello World"
64 | }
65 | ]
66 | },
67 | "assertions": {
68 | "status" : "Ok"
69 | }
70 | },
71 | {
72 | "name": "consume_step",
73 | "url": "kafka-topic:demo-topic1",
74 | "operation": "consume",
75 | "request": {
76 | },
77 | "assertions": {
78 | "size": 1,
79 | "records": [
80 | {
81 | "key" : "$NOT.NULL",
82 | "value": "Hello World"
83 | }
84 | ]
85 | }
86 | }
87 | ]
88 | }
89 |
90 | ```
91 |
 92 | Now you can see the following:
93 | * Reports @ `target`
94 | * Logs @ `target/logs/test_logs.log`
95 | * Test coverage CSV Report @ `target/zerocode-junit-granular-report.csv`
96 | * Test coverage Chart @ `target/zerocode-junit-interactive-fuzzy-search.html`
97 | * More [reports](https://github.com/authorjapps/zerocode#generated-reports-and-charts)
98 |
99 | ## IDE References:
100 |
 101 | ### IntelliJ (general keyboard shortcuts):
 102 | See more [IntelliJ keys](https://www.jetbrains.com/help/idea/mastering-intellij-idea-keyboard-shortcuts.html)
103 | 1. Open a matching java file -> Ctrl + n
104 | 1. Open a matching JSON or XML file -> Ctrl + Shift + n
105 | 1. To navigate to file -> Ctrl + Click
 106 | 1. Find usages of a JSON file, Java file, or Java method/variable -> Alt + F7
107 |
 108 | ### Eclipse (general keyboard shortcuts):
109 | For quick reference only - See more [eclipse keys](https://www.linkedin.com/pulse/top-30-eclipse-keyboard-shortcuts-java-programmer-jayveersinh-solanki/)
110 | 1. Open a matching java file -> Ctrl + Shift + R
111 | 1. Open a matching JSON file -> Ctrl + Shift + R
112 | 1. To navigate to file -> Ctrl + Click
 113 | 1. Click a JSON file and navigate to its JUnit Test -> Mac: ⌥+⌘+G (Alt+Cmd+G), Windows: Alt+Ctrl+G
114 |
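 115 | ## Quick start (local run):
 116 | 
 117 | A minimal sketch of a local run, assuming Docker and Maven are installed. The compose commands are the ones listed in `docker/docker_commands.txt`, and `mvn test` runs the `KafkaSuite` wired into the `maven-surefire-plugin` in `pom.xml`. The `kafka_servers/*.properties` files under `src/test/resources` are expected to point the tests at this broker (`localhost:9092`).
 118 | 
 119 | ```bash
 120 | # bring up a single-node Kafka broker (reachable as localhost:9092 from the host machine)
 121 | cd docker/compose
 122 | docker-compose -f kafka-single-node.yml up -d
 123 | 
 124 | # back at the repo root, run the JUnit suite configured in the surefire plugin
 125 | cd ../..
 126 | mvn clean test
 127 | 
 128 | # tear down when finished (this also resets topic offsets - see KafkaConsumeSeekOffsetTest)
 129 | cd docker/compose && docker-compose -f kafka-single-node.yml down
 130 | ```
 131 | 
 132 | The Avro and KSQL tests are `@Ignore`d by default; see their notes about uncommenting the `kafka-avro-serializer` dependency in `pom.xml`.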
--------------------------------------------------------------------------------
/docker/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM confluentinc/cp-kafka
2 |
3 | VOLUME ["/var/lib/${COMPONENT}/data", "/etc/${COMPONENT}/secrets"]
4 |
5 | COPY include/etc/confluent/docker /etc/confluent/docker
6 |
7 | CMD ["/etc/confluent/docker/run"]
8 |
9 | ENTRYPOINT ["/docker_entrypoint.sh"]
10 |
--------------------------------------------------------------------------------
/docker/compose/kafka-cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: '2'
3 | services:
4 | zookeeper-1:
5 | image: confluentinc/cp-zookeeper:5.0.1
6 | environment:
7 | ZOOKEEPER_SERVER_ID: 1
8 | ZOOKEEPER_CLIENT_PORT: 22181
9 | ZOOKEEPER_TICK_TIME: 2000
10 | ZOOKEEPER_INIT_LIMIT: 5
11 | ZOOKEEPER_SYNC_LIMIT: 2
12 | ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
13 | network_mode: host
14 | extra_hosts:
15 | - "moby:127.0.0.1"
16 |
17 | zookeeper-2:
18 | image: confluentinc/cp-zookeeper:5.0.1
19 | environment:
20 | ZOOKEEPER_SERVER_ID: 2
21 | ZOOKEEPER_CLIENT_PORT: 32181
22 | ZOOKEEPER_TICK_TIME: 2000
23 | ZOOKEEPER_INIT_LIMIT: 5
24 | ZOOKEEPER_SYNC_LIMIT: 2
25 | ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
26 | network_mode: host
27 | extra_hosts:
28 | - "moby:127.0.0.1"
29 |
30 | zookeeper-3:
31 | image: confluentinc/cp-zookeeper:5.0.1
32 | environment:
33 | ZOOKEEPER_SERVER_ID: 3
34 | ZOOKEEPER_CLIENT_PORT: 42181
35 | ZOOKEEPER_TICK_TIME: 2000
36 | ZOOKEEPER_INIT_LIMIT: 5
37 | ZOOKEEPER_SYNC_LIMIT: 2
38 | ZOOKEEPER_SERVERS: localhost:22888:23888;localhost:32888:33888;localhost:42888:43888
39 | network_mode: host
40 | extra_hosts:
41 | - "moby:127.0.0.1"
42 |
43 | kafka-1:
44 | image: confluentinc/cp-kafka:5.0.1
45 | network_mode: host
46 | depends_on:
47 | - zookeeper-1
48 | - zookeeper-2
49 | - zookeeper-3
50 | environment:
51 | KAFKA_BROKER_ID: 1
52 | KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181
53 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:19092
54 | extra_hosts:
55 | - "moby:127.0.0.1"
56 |
57 | kafka-2:
58 | image: confluentinc/cp-kafka:5.0.1
59 | network_mode: host
60 | depends_on:
61 | - zookeeper-1
62 | - zookeeper-2
63 | - zookeeper-3
64 | environment:
65 | KAFKA_BROKER_ID: 2
66 | KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181
67 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:29092
68 | extra_hosts:
69 | - "moby:127.0.0.1"
70 |
71 | kafka-3:
72 | image: confluentinc/cp-kafka:5.0.1
73 | network_mode: host
74 | depends_on:
75 | - zookeeper-1
76 | - zookeeper-2
77 | - zookeeper-3
78 | environment:
79 | KAFKA_BROKER_ID: 3
80 | KAFKA_ZOOKEEPER_CONNECT: localhost:22181,localhost:32181,localhost:42181
81 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:39092
82 | extra_hosts:
83 | - "moby:127.0.0.1"
84 |
--------------------------------------------------------------------------------
/docker/compose/kafka-schema-registry.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: '3'
3 | services:
4 | zookeeper:
5 | image: confluentinc/cp-zookeeper:5.1.0
6 | environment:
7 | ZOOKEEPER_CLIENT_PORT: 2181
8 | ZOOKEEPER_TICK_TIME: 2000
9 |
10 | kafka:
11 | image: confluentinc/cp-kafka:5.1.0
12 | depends_on:
13 | - zookeeper
14 | ports:
15 | - 9092:9092
16 | environment:
17 | KAFKA_BROKER_ID: 1
18 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
19 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
20 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
21 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
22 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
23 |
24 | schema-registry:
25 | image: confluentinc/cp-schema-registry:5.1.0
26 | depends_on:
27 | - kafka
28 | - zookeeper
29 | environment:
30 | SCHEMA_REGISTRY_HOST_NAME: schema-registry
31 | SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: zookeeper:2181
32 | SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081
33 | ports:
34 | - "8081:8081"
35 |
36 | rest-proxy:
37 | image: confluentinc/cp-kafka-rest:5.1.0
38 | depends_on:
39 | - zookeeper
40 | - kafka
41 | - schema-registry
42 | environment:
43 | KAFKA_REST_HOST_NAME: rest-proxy
44 | KAFKA_REST_ZOOKEEPER_CONNECT: zookeeper:2181
45 | KAFKA_REST_BOOTSTRAP_SERVERS: kafka:29092
46 | KAFKA_REST_LISTENERS: http://0.0.0.0:8082
47 | KAFKA_REST_SCHEMA_REGISTRY_URL: http://schema-registry:8081
48 | ports:
49 | - "8082:8082"
50 |
51 | ksql-server:
52 | image: "confluentinc/cp-ksql-server:5.1.0"
53 | depends_on:
54 | - kafka
55 | - schema-registry
56 | environment:
57 | KSQL_BOOTSTRAP_SERVERS: kafka:29092
58 | KSQL_LISTENERS: http://0.0.0.0:8088
59 | KSQL_KSQL_SCHEMA_REGISTRY_URL: http://schema-registry:8081
60 | ports:
61 | - "8088:8088"
62 |
63 | ksql-cli:
64 | image: confluentinc/cp-ksql-cli:5.1.0
65 | depends_on:
66 | - ksql-server
67 | entrypoint: /bin/sh
68 | tty: true
69 |
--------------------------------------------------------------------------------
/docker/compose/kafka-single-node.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: '2'
3 | services:
4 | zookeeper:
5 | image: confluentinc/cp-zookeeper:5.0.1
6 | environment:
7 | ZOOKEEPER_CLIENT_PORT: 2181
8 | ZOOKEEPER_TICK_TIME: 2000
9 |
10 | kafka:
11 | # -----------------------------------------------------------------------------
12 | # For connections _internal_ to the docker network, such as from other services
13 | # and components, use kafka:29092.
14 | #
15 | # See https://rmoff.net/2018/08/02/kafka-listeners-explained/ for details
16 | # -----------------------------------------------------------------------------
17 | image: confluentinc/cp-kafka:5.0.1
18 | depends_on:
19 | - zookeeper
20 | ports:
21 | - 9092:9092
22 | environment:
23 | KAFKA_BROKER_ID: 1
24 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
25 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka:29092,PLAINTEXT_HOST://localhost:9092
26 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT
27 | KAFKA_INTER_BROKER_LISTENER_NAME: PLAINTEXT
28 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
--------------------------------------------------------------------------------
/docker/compose/pg_compose.yml:
--------------------------------------------------------------------------------
1 | # Use postgres/example user/password credentials
2 | # Then visit http://localhost:8080, or http://swarm-ip:8080, or http://host-ip:8080 (as appropriate).
3 | # Tested with adminer:4.7.0
4 | # How to Wiki : https://github.com/authorjapps/zerocode-docker-factory/wiki/Docker-container-for-a-Postgres-DB
5 | version: '3'
6 |
7 | services:
8 | db:
9 | image: postgres:9.3
10 | restart: always
11 | environment:
12 | POSTGRES_PASSWORD: example
13 | ports:
14 | - 35432:5432
15 |
16 | adminer:
17 | image: adminer:4.7.0
18 | restart: always
19 | ports:
20 | - 8080:8080
--------------------------------------------------------------------------------
/docker/docker_commands.txt:
--------------------------------------------------------------------------------
1 | docker-compose -f kafka-single-node.yml up -d
2 |
3 | docker exec -it compose_kafka_1 bash
4 |
5 | kafka-topics --zookeeper zookeeper:2181 --list
6 |
7 | kafka-console-consumer --bootstrap-server kafka:29092 --topic demo-topic-1 --from-beginning
8 |
9 | docker-compose -f kafka-single-node.yml down
10 |
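 11 | # Note: kafka:29092 is the listener for clients running inside the compose network (like the console consumer above);
 12 | # from the host machine use localhost:9092 instead - see the listener comments in compose/kafka-single-node.yml.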
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <project xmlns="http://maven.apache.org/POM/4.0.0"
 3 |          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 4 |          xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
 5 | 
 6 |     <modelVersion>4.0.0</modelVersion>
 7 | 
 8 |     <groupId>org.jsmart</groupId>
 9 |     <artifactId>hello-world-kafka-testing</artifactId>
 10 |     <version>0.0.1-SNAPSHOT</version>
 11 | 
 12 |     <packaging>jar</packaging>
 13 |     <name>Zerocode Hello World Kafka Testing</name>
 14 |     <description>Zerocode Hello World Kafka Stream Testing Examples</description>
 15 | 
 16 |     <properties>
 17 |         <version.zerocode-tdd>1.3.5</version.zerocode-tdd>
 18 |         <junit.version>4.12</junit.version>
 19 |         <java.version>1.8</java.version>
 20 |         <!-- NOTE: two further version properties (values 3.0.0 and 5.1.0) belong here; their tag names are not recoverable -->
 21 |     </properties>
 22 | 
 23 |     <dependencies>
 24 |         <dependency>
 25 |             <groupId>junit</groupId>
 26 |             <artifactId>junit</artifactId>
 27 |             <version>${junit.version}</version>
 28 |             <scope>compile</scope>
 29 |         </dependency>
 30 |         <dependency>
 31 |             <groupId>org.jsmart</groupId>
 32 |             <artifactId>zerocode-tdd</artifactId>
 33 |             <version>${version.zerocode-tdd}</version>
 34 |         </dependency>
 35 |         <!-- NOTE: a commented-out 'kafka-avro-serializer' dependency (needed for the Avro/KSQL tests) is elided here -->
 36 |     </dependencies>
 37 | 
 38 |     <build>
 39 |         <plugins>
 40 |             <plugin>
 41 |                 <groupId>org.apache.maven.plugins</groupId>
 42 |                 <artifactId>maven-compiler-plugin</artifactId>
 43 |                 <configuration>
 44 |                     <source>${java.version}</source>
 45 |                     <target>${java.version}</target>
 46 |                 </configuration>
 47 |             </plugin>
 48 |             <plugin>
 49 |                 <groupId>org.apache.maven.plugins</groupId>
 50 |                 <artifactId>maven-surefire-plugin</artifactId>
 51 |                 <version>2.19.1</version>
 52 |                 <configuration>
 53 |                     <includes>
 54 |                         <include>org.jsmart.zerocode.integration.tests.kafka.KafkaSuite</include>
 55 |                     </includes>
 56 |                 </configuration>
 57 |             </plugin>
 58 |         </plugins>
 59 |     </build>
 60 | 
 61 | </project>
--------------------------------------------------------------------------------
/src/main/java/org/jsmart/zerocode/zerocodejavaexec/utils/TokenGenerator.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.zerocodejavaexec.utils;
2 |
3 | import java.time.LocalDateTime;
4 | import java.util.HashMap;
5 | import java.util.Map;
6 |
7 | public class TokenGenerator {
8 |
 9 | public Map<String, String> generateNew(String anyParam){
 10 | Map<String, String> tokenMap = new HashMap<>();
11 |
12 | // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
13 | // Your token generation logic goes here.
14 | // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
 15 | // You need to put a valid implementation for this token.
 16 | // For the time being it is simply the current time-stamp, which is unique enough for a demo.
 17 | // The key "newToken" used below is just for demo purposes.
18 | // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
19 | String uniqueToken = tokenFromCurrentTimeStamp();
20 |
21 | // ------------------------------------
22 | // Now put this token into a map key
23 | // - Choose any key name e.g. newToken
24 | // ------------------------------------
25 | tokenMap.put("newToken", uniqueToken);
26 |
27 | return tokenMap;
28 | }
29 |
30 | private String tokenFromCurrentTimeStamp() {
31 | return LocalDateTime.now().toString()
32 | .replace(":", "-")
33 | .replace(".", "-");
34 | }
35 | }
36 |
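 37 | // Usage note (a sketch based on the zerocode java-execution step convention): a scenario step can point its
 38 | // "url" at this class's fully-qualified name and name generateNew as the method/operation to invoke; the
 39 | // returned map entry "newToken" can then be reused in later steps via JSON path.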
--------------------------------------------------------------------------------
/src/main/resources/META-INF/package.properties:
--------------------------------------------------------------------------------
1 | # Package meta information
2 | pkg.title=${pkg.title}
3 | pkg.name=${pkg.name}
4 | pkg.code=${pkg.code}
5 | pkg.url=${pkg.url}
6 | pkg.description=${pkg.description}
7 | pkg.product.code=${pkg.product.code}
8 | pkg.technology.code=${pkg.technology.code}
9 |
10 | pkg.version.number=${pkg.version.number}
11 | pkg.version.qualifier=${pkg.version.qualifier}
12 | pkg.version.buildNumber=${pkg.version.buildNumber}
13 | pkg.version.string=${pkg.version.string}
14 |
15 | pkg.build.sourceEncoding=${pkg.build.sourceEncoding}
16 | pkg.build.javaVersion=${pkg.build.javaVersion}
17 | pkg.build.sourceJavaVersion=${pkg.build.sourceJavaVersion}
18 | pkg.build.targetJavaVersion=${pkg.build.targetJavaVersion}
19 |
20 | # Organization related properties
21 | org.name=${org.name}
22 | org.fullName=${org.fullName}
23 | org.url=${org.url}
24 | org.vendorId=${org.vendorId}
25 |
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
 1 | <?xml version="1.0" encoding="UTF-8"?>
 2 | <configuration>
 3 |     <!-- NOTE: appender names/classes and the root level are assumed; the log file path and patterns are as configured -->
 4 |     <appender name="FILE" class="ch.qos.logback.core.FileAppender">
 5 |         <file>target/logs/kafka_test_logs.log</file>
 6 |         <append>true</append>
 7 |         <encoder>
 8 |             <pattern>%d [%thread] %-5level %logger{100} - %msg%n</pattern>
 9 |         </encoder>
 10 |     </appender>
 11 | 
 12 |     <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
 13 |         <encoder>
 14 |             <pattern>%d [%thread] %-5level %logger{100} - %msg%n</pattern>
 15 |         </encoder>
 16 |     </appender>
 17 | 
 18 |     <root level="INFO">
 19 |         <appender-ref ref="FILE"/>
 20 |         <appender-ref ref="STDOUT"/>
 21 |     </root>
 22 | </configuration>
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/KafkaSuite.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka;
2 |
3 | import org.jsmart.zerocode.integration.tests.kafka.consume.*;
4 | import org.jsmart.zerocode.integration.tests.kafka.consume.file.KafkaConsumeDumpToFileTest;
5 | import org.jsmart.zerocode.integration.tests.more.ksql.KafkaKsqlTest;
6 | import org.jsmart.zerocode.integration.tests.kafka.consume.negative.KafkaConsumeAvroNegativeTest;
7 | import org.jsmart.zerocode.integration.tests.more.customclient.KafkaProduceCustomClientTest;
8 | import org.jsmart.zerocode.integration.tests.kafka.produce.*;
9 | import org.jsmart.zerocode.integration.tests.kafka.produce.file.KafkaProduceAsyncFromFileRawTest;
10 | import org.jsmart.zerocode.integration.tests.kafka.produce.file.KafkaProduceSyncFromFileJsonTest;
11 | import org.jsmart.zerocode.integration.tests.kafka.produce.file.KafkaProduceSyncFromFileRawTest;
12 | import org.jsmart.zerocode.integration.tests.kafka.produce.negative.KafkaProduceSyncWrongFileNameTest;
13 | import org.junit.runner.RunWith;
14 | import org.junit.runners.Suite;
15 |
16 | @Suite.SuiteClasses({
17 | KafkaProduceTest.class,
18 | KafkaConsumeTest.class,
19 | KafkaProduceCustomClientTest.class,
20 | KafkaProduceToPartitionTest.class,
21 | KafkaProduceWithTimeStampTest.class,
22 | KafkaProduceTwoRecordsTest.class,
23 | KafkaProduceRawTest.class,
24 | KafkaProduceJsonTest.class,
25 | KafkaConsumeRawTest.class,
26 | KafkaConsumeJsonTest.class,
27 | KafkaProduceIntKeyTest.class,
28 | KafkaConsumeIntKeyTest.class,
29 | KafkaConsumeAvroTest.class,
30 | KafkaConsumeAvroNegativeTest.class,
31 | KafkaConsumeDumpToFileTest.class,
32 | KafkaProduceAsyncTest.class,
33 | KafkaProduceAsyncFromFileRawTest.class,
34 | KafkaProduceSyncFromFileRawTest.class,
35 | KafkaProduceSyncFromFileJsonTest.class,
36 | KafkaProduceSyncWrongFileNameTest.class,
37 | KafkaConsumeSeekOffsetTest.class,
38 | KafkaKsqlTest.class
39 | })
40 | @RunWith(Suite.class)
41 | public class KafkaSuite {
42 | }
43 |
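 44 | // Note: this suite is the <include> configured for the maven-surefire-plugin in pom.xml, so a plain `mvn test` runs it by default.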
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeAvroTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @Ignore("For running these Avro tests please uncomment the depdendency 'kafka-avro-serializer' in the 'pom.xml'." +
11 | "And also uncomment the '' section")
12 | @TargetEnv("kafka_servers/kafka_test_server_avro.properties")
13 | @RunWith(ZeroCodeUnitRunner.class)
14 | public class KafkaConsumeAvroTest {
15 |
16 | @Test
17 | @JsonTestCase("kafka/consume/test_kafka_consume_avro_msg_json.json")
18 | public void testKafkaConsume_avroJson() throws Exception {
19 | }
20 |
21 | @Test
22 | @JsonTestCase("kafka/consume/test_kafka_consume_avro_msg_raw_int.json")
23 | public void testKafkaConsume_avroRaw() throws Exception {
24 | }
25 |
26 | @Test
27 | @JsonTestCase("kafka/consume/test_kafka_consume_avro_msg_raw_json.json")
28 | public void testKafkaConsume_avroRawJson() throws Exception {
29 | }
30 |
31 | }
32 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeIntKeyTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server_double_key.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaConsumeIntKeyTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/consume/test_kafka_consume_int_key.json")
15 | public void testKafkaConsume_intKey() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeJsonTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @TargetEnv("kafka_servers/kafka_test_server.properties")
11 | @RunWith(ZeroCodeUnitRunner.class)
12 | public class KafkaConsumeJsonTest {
13 |
14 | @Test
15 | @JsonTestCase("kafka/consume/test_kafka_consume_json_msg.json")
16 | public void testKafkaConsume_json() throws Exception {
17 | }
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeRawTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaConsumeRawTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/consume/test_kafka_consume_raw_msg.json")
15 | public void testKafkaConsume_raw() throws Exception {
16 | }
17 | }
18 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeSeekOffsetTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @TargetEnv("kafka_servers/kafka_test_server.properties")
11 | @RunWith(ZeroCodeUnitRunner.class)
12 | public class KafkaConsumeSeekOffsetTest {
13 |
 14 | // Locally this passes once per run; it fails on a re-run because the committed offset increases.
 15 | // Run 'docker-compose down' to reset the offsets to 0 - then it can pass again.
 16 | // Change "seek": "demo-c3,0,1" to "demo-c3,0,3", "demo-c3,0,5" etc. manually to see it passing.
 17 | // Note - it will always pass in CI, because a fresh container spins up there.
 18 | @Ignore("Un-ignore this once it can run repeatedly; otherwise the release to Maven Central will fail")
19 | @Test
20 | @JsonTestCase("kafka/consume/test_kafka_consume_seek_offset.json")
21 | public void testKafkaConsume_seekOffset() throws Exception {
22 | }
23 |
24 | }
25 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @TargetEnv("kafka_servers/kafka_test_server.properties")
11 | @RunWith(ZeroCodeUnitRunner.class)
12 | public class KafkaConsumeTest {
13 |
14 | @Test
15 | @JsonTestCase("kafka/consume/test_kafka_consume.json")
16 | public void testKafkaConsume() throws Exception {
17 | }
18 |
19 | }
20 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/KafkaConsumeUniqueGroupIdTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server_unique.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaConsumeUniqueGroupIdTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/consume/test_kafka_consume_via_unique_group_id.json")
15 | public void testKafkaConsume_uniqueGroupId() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/file/KafkaConsumeDumpToFileTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume.file;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaConsumeDumpToFileTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/consume/file_dump/test_kafka_consume_record_dump_raw_raw.json")
15 | public void testKafka_RawRecordDump() throws Exception {
16 | }
17 |
18 | @Test
19 | @JsonTestCase("kafka/consume/file_dump/test_kafka_consume_record_dump_raw_json.json")
20 | public void testKafka_RawDumpOfJsonRecord() throws Exception {
21 | }
22 |
23 | @Test
24 | @JsonTestCase("kafka/consume/file_dump/test_kafka_consume_record_dump_json_json.json")
25 | public void testKafka_JsonDumpOfJsonRecord() throws Exception {
26 | }
27 |
28 | }
29 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/consume/negative/KafkaConsumeAvroNegativeTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.consume.negative;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @TargetEnv("kafka_servers/kafka_test_server_avro.properties")
11 | @RunWith(ZeroCodeUnitRunner.class)
12 | public class KafkaConsumeAvroNegativeTest {
13 |
14 | @Test
15 | @JsonTestCase("kafka/consume/negative/test_kafka_rest_proxy_avro_msg_wrong_value.json")
16 | public void testKafkaConsume_avroWrongValue() throws Exception {
17 | }
18 |
19 | @Ignore("For running Avro tests please uncomment the dependency 'kafka-avro-serializer' in the 'pom.xml' file")
20 | @Test
21 | @JsonTestCase("kafka/consume/negative/test_produce_step_direct_invalid_avro_msg.json")
22 | public void testKafkaWrongData_loadDirectTopic() throws Exception {
23 | }
24 |
25 | }
26 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceAsyncTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceAsyncTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_async.json")
15 | public void testProduceAnd_async() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceIntKeyTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server_double_key.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceIntKeyTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_int_key.json")
15 | public void testProduce_intOrDoubleKey() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceJsonButNotReallyJsonTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | public class KafkaProduceJsonButNotReallyJsonTest {
10 |
11 | @Test
12 | public void testNothing() {
 13 | // See KafkaConsumeJsonNotReallyJsonTest.java,
 14 | // which has both the produce and the consume test steps
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceJsonTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server_int_key.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceJsonTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_json_record.json")
15 | public void testProduce_json() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceRawTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server_double_key.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceRawTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_raw.json")
15 | public void testProduce_raw() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce.json")
15 | public void testProduce() throws Exception {
16 | }
17 |
18 | @Test
19 | @JsonTestCase("kafka/produce/test_kafka_produce_ack_metadata.json")
20 | public void testProduceAnd_ack() throws Exception {
21 | }
22 |
23 | }
24 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceToPartitionTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceToPartitionTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_to_partition.json")
 15 | public void testProduceTo_partition() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceTwoRecordsTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceTwoRecordsTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_2_records.json")
15 | public void testProduce_twoRecords() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceUniqueClientIdTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server_unique.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceUniqueClientIdTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce.json")
15 | public void testProduce() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaProduceWithTimeStampTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceWithTimeStampTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/test_kafka_produce_with_timestamp.json")
15 | public void testProduceWith_timestamp() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/KafkaPublishFailureTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @TargetEnv("kafka_servers/kafka_test_bad_server.properties")
11 | @RunWith(ZeroCodeUnitRunner.class)
12 | public class KafkaPublishFailureTest {
13 |
14 | @Ignore("Sometimes - 2018-10-06 23:33:15,673 [main] WARN org.apache.kafka.common.utils.AppInfoParser - Error registering AppInfo mbean\n" +
15 | "javax.management.InstanceAlreadyExistsException: kafka.producer:type=app-info,id=zerocode-producer\n")
16 | @Test
17 | @JsonTestCase("kafka/produce/test_kafka_publish_failed.json")
18 | public void testPublish() throws Exception {
19 | }
20 |
21 | }
22 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/file/KafkaProduceAsyncFromFileRawTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce.file;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceAsyncFromFileRawTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/file_produce/test_kafka_produce_async_from_file.json")
15 | public void testProduceAnd_asyncFromFile() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/file/KafkaProduceSyncFromFileJsonTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce.file;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceSyncFromFileJsonTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/file_produce/test_kafka_produce_sync_from_file_json.json")
15 | public void testProduceAnd_syncFromFileJson() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/file/KafkaProduceSyncFromFileRawTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce.file;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceSyncFromFileRawTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/file_produce/test_kafka_produce_sync_from_file.json")
15 | public void testProduceAnd_syncFromFile() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/kafka/produce/negative/KafkaProduceSyncWrongFileNameTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.kafka.produce.negative;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Test;
7 | import org.junit.runner.RunWith;
8 |
9 | @TargetEnv("kafka_servers/kafka_test_server.properties")
10 | @RunWith(ZeroCodeUnitRunner.class)
11 | public class KafkaProduceSyncWrongFileNameTest {
12 |
13 | @Test
14 | @JsonTestCase("kafka/produce/negative/test_kafka_produce_from_worng_filename.json")
15 | public void testProduceAnd_wrongFileName() throws Exception {
16 | }
17 |
18 | }
19 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/more/customclient/KafkaProduceCustomClientTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.more.customclient;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.domain.UseKafkaClient;
6 | import org.jsmart.zerocode.core.kafka.client.ZerocodeCustomKafkaClient;
7 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
8 | import org.junit.Test;
9 | import org.junit.runner.RunWith;
10 |
11 | @TargetEnv("kafka_servers/kafka_test_server.properties")
12 | @UseKafkaClient(ZerocodeCustomKafkaClient.class)
13 | @RunWith(ZeroCodeUnitRunner.class)
14 | public class KafkaProduceCustomClientTest {
15 |
16 | @Test
17 | @JsonTestCase("kafka/produce/test_kafka_produce.json")
18 | public void testPublish() throws Exception {
19 | }
20 |
21 | }
22 |
--------------------------------------------------------------------------------
/src/test/java/org/jsmart/zerocode/integration/tests/more/ksql/KafkaKsqlTest.java:
--------------------------------------------------------------------------------
1 | package org.jsmart.zerocode.integration.tests.more.ksql;
2 |
3 | import org.jsmart.zerocode.core.domain.JsonTestCase;
4 | import org.jsmart.zerocode.core.domain.TargetEnv;
5 | import org.jsmart.zerocode.core.runner.ZeroCodeUnitRunner;
6 | import org.junit.Ignore;
7 | import org.junit.Test;
8 | import org.junit.runner.RunWith;
9 |
10 | @TargetEnv("kafka_servers/kafka_test_server_avro.properties")
11 | @RunWith(ZeroCodeUnitRunner.class)
12 | public class KafkaKsqlTest {
13 |
14 | @Ignore("This can be run only after adding confluent-avro maven dependencies")
15 | @Test
16 | @JsonTestCase("kafka/consume/ksql/test_ksql_query.json")
17 | public void testKafkaConsume_ksql() throws Exception {
18 | }
19 |
20 | @Ignore("Confluent Team is helping us to sort this out")
21 | @Test
22 | @JsonTestCase("kafka/consume/ksql/WIP_ISSUE_test_ksql_print_records.json")
23 | public void testKafkaConsume_printTopic() throws Exception {
24 | }
25 |
26 | }
27 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/file_dump/test_kafka_consume_record_dump_json_json.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Unload to file - json record as RAW dump",
3 | "steps": [
4 | {
5 | "name": "load_topic",
6 | "url": "kafka-topic:demo-json2",
7 | "operation": "produce",
8 | "request": {
9 | "recordType":"JSON",
10 | "records": [
11 | {
12 | "key": "${RANDOM.NUMBER}",
13 | "value": {
14 | "id" : 121,
15 | "name" : "Jey"
16 | }
17 | }
18 | ]
19 | },
20 | "assertions": {
21 | "status": "Ok"
22 | }
23 | },
24 | {
25 | "name": "onload_to_file",
26 | "url": "kafka-topic:demo-json2",
27 | "operation": "consume",
28 | "request": {
29 | "consumerLocalConfigs": {
30 | "fileDumpTo": "target/temp/demo.txt",
31 | "recordType": "JSON",
32 | "commitSync":true,
33 | "showRecordsConsumed": true,
34 | "maxNoOfRetryPollsOrTimeouts": 5,
35 | "pollingTime": 1000
36 | }
37 | },
38 | "assertions": {
39 | "size": 1
40 | }
41 | }
42 | ]
43 | }
44 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/file_dump/test_kafka_consume_record_dump_raw_json.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Unload to file - json record as RAW dump",
3 | "steps": [
4 | {
5 | "name": "load_topic",
6 | "url": "kafka-topic:demo-raw1",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "{\"name\": \"Linus\"}"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "onload_to_file",
22 | "url": "kafka-topic:demo-raw1",
23 | "operation": "consume",
24 | "request": {
25 | "consumerLocalConfigs": {
26 | "fileDumpTo": "target/temp/demo.txt",
27 | "commitSync":true,
28 | "showRecordsConsumed": false,
29 | "maxNoOfRetryPollsOrTimeouts": 5,
30 | "pollingTime": 1000
31 | }
32 | },
33 | "assertions": {
34 | "size": 1
35 | }
36 | }
37 | ]
38 | }
39 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/file_dump/test_kafka_consume_record_dump_raw_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Unload - consume a message from kafka - raw dump to file",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-c2",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "dump_file_record_count",
22 | "url": "kafka-topic:demo-c2",
23 | "operation": "consume",
24 | "request": {
25 | "consumerLocalConfigs": {
26 | "fileDumpTo": "target/temp/demo.txt",
27 | "commitSync":true,
28 | "showRecordsConsumed": false,
29 | "maxNoOfRetryPollsOrTimeouts": 5,
30 | "pollingTime": 1000
31 | }
32 | },
33 | "assertions": {
34 | "size": 1
35 | }
36 | }
37 | ]
38 | }
39 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/ksql/WIP_ISSUE_test_ksql_print_records.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Print Topic Records via KSQL query",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-ksql",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello, Created for KSQL demo"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "ksql_print_records",
22 | "url": "${kafka-ksql-server-fqdn}/query",
23 | "operation": "POST",
24 | "request": {
25 | "headers": {
26 | "Content-Type": "application/vnd.ksql.v1+json; charset=utf-8",
27 | "Accept": "application/vnd.ksql.v1+json"
28 | },
29 | "body": {
30 | "ksql": "print 'demo-ksql' from beginning;",
31 | "streamsProperties": {}
32 | }
33 | },
34 | "assertions": {
35 | "status": 200,
36 | "body": "$NOT.NULL"
37 | }
38 | }
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/ksql/test_ksql_query.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Consume via KSQL query",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-ksql",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello, Created for KSQL demo"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "ksql_show_topics",
22 | "url": "${kafka-ksql-server-fqdn}/ksql",
23 | "operation": "POST",
24 | "request": {
25 | "headers": {
26 | "Content-Type": "application/vnd.ksql.v1+json; charset=utf-8",
27 | "Accept": "application/vnd.ksql.v1+json"
28 | },
29 | "body": {
30 | "ksql": "SHOW TOPICS;",
31 | "streamsProperties": {}
32 | }
33 | },
34 | "assertions": {
35 | "status": 200,
36 | "body": [
37 | {
38 | "topics.SIZE": "$GT.0",
39 | "topics[?(@.name=='demo-ksql')].registered.SIZE": 1
40 | }
41 | ]
42 | }
43 | },
44 | {
45 | "name": "ksql_list_streams",
46 | "url": "${kafka-ksql-server-fqdn}/ksql",
47 | "operation": "POST",
48 | "request": {
49 | "headers": {
50 | "Content-Type": "application/vnd.ksql.v1+json; charset=utf-8",
51 | "Accept": "application/vnd.ksql.v1+json"
52 | },
53 | "body": {
54 | "ksql": "LIST STREAMS;",
55 | "streamsProperties": {}
56 | }
57 | },
58 | "assertions": {
59 | "status": 200,
60 | "body": [
61 | {
62 | "@type": "streams",
63 | "statementText": "LIST STREAMS;",
64 | "streams": []
65 | }
66 | ]
67 | }
68 | },
69 | {
70 | "name": "ksql_server_info",
71 | "url": "${kafka-ksql-server-fqdn}/info",
72 | "operation": "GET",
73 | "request": {
74 | },
75 | "assertions": {
76 | "status": 200,
77 | "body": {
78 | "KsqlServerInfo": {
79 | "version": "5.1.0",
80 | "kafkaClusterId": "$NOT.NULL",
81 | "ksqlServiceId": "default_"
82 | }
83 | }
84 | }
85 | }
86 | ]
87 | }
88 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/negative/test_kafka_rest_proxy_avro_msg_wrong_value.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce an Inetger via REST proxy - A wrong Value",
3 | "steps": [
4 | {
5 | "name": "produce_avro_raw",
6 | "url": "/topics/demo-avro-3",
7 | "operation": "POST",
8 | "request": {
9 | "headers": {
10 | "Content-Type": "application/vnd.kafka.avro.v1+json",
11 | "Accept": "application/vnd.kafka.v1+json, application/vnd.kafka+json, application/json"
12 | },
13 | "body": {
14 | "value_schema": "{\"name\":\"int\",\"type\": \"int\"}",
15 | "records": [
16 | {
17 | "value": "a wrong value"
18 | }
19 | ]
20 | }
21 | },
22 | "assertions": {
23 | "status": 422,
24 | "headers": {
25 | "Content-Type": [
26 | "application/vnd.kafka.v1+json"
27 | ]
28 | },
29 | "body": {
30 | "error_code": 42203,
31 | "message": "Conversion of JSON to Avro failed: Failed to convert JSON to Avro: Expected int. Got VALUE_STRING"
32 | }
33 | }
34 | }
35 | ]
36 | }
37 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/negative/test_produce_step_direct_invalid_avro_msg.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Register Int via REST proxy and produce a String - Expect error",
3 | "steps": [
4 | {
5 | "name": "rest_produce_avro_raw",
6 | "url": "/topics/demo-avro-4",
7 | "operation": "POST",
8 | "request": {
9 | "headers": {
10 | "Content-Type": "application/vnd.kafka.avro.v1+json",
11 | "Accept": "application/vnd.kafka.v1+json, application/vnd.kafka+json, application/json"
12 | },
13 | "body": {
14 | "value_schema": "{\"name\":\"int\",\"type\": \"int\"}",
15 | "records": [
16 | {
17 | //optional sample value
18 | "value": 12
19 | }
20 | ]
21 | }
22 | },
23 | "assertions": {
24 | "status": 200,
25 | "body": {
26 | "offsets.SIZE": 1,
27 | "value_schema_id": "$NOT.NULL"
28 | }
29 | }
30 | },
31 | {
32 | "name": "produce_step",
33 | "url": "kafka-topic:demo-avro-4",
34 | "operation": "produce",
35 | "request": {
36 | "records": [
37 | {
38 | "value": "hello"
39 | }
40 | ]
41 | },
42 | "assertions": {
43 | "status": "Failed",
44 | "message": "Error registering Avro schema: \"string\""
45 | }
46 | },
47 | {
48 | "name": "produce_step_schema_id",
49 | "url": "kafka-topic:demo-avro-4",
50 | "operation": "produce",
51 | "request": {
52 | "records": [
53 | {
54 | "value": "hello",
55 | "value_schema_id": "${$.rest_produce_avro_raw.response.body.value_schema_id}"
56 | }
57 | ]
58 | },
59 | "assertions": {
60 | "status": "Failed",
61 | "message": "Error registering Avro schema: \"string\""
62 | }
63 | },
64 | {
65 | "name": "produce_step_valid_data",
66 | "url": "kafka-topic:demo-avro-4",
67 | "operation": "produce",
68 | "request": {
69 | "records": [
70 | {
71 | "value": 333.33,
72 | "value_schema_id": "${$.rest_produce_avro_raw.response.body.value_schema_id}"
73 | }
74 | ]
75 | },
76 | "assertions": {
77 | "status": "Ok"
78 | }
79 | },
80 | {
81 | "name": "consume_avro_msg_as_raw",
82 | "url": "kafka-topic:demo-avro-4",
83 | "operation": "consume",
84 | "request": {
85 | "consumerLocalConfigs": {
86 | "recordType": "RAW",
87 | "commitSync": true,
88 | "showRecordsConsumed": true,
89 | "maxNoOfRetryPollsOrTimeouts": 5
90 | }
91 | },
92 | "assertions": {
93 | "size": 2,
94 | "records": [
95 | {
96 | "topic": "demo-avro-4",
97 | "value": "$EQ.${$.rest_produce_avro_raw.request.body.records[0].value}"
98 | },
99 | {
100 | "topic": "demo-avro-4",
101 | "value": "$EQ.${$.produce_step_valid_data.request.records[0].value}"
102 | }
103 | ]
104 | }
105 | }
106 | ]
107 | }
108 |
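 109 | // Illustrative note (not part of the scenario): the `value_schema_id` used above must reference a schema
 110 | // that is already registered in the Schema Registry. Assuming the registry runs on http://localhost:8081
 111 | // (matching schema.registry.url in kafka_servers/kafka_producer_avro.properties), one way to register the
 112 | // int schema manually could be:
 113 | //   curl -X POST http://localhost:8081/subjects/demo-avro-4-value/versions \
 114 | //        -H "Content-Type: application/vnd.schemaregistry.v1+json" \
 115 | //        -d '{"schema": "{\"name\":\"int\",\"type\":\"int\"}"}'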
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Unload - consume a message from kafka",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-c1",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status" : "Ok"
18 | }
19 | },
20 | {
21 | "name": "consume_step",
22 | "url": "kafka-topic:demo-c1",
23 | "operation": "consume",
24 | "request": {
25 | },
26 | "assertions": {
 27 | // this will be 1 only when the consumer does a commit i.e. commitAsync or commitSync,
 28 | // otherwise 3 or more as earlier messages will still be there.
 29 | // commitAsync - sometimes delayed as it is async (provide a hello-world example).
30 | "size" : "$GT.0"
31 | }
32 | }
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_avro_msg_json.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a JSON via REST Proxy and consume a AVRO msg message as JSON",
3 | "steps": [
4 | {
5 | "name": "produce_avro_raw",
6 | "url": "/topics/demo-avro-1",
7 | "operation": "POST",
8 | "request": {
9 | "headers": {
10 | "Content-Type": "application/vnd.kafka.avro.v1+json",
11 | "Accept": "application/vnd.kafka.v1+json, application/vnd.kafka+json, application/json"
12 | },
13 | "body": {
14 | "value_schema": "{\"type\":\"record\",\"name\":\"myrecord\",\"fields\":[{\"name\":\"f1\",\"type\":\"string\"}]}",
15 | "records": [
16 | {
17 | "value": {
18 | "f1": "val1"
19 | }
20 | },
21 | {
22 | "value": {
23 | "f1": "val2",
24 | "partition": 0
25 | }
26 | }
27 | ]
28 | }
29 | },
30 | "assertions": {
31 | "status": 200,
32 | "body" : {
33 | "offsets.SIZE" : 2,
34 | "value_schema_id" : "$NOT.NULL"
35 | }
36 | }
37 | },
38 | {
39 | "name": "consume_avro_msg_as_raw",
40 | "url": "kafka-topic:demo-avro-1",
41 | "operation": "consume",
42 | "request": {
43 | "consumerLocalConfigs": {
44 | "recordType": "JSON",
45 | "commitSync": true,
46 | "showRecordsConsumed": true,
47 | "maxNoOfRetryPollsOrTimeouts": 3
48 | }
49 | },
50 | "assertions": {
51 | "size": 2,
52 | "records": [
53 | {
54 | "value": {
55 | "f1": "val1"
56 | }
57 | },
58 | {
59 | "value": {
60 | "f1": "val2"
61 | }
62 | }
63 | ]
64 | }
65 | }
66 | ]
67 | }
68 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_avro_msg_raw_int.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce an Inetger via REST proxy and consume a AVRO msg message as RAW",
3 | "steps": [
 4 | // The following consume steps work (tested - green),
 5 | // but are not yet CI-enabled because the Avro schema registration is done manually.
 6 |
 7 | // Note-
 8 | // Do not produce directly to the topic without the Avro registry, i.e. the Schema Registry.
 9 | // When you produce via Avro, the message is stamped with an ID (call it the Avro ID) from the registry.
 10 | // If you produce directly (bypassing Avro), it cannot be consumed via the AvroDeserializer,
 11 | // because the message carries no Avro ID and hence is not understood by the Avro deserializer.
12 | // Error - {"error_code":50002,"message":"Kafka error: Error deserializing Avro message for id -1"}
13 | // 2019-01-04 09:46:29,371 [main] ERROR org.jsmart.zerocode.core.kafka.client.BasicKafkaClient -
14 | // Exception during operation:unload, topicName:demo-avro-2, error:Error deserializing key/value for partition demo-avro-2-0 at offset 1.
15 | // If needed, please seek past the record to continue consumption.
16 |
17 | {
18 | "name": "produce_avro_raw",
19 | "url": "/topics/demo-avro-2",
20 | "operation": "POST",
21 | "request": {
22 | "headers": {
23 | "Content-Type": "application/vnd.kafka.avro.v1+json",
24 | "Accept": "application/vnd.kafka.v1+json, application/vnd.kafka+json, application/json"
25 | },
26 | "body": {
27 | "value_schema": "{\"name\":\"int\",\"type\": \"int\"}",
28 | "records": [
29 | {
30 | "value": 12
31 | },
32 | {
33 | "value": 24,
34 | "partition": 0
35 | }
36 | ]
37 | }
38 | },
39 | "assertions": {
40 | "status": 200,
41 | "body" : {
42 | "offsets.SIZE" : 2,
43 | "value_schema_id" : "$NOT.NULL"
44 | }
45 | }
46 | },
47 | {
48 | "name": "consume_avro_msg_as_raw",
49 | "url": "kafka-topic:demo-avro-2",
50 | "operation": "consume",
51 | "request": {
52 | "consumerLocalConfigs": {
53 | "recordType": "RAW",
54 | "commitSync": true,
55 | "showRecordsConsumed": true,
56 | "maxNoOfRetryPollsOrTimeouts": 5
57 | }
58 | },
59 | "assertions": {
60 | "size": 2,
61 | "records": [
62 | {
63 | "topic": "demo-avro-2",
64 | "value": "$EQ.${$.produce_avro_raw.request.body.records[0].value}"
65 | },
66 | {
67 | "topic": "demo-avro-2",
68 | "value": "$EQ.${$.produce_avro_raw.request.body.records[1].value}"
69 | }
70 | ]
71 | }
72 | }
73 | ]
74 | }
75 |
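 76 | // Illustrative note (not part of the scenario): the reason bypassing the registry breaks the consumer
 77 | // is the Confluent Avro wire format - each value produced via the registry is laid out roughly as:
 78 | //   [magic byte 0x0][4-byte schema ID][Avro binary payload]
 79 | // A plain string produced directly to the topic has no such prefix, so the KafkaAvroDeserializer
 80 | // cannot resolve a schema ID and fails with "Error deserializing Avro message for id -1" as quoted above.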
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_avro_msg_raw_json.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a JSON via REST Proxy and consume a AVRO msg message as JSON",
3 | "steps": [
4 | {
5 | "name": "produce_avro_raw",
6 | "url": "/topics/demo-avro-1",
7 | "operation": "POST",
8 | "request": {
9 | "headers": {
10 | "Content-Type": "application/vnd.kafka.avro.v1+json",
11 | "Accept": "application/vnd.kafka.v1+json, application/vnd.kafka+json, application/json"
12 | },
13 | "body": {
14 | "value_schema": "{\"type\":\"record\",\"name\":\"myrecord\",\"fields\":[{\"name\":\"f1\",\"type\":\"string\"}]}",
15 | "records": [
16 | {
17 | "value": {
18 | "f1": "val1"
19 | }
20 | }
21 | ]
22 | }
23 | },
24 | "assertions": {
25 | "status": 200,
26 | "body" : {
27 | "offsets.SIZE" : 1,
28 | "value_schema_id" : "$NOT.NULL"
29 | }
30 | }
31 | },
32 | {
33 | "name": "consume_avro_msg_as_raw",
34 | "url": "kafka-topic:demo-avro-1",
35 | "operation": "consume",
36 | "request": {
37 | "consumerLocalConfigs": {
38 | "recordType": "RAW",
39 | "commitSync": true,
40 | "showRecordsConsumed": true,
41 | "maxNoOfRetryPollsOrTimeouts": 3
42 | }
43 | },
44 | "assertions": {
45 | "size": 1,
46 | "records": [
47 | {
48 | "topic": "demo-avro-1",
49 | "partition": 0,
50 | "value": {
51 | "schema": {
52 | "fields": [
53 | {
54 | "name": "f1",
55 | "position": 0,
56 | "schema": {
57 | "type": "STRING"
58 | }
59 | }
60 | ],
61 | "name": {
62 | "name": "myrecord",
63 | "full": "myrecord"
64 | }
65 | },
66 | "values": [
67 | {
68 | "string": "val1"
69 | }
70 | ]
71 | }
72 | }
73 |
74 | ]
75 | }
76 | }
77 | ]
78 | }
79 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_int_key.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Unload - consume a message from kafka",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-int-topic-1",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": 101,
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status" : "Ok"
18 | }
19 | },
20 | {
21 | "name": "consume_step",
22 | "url": "kafka-topic:demo-int-topic-1",
23 | "operation": "consume",
24 | "request": {
25 | },
26 | "assertions": {
 27 | // this will be 1 only when the consumer does a commit i.e. commitAsync or commitSync,
 28 | // otherwise 3 or more as earlier messages will still be there.
 29 | // TODO- commitAsync - sometimes delayed as it is async (provide a hello-world example).
30 | "size" : "$GT.0"
31 | }
32 | }
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_json_msg.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "consume a JSON message",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-json1",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "{\"name\": \"Nicola\"}"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "consume_step",
22 | "url": "kafka-topic:demo-json1",
23 | "operation": "consume",
24 | "request": {
25 | "consumerLocalConfigs": {
26 | "recordType": "JSON",
27 | "commitSync": true,
28 | "showRecordsConsumed": true,
29 | "maxNoOfRetryPollsOrTimeouts": 3
30 | }
31 | },
32 | "assertions": {
33 | "size": 1,
34 | "records": [
35 | {
36 | "value": {
37 | "name": "Nicola"
38 | }
39 | }
40 | ]
41 | }
42 | }
43 | ]
44 | }
45 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_raw_msg.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "consume as RAW message",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-raw1",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "{\"name\": \"Linus\"}"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "consume_step",
22 | "url": "kafka-topic:demo-raw1",
23 | "operation": "consume",
24 | "request": {
25 | "consumerLocalConfigs": {
26 | "recordType": "RAW",
27 | "commitSync": true,
28 | "showRecordsConsumed": true,
29 | "maxNoOfRetryPollsOrTimeouts": 3
30 | }
31 | },
32 | "assertions": {
33 | "size": 1,
34 | "records": [
35 | {
36 | "key" : "$NOT.NULL",
37 | "value": "{\"name\": \"Linus\"}"
38 | }
39 | ]
40 | }
41 | }
42 | ]
43 | }
44 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_seek_offset.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Unload - consume a message from kafka offset via seek",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-c3",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World1"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status": "Ok"
18 | }
19 | },
20 | {
21 | "name": "produce_step2",
22 | "url": "kafka-topic:demo-c3",
23 | "operation": "produce",
24 | "request": {
25 | "records": [
26 | {
27 | "key": "${RANDOM.NUMBER}",
28 | "value": "Hello World2"
29 | }
30 | ]
31 | },
32 | "assertions": {
33 | "status": "Ok"
34 | }
35 | },
36 | {
37 | "name": "consume_step",
38 | "url": "kafka-topic:demo-c3",
39 | "operation": "consume",
40 | "request": {
41 | "consumerLocalConfigs": {
42 | "commitSync": false,
43 | "recordType": "RAW",
44 | "showRecordsConsumed": true,
45 | "maxNoOfRetryPollsOrTimeouts": 3
46 | }
47 | },
48 | "assertions": {
49 | "size": 2
50 | }
51 | },
52 | {
53 | "name": "consume_step_seek",
54 | "url": "kafka-topic:demo-c3",
55 | "operation": "consume",
56 | "request": {
57 | "consumerLocalConfigs": {
58 | "seek": "demo-c3,0,1",
59 | "commitSync": true,
60 | "recordType": "RAW",
61 | "showRecordsConsumed": true,
62 | "maxNoOfRetryPollsOrTimeouts": 3
63 | }
64 | },
65 | "assertions": {
 66 | // Locally this passes once per run. It fails if run again, as the offset increases.
 67 | // Run docker-compose down to reset the offset to 0.
 68 | // Set "seek" to "demo-c3,0,1", "demo-c3,0,3", "demo-c3,0,5" etc. manually to see it passing.
 69 | // Note- it will always pass in CI, due to a fresh container spin-up.
70 | "size": 1
71 | }
72 | }
73 | ]
74 | }
75 |
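 76 | // Illustrative note (not part of the scenario): the "seek" value follows the format
 77 | // "<topic>,<partition>,<offset>". For example, a sketch that re-reads this topic from the
 78 | // very first record (assuming partition 0 exists) could use:
 79 | //   "seek": "demo-c3,0,0"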
--------------------------------------------------------------------------------
/src/test/resources/kafka/consume/test_kafka_consume_via_unique_group_id.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Consume - consume records via unique group ID",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-x1",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status" : "Ok"
18 | }
19 | },
20 | {
21 | "name": "consume_step",
22 | "url": "kafka-topic:demo-x1",
23 | "operation": "consume",
24 | "request": {
25 | },
26 | "assertions": {
27 | // This uses a brand new consumer group ID
28 | "size" : "$GT.0"
29 | }
30 | },
31 | {
32 | "name": "produce_again",
33 | "url": "kafka-topic:demo-x1",
34 | "operation": "produce",
35 | "request": {
36 | "records":[
37 | {
38 | "key": "${RANDOM.NUMBER}",
39 | "value": "Hello World"
40 | }
41 | ]
42 | },
43 | "assertions": {
44 | "status" : "Ok"
45 | }
46 | },
47 | {
48 | "name": "consume_again",
49 | "url": "kafka-topic:demo-x1",
50 | "operation": "consume",
51 | "request": {
52 | },
53 | "assertions": {
 54 | // This uses a brand new consumer group ID.
 55 | // Hence it reads the messages from the beginning.
 56 | // So for the first run the count is 2,
 57 | // for the second run the count is 3,
 58 | // and so on...
59 | "size" : "$GT.1"
60 | }
61 | }
62 |
63 | ]
64 | }
65 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/file_produce/test_kafka_produce_async_from_file.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message - Async - From File",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-file-1",
7 | "operation": "produce",
8 | "request": {
9 | "async": true,
10 | "recordType" : "RAW",
11 | "file": "kafka/produce_data/test_data_raw.json"
12 | },
13 | "assertions": {
14 | "status" : "Ok",
15 | "recordMetadata" : {
16 | "topicPartition" : {
17 | "topic" : "demo-file-1"
18 | }
19 | }
20 | }
21 | },
22 | {
23 | "name": "consume_raw",
24 | "url": "kafka-topic:demo-file-1",
25 | "operation": "consume",
26 | "request": {
27 | "consumerLocalConfigs": {
28 | "recordType" : "RAW",
29 | "commitSync": true,
30 | "showRecordsConsumed": true,
31 | "maxNoOfRetryPollsOrTimeouts": 5
32 | }
33 | },
34 | "assertions": {
35 | "size": 2
36 | }
37 | }
38 |
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/file_produce/test_kafka_produce_sync_from_file.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message - Sync - From File",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-file-2",
7 | "operation": "produce",
8 | "request": {
9 | "async": false,
10 | "recordType" : "RAW",
11 | "file": "kafka/produce_data/test_data_raw.json"
12 | },
13 | "assertions": {
14 | "status" : "Ok",
15 | "recordMetadata" : {
16 | "topicPartition" : {
17 | "topic" : "demo-file-2"
18 | }
19 | }
20 | }
21 | },
22 | {
23 | "name": "consume_raw",
24 | "url": "kafka-topic:demo-file-2",
25 | "operation": "consume",
26 | "request": {
27 | "consumerLocalConfigs": {
28 | "recordType" : "RAW",
29 | "commitSync": true,
30 | "showRecordsConsumed": true,
31 | "maxNoOfRetryPollsOrTimeouts": 5
32 | }
33 | },
34 | "assertions": {
35 | "size": 2
36 | }
37 | }
38 |
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/file_produce/test_kafka_produce_sync_from_file_json.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message - Sync - From File",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-file-3",
7 | "operation": "produce",
8 | "request": {
9 | "async": false,
10 | "recordType" : "JSON",
11 | "file": "kafka/produce_data/test_data_json.json"
12 | },
13 | "assertions": {
14 | "status" : "Ok",
15 | "recordMetadata" : {
16 | "topicPartition" : {
17 | "topic" : "demo-file-3"
18 | }
19 | }
20 | }
21 | },
22 | {
23 | "name": "consume_raw",
24 | "url": "kafka-topic:demo-file-3",
25 | "operation": "consume",
26 | "request": {
27 | "consumerLocalConfigs": {
28 | "recordType" : "JSON",
29 | "commitSync": true,
30 | "showRecordsConsumed": true,
31 | "maxNoOfRetryPollsOrTimeouts": 3
32 | }
33 | },
34 | "assertions": {
35 | "size": 2
36 | }
37 | }
38 |
39 | ]
40 | }
41 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/negative/test_kafka_produce_from_worng_filename.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message - Wrong file name",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-file-2",
7 | "operation": "produce",
8 | "request": {
9 | "async": false,
10 | "recordType" : "RAW",
11 | "file": "kafka/produce_data/test_data_rawXX.json"
12 | },
13 | "assertions": {
14 | "status" : "Failed",
15 | "message" : "Error accessing file: `kafka/produce_data/test_data_rawXX.json' - java.lang.NullPointerException"
16 | }
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message to kafka topic - vanilla",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-1",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status" : "Ok",
18 | "recordMetadata" : "$NOT.NULL"
19 | }
20 | }
21 | ]
22 | }
23 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_2_records.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce - 2 records and consume both",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-p6",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World 1"
13 | },
14 | {
15 | "key": "${RANDOM.NUMBER}",
16 | "value": "Hello World 2"
17 | }
18 | ]
19 | },
20 | "assertions": {
21 | "status": "Ok"
22 | }
23 | },
24 | {
25 | "name": "consume_step",
26 | "url": "kafka-topic:demo-p6",
27 | "operation": "consume",
28 | "request": {
29 | "consumerLocalConfigs": {
30 | "showRecordsConsumed": true,
31 | "maxNoOfRetryPollsOrTimeouts": 3
32 | }
33 | },
34 | "assertions": {
35 | "records": [
36 | {
37 | "topic": "demo-p6",
38 | "key": "${$.produce_step.request.records[0].key}",
39 | "value": "Hello World 1"
40 | },
41 | {
42 | "topic": "demo-p6",
43 | "key": "${$.produce_step.request.records[1].key}",
44 | "value": "Hello World 2"
45 | }
46 | ],
47 | "size": 2
48 | }
49 | }
50 | ]
51 | }
52 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_ack_metadata.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message and Ack the topic metadata",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-2",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status" : "Ok",
18 | "recordMetadata" : {
19 | "offset" : "$NOT.NULL",
20 | "timestamp" : "$NOT.NULL",
21 | "serializedKeySize" : 13,
22 | "serializedValueSize" : 11,
23 | "topicPartition" : {
24 | "hash" : "$NOT.NULL",
25 | "partition" : 0,
26 | "topic" : "demo-2"
27 | }
28 | }
29 | }
30 | }
31 | ]
32 | }
33 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_async.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message - Async with Callback",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-3",
7 | "operation": "produce",
8 | "request": {
9 | "async": true,
10 | "records":[
11 | {
12 | "key": "${RANDOM.NUMBER}",
13 | "value": "Hello World"
14 | }
15 | ]
16 | },
17 | "assertions": {
18 | "status" : "Ok",
19 | "recordMetadata" : {
20 | "offset" : "$NOT.NULL",
21 | "timestamp" : "$NOT.NULL",
22 | "serializedKeySize" : 13,
23 | "serializedValueSize" : 11,
24 | "topicPartition" : {
25 | "hash" : "$NOT.NULL",
26 | "partition" : 0,
27 | "topic" : "demo-3"
28 | }
29 | }
30 | }
31 | }
32 | ]
33 | }
34 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_int_key.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message to kafka topic - int Key",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-int-topic-2",
7 | "operation": "produce",
8 | "request": {
9 | "recordType" : "RAW",
10 | "records": [
11 | {
12 | "key": 101,
13 | "value": "Hello World"
14 | }
15 | ]
16 | },
17 | "assertions": {
18 | "status": "Ok",
19 | "recordMetadata": "$NOT.NULL"
20 | }
21 | },
22 | {
23 | "name": "consume_step",
24 | "url": "kafka-topic:demo-int-topic-2",
25 | "operation": "consume",
26 | "request": {
27 | "consumerLocalConfigs": {
28 | //"recordType" : "JSON",
29 | "commitSync": true,
30 | "maxNoOfRetryPollsOrTimeouts": 3
31 | }
32 | },
33 | "assertions": {
34 | "size": 1,
35 | "records": [
36 | {
37 | "key": 101.0,
38 | "value": "Hello World"
39 | }
40 | ]
41 | }
42 | }
43 | ]
44 | }
45 |
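 46 | // Illustrative note (an assumption, not stated in the test): the key was produced as 101 but is
 47 | // asserted above as 101.0, presumably an artifact of the consumed key being parsed into a generic
 48 | // JSON number (double) before comparison.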
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_json_record.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a JSON message to a kafka topic",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-json-topic-1",
7 | "operation": "produce",
8 | "request": {
9 | "recordType" : "JSON",
10 | "records": [
11 | {
12 | "key": 101,
13 | "value": {
14 | "name" : "Jey"
15 | }
16 | }
17 | ]
18 | },
19 | "assertions": {
20 | "status": "Ok",
21 | "recordMetadata": "$NOT.NULL"
22 | }
23 | },
24 | {
25 | "name": "consume_step",
26 | "url": "kafka-topic:demo-json-topic-1",
27 | "operation": "consume",
28 | "request": {
29 | "consumerLocalConfigs": {
30 | "recordType" : "JSON",
31 | "commitSync": true,
32 | "maxNoOfRetryPollsOrTimeouts": 3
33 | }
34 | },
35 | "assertions": {
36 | "size": 1,
37 | "records": [
38 | {
39 | "key": 101,
40 | "value": {
41 | "name" : "Jey"
42 | }
43 | }
44 |
45 | ]
46 | }
47 | }
48 | ]
49 | }
50 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_raw.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a RAW message i.e. RAW string and RAW json",
3 | "steps": [
4 | {
5 | "name": "produce_raw_string",
6 | "url": "kafka-topic:demo-raw-topic-1",
7 | "operation": "produce",
8 | "request": {
9 | "recordType" : "RAW",
10 | "records": [
11 | {
12 | "key": 101,
13 | "value": "Hello World"
14 | }
15 | ]
16 | },
17 | "assertions": {
18 | "status": "Ok",
19 | "recordMetadata": "$NOT.NULL"
20 | }
21 | },
22 | {
23 | "name": "produce_raw_json",
24 | "url": "kafka-topic:demo-raw-topic-1",
25 | "operation": "produce",
26 | "request": {
27 | "recordType" : "RAW",
28 | "records": [
29 | {
30 | "key": 102,
31 | "value": "{\"name\": \"Jay Kreps\"}"
32 | }
33 | ]
34 | },
35 | "assertions": {
36 | "status": "Ok",
37 | "recordMetadata": "$NOT.NULL"
38 | }
39 | },
40 | {
41 | "name": "consume_raw",
42 | "url": "kafka-topic:demo-raw-topic-1",
43 | "operation": "consume",
44 | "request": {
45 | "consumerLocalConfigs": {
46 | "recordType" : "RAW",
47 | "commitSync": true,
48 | "maxNoOfRetryPollsOrTimeouts": 5
49 | }
50 | },
51 | "assertions": {
52 | "size": 2,
53 | "records": [
54 | {
55 | "key": 101,
56 | "value": "Hello World"
57 | },
58 | {
59 | "key": 102,
60 | "value": "{\"name\": \"Jay Kreps\"}"
61 | }
62 | ]
63 | }
64 | }
65 | ]
66 | }
67 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_to_partition.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message to a partition",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-4",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World",
13 | "partition": 0
14 | }
15 | ]
16 | },
17 | "assertions": {
18 | "status" : "Ok",
19 | "recordMetadata" : {
20 | "topicPartition" : {
21 | "partition" : 0,
22 | "topic" : "demo-4"
23 | }
24 | }
25 | }
26 | },
27 | {
28 | "name": "produce_step_wrong_partition",
29 | "url": "kafka-topic:demo-4",
30 | "operation": "produce",
31 | "request": {
32 | "records":[
33 | {
34 | "key": "${RANDOM.NUMBER}",
35 | "value": "Hello World",
36 | "partition": 9
37 | }
38 | ]
39 | },
40 | "assertions": {
41 | "status" : "Failed",
42 | "message" : "Invalid partition given with record: 9 is not in the range [0...1)."
43 | }
44 | }
45 | ]
46 | }
47 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_produce_with_timestamp.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Produce a message with timestamp",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-5",
7 | "operation": "produce",
8 | "request": {
9 | "records": [
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World",
13 | "partition": 0,
14 | "timestamp": 1546065694692 //<-- set while sending, broker keeps the same, doesn't change it
15 | }
16 | ]
17 | },
18 | "assertions": {
19 | "status": "Ok",
20 | "recordMetadata": {
21 | "timestamp": 1546065694692, //<--- same timestamp value as it was set while sending.
22 | "topicPartition": {
23 | "partition": 0,
24 | "topic": "demo-5"
25 | }
26 | }
27 | }
28 | },
29 | {
30 | "name": "load_without_timestamp",
31 | "url": "kafka-topic:demo-5",
32 | "operation": "produce",
33 | "request": {
34 | "records": [
35 | {
36 | "key": "${RANDOM.NUMBER}",
37 | "value": "Hello World"
38 | }
39 | ]
40 | },
41 | "assertions": {
42 | "status": "Ok",
43 | "recordMetadata": {
44 | "timestamp": "$GT.1546065694692", //<-- because assigned by the broker after receiving
45 | "topicPartition": {
46 | "partition": 0,
47 | "topic": "demo-5"
48 | }
49 | }
50 | }
51 | }
52 | ]
53 | }
54 |
 55 | // Note the difference here-
 56 | // 1. One timestamp was set by the producer before hitting the Broker
 57 | // 2. The other was assigned by the Broker after receiving the message
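 58 | // 3. (Assumption, not verified against the broker config) The broker keeping the producer-supplied
 59 | //    timestamp relies on the topic using message.timestamp.type=CreateTime, the Kafka default;
 60 | //    with LogAppendTime the broker would overwrite it with its own time.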
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce/test_kafka_publish_failed.json:
--------------------------------------------------------------------------------
1 | {
2 | "scenarioName": "Load - publish a message to kafka - valid-ip format, but no kafka",
3 | "steps": [
4 | {
5 | "name": "produce_step",
6 | "url": "kafka-topic:demo-x",
7 | "operation": "produce",
8 | "request": {
9 | "records":[
10 | {
11 | "key": "${RANDOM.NUMBER}",
12 | "value": "Hello World"
13 | }
14 | ]
15 | },
16 | "assertions": {
17 | "status" : "Failed",
18 | "message" : "org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms."
19 | }
20 | }
21 | ]
22 | }
23 |
 24 | // When you provide localhost:9092, the host is valid and reachable (localhost pings), but if no Kafka is running there,
 25 | // the client keeps retrying with the messages below-
26 |
27 | // 2018-09-28 13:07:03,628 [kafka-producer-network-thread | client1] WARN org.apache.kafka.clients.NetworkClient - [Producer clientId=client1] Error while fetching metadata with correlation id 559 : {kafka-topic:demo=INVALID_TOPIC_EXCEPTION}
28 | // 2018-09-28 13:07:03,735 [kafka-producer-network-thread | client1] WARN org.apache.kafka.clients.NetworkClient - [Producer clientId=client1] Error while fetching metadata with correlation id 560 : {kafka-topic:demo=INVALID_TOPIC_EXCEPTION}
29 | // Error in sending record
30 | // java.util.concurrent.ExecutionException: org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.
31 |
32 | // Note - the final exception message is the same, hence asserted.
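 33 |
 34 | // Illustrative note (an assumption about the wiring, not stated in this file): this scenario is expected
 35 | // to run against a host:port where no Kafka broker is listening, e.g. the config in
 36 | // kafka_servers/kafka_test_bad_server.properties:
 37 | //   kafka.bootstrap.servers=localhost:9999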
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce_data/test_data_json.json:
--------------------------------------------------------------------------------
1 | {"key":"1546955346669","value":{"id":121,"name":"Jey"}}
2 | {"key":"1546955346670","value":{"id":122,"name":"Krep"}}
3 |
--------------------------------------------------------------------------------
/src/test/resources/kafka/produce_data/test_data_raw.json:
--------------------------------------------------------------------------------
1 | {"key":"1539010017093","value":"Hello World 1"}
2 | {"key":"1539010017094","value":"Hello World 2"}
3 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_consumer.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka consumer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | group.id=consumerGroup14
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
7 | max.poll.records=2
8 | enable.auto.commit=false
9 | auto.offset.reset=earliest
10 |
11 | #
12 | #bootstrap.servers=localhost:9092
13 | #group.id=None
14 | #enable.auto.commit=true
15 | #key.deserializer=org.apache.kafka.common.serialization.LongDeserializer
16 | #value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
17 | #
18 | ## fast session timeout makes it more fun to play with failover
19 | #session.timeout.ms=10000
20 | #
21 | ## These buffer sizes seem to be needed to avoid consumer switching to
22 | ## a mode where it processes one bufferful every 5 seconds with multiple
23 | ## timeouts along the way. No idea why this happens.
24 | #fetch.min.bytes=50000
25 | #receive.buffer.bytes=262144
26 | #max.partition.fetch.bytes=2097152
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_consumer_avro.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka consumer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | group.id=consumerGroup11
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=io.confluent.kafka.serializers.KafkaAvroDeserializer
7 | schema.registry.url=http://localhost:8081
8 | max.poll.records=2
9 | enable.auto.commit=false
10 | auto.offset.reset=earliest
11 |
12 | #
13 | #bootstrap.servers=localhost:9092
14 | #group.id=None
15 | #enable.auto.commit=true
16 | #key.deserializer=org.apache.kafka.common.serialization.LongDeserializer
17 | #value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
18 | #
19 | ## fast session timeout makes it more fun to play with failover
20 | #session.timeout.ms=10000
21 | #
22 | ## These buffer sizes seem to be needed to avoid consumer switching to
23 | ## a mode where it processes one bufferful every 5 seconds with multiple
24 | ## timeouts along the way. No idea why this happens.
25 | #fetch.min.bytes=50000
26 | #receive.buffer.bytes=262144
27 | #max.partition.fetch.bytes=2097152
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_consumer_double_key.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka consumer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | group.id=consumerGroup14
5 | key.deserializer=org.apache.kafka.common.serialization.DoubleDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
7 | max.poll.records=2
8 | enable.auto.commit=false
9 | auto.offset.reset=earliest
10 |
11 | #
12 | #bootstrap.servers=localhost:9092
13 | #group.id=None
14 | #enable.auto.commit=true
15 | #key.deserializer=org.apache.kafka.common.serialization.LongDeserializer
16 | #value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
17 | #
18 | ## fast session timeout makes it more fun to play with failover
19 | #session.timeout.ms=10000
20 | #
21 | ## These buffer sizes seem to be needed to avoid consumer switching to
22 | ## a mode where it processes one bufferful every 5 seconds with multiple
23 | ## timeouts along the way. No idea why this happens.
24 | #fetch.min.bytes=50000
25 | #receive.buffer.bytes=262144
26 | #max.partition.fetch.bytes=2097152
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_consumer_int_key.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka consumer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | group.id=consumerGroup14
5 | key.deserializer=org.apache.kafka.common.serialization.IntegerDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
7 | max.poll.records=2
8 | enable.auto.commit=false
9 | auto.offset.reset=earliest
10 |
11 | #
12 | #bootstrap.servers=localhost:9092
13 | #group.id=None
14 | #enable.auto.commit=true
15 | #key.deserializer=org.apache.kafka.common.serialization.LongDeserializer
16 | #value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
17 | #
18 | ## fast session timeout makes it more fun to play with failover
19 | #session.timeout.ms=10000
20 | #
21 | ## These buffer sizes seem to be needed to avoid consumer switching to
22 | ## a mode where it processes one bufferful every 5 seconds with multiple
23 | ## timeouts along the way. No idea why this happens.
24 | #fetch.min.bytes=50000
25 | #receive.buffer.bytes=262144
26 | #max.partition.fetch.bytes=2097152
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_consumer_unique.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka consumer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | group.id=consumerGroup_${RANDOM.NUMBER}
5 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer
6 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
7 | max.poll.records=2
8 | enable.auto.commit=false
9 | auto.offset.reset=earliest
10 |
11 | #
12 | # -----------------------------
 13 | # client.id is auto-generated. Making it unique has no effect if the consumers belong to the same group.
 14 | # Making the group.id unique makes sense, as the new group can consume the same records once again.
 15 | # client.id uniqueness only differentiates it from another consumer in the same group.
16 | # Refer : ConsumerConfig.java in the source code.
17 | # /kafka/kafka/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java
18 | # -----------------------------
19 | #client.id=consumer-1123
20 | #group.id=None
21 | #enable.auto.commit=true
22 | #key.deserializer=org.apache.kafka.common.serialization.LongDeserializer
23 | #value.deserializer=org.apache.kafka.common.serialization.StringDeserializer
24 | #
25 | ## fast session timeout makes it more fun to play with failover
26 | #session.timeout.ms=10000
27 | #
28 | ## These buffer sizes seem to be needed to avoid consumer switching to
29 | ## a mode where it processes one bufferful every 5 seconds with multiple
30 | ## timeouts along the way. No idea why this happens.
31 | #fetch.min.bytes=50000
32 | #receive.buffer.bytes=262144
33 | #max.partition.fetch.bytes=2097152
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_producer.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka producer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | client.id=zerocode-producer
5 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
6 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 |
8 | # -------------------------------------------------------------
 9 | # Check the ProducerConfig.java file for the full list of keys
10 | # -------------------------------------------------------------
11 | #acks=all
12 | #retries=0
13 | #batch.size=16384
14 | #client.id=fcd-producer
15 | #auto.commit.interval.ms=1000
16 | #linger.ms=0
17 | #key.serializer=org.apache.kafka.common.serialization.StringSerializer
18 | #value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
19 | #block.on.buffer.full=true
20 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_producer_avro.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka producer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | client.id=zerocode-producer
5 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
6 | #value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 | value.serializer=io.confluent.kafka.serializers.KafkaAvroSerializer
8 | schema.registry.url=http://localhost:8081
9 |
10 | # -------------------------------------------------------------
 11 | # Check the ProducerConfig.java file for the full list of keys
12 | # -------------------------------------------------------------
13 | #acks=all
14 | #retries=0
15 | #batch.size=16384
16 | #client.id=fcd-producer
17 | #auto.commit.interval.ms=1000
18 | #linger.ms=0
19 | #key.serializer=org.apache.kafka.common.serialization.StringSerializer
20 | #value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
21 | #block.on.buffer.full=true
22 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_producer_double_key.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka producer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | client.id=zerocode-producer
5 | key.serializer=org.apache.kafka.common.serialization.DoubleSerializer
6 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 |
8 | #acks=all
9 | #retries=0
10 | #batch.size=16384
11 | #client.id=fcd-producer
12 | #auto.commit.interval.ms=1000
13 | #linger.ms=0
14 | #key.serializer=org.apache.kafka.common.serialization.StringSerializer
15 | #value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
16 | #block.on.buffer.full=true
17 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_producer_int_key.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka producer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | client.id=zerocode-producer
5 | key.serializer=org.apache.kafka.common.serialization.IntegerSerializer
6 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 |
8 | #acks=all
9 | #retries=0
10 | #batch.size=16384
11 | #client.id=fcd-producer
12 | #auto.commit.interval.ms=1000
13 | #linger.ms=0
14 | #key.serializer=org.apache.kafka.common.serialization.StringSerializer
15 | #value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
16 | #block.on.buffer.full=true
17 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_producer_unique.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka producer properties
3 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
4 | client.id=zerocode-producer_${RANDOM.NUMBER}
5 | key.serializer=org.apache.kafka.common.serialization.StringSerializer
6 | value.serializer=org.apache.kafka.common.serialization.StringSerializer
7 |
8 | # -------------------------------------------------------------
 9 | # Check the ProducerConfig.java file for the full list of keys
10 | # -------------------------------------------------------------
11 | #acks=all
12 | #retries=0
13 | #batch.size=16384
14 | #client.id=fcd-producer
15 | #auto.commit.interval.ms=1000
16 | #linger.ms=0
17 | #key.serializer=org.apache.kafka.common.serialization.StringSerializer
18 | #value.serializer=org.apache.kafka.common.serialization.ByteArraySerializer
19 | #block.on.buffer.full=true
20 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_test_bad_server.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka bootstrap servers comma separated
3 | # e.g. localhost:9092,host2:9093
4 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
5 | kafka.bootstrap.servers=localhost:9999
6 |
7 | kafka.producer.properties=kafka_servers/kafka_producer.properties
8 | kafka.consumer.properties=kafka_servers/kafka_consumer.properties
9 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_test_server.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka bootstrap servers comma separated
3 | # e.g. localhost:9092,host2:9093
4 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
5 | kafka.bootstrap.servers=localhost:9092
6 |
7 | kafka.producer.properties=kafka_servers/kafka_producer.properties
8 | kafka.consumer.properties=kafka_servers/kafka_consumer.properties
9 |
10 | # --------------------------------------------------------------------
11 | # Optional local consumer properties common/central to all test cases.
12 | # These can be overwritten by the tests locally.
13 | # --------------------------------------------------------------------
14 | # If this property is set, then the consumer does a commitSync after reading the message(s)
15 | # Make sure you don't set both commitSync and commitAsync to true
16 | consumer.commitSync = true
17 | # If this property is set, then the consumer does a commitAsync after reading the message(s)
18 | # Make sure you don't set both commitSync and commitAsync to true
19 | consumer.commitAsync = false
 20 | # All records that were read are dumped to this specified file path
21 | # This path can be a relative path or an absolute path. If the file
22 | # does not exist, it creates the file and dumps the records
23 | consumer.fileDumpTo= target/temp/demo.txt
24 | # If this property is set to true, all records are shown in the response.
25 | # When dealing with large number of records, you might not be interested
26 | # in the individual records, but interested in the recordCount
27 | # i.e. total number of records consumed
28 | consumer.showRecordsConsumed=false
 29 | # If any record(s) are read, then this counter is reset to 0 (zero) and the consumer
 30 | # polls again. If no records are fetched for a specific poll interval, then the consumer
 31 | # keeps retrying until this maximum number of polls/retries is reached.
 32 | consumer.maxNoOfRetryPollsOrTimeouts = 5
 33 | # Polling time in milliseconds i.e. how long the consumer should poll before
34 | # the next retry poll
35 | consumer.pollingTime = 1000
36 |
37 | # local producer properties
38 | producer.key1=value1-testv ycvb
39 |
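 40 | # ---------------------------------------------------------------------------
 41 | # Illustrative note (not part of the original file): the consumer.* keys above are the central
 42 | # defaults; an individual test step can override them via "consumerLocalConfigs" in its request,
 43 | # e.g. a sketch mirroring the keys used in the scenarios above:
 44 | #   "consumerLocalConfigs": { "commitSync": true, "showRecordsConsumed": true, "maxNoOfRetryPollsOrTimeouts": 5 }
 45 | # ---------------------------------------------------------------------------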
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_test_server_avro.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka bootstrap servers comma separated
3 | # e.g. localhost:9092,host2:9093
4 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
5 | kafka.bootstrap.servers=localhost:9092
6 |
7 | kafka.producer.properties=kafka_servers/kafka_producer_avro.properties
8 | kafka.consumer.properties=kafka_servers/kafka_consumer_avro.properties
9 |
10 | # Kafka REST Proxy end point for sending avro messages
11 | web.application.endpoint.host=http://localhost
12 | web.application.endpoint.port=8082
13 | web.application.endpoint.context=
14 |
15 | # URL of Kafka KSQL server
16 | kafka-ksql-server-fqdn=http://localhost:8088
17 |
18 | # --------------------------------------------------------------------
19 | # Optional local consumer properties common/central to all test cases.
20 | # These can be overwritten by the tests locally.
21 | # --------------------------------------------------------------------
22 | # If this property is set, then the consumer does a commitSync after reading the message(s)
23 | # Make sure you don't set both commitSync and commitAsync to true
24 | consumer.commitSync = true
25 | # If this property is set, then the consumer does a commitAsync after reading the message(s)
26 | # Make sure you don't set both commitSync and commitAsync to true
27 | consumer.commitAsync = false
 28 | # All records that were read are dumped to this specified file path
29 | # This path can be a relative path or an absolute path. If the file
30 | # does not exist, it creates the file and dumps the records
31 | consumer.fileDumpTo= target/temp/demo.txt
32 | # If this property is set to true, all records are shown in the response.
33 | # When dealing with large number of records, you might not be interested
34 | # in the individual records, but interested in the recordCount
35 | # i.e. total number of records consumed
36 | consumer.showRecordsConsumed=false
 37 | # If any record(s) are read, then this counter is reset to 0 (zero) and the consumer
 38 | # polls again. If no records are fetched for a specific poll interval, then the consumer
 39 | # keeps retrying until this maximum number of polls/retries is reached.
 40 | consumer.maxNoOfRetryPollsOrTimeouts = 5
 41 | # Polling time in milliseconds i.e. how long the consumer should poll before
42 | # the next retry poll
43 | consumer.pollingTime = 1000
44 |
45 | # local producer properties
46 | producer.key1=value1-testv ycvb
47 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_test_server_double_key.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka bootstrap servers comma separated
3 | # e.g. localhost:9092,host2:9093
4 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
5 | kafka.bootstrap.servers=localhost:9092
6 |
7 | kafka.producer.properties=kafka_servers/kafka_producer_double_key.properties
8 | kafka.consumer.properties=kafka_servers/kafka_consumer_double_key.properties
9 |
10 | # --------------------------------------------------------------------
11 | # Optional local consumer properties common/central to all test cases.
12 | # These can be overwritten by the tests locally.
13 | # --------------------------------------------------------------------
14 | # If this property is set, then the consumer does a commitSync after reading the message(s)
15 | # Make sure you don't set both commitSync and commitAsync to true
16 | consumer.commitSync = true
17 | # If this property is set, then the consumer does a commitAsync after reading the message(s)
18 | # Make sure you don't set both commitSync and commitAsync to true
19 | consumer.commitAsync = false
20 |
21 | # If this property is set to RAW, then the consumer reads the records as RAW,
22 | # if this is set to JSON, then the consumer reads the records as JSON.
23 | consumer.recordType = RAW
 24 | # All records that were read are dumped to this specified file path
25 | # This path can be a relative path or an absolute path. If the file
26 | # does not exist, it creates the file and dumps the records
27 | consumer.fileDumpTo= target/temp/demo.txt
28 | # If this property is set to true, all records are shown in the response.
29 | # When dealing with large number of records, you might not be interested
30 | # in the individual records, but interested in the recordCount
31 | # i.e. total number of records consumed
32 | consumer.showRecordsConsumed=true
 33 | # If any record(s) are read, then this counter is reset to 0 (zero) and the consumer
 34 | # polls again. If no records are fetched for a specific poll interval, then the consumer
 35 | # keeps retrying until this maximum number of polls/retries is reached.
 36 | consumer.maxNoOfRetryPollsOrTimeouts = 5
 37 | # Polling time in milliseconds i.e. how long the consumer should poll before
38 | # the next retry poll
39 | consumer.pollingTime = 1000
40 |
41 | # local producer properties
42 | producer.key1=value1-testv ycvb
43 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_test_server_int_key.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka bootstrap servers comma separated
3 | # e.g. localhost:9092,host2:9093
4 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
5 | kafka.bootstrap.servers=localhost:9092
6 |
7 | kafka.producer.properties=kafka_servers/kafka_producer_int_key.properties
8 | kafka.consumer.properties=kafka_servers/kafka_consumer_int_key.properties
9 |
10 | # --------------------------------------------------------------------
11 | # Optional local consumer properties common/central to all test cases.
12 | # These can be overwritten by the tests locally.
13 | # --------------------------------------------------------------------
14 | # If this property is set, then the consumer does a commitSync after reading the message(s)
15 | # Make sure you don't set both commitSync and commitAsync to true
16 | consumer.commitSync = true
17 | # If this property is set, then the consumer does a commitAsync after reading the message(s)
18 | # Make sure you don't set both commitSync and commitAsync to true
19 | consumer.commitAsync = false
20 |
21 | # If this property is set to RAW, then the consumer reads the records as RAW,
22 | # if this is set to JSON, then the consumer reads the records as JSON.
23 | consumer.recordType = RAW
 24 | # All records that were read are dumped to this specified file path
25 | # This path can be a relative path or an absolute path. If the file
26 | # does not exist, it creates the file and dumps the records
27 | consumer.fileDumpTo= target/temp/demo.txt
28 | # If this property is set to true, all records are shown in the response.
29 | # When dealing with large number of records, you might not be interested
30 | # in the individual records, but interested in the recordCount
31 | # i.e. total number of records consumed
32 | consumer.showRecordsConsumed=true
 33 | # If any record(s) are read, then this counter is reset to 0 (zero) and the consumer
 34 | # polls again. If no records are fetched for a specific poll interval, then the consumer
 35 | # keeps retrying until this maximum number of polls/retries is reached.
 36 | consumer.maxNoOfRetryPollsOrTimeouts = 5
 37 | # Polling time in milliseconds i.e. how long the consumer should poll before
38 | # the next retry poll
39 | consumer.pollingTime = 1000
40 |
41 | # local producer properties
42 | producer.key1=value1-testv ycvb
43 |
--------------------------------------------------------------------------------
/src/test/resources/kafka_servers/kafka_test_server_unique.properties:
--------------------------------------------------------------------------------
1 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
2 | # kafka bootstrap servers comma separated
3 | # e.g. localhost:9092,host2:9093
4 | # =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
5 | kafka.bootstrap.servers=localhost:9092
6 |
7 | kafka.producer.properties=kafka_servers/kafka_producer_unique.properties
8 | kafka.consumer.properties=kafka_servers/kafka_consumer_unique.properties
9 |
10 | # --------------------------------------------------------------------
11 | # Optional local consumer properties common/central to all test cases.
12 | # These can be overwritten by the tests locally.
13 | # --------------------------------------------------------------------
14 | # If this property is set, then the consumer does a commitSync after reading the message(s)
15 | # Make sure you don't set both commitSync and commitAsync to true
16 | consumer.commitSync = true
17 | # If this property is set, then the consumer does a commitAsync after reading the message(s)
18 | # Make sure you don't set both commitSync and commitAsync to true
19 | consumer.commitAsync = false
 20 | # All records that were read are dumped to this specified file path
21 | # This path can be a relative path or an absolute path. If the file
22 | # does not exist, it creates the file and dumps the records
23 | consumer.fileDumpTo= target/temp/demo.txt
24 | # If this property is set to true, all records are shown in the response.
25 | # When dealing with large number of records, you might not be interested
26 | # in the individual records, but interested in the recordCount
27 | # i.e. total number of records consumed
28 | consumer.showRecordsConsumed=false
 29 | # If any record(s) are read, then this counter is reset to 0 (zero) and the consumer
 30 | # polls again. If no records are fetched for a specific poll interval, then the consumer
 31 | # keeps retrying until this maximum number of polls/retries is reached.
 32 | consumer.maxNoOfRetryPollsOrTimeouts = 5
 33 | # Polling time in milliseconds i.e. how long the consumer should poll before
34 | # the next retry poll
35 | consumer.pollingTime = 1000
36 |
37 | # local producer properties
38 | producer.key1=value1-testv ycvb
39 |
--------------------------------------------------------------------------------