configParser) {
56 | this.recordBatchFactory = Objects.requireNonNull(recordBatchFactory);
57 | this.configParser = Objects.requireNonNull(configParser);
58 | }
59 |
60 | @Override
61 | public void configure(final Context context) {
62 | logger = Objects.requireNonNull(context.getLogger());
63 |
64 | final var rawConfig =
65 | Objects.requireNonNull(context.getConfiguration().instantiate(RawConfig.class));
66 | config = configParser.parse(rawConfig);
67 |
68 | final var serializer = new RecordSerializer();
69 | serializer.configure(config.getProducer().getConfig(), false);
70 | recordHandler = new RecordHandler(config.getRecords(), serializer);
71 |
72 | context.setFilter(new KafkaRecordFilter(config.getRecords()));
73 |
74 | if (logger.isDebugEnabled()) {
75 | logger.debug("Configured Kafka exporter: {}", config);
76 | } else {
77 | logger.info("Configured Kafka exporter");
78 | }
79 | }
80 |
81 | @Override
82 | public void open(final Controller controller) {
83 | this.controller = controller;
84 | recordBatch =
85 | recordBatchFactory.newRecordBatch(
86 | config.getProducer(), config.getMaxBatchSize(), this::updatePosition, logger);
87 |
88 | scheduleFlushBatchTask();
89 |
90 | if (logger.isDebugEnabled()) {
91 | logger.debug("Opened Kafka exporter with configuration: {}", config);
92 | } else {
93 | logger.info("Opened Kafka exporter");
94 | }
95 | }
96 |
97 | @Override
98 | public void close() {
99 | if (flushTask != null) {
100 | flushTask.cancel();
101 | }
102 |
103 | if (recordBatch != null) {
104 | recordBatch.flush();
105 | recordBatch.close();
106 | }
107 |
108 | if (logger != null) {
109 | logger.info("Closed Kafka exporter");
110 | }
111 | }
112 |
113 | @Override
114 |   public void export(final Record<?> record) {
115 | if (!recordHandler.isAllowed(record)) {
116 | logger.trace("Ignoring record {}", record);
117 | return;
118 | }
119 |
120 | final var producerRecord = recordHandler.transform(record);
121 | recordBatch.add(producerRecord);
122 | logger.trace("Added {} to the batch", producerRecord);
123 | }
124 |
125 | private void scheduleFlushBatchTask() {
126 |     logger.trace("Scheduling flush task in {}", config.getFlushInterval());
127 | flushTask = controller.scheduleCancellableTask(config.getFlushInterval(), this::flushBatchTask);
128 | }
129 |
130 | private void flushBatchTask() {
131 | try {
132 | recordBatch.flush();
133 | } finally {
134 | scheduleFlushBatchTask();
135 | }
136 | }
137 |
138 | private void updatePosition(final long position) {
139 | controller.updateLastExportedRecordPosition(position);
140 | logger.trace("Flushed batch and updated last exported record position to {}", position);
141 | }
142 | }
143 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/Config.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config;
17 |
18 | import java.time.Duration;
19 | import java.util.Objects;
20 |
21 | /**
22 | * Entrypoint for the effective {@link io.zeebe.exporters.kafka.KafkaExporter} configuration. This
23 | * is what the exporter will use as final configuration. See {@link
24 | * io.zeebe.exporters.kafka.config.raw.RawConfig} and {@link
25 | * io.zeebe.exporters.kafka.config.parser.RawConfigParser} for more on how the external
26 | * configuration is parsed into an instance of this class.
27 | */
28 | public final class Config {
29 | private final ProducerConfig producer;
30 | private final RecordsConfig records;
31 | private final int maxBatchSize;
32 | private final Duration flushInterval;
33 |
34 | public Config(
35 | final ProducerConfig producer,
36 | final RecordsConfig records,
37 | final int maxBatchSize,
38 | final Duration flushInterval) {
39 | this.producer = Objects.requireNonNull(producer);
40 | this.records = Objects.requireNonNull(records);
41 | this.maxBatchSize = maxBatchSize;
42 | this.flushInterval = Objects.requireNonNull(flushInterval);
43 | }
44 |
45 | public ProducerConfig getProducer() {
46 | return producer;
47 | }
48 |
49 | public RecordsConfig getRecords() {
50 | return records;
51 | }
52 |
53 | public int getMaxBatchSize() {
54 | return maxBatchSize;
55 | }
56 |
57 | public Duration getFlushInterval() {
58 | return flushInterval;
59 | }
60 |
61 | @Override
62 | public int hashCode() {
63 | return Objects.hash(producer, records, maxBatchSize, flushInterval);
64 | }
65 |
66 | @Override
67 | public boolean equals(final Object o) {
68 | if (this == o) {
69 | return true;
70 | }
71 | if (o == null || getClass() != o.getClass()) {
72 | return false;
73 | }
74 | final Config config = (Config) o;
75 | return getMaxBatchSize() == config.getMaxBatchSize()
76 | && Objects.equals(getProducer(), config.getProducer())
77 | && Objects.equals(getRecords(), config.getRecords())
79 | && Objects.equals(getFlushInterval(), config.getFlushInterval());
80 | }
81 |
82 | @Override
83 | public String toString() {
84 | return "Config{"
85 | + "producer="
86 | + producer
87 | + ", records="
88 | + records
89 | + ", maxBatchSize="
90 | + maxBatchSize
91 |         + ", flushInterval="
92 | + flushInterval
93 | + '}';
94 | }
95 | }
96 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/ProducerConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config;
17 |
18 | import java.time.Duration;
19 | import java.util.List;
20 | import java.util.Map;
21 | import java.util.Objects;
22 |
23 | /**
24 | * {@link ProducerConfig} is used by instances of {@link
25 | * io.zeebe.exporters.kafka.producer.KafkaProducerFactory} to configure a producer. A few standard
26 |  * configuration options (e.g. {@code clientId}, {@code servers}) were extracted as explicit
27 |  * properties as they are common - everything else can be configured via the free-form {@code config} map.
28 | *
29 |  * <p>NOTE: be aware that when configuring a producer using the {@code config} map, Kafka expects the
30 | * values to either be strings OR very specific data types. While these are well documented, if
31 | * you're unsure of the expected data type (e.g. Integer, Long, Boolean), then just pass a string
32 | * representation of what you want to use.
33 | */
34 | public final class ProducerConfig {
35 | private final String clientId;
36 | private final Duration closeTimeout;
37 |   private final Map<String, Object> config;
38 | private final Duration requestTimeout;
39 | private final Duration maxBlockingTimeout;
40 |   private final List<String> servers;
41 |
42 | public ProducerConfig(
43 | final String clientId,
44 | final Duration closeTimeout,
45 |       final Map<String, Object> config,
46 | final Duration requestTimeout,
47 | final Duration maxBlockingTimeout,
48 |       final List<String> servers) {
49 | this.clientId = Objects.requireNonNull(clientId);
50 | this.closeTimeout = Objects.requireNonNull(closeTimeout);
51 | this.config = Objects.requireNonNull(config);
52 | this.requestTimeout = Objects.requireNonNull(requestTimeout);
53 | this.maxBlockingTimeout = Objects.requireNonNull(maxBlockingTimeout);
54 | this.servers = Objects.requireNonNull(servers);
55 | }
56 |
57 | public String getClientId() {
58 | return clientId;
59 | }
60 |
61 | public Duration getCloseTimeout() {
62 | return closeTimeout;
63 | }
64 |
65 |   public Map<String, Object> getConfig() {
66 | return config;
67 | }
68 |
69 | public Duration getRequestTimeout() {
70 | return requestTimeout;
71 | }
72 |
73 | public Duration getMaxBlockingTimeout() {
74 | return maxBlockingTimeout;
75 | }
76 |
77 |   public List<String> getServers() {
78 | return servers;
79 | }
80 |
81 | @Override
82 | public int hashCode() {
83 | return Objects.hash(
84 | clientId, closeTimeout, config, requestTimeout, maxBlockingTimeout, servers);
85 | }
86 |
87 | @Override
88 | public boolean equals(final Object o) {
89 | if (this == o) {
90 | return true;
91 | }
92 | if (o == null || getClass() != o.getClass()) {
93 | return false;
94 | }
95 | final ProducerConfig that = (ProducerConfig) o;
96 | return Objects.equals(getClientId(), that.getClientId())
97 | && Objects.equals(getCloseTimeout(), that.getCloseTimeout())
98 | && Objects.equals(getConfig(), that.getConfig())
99 | && Objects.equals(getRequestTimeout(), that.getRequestTimeout())
100 | && Objects.equals(getMaxBlockingTimeout(), that.getMaxBlockingTimeout())
101 | && Objects.equals(getServers(), that.getServers());
102 | }
103 |
104 | @Override
105 | public String toString() {
106 | return "ProducerConfig{"
107 | + "clientId='"
108 | + clientId
109 | + '\''
110 | + ", closeTimeout="
111 | + closeTimeout
112 | + ", config="
113 | + config
114 | + ", requestTimeout="
115 | + requestTimeout
116 | + ", maxBlockingTimeout="
117 | + maxBlockingTimeout
118 | + ", servers="
119 | + servers
120 | + '}';
121 | }
122 | }
123 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/RecordConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config;
17 |
18 | import io.camunda.zeebe.protocol.record.Record;
19 | import io.camunda.zeebe.protocol.record.RecordType;
20 | import java.util.Objects;
21 | import java.util.Set;
22 |
23 | /**
24 | * {@link RecordConfig} describes what the exporter should do with a record of a given {@link
25 | * io.camunda.zeebe.protocol.record.ValueType} - this is mapped via {@link RecordsConfig}, which
26 | * holds a map of {@link io.camunda.zeebe.protocol.record.ValueType} to {@link RecordConfig}.
27 | *
28 |  * <p>For the {@link io.camunda.zeebe.protocol.record.ValueType} associated with this instance, only
29 | * records with a {@link Record#getRecordType()} which is included in {@code allowedTypes} will be
30 | * exported. An empty set of {@code allowedTypes} means nothing gets exported.
31 | */
32 | public final class RecordConfig {
33 |   private final Set<RecordType> allowedTypes;
34 | private final String topic;
35 |
36 |   public RecordConfig(final Set<RecordType> allowedTypes, final String topic) {
37 | this.allowedTypes = Objects.requireNonNull(allowedTypes);
38 | this.topic = Objects.requireNonNull(topic);
39 | }
40 |
41 |   public Set<RecordType> getAllowedTypes() {
42 | return allowedTypes;
43 | }
44 |
45 | public String getTopic() {
46 | return topic;
47 | }
48 |
49 | @Override
50 | public int hashCode() {
51 | return Objects.hash(allowedTypes, topic);
52 | }
53 |
54 | @Override
55 | public boolean equals(final Object o) {
56 | if (this == o) {
57 | return true;
58 | }
59 | if (o == null || getClass() != o.getClass()) {
60 | return false;
61 | }
62 | final RecordConfig that = (RecordConfig) o;
63 | return Objects.equals(getAllowedTypes(), that.getAllowedTypes())
64 | && Objects.equals(getTopic(), that.getTopic());
65 | }
66 |
67 | @Override
68 | public String toString() {
69 | return "RecordConfig{" + "allowedTypes=" + allowedTypes + ", topic='" + topic + '\'' + '}';
70 | }
71 | }
72 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/RecordsConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config;
17 |
18 | import io.camunda.zeebe.protocol.record.ValueType;
19 | import java.util.Map;
20 | import java.util.Objects;
21 | import java.util.Optional;
22 |
23 | /**
24 | * {@link RecordsConfig} provides a default {@link RecordConfig} for every {@link ValueType}, with
25 | * the possibility of setting a specific {@link RecordConfig} for a given {@link ValueType}.
26 | */
27 | public final class RecordsConfig {
28 |   private final Map<ValueType, RecordConfig> typeMap;
29 | private final RecordConfig defaults;
30 |
31 |   public RecordsConfig(final Map<ValueType, RecordConfig> typeMap, final RecordConfig defaults) {
32 | this.typeMap = Objects.requireNonNull(typeMap);
33 | this.defaults = Objects.requireNonNull(defaults);
34 | }
35 |
36 |   public Map<ValueType, RecordConfig> getTypeMap() {
37 | return typeMap;
38 | }
39 |
40 | public RecordConfig getDefaults() {
41 | return defaults;
42 | }
43 |
44 | /**
45 | * Returns the correct {@link RecordConfig} for this type, or {@link #getDefaults()} if none
46 | * defined for the given type.
47 | *
48 | * @param type the value type to get the {@link RecordConfig} of
49 | * @return the configured {@link RecordConfig} for this type, or {@link #getDefaults()}
50 | */
51 | public RecordConfig forType(final ValueType type) {
52 | return Optional.ofNullable(typeMap.get(type)).orElse(defaults);
53 | }
54 |
55 | @Override
56 | public int hashCode() {
57 | return Objects.hash(defaults, typeMap);
58 | }
59 |
60 | @Override
61 | public boolean equals(final Object o) {
62 | if (this == o) {
63 | return true;
64 | }
65 | if (o == null || getClass() != o.getClass()) {
66 | return false;
67 | }
68 | final RecordsConfig that = (RecordsConfig) o;
69 | return Objects.equals(getTypeMap(), that.getTypeMap())
70 | && Objects.equals(getDefaults(), that.getDefaults());
71 | }
72 |
73 | @Override
74 | public String toString() {
75 | return "RecordsConfig{" + "typeMap=" + typeMap + ", defaults=" + defaults + '}';
76 | }
77 | }
78 |
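A minimal usage sketch of the fallback lookup (the topics and types below are illustrative, not defaults):

    final Map<ValueType, RecordConfig> typeMap = new EnumMap<>(ValueType.class);
    typeMap.put(ValueType.JOB, new RecordConfig(EnumSet.of(RecordType.EVENT), "zeebe-job"));
    final RecordsConfig records =
        new RecordsConfig(typeMap, new RecordConfig(EnumSet.of(RecordType.EVENT), "zeebe"));

    records.forType(ValueType.JOB).getTopic();      // "zeebe-job" - explicitly mapped
    records.forType(ValueType.INCIDENT).getTopic(); // "zeebe" - falls back to the defaults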
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/AllowedType.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import io.camunda.zeebe.protocol.record.RecordType;
19 | import java.util.Objects;
20 |
21 | /**
22 | * {@link AllowedType} maps string values to {@link RecordType} values, and is used purely for
23 | * parsing purposes. {@link RecordType} is not used directly as not all types are supported.
24 | */
25 | public enum AllowedType {
26 | COMMAND("command", RecordType.COMMAND),
27 | EVENT("event", RecordType.EVENT),
28 | REJECTION("rejection", RecordType.COMMAND_REJECTION);
29 |
30 | private final String typeName;
31 | private final RecordType recordType;
32 |
33 | AllowedType(final String typeName, final RecordType recordType) {
34 | this.typeName = Objects.requireNonNull(typeName);
35 | this.recordType = Objects.requireNonNull(recordType);
36 | }
37 |
38 | public String getTypeName() {
39 | return typeName;
40 | }
41 |
42 | public RecordType getRecordType() {
43 | return recordType;
44 | }
45 |
46 | public static AllowedType forName(final String name) {
47 | if (COMMAND.typeName.equals(name)) {
48 | return COMMAND;
49 | } else if (EVENT.typeName.equals(name)) {
50 | return EVENT;
51 | } else if (REJECTION.typeName.equals(name)) {
52 | return REJECTION;
53 | } else {
54 | throw new IllegalArgumentException("Unknown record type name: " + name);
55 | }
56 | }
57 | }
58 |
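For illustration, the mapping in action; note that "rejection" maps to RecordType.COMMAND_REJECTION rather than a dedicated enum constant:

    AllowedType.forName("rejection").getRecordType(); // RecordType.COMMAND_REJECTION
    AllowedType.forName("unknown");                   // throws IllegalArgumentException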
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/ConfigParser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import java.util.function.Supplier;
19 |
20 | /**
21 | * {@link ConfigParser} is a single-responsibility interface which should parse any given instance
22 | * of type {@code T} into a valid instance of type {@code R}.
23 | *
24 |  * @param <T> the raw configuration type to be parsed
25 |  * @param <R> the parsed configuration type
26 | */
27 | @FunctionalInterface
28 | public interface ConfigParser<T, R> {
29 |
30 | R parse(T config);
31 |
32 |   default R parse(T config, final Supplier<T> defaultValue) {
33 | if (config == null) {
34 | config = defaultValue.get();
35 | }
36 |
37 | return parse(config);
38 | }
39 | }
40 |
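Since the interface is functional, a parser can be written as a lambda or method reference; a hypothetical sketch of the two-argument overload substituting a default for a missing (null) raw value:

    final ConfigParser<Long, Duration> parser = Duration::ofMillis;
    parser.parse(500L);              // PT0.5S
    parser.parse(null, () -> 1000L); // PT1S - the supplier provides the raw default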
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/ConfigParserUtil.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import java.util.Arrays;
19 | import java.util.List;
20 | import java.util.Optional;
21 | import java.util.function.Function;
22 | import java.util.stream.Collectors;
23 |
24 | /**
25 | * Utility tool belt to parse configuration. Only add methods here if they are used in more than one
26 | * class.
27 | */
28 | final class ConfigParserUtil {
29 | private ConfigParserUtil() {}
30 |
31 |   static <T> T get(final T property, final T fallback) {
32 | return Optional.ofNullable(property).orElse(fallback);
33 | }
34 |
35 |   static <T, R> R get(final T property, final R fallback, final Function<T, R> transformer) {
36 | return Optional.ofNullable(property).map(transformer).orElse(fallback);
37 | }
38 |
39 |   static List<String> splitCommaSeparatedString(final String value) {
40 | return Arrays.stream(value.split(",")).map(String::trim).collect(Collectors.toList());
41 | }
42 | }
43 |
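The helpers implement simple null-fallback semantics; illustrative calls (from within the same package, as the class is package-private):

    get(null, "zeebe");                                   // "zeebe"
    get(500L, Duration.ofSeconds(1), Duration::ofMillis); // PT0.5S - transformer applied to non-null values
    splitCommaSeparatedString("a:9092, b:9092");          // ["a:9092", "b:9092"] - entries are trimmed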
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawConfigParser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static io.zeebe.exporters.kafka.config.parser.ConfigParserUtil.get;
19 |
20 | import io.zeebe.exporters.kafka.config.Config;
21 | import io.zeebe.exporters.kafka.config.ProducerConfig;
22 | import io.zeebe.exporters.kafka.config.RecordsConfig;
23 | import io.zeebe.exporters.kafka.config.raw.RawConfig;
24 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig;
25 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig;
26 | import java.time.Duration;
27 | import java.util.Objects;
28 |
29 | /**
30 | * {@link RawConfigParser} parses a given {@link RawConfig} into a valid {@link Config} instance,
31 | * substituting sane defaults for missing properties.
32 | *
33 |  * <p>You can inject your own {@code recordsConfigParser} and {@code producerConfigParser} implementations
34 | * to overwrite the parsing for nested types.
35 | */
36 | public final class RawConfigParser implements ConfigParser<RawConfig, Config> {
37 | static final int DEFAULT_MAX_BATCH_SIZE = 100;
38 | static final Duration DEFAULT_FLUSH_INTERVAL_MS = Duration.ofSeconds(1);
39 |
40 |   private final ConfigParser<RawRecordsConfig, RecordsConfig> recordsConfigParser;
41 |   private final ConfigParser<RawProducerConfig, ProducerConfig> producerConfigParser;
42 |
43 | public RawConfigParser() {
44 | this(new RawRecordsConfigParser(), new RawProducerConfigParser());
45 | }
46 |
47 | RawConfigParser(
48 |       final ConfigParser<RawRecordsConfig, RecordsConfig> recordsConfigParser,
49 |       final ConfigParser<RawProducerConfig, ProducerConfig> producerConfigParser) {
50 | this.recordsConfigParser = Objects.requireNonNull(recordsConfigParser);
51 | this.producerConfigParser = Objects.requireNonNull(producerConfigParser);
52 | }
53 |
54 | @Override
55 | public Config parse(final RawConfig config) {
56 | Objects.requireNonNull(config);
57 |
58 | final ProducerConfig producerConfig =
59 | producerConfigParser.parse(config.producer, RawProducerConfig::new);
60 | final RecordsConfig recordsConfig =
61 | recordsConfigParser.parse(config.records, RawRecordsConfig::new);
62 | final Integer maxBatchSize = get(config.maxBatchSize, DEFAULT_MAX_BATCH_SIZE);
63 | final Duration flushInterval =
64 | get(config.flushIntervalMs, DEFAULT_FLUSH_INTERVAL_MS, Duration::ofMillis);
65 |
66 | return new Config(producerConfig, recordsConfig, maxBatchSize, flushInterval);
67 | }
68 | }
69 |
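For example, parsing an empty RawConfig yields the documented defaults throughout:

    final Config config = new RawConfigParser().parse(new RawConfig());
    config.getMaxBatchSize();  // 100 (DEFAULT_MAX_BATCH_SIZE)
    config.getFlushInterval(); // PT1S (DEFAULT_FLUSH_INTERVAL_MS)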
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawProducerConfigParser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static io.zeebe.exporters.kafka.config.parser.ConfigParserUtil.get;
19 |
20 | import io.zeebe.exporters.kafka.config.ProducerConfig;
21 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig;
22 | import java.io.IOException;
23 | import java.io.Reader;
24 | import java.io.StringReader;
25 | import java.io.UncheckedIOException;
26 | import java.time.Duration;
27 | import java.util.Collections;
28 | import java.util.HashMap;
29 | import java.util.List;
30 | import java.util.Map;
31 | import java.util.Objects;
32 | import java.util.Properties;
33 |
34 | /**
35 | * {@link RawProducerConfigParser} parses instances of {@link RawProducerConfig} into valid
36 | * instances of {@link ProducerConfig}, substituting sane defaults for missing properties.
37 | *
38 |  * <p>One thing to note is that it will parse the {@link RawProducerConfig#config} string as if it were
39 | * a properties file, delegating this to {@link Properties#load(Reader)}.
40 | */
41 | public class RawProducerConfigParser implements ConfigParser<RawProducerConfig, ProducerConfig> {
42 |
43 | public static final Duration DEFAULT_MAX_BLOCKING_TIMEOUT = Duration.ofSeconds(2);
44 |   static final List<String> DEFAULT_SERVERS = Collections.singletonList("localhost:9092");
45 | static final String DEFAULT_CLIENT_ID = "zeebe";
46 | static final Duration DEFAULT_CLOSE_TIMEOUT = Duration.ofSeconds(20);
47 | static final Duration DEFAULT_REQUEST_TIMEOUT = Duration.ofSeconds(5);
48 |
49 | @Override
50 | public ProducerConfig parse(final RawProducerConfig config) {
51 | Objects.requireNonNull(config);
52 |
53 |     final List<String> servers =
54 | get(config.servers, DEFAULT_SERVERS, ConfigParserUtil::splitCommaSeparatedString);
55 | final String clientId = get(config.clientId, DEFAULT_CLIENT_ID);
56 | final Duration closeTimeout =
57 | get(config.closeTimeoutMs, DEFAULT_CLOSE_TIMEOUT, Duration::ofMillis);
58 | final Duration requestTimeout =
59 | get(config.requestTimeoutMs, DEFAULT_REQUEST_TIMEOUT, Duration::ofMillis);
60 | final Duration maxBlockingTimeout =
61 | get(config.maxBlockingTimeoutMs, DEFAULT_MAX_BLOCKING_TIMEOUT, Duration::ofMillis);
62 |     final Map<String, Object> producerConfig =
63 | get(config.config, new HashMap<>(), this::parseProperties);
64 |
65 | return new ProducerConfig(
66 | clientId, closeTimeout, producerConfig, requestTimeout, maxBlockingTimeout, servers);
67 | }
68 |
69 |   private Map<String, Object> parseProperties(final String propertiesString) {
70 | final Properties properties = new Properties();
71 |     final Map<String, Object> parsed = new HashMap<>();
72 |
73 | try {
74 | properties.load(new StringReader(propertiesString));
75 | } catch (final IOException e) {
76 | throw new UncheckedIOException(e);
77 | }
78 |
79 | for (final String property : properties.stringPropertyNames()) {
80 | parsed.put(property, properties.get(property));
81 | }
82 |
83 | return parsed;
84 | }
85 | }
86 |
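A short sketch of the properties-string parsing described above; the property values are illustrative, and per the note on ProducerConfig, string values are always safe:

    final RawProducerConfig raw = new RawProducerConfig();
    raw.servers = "broker-1:9092,broker-2:9092";
    raw.config = "linger.ms=5\nacks=all"; // parsed via Properties#load

    final ProducerConfig parsed = new RawProducerConfigParser().parse(raw);
    parsed.getServers();  // ["broker-1:9092", "broker-2:9092"]
    parsed.getClientId(); // "zeebe" (DEFAULT_CLIENT_ID)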
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawRecordConfigParser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static io.zeebe.exporters.kafka.config.parser.ConfigParserUtil.get;
19 |
20 | import io.camunda.zeebe.protocol.record.RecordType;
21 | import io.zeebe.exporters.kafka.config.RecordConfig;
22 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig;
23 | import java.util.Collections;
24 | import java.util.EnumSet;
25 | import java.util.Objects;
26 | import java.util.Optional;
27 | import java.util.Set;
28 | import java.util.function.Predicate;
29 |
30 | /**
31 | * {@link RawRecordConfigParser} parses instances of {@link RawRecordConfig} into valid instances of
32 | * {@link RecordConfig}, substituting defaults for missing properties.
33 | *
34 |  * <p>The defaults can be overridden with an instance of {@link RecordConfig}, from which missing
35 |  * properties will be taken. This is used notably in {@link RawRecordsConfigParser}, which
36 |  * first parses {@link io.zeebe.exporters.kafka.config.raw.RawRecordsConfig#defaults} and passes
37 |  * the parsed value as the defaults here for all subsequent properties.
38 | */
39 | public class RawRecordConfigParser implements ConfigParser<RawRecordConfig, RecordConfig> {
40 | static final String DEFAULT_TOPIC_NAME = "zeebe";
41 |   static final EnumSet<RecordType> DEFAULT_ALLOWED_TYPES =
42 | EnumSet.complementOf(EnumSet.of(RecordType.NULL_VAL, RecordType.SBE_UNKNOWN));
43 |
44 | private final RecordConfig defaults;
45 |
46 | public RawRecordConfigParser() {
47 | this(new RecordConfig(DEFAULT_ALLOWED_TYPES, DEFAULT_TOPIC_NAME));
48 | }
49 |
50 | public RawRecordConfigParser(final RecordConfig defaults) {
51 | this.defaults = defaults;
52 | }
53 |
54 | @Override
55 | public RecordConfig parse(final RawRecordConfig config) {
56 | Objects.requireNonNull(config);
57 |
58 |     final Set<RecordType> allowedTypes;
59 | final String topic = Optional.ofNullable(config.topic).orElse(defaults.getTopic());
60 |
61 | if (config.type != null) {
62 | allowedTypes = EnumSet.noneOf(RecordType.class);
63 | get(config.type, Collections.emptyList(), ConfigParserUtil::splitCommaSeparatedString)
64 | .stream()
65 | .filter(Predicate.not(String::isBlank))
66 | .forEach(t -> allowedTypes.add(AllowedType.forName(t).getRecordType()));
67 | } else {
68 | allowedTypes = defaults.getAllowedTypes();
69 | }
70 |
71 | return new RecordConfig(allowedTypes, topic);
72 | }
73 | }
74 |
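For example, parsing a raw record configuration with an explicit type filter but no topic:

    final RawRecordConfig raw = new RawRecordConfig();
    raw.type = "command,rejection"; // see AllowedType for the accepted names

    final RecordConfig parsed = new RawRecordConfigParser().parse(raw);
    parsed.getAllowedTypes(); // {COMMAND, COMMAND_REJECTION}
    parsed.getTopic();        // "zeebe" (DEFAULT_TOPIC_NAME, as raw.topic was null)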
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/parser/RawRecordsConfigParser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import io.camunda.zeebe.protocol.record.ValueType;
19 | import io.zeebe.exporters.kafka.config.RecordConfig;
20 | import io.zeebe.exporters.kafka.config.RecordsConfig;
21 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig;
22 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig;
23 | import java.util.EnumMap;
24 | import java.util.Map;
25 | import java.util.Objects;
26 | import java.util.Optional;
27 |
28 | /**
29 | * {@link RawRecordsConfigParser} parses instances of {@link RawRecordsConfig} into valid instances
30 | * of {@link RecordsConfig}.
31 | *
32 |  * <p>You'll note that it's not possible to pass your own implementation of {@code
33 |  * ConfigParser<RawRecordConfig, RecordConfig>} - this is because after parsing {@link
34 | * RawRecordsConfig#defaults}, the result is passed as defaults to a new instance of {@link
35 | * RawRecordConfigParser}. This breaks the usual design and usage of DI, and should be refactored.
36 | */
37 | public class RawRecordsConfigParser implements ConfigParser<RawRecordsConfig, RecordsConfig> {
38 |   private static final ConfigParser<RawRecordConfig, RecordConfig> DEFAULTS_RECORD_CONFIG_PARSER =
39 | new RawRecordConfigParser();
40 |
41 | @SuppressWarnings("java:S138")
42 | @Override
43 | public RecordsConfig parse(final RawRecordsConfig config) {
44 | Objects.requireNonNull(config);
45 |
46 |     final Map<ValueType, RecordConfig> typeMap = new EnumMap<>(ValueType.class);
47 | final RecordConfig defaults =
48 | DEFAULTS_RECORD_CONFIG_PARSER.parse(config.defaults, RawRecordConfig::new);
49 |     final ConfigParser<RawRecordConfig, RecordConfig> recordConfigParser =
50 | new RawRecordConfigParser(defaults);
51 |
52 | Optional.ofNullable(config.deployment)
53 | .map(recordConfigParser::parse)
54 | .ifPresent(c -> typeMap.put(ValueType.DEPLOYMENT, c));
55 | Optional.ofNullable(config.deploymentDistribution)
56 | .map(recordConfigParser::parse)
57 | .ifPresent(c -> typeMap.put(ValueType.DEPLOYMENT_DISTRIBUTION, c));
58 | Optional.ofNullable(config.error)
59 | .map(recordConfigParser::parse)
60 | .ifPresent(c -> typeMap.put(ValueType.ERROR, c));
61 | Optional.ofNullable(config.incident)
62 | .map(recordConfigParser::parse)
63 | .ifPresent(c -> typeMap.put(ValueType.INCIDENT, c));
64 | Optional.ofNullable(config.job)
65 | .map(recordConfigParser::parse)
66 | .ifPresent(c -> typeMap.put(ValueType.JOB, c));
67 | Optional.ofNullable(config.jobBatch)
68 | .map(recordConfigParser::parse)
69 | .ifPresent(c -> typeMap.put(ValueType.JOB_BATCH, c));
70 | Optional.ofNullable(config.message)
71 | .map(recordConfigParser::parse)
72 | .ifPresent(c -> typeMap.put(ValueType.MESSAGE, c));
73 | Optional.ofNullable(config.messageSubscription)
74 | .map(recordConfigParser::parse)
75 | .ifPresent(c -> typeMap.put(ValueType.MESSAGE_SUBSCRIPTION, c));
76 | Optional.ofNullable(config.messageStartEventSubscription)
77 | .map(recordConfigParser::parse)
78 | .ifPresent(c -> typeMap.put(ValueType.MESSAGE_START_EVENT_SUBSCRIPTION, c));
79 | Optional.ofNullable(config.processInstance)
80 | .map(recordConfigParser::parse)
81 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_INSTANCE, c));
82 | Optional.ofNullable(config.processInstanceCreation)
83 | .map(recordConfigParser::parse)
84 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_INSTANCE_CREATION, c));
85 | Optional.ofNullable(config.processInstanceResult)
86 | .map(recordConfigParser::parse)
87 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_INSTANCE_RESULT, c));
88 | Optional.ofNullable(config.processMessageSubscription)
89 | .map(recordConfigParser::parse)
90 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_MESSAGE_SUBSCRIPTION, c));
91 | Optional.ofNullable(config.process)
92 | .map(recordConfigParser::parse)
93 | .ifPresent(c -> typeMap.put(ValueType.PROCESS, c));
94 | Optional.ofNullable(config.processEvent)
95 | .map(recordConfigParser::parse)
96 | .ifPresent(c -> typeMap.put(ValueType.PROCESS_EVENT, c));
97 | Optional.ofNullable(config.timer)
98 | .map(recordConfigParser::parse)
99 | .ifPresent(c -> typeMap.put(ValueType.TIMER, c));
100 | Optional.ofNullable(config.variable)
101 | .map(recordConfigParser::parse)
102 | .ifPresent(c -> typeMap.put(ValueType.VARIABLE, c));
103 | Optional.ofNullable(config.variableDocument)
104 | .map(recordConfigParser::parse)
105 | .ifPresent(c -> typeMap.put(ValueType.VARIABLE_DOCUMENT, c));
106 |
107 | return new RecordsConfig(typeMap, defaults);
108 | }
109 | }
110 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.raw;
17 |
18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck")
19 | public final class RawConfig {
20 | /**
21 | * Controls the number of records to buffer in a single record batch before forcing a flush. Note
22 |  * that a flush may occur earlier anyway due to periodic flushing. This setting should help you
23 | * estimate a soft upper bound to the memory consumption of the exporter. If you assume a worst
24 | * case scenario where every record is the size of your zeebe.broker.network.maxMessageSize, then
25 | * the memory required by the exporter would be at least: (maxBatchSize *
26 | * zeebe.broker.network.maxMessageSize * 2)
27 | *
28 |  * <p>We multiply by 2 as the records are buffered twice - once in the exporter itself, and once
29 | * in the producer's network buffers (but serialized at that point). There's some additional
30 | * memory overhead used by the producer as well for compression/encryption/etc., so you have to
31 | * add a bit, but that one is not proportional to the number of records and is more or less
32 | * constant.
33 | *
34 |  * <p>Once the batch has reached this size, a flush is automatically triggered. Too small a number
35 |  * here would cause many flushes, which is not good for performance, but would mean you see
36 |  * your records sooner.
37 | */
38 | public Integer maxBatchSize;
39 |
40 | /**
41 |  * How often the current batch should be flushed to Kafka, regardless of whether it's full or not.
42 | */
43 | public Long flushIntervalMs;
44 |
45 | /** Producer specific configuration; see {@link RawProducerConfig}. */
46 | public RawProducerConfig producer;
47 |
48 | /** Records specific configuration; see {@link RawRecordsConfig}. */
49 | public RawRecordsConfig records;
50 | }
51 |
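As a worked example of the bound described for maxBatchSize, assuming the default maxBatchSize of 100 and Zeebe's default maxMessageSize of 4MiB:

    // worst case: every buffered record is maxMessageSize, and each is buffered twice
    // (exporter list + producer network buffers)
    final long worstCaseBytes = 100L * 4 * 1024 * 1024 * 2; // 838_860_800 bytes, ~800 MiB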
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawProducerConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.raw;
17 |
18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck")
19 | public final class RawProducerConfig {
20 |
21 | /**
22 | * Producer client identifier.
23 | *
24 | * @see org.apache.kafka.clients.producer.ProducerConfig#CLIENT_ID_CONFIG
25 | */
26 | public String clientId;
27 |
28 | /**
29 |  * Grace period when shutting down the producer, in milliseconds. A period that is too short may
30 |  * result in resource leaks, but is generally fine.
31 | */
32 | public Long closeTimeoutMs;
33 |
34 | /**
35 | * Line-separated list of Java properties, e.g. the contents of a properties file. The resulting
36 | * map is passed verbatim as part of the {@link org.apache.kafka.clients.producer.ProducerConfig}.
37 | * You can use any of the properties defined there. This allows you to configure OAuth, SSL, SASL,
38 | * etc.
39 | *
40 |  * <p>Be careful as this allows you to overwrite anything - e.g. key and value serializers - which
41 | * can break the exporter behaviour, so make sure to properly test your settings before deploying.
42 | */
43 | public String config;
44 |
45 | /**
46 | * Controls how long the producer will wait for a request to be acknowledged by the Kafka broker
47 | * before retrying it.
48 | *
49 | * @see org.apache.kafka.clients.producer.ProducerConfig#REQUEST_TIMEOUT_MS_CONFIG
50 | */
51 | public Long requestTimeoutMs;
52 |
53 | /**
54 | * The maximum time to block for all blocking requests, e.g. beginTransaction, commitTransaction.
55 | * It's recommended to keep this low, around a second, as it's also the time the exporter will
56 | * block if the batch is full when trying to commit/flush it. Keeping it low isn't a big issue, as
57 | * even if it times out the first time, Kafka will still commit the transaction in the background,
58 | * and on the next try the transaction will commit much faster (e.g. if it's already committed as
59 | * far as the brokers are concerned, then it should be really fast).
60 | *
61 | * @see org.apache.kafka.clients.producer.ProducerConfig#MAX_BLOCK_MS_CONFIG
62 | */
63 | public Long maxBlockingTimeoutMs;
64 |
65 | /**
66 |  * The comma-separated list of initial Kafka broker contact points. The format should be the same
67 |  * as the {@link org.apache.kafka.clients.producer.ProducerConfig} expects, i.e. "host:port".
68 | *
69 | * @see org.apache.kafka.clients.producer.ProducerConfig#BOOTSTRAP_SERVERS_CONFIG
70 | */
71 | public String servers;
72 | }
73 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawRecordConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.raw;
17 |
18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck")
19 | public final class RawRecordConfig {
20 |
21 | /**
22 |  * Type is a comma separated string of accepted record types, allowing you to filter if you want
23 |  * nothing (""), commands ("command"), events ("event"), rejections ("rejection"), or a
24 |  * combination of the three, e.g. "command,event".
25 | */
26 | public String type;
27 |
28 | /**
29 |  * Topic is the topic to which records with the given value type should be sent, e.g. for a
30 |  * deployment record we would send the record to the "zeebe-deployment" topic.
31 | */
32 | public String topic;
33 | }
34 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/config/raw/RawRecordsConfig.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.raw;
17 |
18 | @SuppressWarnings("squid:ClassVariableVisibilityCheck")
19 | public final class RawRecordsConfig {
20 |
21 | /**
22 | * If a record value type is omitted in your configuration file, it will fall back to whatever is
23 | * configured in the defaults.
24 | */
25 | public RawRecordConfig defaults;
26 |
27 | /**
28 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#DEPLOYMENT}
29 | */
30 | public RawRecordConfig deployment;
31 |
32 | /**
33 | * For records with a value of type {@link
34 | * io.camunda.zeebe.protocol.record.ValueType#DEPLOYMENT_DISTRIBUTION}
35 | */
36 | public RawRecordConfig deploymentDistribution;
37 |
38 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#ERROR} */
39 | public RawRecordConfig error;
40 |
41 | /**
42 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#INCIDENT}
43 | */
44 | public RawRecordConfig incident;
45 |
46 | /**
47 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#JOB_BATCH}
48 | */
49 | public RawRecordConfig jobBatch;
50 |
51 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#JOB} */
52 | public RawRecordConfig job;
53 |
54 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#MESSAGE} */
55 | public RawRecordConfig message;
56 |
57 | /**
58 | * For records with a value of type {@link
59 | * io.camunda.zeebe.protocol.record.ValueType#MESSAGE_SUBSCRIPTION}
60 | */
61 | public RawRecordConfig messageSubscription;
62 |
63 | /**
64 | * For records with a value of type {@link
65 | * io.camunda.zeebe.protocol.record.ValueType#MESSAGE_START_EVENT_SUBSCRIPTION}
66 | */
67 | public RawRecordConfig messageStartEventSubscription;
68 |
69 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#PROCESS} */
70 | public RawRecordConfig process;
71 |
72 | /**
73 | * For records with a value of type {@link
74 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_EVENT}
75 | */
76 | public RawRecordConfig processEvent;
77 |
78 | /**
79 | * For records with a value of type {@link
80 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_INSTANCE}
81 | */
82 | public RawRecordConfig processInstance;
83 |
84 | /**
85 | * For records with a value of type {@link
86 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_INSTANCE_CREATION}
87 | */
88 | public RawRecordConfig processInstanceCreation;
89 |
90 | /**
91 | * For records with a value of type {@link
92 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_INSTANCE_RESULT}
93 | */
94 | public RawRecordConfig processInstanceResult;
95 |
96 | /**
97 | * For records with a value of type {@link
98 | * io.camunda.zeebe.protocol.record.ValueType#PROCESS_MESSAGE_SUBSCRIPTION}
99 | */
100 | public RawRecordConfig processMessageSubscription;
101 |
102 | /** For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#TIMER} */
103 | public RawRecordConfig timer;
104 |
105 | /**
106 | * For records with a value of type {@link io.camunda.zeebe.protocol.record.ValueType#VARIABLE}
107 | */
108 | public RawRecordConfig variable;
109 |
110 | /**
111 | * For records with a value of type {@link
112 | * io.camunda.zeebe.protocol.record.ValueType#VARIABLE_DOCUMENT}
113 | */
114 | public RawRecordConfig variableDocument;
115 | }
116 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/producer/BoundedTransactionalRecordBatch.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.config.ProducerConfig;
19 | import io.zeebe.exporters.kafka.record.FullRecordBatchException;
20 | import io.zeebe.exporters.kafka.serde.RecordId;
21 | import java.util.LinkedList;
22 | import java.util.Objects;
23 | import java.util.UUID;
24 | import java.util.function.LongConsumer;
25 | import org.apache.kafka.clients.producer.Producer;
26 | import org.apache.kafka.clients.producer.ProducerRecord;
27 | import org.apache.kafka.common.KafkaException;
28 | import org.apache.kafka.common.errors.InterruptException;
29 | import org.apache.kafka.common.errors.TimeoutException;
30 | import org.slf4j.Logger;
31 |
32 | /**
33 | * An implementation of {@link RecordBatch} which uses Kafka transactions to guarantee the atomicity
34 | * of the flush operation. When records are added, it will first add them to a linked list before
35 | * immediately forwarding them to the producer. If there was no transaction yet, it will be started
36 | * before. On flush, the transaction is committed.
37 | *
38 |  * <p>NOTE: while atomicity could still be guaranteed without transactions, they make the whole
39 | * error handling much simpler. I do realize that we cannot use the exactly-once semantics due to
40 | * Zeebe's own at-least-once semantics, but it still seems useful to simplify error handling.
41 | *
42 |  * <p>NOTE: whenever an error occurs, if it is recoverable, it will be logged and the batch remains
43 | * as is - the operation will be retried either by adding a new record or by attempting to flush the
44 | * batch externally. If it's unrecoverable, the current producer is closed, the state is reset
45 | * (minus the linked list which remains the same so we can retry the records), and on the next add
46 | * or flush operation, the whole batch is retried.
47 | *
48 |  * <p>NOTE: when adding a record to a full batch, it will attempt to flush the batch, blocking up to
49 | * {@link io.zeebe.exporters.kafka.config.raw.RawProducerConfig#maxBlockingTimeoutMs} milliseconds.
50 | * If it flushed successfully, then the record will be added and operations will resume as normal.
51 | * If it failed to flush, then the error will bubble up wrapped in a {@link
52 | * FullRecordBatchException}.
53 | *
54 |  * <p>NOTE: when using this type of batch, make sure your consumers use "read_committed" as
55 | * isolation level, otherwise they may see uncommitted records. This isn't too big of a deal as
56 | * these records are anyway committed on the Zeebe side, but they may show up as duplicates.
57 | */
58 | final class BoundedTransactionalRecordBatch implements RecordBatch {
59 |   private final LinkedList<ProducerRecord<RecordId, byte[]>> records = new LinkedList<>();
60 |
61 | private final KafkaProducerFactory producerFactory;
62 | private final ProducerConfig config;
63 | private final String producerId;
64 | private final int maxBatchSize;
65 | private final LongConsumer onFlushCallback;
66 | private final Logger logger;
67 |
68 |   private Producer<RecordId, byte[]> producer;
69 | private boolean producerInitialized = false;
70 | private boolean transactionBegan = false;
71 | private int nextSendIndex = 0;
72 |
73 | public BoundedTransactionalRecordBatch(
74 | final ProducerConfig config,
75 | final int maxBatchSize,
76 | final LongConsumer onFlushCallback,
77 | final Logger logger,
78 | final KafkaProducerFactory producerFactory) {
79 | this(
80 | config,
81 | maxBatchSize,
82 | onFlushCallback,
83 | logger,
84 | producerFactory,
85 | UUID.randomUUID().toString());
86 | }
87 |
88 | public BoundedTransactionalRecordBatch(
89 | final ProducerConfig config,
90 | final int maxBatchSize,
91 | final LongConsumer onFlushCallback,
92 | final Logger logger,
93 | final KafkaProducerFactory producerFactory,
94 | final String producerId) {
95 | this.config = Objects.requireNonNull(config);
96 | this.maxBatchSize = maxBatchSize;
97 | this.onFlushCallback = Objects.requireNonNull(onFlushCallback);
98 | this.logger = Objects.requireNonNull(logger);
99 | this.producerFactory = Objects.requireNonNull(producerFactory);
100 | this.producerId = Objects.requireNonNull(producerId);
101 | }
102 |
103 | @Override
104 |   public void add(final ProducerRecord<RecordId, byte[]> record) throws FullRecordBatchException {
105 | if (records.size() >= maxBatchSize) {
106 | try {
107 | flushBatch();
108 | } catch (final TimeoutException | InterruptException e) {
109 | throw new FullRecordBatchException(maxBatchSize, e);
110 | } catch (final Exception e) {
111 | close();
112 | throw new FullRecordBatchException(maxBatchSize, e);
113 | }
114 | }
115 |
116 | records.add(record);
117 |
118 | try {
119 | sendUnsentRecords();
120 | } catch (final TimeoutException | InterruptException e) {
121 | logger.debug(
122 | "Timed out or interrupted while sending unsent records, will be retried later", e);
123 | } catch (final Exception e) {
124 | logger.warn("Failed to send unsent record, will be retried later with a new producer", e);
125 | close();
126 | }
127 | }
128 |
129 | @Override
130 | public void flush() {
131 | if (records.isEmpty()) {
132 | logger.trace("Skipping batch commit as there are no records in the batch");
133 | return;
134 | }
135 |
136 | logger.trace(
137 | "Committing {} from the current batch, up to position {}",
138 | records.size(),
139 | records.getLast().key().getPosition());
140 |
141 | try {
142 | flushBatch();
143 | } catch (final TimeoutException | InterruptException e) {
144 | logger.debug("Timed out or interrupted while committing, will be retried later", e);
145 | } catch (final Exception e) {
146 | logger.warn("Non-recoverable error occurred while committing, retrying with new producer", e);
147 | close();
148 | }
149 | }
150 |
151 | @Override
152 | public void close() {
153 | if (producer == null) {
154 | return;
155 | }
156 |
157 | final var closeTimeout = config.getCloseTimeout();
158 | logger.debug("Closing producer with timeout {}", closeTimeout);
159 |
160 | try {
161 | producer.close(closeTimeout);
162 | } catch (final Exception e) {
163 | logger.warn(
164 | "Failed to gracefully close Kafka exporter; this is most likely fine, but may cause "
165 |               + "resource leaks. Investigate if it keeps repeating itself.",
166 | e);
167 | }
168 |
169 | producer = null;
170 | producerInitialized = false;
171 | transactionBegan = false;
172 | nextSendIndex = 0;
173 |     // the records list is deliberately not cleared, so that we can retry it later
174 | }
175 |
176 | private void flushBatch() throws KafkaException, IllegalStateException {
177 | sendUnsentRecords();
178 |
179 | final var commitPosition = records.getLast().key().getPosition();
180 | commitTransaction();
181 | onFlushCallback.accept(commitPosition);
182 | }
183 |
184 | private void commitTransaction() {
185 | if (!transactionBegan) {
186 | throw new IllegalStateException(
187 | "Expected to be in transaction, but no transaction is in flight");
188 | }
189 |
190 | producer.commitTransaction();
191 | transactionBegan = false;
192 | records.clear();
193 | nextSendIndex = 0;
194 | }
195 |
196 | private void sendUnsentRecords() {
197 | final var unsentRecords = Math.max(0, records.size() - nextSendIndex);
198 | logger.trace("Sending {} remaining unsent records from the current batch", unsentRecords);
199 |
200 | ensureWithinTransaction();
201 |
202 | while (nextSendIndex < records.size()) {
203 | final var record = records.get(nextSendIndex);
204 | producer.send(record);
205 | logger.trace("Sent record {}", record);
206 | nextSendIndex++;
207 | }
208 | }
209 |
210 | private void ensureProducer() {
211 | if (producer != null) {
212 | return;
213 | }
214 |
215 | producer = producerFactory.newProducer(config, producerId);
216 | logger.trace("Created new producer");
217 | }
218 |
219 | private void ensureProducerInitialized() {
220 | ensureProducer();
221 |
222 | if (!producerInitialized) {
223 | producer.initTransactions();
224 | producerInitialized = true;
225 | logger.trace("Initialized producer for transactions");
226 | }
227 | }
228 |
229 | private void ensureWithinTransaction() {
230 | ensureProducerInitialized();
231 |
232 | if (!transactionBegan) {
233 | producer.beginTransaction();
234 | transactionBegan = true;
235 | logger.trace("Began new producer transaction");
236 | }
237 | }
238 | }
239 |
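For orientation, here is a minimal usage sketch of this batch from the exporter's point of view. It is an illustration only, not repository code; `producerConfig` (a ProducerConfig), `producerRecord` (a ProducerRecord<RecordId, byte[]>), and `logger` are assumed placeholders.

    final RecordBatch batch =
        new BoundedTransactionalRecordBatch(
            producerConfig,
            100, // maxBatchSize
            position -> logger.info("committed up to position {}", position),
            logger,
            KafkaProducerFactory.defaultFactory());

    batch.add(producerRecord); // throws FullRecordBatchException if full and the flush attempt fails
    batch.flush();             // commits the Kafka transaction, then invokes the callback
    batch.close();             // releases the underlying producer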
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/producer/DefaultKafkaProducerFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.config.Config;
19 | import io.zeebe.exporters.kafka.serde.RecordId;
20 | import io.zeebe.exporters.kafka.serde.RecordIdSerializer;
21 | import java.util.HashMap;
22 | import org.apache.kafka.clients.producer.KafkaProducer;
23 | import org.apache.kafka.clients.producer.Producer;
24 | import org.apache.kafka.clients.producer.ProducerConfig;
25 | import org.apache.kafka.common.serialization.ByteArraySerializer;
26 |
27 | /**
28 | * {@link DefaultKafkaProducerFactory} is the default implementation of {@link KafkaProducerFactory}
29 | * used by {@link io.zeebe.exporters.kafka.KafkaExporter}. It creates a new {@link Producer} based
30 | * on the given {@link Config}, and adds a few default properties.
31 | *
32 |  * <p>It's tuned for small, fast batching, and low memory consumption. By default, it will wait up
33 | * to 10ms or until it has batched 4MB (the default maxMessageSize of Zeebe) in memory before
34 | * sending a request. This is to lessen the load on Kafka while remaining fairly responsive.
35 | *
36 |  * <p>The memory usage of the producer is soft capped at 40MB - if you produce much faster than it
37 | * can export, then you may run into exceptions. In this case, you can increase the memory to
38 | * something you feel more comfortable with via {@link
39 | * io.zeebe.exporters.kafka.config.raw.RawProducerConfig#config}.
40 | */
41 | final class DefaultKafkaProducerFactory implements KafkaProducerFactory {
42 | @Override
43 |   public Producer<RecordId, byte[]> newProducer(
44 | final io.zeebe.exporters.kafka.config.ProducerConfig config, final String producerId) {
45 |     final var options = new HashMap<String, Object>();
46 | final var clientId = String.format("%s-%s", config.getClientId(), producerId);
47 |
48 | options.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, producerId);
49 | options.put(ProducerConfig.CLIENT_ID_CONFIG, clientId);
50 | options.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
51 |
52 |     // limit to one in-flight request per connection to ensure ordering is preserved
53 | options.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 1);
54 | options.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, Integer.MAX_VALUE);
55 | options.put(
56 | ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, (int) config.getRequestTimeout().toMillis());
57 | options.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, config.getServers());
58 |
59 | // provides a soft memory bound - there's some memory overhead used by SSL, compression, etc.,
60 | // but this gives us a good idea of how much memory will be used by the exporter
61 | options.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 40 * 1024 * 1024L);
62 |
63 | // wait up to 10ms or until the batch is full before sending
64 | options.put(ProducerConfig.LINGER_MS_CONFIG, 10L);
65 | options.put(ProducerConfig.BATCH_SIZE_CONFIG, 4 * 1024 * 1024L);
66 | options.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, config.getMaxBlockingTimeout().toMillis());
67 |
68 |     // apply the user configuration near the end so it can override the producer defaults above
69 | options.putAll(config.getConfig());
70 |
71 | options.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, RecordIdSerializer.class);
72 | options.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
73 | options.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RecordIdPartitioner.class);
74 |
75 | return new KafkaProducer<>(options);
76 | }
77 | }
78 |
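As a side note on the override order above: `Map#putAll` simply replaces earlier entries, so anything set before the `putAll` call can be overridden by user configuration, while the serializers and partitioner set after it always win. A plain-HashMap sketch (not exporter code; assumes `java.util.HashMap` and `java.util.Map` are imported):

    final var options = new HashMap<String, Object>();
    options.put("linger.ms", 10L);            // tuned default, set first
    options.putAll(Map.of("linger.ms", "5")); // user config applied later overrides it
    options.put("key.serializer", "...");     // set after putAll, cannot be overridden
    // options.get("linger.ms") now returns "5"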
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/producer/KafkaProducerFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.config.ProducerConfig;
19 | import io.zeebe.exporters.kafka.serde.RecordId;
20 | import org.apache.kafka.clients.producer.Producer;
21 |
22 | /**
23 | * Implementations may or may not make use of the given configuration, but must always return a
24 | * valid producer.
25 | */
26 | @FunctionalInterface
27 | public interface KafkaProducerFactory {
28 |
29 |   Producer<RecordId, byte[]> newProducer(final ProducerConfig config, final String producerId);
30 |
31 | static KafkaProducerFactory defaultFactory() {
32 | return new DefaultKafkaProducerFactory();
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/producer/RecordBatch.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.record.FullRecordBatchException;
19 | import io.zeebe.exporters.kafka.serde.RecordId;
20 | import org.apache.kafka.clients.producer.ProducerRecord;
21 |
22 | /**
23 |  * Represents a batch of producer records which can be committed at will. Implementations can decide
24 |  * whether to bound the batch, and what its exact semantics are, as long as they respect this contract.
25 | *
26 |  * <p>NOTE: while it may seem like overhead to create this abstraction, it gives us the following:
27 |  *
28 |  * <ul>
29 |  *   <li>Separation of concerns allowing us to test the Kafka Producer specific code in a more
30 |  *       narrow setting, making it easier to unit test
31 |  *   <li>Easily swap out the default transactional behavior later on for a non-transactional one if
32 |  *       there are major downsides with transactions (as we still need to deal with at-least-once
33 |  *       delivery anyway due to Zeebe)
34 |  * </ul>
35 | */
36 | public interface RecordBatch extends AutoCloseable {
37 |
38 | /**
39 | * Adds the record to the batch. May throw {@link FullRecordBatchException} if the batch is
40 |  * bounded. Unbounded implementations are free to omit the throws clause from the signature.
41 | *
42 | * @param record the record to add
43 | * @throws FullRecordBatchException if the batch is full
44 | */
45 |   void add(final ProducerRecord<RecordId, byte[]> record) throws FullRecordBatchException;
46 |
47 | /**
48 |  * Commits the batch, reporting the highest guaranteed exported position via the flush callback.
49 |  * This is expected to be a blocking operation - once it returns, it should be guaranteed that ALL
50 |  * records up to that position have been committed. On success, the batch should be cleared and new
51 |  * records can be added to it.
52 | *
53 |  * <p>NOTE: This method should not throw any error, as it's not expected to be called from a path
54 | * where errors can be safely handled, i.e. in a scheduled task.
55 | *
56 |  * <p>NOTE: this is expected to be an atomic operation. Either ALL records were flushed, or none
57 | * of them were.
58 | */
59 | void flush();
60 |
61 | /**
62 | * Should release any resources belonging to the batch. It's not expected that other operations
63 | * are called after this.
64 | */
65 | void close();
66 | }
67 |
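To make the contract concrete, here is a deliberately naive sketch of an unbounded, in-memory implementation. It is an illustration only, not part of this repository; the production implementation is BoundedTransactionalRecordBatch, and the test stub RecordBatchStub later in this dump has the same shape.

    package io.zeebe.exporters.kafka.producer;

    import io.zeebe.exporters.kafka.serde.RecordId;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.function.LongConsumer;
    import org.apache.kafka.clients.producer.ProducerRecord;

    final class InMemoryRecordBatch implements RecordBatch {
      private final List<ProducerRecord<RecordId, byte[]>> records = new ArrayList<>();
      private final LongConsumer onFlushCallback;

      InMemoryRecordBatch(final LongConsumer onFlushCallback) {
        this.onFlushCallback = onFlushCallback;
      }

      @Override
      public void add(final ProducerRecord<RecordId, byte[]> record) {
        records.add(record); // unbounded, so FullRecordBatchException is never thrown
      }

      @Override
      public void flush() {
        if (!records.isEmpty()) {
          // report the highest exported position, then clear the batch
          onFlushCallback.accept(records.get(records.size() - 1).key().getPosition());
          records.clear();
        }
      }

      @Override
      public void close() {
        // nothing to release
      }
    }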
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/producer/RecordBatchFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.config.ProducerConfig;
19 | import java.util.function.LongConsumer;
20 | import org.slf4j.Logger;
21 |
22 | /**
23 | * While this seems like overhead, it's the only way to inject the record batch type into the
24 | * exporter instance, as the exporter instance is created by the Zeebe broker using the
25 | * argument-less constructor. The other option would be via configuration, which would be more
26 | * overhead, but the right approach in the future if multiple types are available.
27 | *
28 |  * <p>The primary goal of this and the {@link RecordBatch} interface is to ease unit testing.
29 | */
30 | @FunctionalInterface
31 | public interface RecordBatchFactory {
32 |
33 | RecordBatch newRecordBatch(
34 | final ProducerConfig config,
35 | final int maxBatchSize,
36 | final LongConsumer onFlushCallback,
37 | final Logger logger);
38 |
39 | static RecordBatchFactory defaultFactory() {
40 | return (config, maxBatchSize, onFlushCallback, logger) ->
41 | new BoundedTransactionalRecordBatch(
42 | config, maxBatchSize, onFlushCallback, logger, KafkaProducerFactory.defaultFactory());
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/producer/RecordIdPartitioner.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.serde.RecordId;
19 | import java.util.List;
20 | import java.util.Map;
21 | import org.apache.kafka.clients.producer.Partitioner;
22 | import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
23 | import org.apache.kafka.common.Cluster;
24 | import org.apache.kafka.common.PartitionInfo;
25 | import org.slf4j.Logger;
26 | import org.slf4j.LoggerFactory;
27 |
28 | /**
29 | * A {@link Partitioner} implementation which expects only {@link RecordId} objects as keys.
30 | *
31 |  * <p>It will partition the records using {@link RecordId#getPartitionId()}, ensuring that all Zeebe
32 | * records on the same Zeebe partition will also be on the same Kafka partition, preserving the
33 | * ordering. It does so by taking the Zeebe partition ID (which starts at 1), and applying a modulo
34 | * against the number of Kafka partitions for the given topic, e.g. {@code zeebePartitionId %
35 | * kafkaPartitionsCount}.
36 | *
37 |  * <p>One downside is that if you have more Kafka partitions than Zeebe partitions, some of your
38 | * partitions will be unused: partition 0, and any partition whose number is greater than the count
39 | * of Zeebe partitions.
40 | *
41 |  * <p>For example, if you have 3 Zeebe partitions, and 2 Kafka partitions:
42 | *
43 |  * <ul>
44 |  *   <li>RecordId{partitionId=1, position=1} to Kafka partition 1
45 |  *   <li>RecordId{partitionId=2, position=1} to Kafka partition 0
46 |  *   <li>RecordId{partitionId=3, position=1} to Kafka partition 1
47 |  *   <li>RecordId{partitionId=3, position=2} to Kafka partition 1
48 |  *   <li>RecordId{partitionId=2, position=2} to Kafka partition 0
49 |  * </ul>
50 | *
51 |  * <p>With more Kafka partitions, for example, 4 Kafka partitions, and 3 Zeebe partitions:
52 | *
53 |  * <ul>
54 |  *   <li>RecordId{partitionId=1, position=1} to Kafka partition 1
55 |  *   <li>RecordId{partitionId=2, position=1} to Kafka partition 2
56 |  *   <li>RecordId{partitionId=3, position=1} to Kafka partition 3
57 |  *   <li>RecordId{partitionId=3, position=2} to Kafka partition 3
58 |  *   <li>RecordId{partitionId=2, position=2} to Kafka partition 2
59 |  * </ul>
60 | */
61 | public final class RecordIdPartitioner implements Partitioner {
62 | private static final Logger LOGGER = LoggerFactory.getLogger(RecordIdPartitioner.class);
63 |
64 | private final DefaultPartitioner defaultPartitioner = new DefaultPartitioner();
65 |
66 | @Override
67 | public int partition(
68 | final String topic,
69 | final Object key,
70 | final byte[] keyBytes,
71 | final Object value,
72 | final byte[] valueBytes,
73 | final Cluster cluster) {
74 | if (!(key instanceof RecordId)) {
75 | LOGGER.warn(
76 | "Expected to partition a RecordId object, but got {}; falling back to default partitioner",
77 | key.getClass());
78 | return defaultPartitioner.partition(topic, key, keyBytes, value, valueBytes, cluster);
79 | }
80 |
81 |     final List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
82 | final int numPartitions = partitions.size();
83 | final RecordId recordId = (RecordId) key;
84 | final int partitionId = recordId.getPartitionId() % numPartitions;
85 |
86 | LOGGER.trace("Assigning partition {} to record ID {}", partitionId, recordId);
87 |
88 | return partitionId;
89 | }
90 |
91 | @Override
92 | public void close() {
93 | // do nothing
94 | }
95 |
96 | @Override
97 |   public void configure(final Map<String, ?> configs) {
98 | // not configurable yet
99 | }
100 | }
101 |
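A quick way to sanity-check the mapping described in the Javadoc above; this is plain arithmetic, nothing exporter-specific:

    final int kafkaPartitions = 2;
    for (int zeebePartitionId = 1; zeebePartitionId <= 3; zeebePartitionId++) {
      System.out.printf(
          "Zeebe partition %d -> Kafka partition %d%n",
          zeebePartitionId, zeebePartitionId % kafkaPartitions);
    }
    // prints 1 -> 1, 2 -> 0, 3 -> 1, matching the first example in the Javadoc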
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/record/FullRecordBatchException.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.record;
17 |
18 | @SuppressWarnings("unused")
19 | public final class FullRecordBatchException extends RuntimeException {
20 | private static final String MESSAGE_FORMAT =
21 | "No new records can be added to the record batch with a maximum size of %d";
22 |
23 | private final int maxBatchSize;
24 |
25 | public FullRecordBatchException(final int maxBatchSize, final Throwable cause) {
26 | super(String.format(MESSAGE_FORMAT, maxBatchSize), cause);
27 | this.maxBatchSize = maxBatchSize;
28 | }
29 |
30 | public int getMaxBatchSize() {
31 | return maxBatchSize;
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/record/KafkaRecordFilter.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.record;
17 |
18 | import io.camunda.zeebe.exporter.api.context.Context.RecordFilter;
19 | import io.camunda.zeebe.protocol.record.RecordType;
20 | import io.camunda.zeebe.protocol.record.ValueType;
21 | import io.zeebe.exporters.kafka.config.RecordsConfig;
22 | import java.util.Objects;
23 | import java.util.Optional;
24 |
25 | /**
26 | * {@link KafkaRecordFilter} is an implementation of {@link RecordFilter} which uses the {@link
27 | * RecordsConfig} to build the filter.
28 | */
29 | public final class KafkaRecordFilter implements RecordFilter {
30 | private final RecordsConfig config;
31 |
32 | public KafkaRecordFilter(final RecordsConfig config) {
33 | this.config = Objects.requireNonNull(config);
34 | }
35 |
36 | /**
37 |  * If the defaults or any entry in {@link RecordsConfig#getTypeMap()} accepts the given record
38 |  * type, the {@code recordType} is accepted.
39 | *
40 | * @param recordType {@inheritDoc}
41 | * @return {@inheritDoc}
42 | */
43 | @Override
44 | public boolean acceptType(final RecordType recordType) {
45 | return config.getDefaults().getAllowedTypes().contains(recordType)
46 | || config.getTypeMap().values().stream()
47 | .anyMatch(c -> c.getAllowedTypes().contains(recordType));
48 | }
49 |
50 | /**
51 | * If the {@link io.zeebe.exporters.kafka.config.RecordConfig} instance stored in {@link
52 | * RecordsConfig#getTypeMap()} for {@code valueType} has any allowed type at all, the {@code
53 | * valueType} is accepted.
54 | *
55 | * @param valueType {@inheritDoc}
56 | * @return {@inheritDoc}
57 | */
58 | @Override
59 | public boolean acceptValue(final ValueType valueType) {
60 | return !Optional.ofNullable(config.getTypeMap().get(valueType))
61 | .orElse(config.getDefaults())
62 | .getAllowedTypes()
63 | .isEmpty();
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/record/RecordHandler.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.record;
17 |
18 | import io.camunda.zeebe.protocol.record.Record;
19 | import io.zeebe.exporters.kafka.config.RecordConfig;
20 | import io.zeebe.exporters.kafka.config.RecordsConfig;
21 | import io.zeebe.exporters.kafka.serde.RecordId;
22 | import java.util.Objects;
23 | import org.apache.kafka.clients.producer.ProducerRecord;
24 | import org.apache.kafka.common.serialization.Serializer;
25 |
26 | /**
27 | * {@link RecordHandler} is responsible for testing if certain records are allowed, and if so,
28 | * transforming them.
29 | *
30 |  * <p>Should be refactored into two classes to respect the single-responsibility principle.
31 | */
32 | public final class RecordHandler {
33 | private final RecordsConfig configuration;
34 |   private final Serializer<Record> serializer;
35 |
36 | public RecordHandler(final RecordsConfig configuration) {
37 | this(configuration, new RecordSerializer());
38 | }
39 |
40 |   public RecordHandler(final RecordsConfig configuration, final Serializer<Record> serializer) {
41 | this.configuration = Objects.requireNonNull(configuration);
42 | this.serializer = Objects.requireNonNull(serializer);
43 | }
44 |
45 | /**
46 | * Transforms the given {@link Record} into a Kafka {@link ProducerRecord}.
47 | *
48 | * @param record the record to transform
49 | * @return the transformed record
50 | */
51 |   public ProducerRecord<RecordId, byte[]> transform(final Record record) {
52 | final RecordConfig config = getRecordConfig(record);
53 | final byte[] serializedRecord = serializer.serialize(config.getTopic(), record);
54 | return new ProducerRecord<>(
55 | config.getTopic(),
56 | new RecordId(record.getPartitionId(), record.getPosition()),
57 | serializedRecord);
58 | }
59 |
60 | /**
61 | * Tests whether or not the given record is allowed, as specified by the configuration.
62 | *
63 | * @param record the record to test
64 | * @return true if allowed, false otherwise
65 | */
66 |   public boolean isAllowed(final Record<?> record) {
67 | final RecordConfig config = getRecordConfig(record);
68 | return config.getAllowedTypes().contains(record.getRecordType());
69 | }
70 |
71 |   private RecordConfig getRecordConfig(final Record<?> record) {
72 | return configuration.forType(Objects.requireNonNull(record).getValueType());
73 | }
74 | }
75 |
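A short usage sketch of the two responsibilities named above; illustration only, where `recordsConfig` (a RecordsConfig) and `record` (a Zeebe Record) are assumed placeholders:

    final RecordHandler handler = new RecordHandler(recordsConfig);
    if (handler.isAllowed(record)) {
      final ProducerRecord<RecordId, byte[]> producerRecord = handler.transform(record);
      // producerRecord.key() carries (partitionId, position); value() is the JSON-serialized record
    }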
--------------------------------------------------------------------------------
/exporter/src/main/java/io/zeebe/exporters/kafka/record/RecordSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.record;
17 |
18 | import io.camunda.zeebe.protocol.record.Record;
19 | import java.util.Map;
20 | import org.apache.kafka.common.serialization.Serializer;
21 | import org.apache.kafka.common.serialization.StringSerializer;
22 |
23 | /**
24 |  * A {@link Serializer} implementation for {@link Record} objects, which uses a wrapped
25 |  * {@link StringSerializer} to serialize the {@link Record} as JSON. You can specify your
26 |  * preferred encoding via the {@link StringSerializer} configuration. Any configuration given
27 |  * to this serializer is also passed to the wrapped {@link StringSerializer}.
28 | */
29 | public final class RecordSerializer implements Serializer<Record> {
30 | private final StringSerializer delegate;
31 |
32 | public RecordSerializer() {
33 | this(new StringSerializer());
34 | }
35 |
36 | public RecordSerializer(final StringSerializer delegate) {
37 | this.delegate = delegate;
38 | }
39 |
40 | @Override
41 |   public void configure(final Map<String, ?> configs, final boolean isKey) {
42 | delegate.configure(configs, isKey);
43 | }
44 |
45 | @Override
46 | public byte[] serialize(final String topic, final Record data) {
47 | return delegate.serialize(topic, data.toJson());
48 | }
49 |
50 | @Override
51 | public void close() {
52 | delegate.close();
53 | }
54 | }
55 |
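A short configuration sketch; illustration only, where `record` is an assumed Record instance. "serializer.encoding" is the standard StringSerializer property for choosing the charset:

    final RecordSerializer serializer = new RecordSerializer();
    serializer.configure(Map.of("serializer.encoding", "UTF-8"), /* isKey */ false);
    final byte[] bytes = serializer.serialize("zeebe", record); // record.toJson(), UTF-8 encoded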
--------------------------------------------------------------------------------
/exporter/src/main/resources/META-INF/services/io.camunda.zeebe.exporter.api.Exporter:
--------------------------------------------------------------------------------
1 | io.zeebe.exporters.kafka.KafkaExporter
2 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/KafkaExporterTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 | import static org.assertj.core.api.Assertions.assertThatThrownBy;
20 | import static org.assertj.core.api.Assertions.tuple;
21 |
22 | import io.camunda.zeebe.protocol.record.ValueType;
23 | import io.camunda.zeebe.test.exporter.ExporterTestHarness;
24 | import io.camunda.zeebe.test.exporter.record.MockRecordMetadata;
25 | import io.zeebe.exporters.kafka.config.Config;
26 | import io.zeebe.exporters.kafka.config.parser.MockConfigParser;
27 | import io.zeebe.exporters.kafka.config.parser.RawConfigParser;
28 | import io.zeebe.exporters.kafka.config.raw.RawConfig;
29 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig;
30 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig;
31 | import io.zeebe.exporters.kafka.producer.RecordBatchStub;
32 | import io.zeebe.exporters.kafka.record.RecordHandler;
33 | import io.zeebe.exporters.kafka.serde.RecordId;
34 | import java.util.stream.Collectors;
35 | import org.apache.kafka.clients.producer.ProducerRecord;
36 | import org.junit.jupiter.api.Test;
37 | import org.junit.jupiter.api.parallel.Execution;
38 | import org.junit.jupiter.api.parallel.ExecutionMode;
39 |
40 | @SuppressWarnings("rawtypes")
41 | @Execution(ExecutionMode.CONCURRENT)
42 | final class KafkaExporterTest {
43 | private static final String EXPORTER_ID = "kafka";
44 |
45 | private final RawConfig rawConfig = new RawConfig();
46 |   private final MockConfigParser<RawConfig, Config> mockConfigParser =
47 | new MockConfigParser<>(new RawConfigParser());
48 | private final RecordBatchStub.Factory batchStubFactory = new RecordBatchStub.Factory();
49 | private final KafkaExporter exporter = new KafkaExporter(batchStubFactory, mockConfigParser);
50 | private final ExporterTestHarness testHarness = new ExporterTestHarness(exporter);
51 |
52 | @Test
53 | void shouldAddRecordToBatchOnExport() throws Exception {
54 | // given
55 | rawConfig.maxBatchSize = 5;
56 | testHarness.configure(EXPORTER_ID, rawConfig);
57 | testHarness.open();
58 |
59 | // when
60 | final var records = testHarness.stream().export(5);
61 |
62 | // then
63 | final var expectedIds =
64 | records.stream()
65 | .map(r -> new RecordId(r.getPartitionId(), r.getPosition()))
66 | .collect(Collectors.toList());
67 | assertThat(batchStubFactory.stub.getPendingRecords())
68 | .as("the records were added to the batch in order")
69 | .extracting(ProducerRecord::key)
70 | .containsExactlyElementsOf(expectedIds);
71 | assertThat(batchStubFactory.stub.getFlushedRecords())
72 | .as("no records were flushed yet")
73 | .isEmpty();
74 | }
75 |
76 | @Test
77 | void shouldUseCorrectSerializer() throws Exception {
78 | // given
79 | testHarness.configure(EXPORTER_ID, rawConfig);
80 | testHarness.open();
81 | final var recordHandler = new RecordHandler(mockConfigParser.config.getRecords());
82 |
83 | // when
84 | final var json = "{\"a\": 1}";
85 | final var record = testHarness.export(r -> r.setJson(json));
86 |
87 | // then
88 | final var expectedRecord = recordHandler.transform(record);
89 | assertThat(batchStubFactory.stub.getPendingRecords())
90 | .as("the serialized record was added to the batch")
91 | .extracting("topic", "key", "value")
92 | .containsExactly(
93 | tuple(expectedRecord.topic(), expectedRecord.key(), expectedRecord.value()));
94 | }
95 |
96 | @Test
97 | void shouldSkipDisallowedRecords() throws Exception {
98 | // given
99 | rawConfig.records = new RawRecordsConfig();
100 | rawConfig.records.deployment = new RawRecordConfig();
101 | rawConfig.records.deployment.type = "";
102 | testHarness.configure(EXPORTER_ID, rawConfig);
103 | testHarness.open();
104 |
105 | // when
106 | testHarness.export(
107 | r -> r.setMetadata(new MockRecordMetadata().setValueType(ValueType.DEPLOYMENT)));
108 |
109 | // then
110 | assertThat(batchStubFactory.stub.getPendingRecords())
111 | .as("disallowed record should not be added to the batch")
112 | .isEmpty();
113 | }
114 |
115 | @Test
116 | void shouldFlushOnScheduledTask() throws Exception {
117 | // given
118 | rawConfig.maxBatchSize = 5;
119 | testHarness.configure(EXPORTER_ID, rawConfig);
120 | testHarness.open();
121 |
122 | // when
123 | final var records = testHarness.stream().export(5);
124 | triggerFlushTask();
125 |
126 | // then
127 | final var expectedIds =
128 | records.stream()
129 | .map(r -> new RecordId(r.getPartitionId(), r.getPosition()))
130 | .collect(Collectors.toList());
131 | assertThat(batchStubFactory.stub.getFlushedRecords())
132 | .as("the records were added to the batch in order")
133 | .extracting(ProducerRecord::key)
134 | .containsExactlyElementsOf(expectedIds);
135 | assertThat(batchStubFactory.stub.getPendingRecords())
136 | .as("no pending records after flush")
137 | .isEmpty();
138 | }
139 |
140 | @Test
141 | void shouldUpdatePositionOnFlush() throws Exception {
142 | // given
143 | testHarness.configure(EXPORTER_ID, rawConfig);
144 | testHarness.open();
145 |
146 | // when
147 | final var records = testHarness.stream().export(5);
148 | triggerFlushTask();
149 |
150 | // then
151 | assertThat(testHarness.getLastUpdatedPosition())
152 |         .as("position should be updated after flush")
153 | .isEqualTo(records.get(4).getPosition());
154 | }
155 |
156 | @Test
157 | void shouldRescheduleFlushTaskEvenOnException() throws Exception {
158 | // given
159 | testHarness.configure(EXPORTER_ID, rawConfig);
160 | testHarness.open();
161 |
162 | // when
163 | final var records = testHarness.stream().export(2);
164 | batchStubFactory.stub.flushException = new RuntimeException("failed to flush");
165 | assertThatThrownBy(this::triggerFlushTask).isEqualTo(batchStubFactory.stub.flushException);
166 | batchStubFactory.stub.flushException = null;
167 | triggerFlushTask();
168 |
169 | // then
170 | assertThat(testHarness.getLastUpdatedPosition())
171 | .as("position should be updated since we managed to flush after the second try")
172 | .isEqualTo(records.get(1).getPosition());
173 | }
174 |
175 | @Test
176 | void shouldFlushBatchOnClose() throws Exception {
177 | // given
178 | testHarness.configure(EXPORTER_ID, rawConfig);
179 | testHarness.open();
180 |
181 | // when
182 | final var records = testHarness.stream().export(2);
183 | testHarness.close();
184 |
185 | // then
186 | assertThat(testHarness.getLastUpdatedPosition())
187 |         .as("position should be updated since the batch was flushed on close")
188 | .isEqualTo(records.get(1).getPosition());
189 | assertThat(batchStubFactory.stub.isClosed())
190 | .as("batch should be closed on exporter close")
191 | .isTrue();
192 | }
193 |
194 | @Test
195 | void shouldRescheduleFlush() throws Exception {
196 | // given
197 | testHarness.configure(EXPORTER_ID, rawConfig);
198 | testHarness.open();
199 |
200 | // when
201 | triggerFlushTask();
202 | final var records = testHarness.stream().export(2);
203 | triggerFlushTask();
204 |
205 | // then
206 | assertThat(testHarness.getLastUpdatedPosition())
207 | .as("position should be updated after triggering the second flush task")
208 | .isEqualTo(records.get(1).getPosition());
209 | }
210 |
211 | private void triggerFlushTask() {
212 | mockConfigParser.parse(rawConfig);
213 | testHarness.runScheduledTasks(mockConfigParser.config.getFlushInterval());
214 | }
215 | }
216 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/MockConfigParser.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import java.util.Objects;
19 |
20 | /**
21 | * {@link MockConfigParser} allows setting a predefined parsed value for any given value. If not
22 | * set, it will delegate to an underlying parser of the same types, and memoize the value, such that
23 | * every subsequent {@link #parse(Object)} call will return the same object.
24 | *
25 |  * <p>You can override this by calling {@link #forceParse(Object)} if needed.
26 |  *
27 |  * @param <T> {@inheritDoc}
28 |  * @param <R> {@inheritDoc}
29 | */
30 | public final class MockConfigParser<T, R> implements ConfigParser<T, R> {
31 | public R config;
32 |
33 |   private final ConfigParser<T, R> delegate;
34 |
35 |   public MockConfigParser(final ConfigParser<T, R> delegate) {
36 | this.delegate = Objects.requireNonNull(delegate);
37 | }
38 |
39 | @Override
40 | public R parse(final T config) {
41 | if (this.config == null) {
42 | this.config = delegate.parse(config);
43 | }
44 |
45 | return this.config;
46 | }
47 |
48 | /** A helper method in tests to force re-parsing an updated configuration. */
49 | public void forceParse(final T config) {
50 | this.config = null;
51 | parse(config);
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawConfigParserTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import io.zeebe.exporters.kafka.config.Config;
21 | import io.zeebe.exporters.kafka.config.ProducerConfig;
22 | import io.zeebe.exporters.kafka.config.RecordsConfig;
23 | import io.zeebe.exporters.kafka.config.raw.RawConfig;
24 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig;
25 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig;
26 | import java.time.Duration;
27 | import org.junit.jupiter.api.Test;
28 | import org.junit.jupiter.api.parallel.Execution;
29 | import org.junit.jupiter.api.parallel.ExecutionMode;
30 |
31 | @Execution(ExecutionMode.CONCURRENT)
32 | final class RawConfigParserTest {
33 |   private final MockConfigParser<RawRecordsConfig, RecordsConfig> recordsConfigParser =
34 | new MockConfigParser<>(new RawRecordsConfigParser());
35 |   private final MockConfigParser<RawProducerConfig, ProducerConfig> producerConfigParser =
36 | new MockConfigParser<>(new RawProducerConfigParser());
37 | private final RawConfigParser parser =
38 | new RawConfigParser(recordsConfigParser, producerConfigParser);
39 |
40 | @Test
41 | void shouldUseDefaultValues() {
42 | // given
43 | final RawConfig config = new RawConfig();
44 |
45 | // when
46 | final Config parsed = parser.parse(config);
47 |
48 | // then
49 | assertThat(parsed.getRecords()).isEqualTo(recordsConfigParser.parse(new RawRecordsConfig()));
50 | assertThat(parsed.getProducer()).isEqualTo(producerConfigParser.parse(new RawProducerConfig()));
51 | assertThat(parsed.getMaxBatchSize()).isEqualTo(RawConfigParser.DEFAULT_MAX_BATCH_SIZE);
52 | assertThat(parsed.getFlushInterval()).isEqualTo(RawConfigParser.DEFAULT_FLUSH_INTERVAL_MS);
53 | }
54 |
55 | @Test
56 | void shouldParse() {
57 | // given
58 | final RawConfig config = new RawConfig();
59 | final ProducerConfig producerConfig = producerConfigParser.parse(new RawProducerConfig());
60 | final RecordsConfig recordsConfig = recordsConfigParser.parse(new RawRecordsConfig());
61 | config.maxBatchSize = 2;
62 | config.flushIntervalMs = 500L;
63 |
64 | // when
65 | final Config parsed = parser.parse(config);
66 |
67 | // then
68 | assertThat(parsed.getProducer()).isEqualTo(producerConfig);
69 | assertThat(parsed.getRecords()).isEqualTo(recordsConfig);
70 | assertThat(parsed.getMaxBatchSize()).isEqualTo(2);
71 | assertThat(parsed.getFlushInterval()).isEqualTo(Duration.ofMillis(500));
72 | }
73 | }
74 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawProducerConfigParserTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import io.zeebe.exporters.kafka.config.ProducerConfig;
21 | import io.zeebe.exporters.kafka.config.raw.RawProducerConfig;
22 | import java.time.Duration;
23 | import java.util.Collections;
24 | import java.util.HashMap;
25 | import java.util.Map;
26 | import org.junit.jupiter.api.Test;
27 | import org.junit.jupiter.api.parallel.Execution;
28 | import org.junit.jupiter.api.parallel.ExecutionMode;
29 |
30 | @Execution(ExecutionMode.CONCURRENT)
31 | final class RawProducerConfigParserTest {
32 | private final RawProducerConfigParser parser = new RawProducerConfigParser();
33 |
34 | @Test
35 | void shouldUseDefaultValuesForMissingProperties() {
36 | // given
37 | final RawProducerConfig config = new RawProducerConfig();
38 |
39 | // when
40 | final ProducerConfig parsed = parser.parse(config);
41 |
42 | // then
43 | assertThat(parsed)
44 | .extracting(
45 | "servers", "clientId", "closeTimeout", "requestTimeout", "maxBlockingTimeout", "config")
46 | .containsExactly(
47 | RawProducerConfigParser.DEFAULT_SERVERS,
48 | RawProducerConfigParser.DEFAULT_CLIENT_ID,
49 | RawProducerConfigParser.DEFAULT_CLOSE_TIMEOUT,
50 | RawProducerConfigParser.DEFAULT_REQUEST_TIMEOUT,
51 | RawProducerConfigParser.DEFAULT_MAX_BLOCKING_TIMEOUT,
52 | new HashMap<>());
53 | }
54 |
55 | @Test
56 | void shouldParse() {
57 | // given
58 | final RawProducerConfig config = new RawProducerConfig();
59 | config.servers = "localhost:3000";
60 | config.clientId = "client";
61 | config.closeTimeoutMs = 3000L;
62 | config.requestTimeoutMs = 3000L;
63 | config.maxBlockingTimeoutMs = 5000L;
64 | config.config = "linger.ms=5\nmax.buffer.count=2";
65 |
66 | // when
67 | final ProducerConfig parsed = parser.parse(config);
68 |
69 | // then
70 | assertThat(parsed)
71 | .extracting(
72 | "servers", "clientId", "closeTimeout", "requestTimeout", "maxBlockingTimeout", "config")
73 | .containsExactly(
74 | Collections.singletonList("localhost:3000"),
75 | "client",
76 | Duration.ofSeconds(3),
77 | Duration.ofSeconds(3),
78 | Duration.ofSeconds(5),
79 | Map.of("linger.ms", "5", "max.buffer.count", "2"));
80 | }
81 | }
82 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawRecordConfigParserTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 | import static org.assertj.core.api.Assertions.assertThatThrownBy;
20 |
21 | import io.camunda.zeebe.protocol.record.RecordType;
22 | import io.zeebe.exporters.kafka.config.RecordConfig;
23 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig;
24 | import java.util.EnumSet;
25 | import org.junit.jupiter.api.Test;
26 | import org.junit.jupiter.api.parallel.Execution;
27 | import org.junit.jupiter.api.parallel.ExecutionMode;
28 |
29 | @Execution(ExecutionMode.CONCURRENT)
30 | final class RawRecordConfigParserTest {
31 | private final RawRecordConfigParser parser = new RawRecordConfigParser();
32 |
33 | @Test
34 | void shouldParseAllowedTypes() {
35 | // given
36 | final RawRecordConfig config = new RawRecordConfig();
37 | config.type =
38 | String.format("%s,%s", AllowedType.COMMAND.getTypeName(), AllowedType.EVENT.getTypeName());
39 |
40 | // when
41 | final RecordConfig parsed = parser.parse(config);
42 |
43 | // then
44 | assertThat(parsed.getAllowedTypes())
45 | .containsExactlyInAnyOrder(RecordType.COMMAND, RecordType.EVENT);
46 | }
47 |
48 | @Test
49 | void shouldParseTopic() {
50 | // given
51 | final RawRecordConfig config = new RawRecordConfig();
52 | config.topic = "something";
53 |
54 | // when
55 | final RecordConfig parsed = parser.parse(config);
56 |
57 | // then
58 | assertThat(parsed.getTopic()).isEqualTo("something");
59 | }
60 |
61 | @Test
62 | void shouldSetDefaultsIfNull() {
63 | // given
64 | final RawRecordConfig config = new RawRecordConfig();
65 |
66 | // when
67 | final RecordConfig parsed = parser.parse(config);
68 |
69 | // then
70 | assertThat(parsed.getTopic()).isEqualTo(RawRecordConfigParser.DEFAULT_TOPIC_NAME);
71 | assertThat(parsed.getAllowedTypes()).isEqualTo(RawRecordConfigParser.DEFAULT_ALLOWED_TYPES);
72 | }
73 |
74 | @Test
75 | void shouldSetExplicitDefaultsIfNull() {
76 | // given
77 | final RecordConfig defaults = new RecordConfig(EnumSet.allOf(RecordType.class), "topic");
78 | final RawRecordConfigParser explicitParser = new RawRecordConfigParser(defaults);
79 | final RawRecordConfig config = new RawRecordConfig();
80 |
81 | // when
82 | final RecordConfig parsed = explicitParser.parse(config);
83 |
84 | // then
85 | assertThat(parsed.getTopic()).isEqualTo(defaults.getTopic());
86 | assertThat(parsed.getAllowedTypes()).isEqualTo(defaults.getAllowedTypes());
87 | }
88 |
89 | @Test
90 | void shouldThrowExceptionIfAllowedTypeIsUnknown() {
91 | // given
92 | final RawRecordConfig config = new RawRecordConfig();
93 | config.type = "something unlikely";
94 |
95 | // when - then
96 | assertThatThrownBy(() -> parser.parse(config)).isInstanceOf(IllegalArgumentException.class);
97 | }
98 |
99 | @Test
100 | void shouldDisallowOnEmptyString() {
101 | // given
102 | final RawRecordConfigParser explicitParser = new RawRecordConfigParser();
103 | final RawRecordConfig config = new RawRecordConfig();
104 | config.type = "";
105 |
106 | // when
107 | final RecordConfig parsed = explicitParser.parse(config);
108 |
109 | // then
110 | assertThat(parsed.getAllowedTypes()).isEmpty();
111 | }
112 | }
113 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/config/parser/RawRecordsConfigParserTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.config.parser;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import io.camunda.zeebe.protocol.record.RecordType;
21 | import io.camunda.zeebe.protocol.record.ValueType;
22 | import io.zeebe.exporters.kafka.config.RecordsConfig;
23 | import io.zeebe.exporters.kafka.config.raw.RawRecordConfig;
24 | import io.zeebe.exporters.kafka.config.raw.RawRecordsConfig;
25 | import java.util.EnumSet;
26 | import java.util.Set;
27 | import org.junit.jupiter.api.Test;
28 | import org.junit.jupiter.api.parallel.Execution;
29 | import org.junit.jupiter.api.parallel.ExecutionMode;
30 |
31 | @Execution(ExecutionMode.CONCURRENT)
32 | final class RawRecordsConfigParserTest {
33 |   private static final Set<ValueType> EXPECTED_VALUE_TYPES =
34 | EnumSet.complementOf(EnumSet.of(ValueType.NULL_VAL, ValueType.SBE_UNKNOWN));
35 |
36 | private final RawRecordsConfigParser parser = new RawRecordsConfigParser();
37 |
38 | @Test
39 | void shouldParseDefaultsWithDefaultValue() {
40 | // given
41 | final RawRecordsConfig config = new RawRecordsConfig();
42 |
43 | // when
44 | final RecordsConfig parsed = parser.parse(config);
45 |
46 | // then
47 | assertThat(parsed.getDefaults().getAllowedTypes())
48 | .isEqualTo(RawRecordConfigParser.DEFAULT_ALLOWED_TYPES);
49 | assertThat(parsed.getDefaults().getTopic()).isEqualTo(RawRecordConfigParser.DEFAULT_TOPIC_NAME);
50 | }
51 |
52 | @Test
53 | void shouldParseRecordConfigUnderCorrectValueType() {
54 | // given
55 | final RawRecordsConfig config = new RawRecordsConfig();
56 | config.deployment = newConfigFromType(ValueType.DEPLOYMENT);
57 | config.deploymentDistribution = newConfigFromType(ValueType.DEPLOYMENT_DISTRIBUTION);
58 | config.error = newConfigFromType(ValueType.ERROR);
59 | config.incident = newConfigFromType(ValueType.INCIDENT);
60 | config.job = newConfigFromType(ValueType.JOB);
61 | config.jobBatch = newConfigFromType(ValueType.JOB_BATCH);
62 | config.message = newConfigFromType(ValueType.MESSAGE);
63 | config.messageSubscription = newConfigFromType(ValueType.MESSAGE_SUBSCRIPTION);
64 | config.messageStartEventSubscription =
65 | newConfigFromType(ValueType.MESSAGE_START_EVENT_SUBSCRIPTION);
66 | config.process = newConfigFromType(ValueType.PROCESS);
67 | config.processEvent = newConfigFromType(ValueType.PROCESS_EVENT);
68 | config.processInstance = newConfigFromType(ValueType.PROCESS_INSTANCE);
69 | config.processInstanceCreation = newConfigFromType(ValueType.PROCESS_INSTANCE_CREATION);
70 | config.processInstanceResult = newConfigFromType(ValueType.PROCESS_INSTANCE_RESULT);
71 | config.processMessageSubscription = newConfigFromType(ValueType.PROCESS_MESSAGE_SUBSCRIPTION);
72 | config.timer = newConfigFromType(ValueType.TIMER);
73 | config.variable = newConfigFromType(ValueType.VARIABLE);
74 | config.variableDocument = newConfigFromType(ValueType.VARIABLE_DOCUMENT);
75 |
76 | // when
77 | final RecordsConfig parsed = parser.parse(config);
78 |
79 | // then
80 | for (final ValueType type : EXPECTED_VALUE_TYPES) {
81 | assertThat(parsed.forType(type).getTopic()).isEqualTo(type.name());
82 | }
83 | }
84 |
85 | @Test
86 | void shouldUseDefaultsOnMissingProperties() {
87 | // given
88 | final RawRecordsConfig config = new RawRecordsConfig();
89 | config.defaults = new RawRecordConfig();
90 | config.defaults.topic = "default";
91 | config.defaults.type =
92 | String.format(
93 | "%s,%s", AllowedType.COMMAND.getTypeName(), AllowedType.REJECTION.getTypeName());
94 |
95 | // when
96 | final RecordsConfig parsed = parser.parse(config);
97 |
98 | // then
99 | parsed
100 | .getTypeMap()
101 | .forEach(
102 | (t, c) -> {
103 | assertThat(c.getTopic()).isEqualTo(config.defaults.topic);
104 | assertThat(c.getAllowedTypes())
105 | .containsExactly(RecordType.COMMAND, RecordType.COMMAND_REJECTION);
106 | });
107 | }
108 |
109 | private RawRecordConfig newConfigFromType(final ValueType type) {
110 | final RawRecordConfig recordConfig = new RawRecordConfig();
111 | recordConfig.topic = type.name();
112 |
113 | return recordConfig;
114 | }
115 | }
116 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/producer/MockKafkaProducerFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.config.ProducerConfig;
19 | import io.zeebe.exporters.kafka.serde.RecordId;
20 | import java.util.Objects;
21 | import java.util.function.Supplier;
22 | import org.apache.kafka.clients.producer.MockProducer;
23 | import org.apache.kafka.clients.producer.Producer;
24 |
25 | /**
26 | * A utility implementation to allow more control of the execution of the {@link
27 | * io.zeebe.exporters.kafka.KafkaExporter} in tests. Allows overriding the producer which will be
28 | * given to the exporter - if none given, it will create a {@link MockProducer} and memoize the
29 | * value.
30 | */
31 | public class MockKafkaProducerFactory implements KafkaProducerFactory {
32 |   public Supplier<MockProducer<RecordId, byte[]>> mockProducerSupplier;
33 |   public MockProducer<RecordId, byte[]> mockProducer;
34 | public String producerId;
35 |
36 | public MockKafkaProducerFactory(
37 |       final Supplier<MockProducer<RecordId, byte[]>> mockProducerSupplier) {
38 | this.mockProducerSupplier = Objects.requireNonNull(mockProducerSupplier);
39 | }
40 |
41 | @Override
42 |   public Producer<RecordId, byte[]> newProducer(
43 | final ProducerConfig config, final String producerId) {
44 | this.producerId = Objects.requireNonNull(producerId);
45 | if (mockProducer == null || mockProducer.closed()) {
46 | mockProducer = mockProducerSupplier.get();
47 | }
48 |
49 | return mockProducer;
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/exporter/src/test/java/io/zeebe/exporters/kafka/producer/RecordBatchStub.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.producer;
17 |
18 | import io.zeebe.exporters.kafka.config.ProducerConfig;
19 | import io.zeebe.exporters.kafka.record.FullRecordBatchException;
20 | import io.zeebe.exporters.kafka.serde.RecordId;
21 | import java.nio.BufferOverflowException;
22 | import java.util.LinkedList;
23 | import java.util.List;
24 | import java.util.Objects;
25 | import java.util.function.LongConsumer;
26 | import org.apache.kafka.clients.producer.ProducerRecord;
27 | import org.slf4j.Logger;
28 |
29 | public final class RecordBatchStub implements RecordBatch {
30 | public RuntimeException flushException;
31 |
32 | private final ProducerConfig config;
33 | private final int maxBatchSize;
34 | private final LongConsumer onFlushCallback;
35 | private final Logger logger;
36 |
37 | private final LinkedList<ProducerRecord<RecordId, byte[]>> flushedRecords = new LinkedList<>();
38 |
39 | private final LinkedList<ProducerRecord<RecordId, byte[]>> pendingRecords = new LinkedList<>();
40 |
41 | private boolean closed = false;
42 |
43 | public RecordBatchStub(
44 | final ProducerConfig config,
45 | final int maxBatchSize,
46 | final LongConsumer onFlushCallback,
47 | final Logger logger) {
48 | this.config = Objects.requireNonNull(config);
49 | this.maxBatchSize = maxBatchSize;
50 | this.onFlushCallback = Objects.requireNonNull(onFlushCallback);
51 | this.logger = Objects.requireNonNull(logger);
52 | }
53 |
54 | @Override
55 | public void add(final ProducerRecord<RecordId, byte[]> record) throws FullRecordBatchException {
56 | if (pendingRecords.size() >= maxBatchSize) {
57 | throw new FullRecordBatchException(maxBatchSize, new BufferOverflowException());
58 | }
59 |
60 | pendingRecords.add(record);
61 | }
62 |
63 | @Override
64 | public void flush() {
65 | if (flushException != null) {
66 | throw flushException;
67 | }
68 |
69 | flushedRecords.addAll(pendingRecords);
70 | pendingRecords.clear();
71 |
72 | if (!flushedRecords.isEmpty()) {
73 | onFlushCallback.accept(flushedRecords.getLast().key().getPosition());
74 | }
75 | }
76 |
77 | @Override
78 | public void close() {
79 | closed = true;
80 | }
81 |
82 | public List<ProducerRecord<RecordId, byte[]>> getFlushedRecords() {
83 | return flushedRecords;
84 | }
85 |
86 | public List<ProducerRecord<RecordId, byte[]>> getPendingRecords() {
87 | return pendingRecords;
88 | }
89 |
90 | public boolean isClosed() {
91 | return closed;
92 | }
93 |
94 | public static class Factory implements RecordBatchFactory {
95 | public RecordBatchStub stub;
96 |
97 | @Override
98 | public RecordBatch newRecordBatch(
99 | final ProducerConfig config,
100 | final int maxBatchSize,
101 | final LongConsumer onFlushCallback,
102 | final Logger logger) {
103 | if (stub == null) {
104 | stub = new RecordBatchStub(config, maxBatchSize, onFlushCallback, logger);
105 | }
106 |
107 | return stub;
108 | }
109 | }
110 | }
111 |
--------------------------------------------------------------------------------
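A short sketch of the stub's contract as a unit test would exercise it: add() buffers records until maxBatchSize and then throws FullRecordBatchException, while flush() drains the pending list into the flushed list and reports the last flushed position through the callback. The config parameter, topic name, and payloads are illustrative assumptions:

import io.zeebe.exporters.kafka.config.ProducerConfig;
import io.zeebe.exporters.kafka.serde.RecordId;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.LoggerFactory;

final class RecordBatchStubSketch {
  static void demo(final ProducerConfig config) {
    final AtomicLong lastFlushed = new AtomicLong(-1);
    final RecordBatchStub.Factory factory = new RecordBatchStub.Factory();
    final RecordBatch batch =
        factory.newRecordBatch(
            config, 2, lastFlushed::set, LoggerFactory.getLogger(RecordBatchStubSketch.class));

    batch.add(new ProducerRecord<>("zeebe", new RecordId(1, 1), new byte[0]));
    batch.add(new ProducerRecord<>("zeebe", new RecordId(1, 2), new byte[0]));
    // a third add(...) would throw FullRecordBatchException here (maxBatchSize == 2)

    batch.flush(); // drains pending -> flushed, then reports position 2 via the callback
  }
}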
/exporter/src/test/java/io/zeebe/exporters/kafka/record/RecordHandlerTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.record;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import io.camunda.zeebe.protocol.jackson.record.DeploymentRecordValueBuilder;
21 | import io.camunda.zeebe.protocol.jackson.record.RecordBuilder;
22 | import io.camunda.zeebe.protocol.record.Record;
23 | import io.camunda.zeebe.protocol.record.RecordType;
24 | import io.camunda.zeebe.protocol.record.ValueType;
25 | import io.camunda.zeebe.protocol.record.intent.DeploymentIntent;
26 | import io.camunda.zeebe.protocol.record.value.DeploymentRecordValue;
27 | import io.zeebe.exporters.kafka.config.RecordConfig;
28 | import io.zeebe.exporters.kafka.config.RecordsConfig;
29 | import io.zeebe.exporters.kafka.serde.RecordId;
30 | import java.nio.charset.StandardCharsets;
31 | import java.util.EnumSet;
32 | import java.util.Map;
33 | import org.apache.kafka.clients.producer.ProducerRecord;
34 | import org.junit.jupiter.api.Test;
35 | import org.junit.jupiter.api.parallel.Execution;
36 | import org.junit.jupiter.api.parallel.ExecutionMode;
37 |
38 | @Execution(ExecutionMode.CONCURRENT)
39 | final class RecordHandlerTest {
40 |
41 | private static final RecordConfig DEFAULT_RECORD_CONFIG =
42 | new RecordConfig(EnumSet.allOf(RecordType.class), "zeebe");
43 |
44 | @Test
45 | void shouldTransformRecord() {
46 | // given
47 | final Record<DeploymentRecordValue> record =
48 | buildDeploymentRecord().recordType(RecordType.COMMAND).build();
49 | final RecordConfig deploymentRecordConfig =
50 | new RecordConfig(EnumSet.allOf(RecordType.class), "topic");
51 | final RecordHandler recordHandler = new RecordHandler(newRecordsConfig(RecordType.COMMAND));
52 |
53 | // when
54 | final ProducerRecord<RecordId, byte[]> transformed = recordHandler.transform(record);
55 |
56 | // then
57 | assertThat(transformed.topic()).isEqualTo(deploymentRecordConfig.getTopic());
58 | assertThat(transformed.key())
59 | .isEqualTo(new RecordId(record.getPartitionId(), record.getPosition()));
60 | assertThat(transformed.value()).isEqualTo(record.toJson().getBytes(StandardCharsets.UTF_8));
61 | }
62 |
63 | @Test
64 | void shouldTestRecordAsNotAllowed() {
65 | // given
66 | final Record<DeploymentRecordValue> record =
67 | buildDeploymentRecord().recordType(RecordType.COMMAND).build();
68 | final RecordHandler recordHandler = new RecordHandler(newRecordsConfig(RecordType.EVENT));
69 |
70 | // when - then
71 | assertThat(recordHandler.isAllowed(record)).isFalse();
72 | }
73 |
74 | @Test
75 | void shouldTestRecordAsAllowed() {
76 | // given
77 | final Record<DeploymentRecordValue> record =
78 | buildDeploymentRecord().recordType(RecordType.EVENT).build();
79 | final RecordHandler recordHandler = new RecordHandler(newRecordsConfig(RecordType.EVENT));
80 |
81 | // when - then
82 | assertThat(recordHandler.isAllowed(record)).isTrue();
83 | }
84 |
85 | private RecordsConfig newRecordsConfig(final RecordType allowedType) {
86 | final RecordConfig recordConfig = new RecordConfig(EnumSet.of(allowedType), "topic");
87 | return new RecordsConfig(Map.of(ValueType.DEPLOYMENT, recordConfig), DEFAULT_RECORD_CONFIG);
88 | }
89 |
90 | private RecordBuilder<DeploymentRecordValue> buildDeploymentRecord() {
91 | return new RecordBuilder<DeploymentRecordValue>()
92 | .valueType(ValueType.DEPLOYMENT)
93 | .recordType(RecordType.EVENT)
94 | .timestamp(System.currentTimeMillis())
95 | .intent(DeploymentIntent.CREATE)
96 | .value(new DeploymentRecordValueBuilder().build())
97 | .partitionId(1)
98 | .position(1);
99 | }
100 | }
101 |
--------------------------------------------------------------------------------
/exporter/src/test/resources/simplelogger.properties:
--------------------------------------------------------------------------------
1 | org.slf4j.simpleLogger.logFile=System.out
2 | org.slf4j.simpleLogger.showShortLogName=true
3 | org.slf4j.simpleLogger.defaultLogLevel=info
4 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka=debug
5 | org.slf4j.simpleLogger.log.io.zeebe.broker.exporter=debug
6 | org.slf4j.simpleLogger.log.org.apache=warn
7 | org.slf4j.simpleLogger.log.kafka=warn
8 |
--------------------------------------------------------------------------------
/qa/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |
5 |   <modelVersion>4.0.0</modelVersion>
6 |   <name>Zeebe Kafka Exporter QA</name>
7 |   <artifactId>zeebe-kafka-exporter-qa</artifactId>
8 |   <packaging>jar</packaging>
9 |   <url>https://github.com/zeebe-io/zeebe-kafka-exporter/qa</url>
10 |
11 |   <parent>
12 |     <artifactId>zeebe-kafka-exporter-root</artifactId>
13 |     <groupId>io.zeebe</groupId>
14 |     <version>3.1.2-SNAPSHOT</version>
15 |     <relativePath>../pom.xml</relativePath>
16 |   </parent>
17 |
18 |   <dependencies>
19 |
20 |     <dependency>
21 |       <groupId>io.zeebe</groupId>
22 |       <artifactId>zeebe-kafka-exporter</artifactId>
23 |       <scope>test</scope>
24 |     </dependency>
25 |
26 |     <dependency>
27 |       <groupId>io.zeebe</groupId>
28 |       <artifactId>zeebe-kafka-exporter-serde</artifactId>
29 |       <scope>test</scope>
30 |     </dependency>
31 |
32 |     <dependency>
33 |       <groupId>io.camunda</groupId>
34 |       <artifactId>zeebe-protocol-jackson</artifactId>
35 |       <scope>test</scope>
36 |     </dependency>
37 |
38 |     <dependency>
39 |       <groupId>com.fasterxml.jackson.core</groupId>
40 |       <artifactId>jackson-core</artifactId>
41 |       <scope>test</scope>
42 |     </dependency>
43 |
44 |     <dependency>
45 |       <groupId>com.fasterxml.jackson.core</groupId>
46 |       <artifactId>jackson-databind</artifactId>
47 |       <scope>test</scope>
48 |     </dependency>
49 |
50 |     <dependency>
51 |       <groupId>io.camunda</groupId>
52 |       <artifactId>zeebe-client-java</artifactId>
53 |       <scope>test</scope>
54 |     </dependency>
55 |
56 |     <dependency>
57 |       <groupId>io.camunda</groupId>
58 |       <artifactId>zeebe-bpmn-model</artifactId>
59 |       <scope>test</scope>
60 |     </dependency>
61 |
62 |     <dependency>
63 |       <groupId>io.camunda</groupId>
64 |       <artifactId>zeebe-protocol</artifactId>
65 |       <scope>test</scope>
66 |     </dependency>
67 |
68 |     <dependency>
69 |       <groupId>io.camunda</groupId>
70 |       <artifactId>zeebe-protocol-asserts</artifactId>
71 |       <scope>test</scope>
72 |     </dependency>
73 |
74 |     <dependency>
75 |       <groupId>org.agrona</groupId>
76 |       <artifactId>agrona</artifactId>
77 |       <scope>test</scope>
78 |     </dependency>
79 |
80 |     <dependency>
81 |       <groupId>org.apache.kafka</groupId>
82 |       <artifactId>kafka-clients</artifactId>
83 |       <scope>test</scope>
84 |     </dependency>
85 |
86 |     <dependency>
87 |       <groupId>org.slf4j</groupId>
88 |       <artifactId>slf4j-api</artifactId>
89 |       <scope>test</scope>
90 |     </dependency>
91 |
92 |     <dependency>
93 |       <groupId>org.slf4j</groupId>
94 |       <artifactId>slf4j-simple</artifactId>
95 |       <scope>test</scope>
96 |     </dependency>
97 |
98 |     <dependency>
99 |       <groupId>org.testcontainers</groupId>
100 |       <artifactId>testcontainers</artifactId>
101 |       <scope>test</scope>
102 |     </dependency>
103 |
104 |     <dependency>
105 |       <groupId>org.testcontainers</groupId>
106 |       <artifactId>kafka</artifactId>
107 |       <scope>test</scope>
108 |     </dependency>
109 |
110 |     <dependency>
111 |       <groupId>org.testcontainers</groupId>
112 |       <artifactId>junit-jupiter</artifactId>
113 |       <scope>test</scope>
114 |     </dependency>
115 |
116 |     <dependency>
117 |       <groupId>io.zeebe</groupId>
118 |       <artifactId>zeebe-test-container</artifactId>
119 |       <scope>test</scope>
120 |     </dependency>
121 |
122 |     <dependency>
123 |       <groupId>org.junit.jupiter</groupId>
124 |       <artifactId>junit-jupiter-api</artifactId>
125 |       <scope>test</scope>
126 |     </dependency>
127 |
128 |     <dependency>
129 |       <groupId>org.assertj</groupId>
130 |       <artifactId>assertj-core</artifactId>
131 |       <scope>test</scope>
132 |     </dependency>
133 |
134 |     <dependency>
135 |       <groupId>org.awaitility</groupId>
136 |       <artifactId>awaitility</artifactId>
137 |       <scope>test</scope>
138 |     </dependency>
139 |
140 |
141 |   </dependencies>
142 |
143 |   <build>
145 |       <plugin>
146 |         <groupId>org.apache.maven.plugins</groupId>
147 |         <artifactId>maven-dependency-plugin</artifactId>
148 |         <executions>
149 |           <execution>
150 |             <id>copy</id>
151 |             <phase>pre-integration-test</phase>
152 |             <goals>
153 |               <goal>copy</goal>
154 |             </goals>
155 |             <configuration>
156 |               <artifactItems>
157 |                 <artifactItem>
158 |                   <groupId>io.zeebe</groupId>
159 |                   <artifactId>zeebe-kafka-exporter</artifactId>
160 |                   <version>${project.version}</version>
161 |                   <type>jar</type>
162 |                   <classifier>jar-with-dependencies</classifier>
163 |                   <outputDirectory>${project.basedir}/src/test/resources</outputDirectory>
164 |                   <destFileName>zeebe-kafka-exporter.jar</destFileName>
165 |                 </artifactItem>
166 |               </artifactItems>
167 |               <overWriteReleases>false</overWriteReleases>
168 |               <overWriteSnapshots>true</overWriteSnapshots>
169 |             </configuration>
170 |           </execution>
171 |         </executions>
172 |
173 |         <configuration>
174 |           <ignoredUnusedDeclaredDependencies>
178 |             <ignoredUnusedDeclaredDependency>io.zeebe:zeebe-kafka-exporter</ignoredUnusedDeclaredDependency>
179 |
180 |             <ignoredUnusedDeclaredDependency>org.slf4j:slf4j-simple</ignoredUnusedDeclaredDependency>
181 |
182 |           </ignoredUnusedDeclaredDependencies>
183 |         </configuration>
184 |       </plugin>
185 |       <plugin>
186 |         <groupId>org.apache.maven.plugins</groupId>
187 |         <artifactId>maven-failsafe-plugin</artifactId>
188 |       </plugin>
189 |     </plugins>
190 |   </build>
191 | </project>
192 |
--------------------------------------------------------------------------------
/qa/src/test/java/io/zeebe/exporters/kafka/qa/DebugHttpExporterClient.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.qa;
17 |
18 | import com.fasterxml.jackson.core.type.TypeReference;
19 | import com.fasterxml.jackson.databind.ObjectMapper;
20 | import com.fasterxml.jackson.databind.ObjectReader;
21 | import io.camunda.zeebe.protocol.jackson.record.AbstractRecord;
22 | import io.camunda.zeebe.protocol.record.Record;
23 | import java.io.IOException;
24 | import java.io.UncheckedIOException;
25 | import java.net.URL;
26 | import java.util.Collections;
27 | import java.util.List;
28 | import java.util.stream.Stream;
29 |
30 | /**
31 |  * A dumb client for the DebugHttpExporter. This exporter starts a server on a single broker for
32 |  * all known partitions (of that broker), and simply exposes a poll mechanism for the records.
33 |  *
34 |  * <p>NOTE: the server returns records in reverse order, from newest to oldest, which is the
35 |  * opposite of what we typically want, i.e. sorted in causal order. The {@link #streamRecords()}
36 |  * method therefore returns them reversed.
37 |  *
38 |  * <p>NOTE: the streaming is "dumb", and really only returns the records from the server as-is as
39 |  * a stream. This is fine for now since we typically don't have a lot of records, but it means you
40 |  * may have to call the method multiple times.
41 |  */
42 | final class DebugHttpExporterClient {
43 |
44 | private static final ObjectReader READER =
45 | new ObjectMapper().readerFor(new TypeReference<List<AbstractRecord<?>>>() {});
46 |
47 | private final URL serverUrl;
48 |
49 | DebugHttpExporterClient(final URL serverUrl) {
50 | this.serverUrl = serverUrl;
51 | }
52 |
53 | Stream<Record<?>> streamRecords() {
54 | try {
55 | // the HTTP exporter returns records in reversed order, so flip them before returning
56 | final List<AbstractRecord<?>> records = READER.readValue(serverUrl);
57 | Collections.reverse(records);
58 |
59 | return records.stream().map(r -> r);
60 | } catch (final IOException e) {
61 | throw new UncheckedIOException(e);
62 | }
63 | }
64 | }
65 |
--------------------------------------------------------------------------------
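As a usage note, the client pairs naturally with Awaitility when a test has to wait for the exporter to catch up. A sketch, assuming the QA setup below where the DebugHttpExporter listens on port 8000:

import io.camunda.zeebe.protocol.record.Record;
import java.net.MalformedURLException;
import java.net.URL;
import java.time.Duration;
import java.util.List;
import java.util.stream.Collectors;
import org.awaitility.Awaitility;

final class DebugHttpExporterClientSketch {
  static List<Record<?>> awaitAtLeast(final int count) throws MalformedURLException {
    final var client = new DebugHttpExporterClient(new URL("http://localhost:8000/records.json"));
    // poll until the server reports at least `count` records, then return them in causal order
    return Awaitility.await("until enough records are visible")
        .atMost(Duration.ofSeconds(30))
        .until(() -> client.streamRecords().collect(Collectors.toList()), l -> l.size() >= count);
  }
}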
/qa/src/test/java/io/zeebe/exporters/kafka/qa/KafkaExporterIT.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.qa;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import io.camunda.zeebe.client.ZeebeClient;
21 | import io.camunda.zeebe.protocol.record.Record;
22 | import io.zeebe.containers.ZeebeContainer;
23 | import io.zeebe.exporters.kafka.serde.RecordDeserializer;
24 | import io.zeebe.exporters.kafka.serde.RecordId;
25 | import io.zeebe.exporters.kafka.serde.RecordIdDeserializer;
26 | import java.net.MalformedURLException;
27 | import java.net.URL;
28 | import java.time.Duration;
29 | import java.util.ArrayList;
30 | import java.util.Comparator;
31 | import java.util.HashMap;
32 | import java.util.List;
33 | import java.util.Map;
34 | import java.util.concurrent.CompletableFuture;
35 | import java.util.concurrent.CountDownLatch;
36 | import java.util.concurrent.TimeUnit;
37 | import java.util.regex.Pattern;
38 | import java.util.stream.Collectors;
39 | import org.agrona.CloseHelper;
40 | import org.apache.kafka.clients.admin.AdminClient;
41 | import org.apache.kafka.clients.admin.AdminClientConfig;
42 | import org.apache.kafka.clients.admin.NewTopic;
43 | import org.apache.kafka.clients.consumer.Consumer;
44 | import org.apache.kafka.clients.consumer.ConsumerConfig;
45 | import org.apache.kafka.clients.consumer.ConsumerRecord;
46 | import org.apache.kafka.clients.consumer.KafkaConsumer;
47 | import org.awaitility.Awaitility;
48 | import org.junit.jupiter.api.AfterEach;
49 | import org.junit.jupiter.api.Test;
50 | import org.junit.jupiter.api.Timeout;
51 | import org.junit.jupiter.api.parallel.Execution;
52 | import org.junit.jupiter.api.parallel.ExecutionMode;
53 | import org.slf4j.Logger;
54 | import org.slf4j.LoggerFactory;
55 | import org.testcontainers.containers.KafkaContainer;
56 | import org.testcontainers.containers.Network;
57 | import org.testcontainers.containers.output.Slf4jLogConsumer;
58 | import org.testcontainers.junit.jupiter.Testcontainers;
59 | import org.testcontainers.utility.DockerImageName;
60 | import org.testcontainers.utility.MountableFile;
61 |
62 | /**
63 |  * This tests the deployment of the exporter into a Zeebe broker in an as-close-to-production way
64 |  * as possible, by starting a Zeebe container and deploying the exporter as one normally would.
65 |  *
66 |  * <p>In order to verify certain properties - i.e. all records were exported correctly, order was
67 |  * maintained on a per-partition basis, etc. - we use an exporter deemed "reliable", the
68 |  * DebugHttpExporter, to compare results.
69 | */
70 | @Testcontainers
71 | @Timeout(value = 5, unit = TimeUnit.MINUTES)
72 | @Execution(ExecutionMode.CONCURRENT)
73 | final class KafkaExporterIT {
74 | private static final Pattern TOPIC_SUBSCRIPTION_PATTERN = Pattern.compile("zeebe.*");
75 |
76 | private final Network network = Network.newNetwork();
77 | private KafkaContainer kafkaContainer = newKafkaContainer();
78 | private final ZeebeContainer zeebeContainer = newZeebeContainer();
79 |
80 | private ZeebeClient zeebeClient;
81 | private DebugHttpExporterClient debugExporter;
82 |
83 | @AfterEach
84 | void tearDown() {
85 | CloseHelper.quietCloseAll(zeebeClient, zeebeContainer, kafkaContainer, network);
86 | }
87 |
88 | @Test
89 | void shouldExportToKafka() throws MalformedURLException {
90 | // given
91 | startKafka();
92 | zeebeContainer.start();
93 | final var sampleWorkload = newSampleWorkload();
94 |
95 | // when
96 | sampleWorkload.execute();
97 |
98 | // then
99 | assertRecordsExported(sampleWorkload);
100 | }
101 |
102 | @Test
103 | void shouldExportEvenIfKafkaStartedLater() throws MalformedURLException {
104 | // given
105 | zeebeContainer.start();
106 | final var sampleWorkload = newSampleWorkload();
107 |
108 | // when
109 | sampleWorkload.execute();
110 | startKafka();
111 |
112 | // then
113 | assertRecordsExported(sampleWorkload);
114 | }
115 |
116 | @Test
117 | void shouldExportEvenIfKafkaRestartedInTheMiddle()
118 | throws MalformedURLException, InterruptedException {
119 | // given
120 | startKafka();
121 | zeebeContainer.start();
122 | final var sampleWorkload = newSampleWorkload();
123 |
124 | // when
125 | final var latch = new CountDownLatch(1);
126 | final var workloadFinished =
127 | CompletableFuture.runAsync(() -> sampleWorkload.execute(latch::countDown));
128 |
129 | assertThat(latch.await(15, TimeUnit.SECONDS))
130 | .as("midpoint hook was called to stop kafka")
131 | .isTrue();
132 | kafkaContainer.stop();
133 | kafkaContainer = newKafkaContainer();
134 | startKafka();
135 | workloadFinished.join();
136 |
137 | // then
138 | assertRecordsExported(sampleWorkload);
139 | }
140 |
141 | private SampleWorkload newSampleWorkload() throws MalformedURLException {
142 | return new SampleWorkload(getLazyZeebeClient(), getLazyDebugExporter());
143 | }
144 |
145 | /**
146 |  * Asserts that the expected records have been correctly exported.
147 |  *
148 |  * <p>The properties asserted are the following for every partition:
149 |  *
150 |  * <ul>
151 |  *   <li>every record for partition X was exported to Kafka
152 |  *   <li>every record for partition X was exported to the same Kafka partition Y
153 |  *   <li>every record for partition X is consumed in the order in which they were written (i.e. by
154 |  *       position)
155 |  * </ul>
156 |  *
157 |  * <p>The first property is self-explanatory - just ensure all records can be consumed from the
158 |  * expected Kafka topic.
159 |  *
160 |  * <p>The second property checks the partitioning logic - Zeebe records are causally linked, and
161 |  * exporting them to different partitions will result in them being consumed out of order. So this
162 |  * ensures that all records from a given Zeebe partition are exported to the same Kafka partition
163 |  * in order to preserve ordering.
164 |  *
165 |  * <p>The third property is an extension of this, and checks that they are indeed ordered by
166 |  * position.
167 |  */
168 | private void assertRecordsExported(final SampleWorkload workload) {
169 | final var expectedRecords = workload.getExpectedRecords(Duration.ofSeconds(5));
170 | final var expectedRecordsPerPartition =
171 | expectedRecords.stream().collect(Collectors.groupingBy(Record::getPartitionId));
172 | final var actualRecords = awaitAllExportedRecords(expectedRecordsPerPartition);
173 |
174 | assertThat(expectedRecords).as("there should have been some records exported").isNotEmpty();
175 | assertThat(actualRecords)
176 | .allSatisfy(
177 | (partitionId, records) -> {
178 | assertExportedRecordsPerPartition(
179 | partitionId, records, expectedRecordsPerPartition.get(partitionId));
180 | });
181 | }
182 |
183 | @SuppressWarnings("rawtypes")
184 | private void assertExportedRecordsPerPartition(
185 | final Integer partitionId,
186 | final List<ConsumerRecord<RecordId, Record<?>>> exportedRecords,
187 | final List<Record<?>> expectedRecords) {
188 | final var expectedKafkaPartition = exportedRecords.get(0).partition();
189 | assertThat(exportedRecords)
190 | .as(
191 | "all exported records from Zeebe partition %d were exported to the same Kafka partition %d",
192 | partitionId, expectedKafkaPartition)
193 | .allMatch(r -> r.partition() == expectedKafkaPartition)
194 | // cast to raw type to be able to compare the containers
195 | .map(r -> (Record) r.value())
196 | .as(
197 | "the records for partition %d are the same as those reported by the DebugHttpExporter",
198 | partitionId)
199 | .containsExactlyInAnyOrderElementsOf(expectedRecords)
200 | .as("the records for partition %d are sorted by position", partitionId)
201 | .isSortedAccordingTo(Comparator.comparing(Record::getPosition));
202 | }
203 |
204 | /**
205 |  * A wrapper around {@link #consumeExportedRecords(Map)} to avoid race conditions where we poll
206 |  * too early and receive fewer records. Doing this avoids any potential flakiness at the cost of
207 |  * a bit more complexity/unreadability.
208 |  */
209 | private Map<Integer, List<ConsumerRecord<RecordId, Record<?>>>> awaitAllExportedRecords(
210 | final Map<Integer, List<Record<?>>> expectedRecords) {
211 | final var records = new HashMap<Integer, List<ConsumerRecord<RecordId, Record<?>>>>();
212 |
213 | Awaitility.await("until the expected number of records has been consumed")
214 | .atMost(Duration.ofSeconds(30))
215 | .pollDelay(Duration.ZERO)
216 | .pollInterval(Duration.ofMillis(100))
217 | .pollInSameThread()
218 | .untilAsserted(
219 | () -> {
220 | consumeExportedRecords(records);
221 | assertThat(records)
222 | .allSatisfy(
223 | (partitionId, list) -> {
224 | assertThat(list)
225 | .as("records consumed for partition %d", partitionId)
226 | .hasSameSizeAs(expectedRecords.get(partitionId));
227 | });
228 | });
229 |
230 | return records;
231 | }
232 |
233 | private void consumeExportedRecords(
234 | final Map<Integer, List<ConsumerRecord<RecordId, Record<?>>>> records) {
235 | final var timeout = Duration.ofSeconds(5);
236 |
237 | try (final Consumer<RecordId, Record<?>> consumer = newConsumer()) {
238 | final var consumedRecords = consumer.poll(timeout);
239 | for (final var consumedRecord : consumedRecords) {
240 | final var perPartitionRecords =
241 | records.computeIfAbsent(
242 | consumedRecord.value().getPartitionId(), ignored -> new ArrayList<>());
243 |
244 | perPartitionRecords.add(consumedRecord);
245 | perPartitionRecords.sort(Comparator.comparing(ConsumerRecord::offset, Long::compareTo));
246 | }
247 | }
248 | }
249 |
250 | private ZeebeClient getLazyZeebeClient() {
251 | if (zeebeClient == null) {
252 | zeebeClient =
253 | ZeebeClient.newClientBuilder()
254 | .gatewayAddress(zeebeContainer.getExternalGatewayAddress())
255 | .usePlaintext()
256 | .build();
257 | }
258 |
259 | return zeebeClient;
260 | }
261 |
262 | private DebugHttpExporterClient getLazyDebugExporter() throws MalformedURLException {
263 | if (debugExporter == null) {
264 | final var exporterServerUrl =
265 | new URL(String.format("http://%s/records.json", zeebeContainer.getExternalAddress(8000)));
266 | debugExporter = new DebugHttpExporterClient(exporterServerUrl);
267 | }
268 |
269 | return debugExporter;
270 | }
271 |
272 | @SuppressWarnings("OctalInteger")
273 | private ZeebeContainer newZeebeContainer() {
274 | final var container = new ZeebeContainer();
275 | final var exporterJar = MountableFile.forClasspathResource("zeebe-kafka-exporter.jar", 0775);
276 | final var exporterConfig = MountableFile.forClasspathResource("exporters.yml", 0775);
277 | final var loggingConfig = MountableFile.forClasspathResource("log4j2.xml", 0775);
278 | final var networkAlias = "zeebe";
279 | final var logConsumer = new Slf4jLogConsumer(newContainerLogger("zeebeContainer"), true);
280 |
281 | container.addExposedPort(8000);
282 | return container
283 | .withNetwork(network)
284 | .withNetworkAliases(networkAlias)
285 | .withEnv("ZEEBE_BROKER_NETWORK_ADVERTISEDHOST", networkAlias)
286 | .withEnv("ZEEBE_BROKER_CLUSTER_PARTITIONSCOUNT", "3")
287 | .withEnv("ZEEBE_BROKER_EXPORTERS_KAFKA_ARGS_PRODUCER_SERVERS", "kafka:9092")
288 | .withEnv("ZEEBE_LOG_LEVEL", "info")
289 | .withEnv(
290 | "LOG4J_CONFIGURATION_FILE",
291 | "/usr/local/zeebe/config/log4j2.xml,/usr/local/zeebe/config/log4j2-exporter.xml")
292 | .withCopyFileToContainer(exporterJar, "/usr/local/zeebe/exporters/zeebe-kafka-exporter.jar")
293 | .withCopyFileToContainer(exporterConfig, "/usr/local/zeebe/config/exporters.yml")
294 | .withCopyFileToContainer(loggingConfig, "/usr/local/zeebe/config/log4j2-exporter.xml")
295 | .withEnv("SPRING_CONFIG_ADDITIONAL_LOCATION", "file:/usr/local/zeebe/config/exporters.yml")
296 | .withLogConsumer(logConsumer);
297 | }
298 |
299 | private Consumer<RecordId, Record<?>> newConsumer() {
300 | final var config = new HashMap<String, Object>();
301 | config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
302 | config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaContainer.getBootstrapServers());
303 | config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, true);
304 | config.put(ConsumerConfig.GROUP_ID_CONFIG, this.getClass().getName());
305 | config.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, Integer.MAX_VALUE);
306 | config.put(ConsumerConfig.METADATA_MAX_AGE_CONFIG, 500);
307 | config.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
308 |
309 | final var consumer =
310 | new KafkaConsumer<>(config, new RecordIdDeserializer(), new RecordDeserializer());
311 | consumer.subscribe(TOPIC_SUBSCRIPTION_PATTERN);
312 |
313 | return consumer;
314 | }
315 |
316 | private KafkaContainer newKafkaContainer() {
317 | final var kafkaImage = DockerImageName.parse("confluentinc/cp-kafka").withTag("5.5.1");
318 | final var container = new KafkaContainer(kafkaImage);
319 | final var logConsumer = new Slf4jLogConsumer(newContainerLogger("kafkaContainer"), true);
320 |
321 | return container
322 | .withEnv("KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", "1")
323 | .withEnv("KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", "1")
324 | .withEmbeddedZookeeper()
325 | .withNetwork(network)
326 | .withNetworkAliases("kafka")
327 | .withLogConsumer(logConsumer);
328 | }
329 |
330 | private void startKafka() {
331 | kafkaContainer.start();
332 |
333 | // provision Kafka topics - this is difficult at the moment to achieve purely via
334 | // configuration, so we do it as a pre-step
335 | final NewTopic topic = new NewTopic("zeebe", 3, (short) 1);
336 | try (final AdminClient admin =
337 | AdminClient.create(
338 | Map.of(
339 | AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG,
340 | kafkaContainer.getBootstrapServers()))) {
341 | admin.createTopics(List.of(topic));
342 | }
343 | }
344 |
345 | private static Logger newContainerLogger(final String containerName) {
346 | return LoggerFactory.getLogger(KafkaExporterIT.class.getName() + "." + containerName);
347 | }
348 | }
349 |
--------------------------------------------------------------------------------
/qa/src/test/java/io/zeebe/exporters/kafka/qa/SampleWorkload.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.qa;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import io.camunda.zeebe.client.ZeebeClient;
21 | import io.camunda.zeebe.client.api.response.ActivatedJob;
22 | import io.camunda.zeebe.client.api.worker.JobClient;
23 | import io.camunda.zeebe.client.api.worker.JobHandler;
24 | import io.camunda.zeebe.client.api.worker.JobWorker;
25 | import io.camunda.zeebe.model.bpmn.Bpmn;
26 | import io.camunda.zeebe.model.bpmn.BpmnModelInstance;
27 | import io.camunda.zeebe.protocol.record.Record;
28 | import io.camunda.zeebe.protocol.record.RecordAssert;
29 | import io.camunda.zeebe.protocol.record.intent.IncidentIntent;
30 | import io.camunda.zeebe.protocol.record.intent.MessageIntent;
31 | import io.camunda.zeebe.protocol.record.intent.ProcessInstanceIntent;
32 | import io.camunda.zeebe.protocol.record.value.BpmnElementType;
33 | import io.camunda.zeebe.protocol.record.value.IncidentRecordValue;
34 | import io.camunda.zeebe.protocol.record.value.ProcessInstanceRecordValue;
35 | import java.time.Duration;
36 | import java.util.ArrayList;
37 | import java.util.HashMap;
38 | import java.util.List;
39 | import java.util.Map;
40 | import java.util.Objects;
41 | import java.util.Optional;
42 | import java.util.concurrent.atomic.AtomicBoolean;
43 | import java.util.stream.Collectors;
44 | import org.awaitility.Awaitility;
45 |
46 | public final class SampleWorkload {
47 | private static final String JOB_TYPE = "work";
48 | private static final String MESSAGE_NAME = "catch";
49 | private static final String CORRELATION_KEY = "foo-bar-123";
50 | private static final String PROCESS_NAME = "testProcess";
51 | private static final String PROCESS_FILE_NAME = "sample_workflow.bpmn";
52 | private static final String TASK_NAME = "task";
53 | private static final BpmnModelInstance SAMPLE_PROCESS =
54 | Bpmn.createExecutableProcess(PROCESS_NAME)
55 | .startEvent()
56 | .intermediateCatchEvent(
57 | "message",
58 | e -> e.message(m -> m.name(MESSAGE_NAME).zeebeCorrelationKeyExpression("orderId")))
59 | .serviceTask(TASK_NAME, t -> t.zeebeJobType(JOB_TYPE).zeebeTaskHeader("foo", "bar"))
60 | .endEvent()
61 | .done();
62 |
63 | private final ZeebeClient client;
64 | private final DebugHttpExporterClient exporterClient;
65 |
66 | private long endMarkerKey;
67 |
68 | public SampleWorkload(final ZeebeClient client, final DebugHttpExporterClient exporterClient) {
69 | this.client = Objects.requireNonNull(client);
70 | this.exporterClient = Objects.requireNonNull(exporterClient);
71 | }
72 |
73 | public void execute() {
74 | execute(() -> {});
75 | }
76 |
77 | /** Runs a sample workload on the broker, exporting several records of different types. */
78 | public void execute(final Runnable midpointHook) {
79 | deployWorkflow();
80 |
81 | final Map<String, Object> variables = new HashMap<>();
82 | variables.put("orderId", CORRELATION_KEY);
83 | variables.put("largeValue", "x".repeat(8192));
84 | variables.put("unicode", "Á");
85 |
86 | final long workflowInstanceKey = createWorkflowInstance(variables);
87 | final AtomicBoolean fail = new AtomicBoolean(true);
88 | final JobWorker worker = createJobWorker((jobClient, job) -> handleJob(fail, jobClient, job));
89 |
90 | midpointHook.run();
91 | publishMessage();
92 |
93 | final Record<IncidentRecordValue> incident = awaitIncidentRaised(workflowInstanceKey);
94 | client.newUpdateRetriesCommand(incident.getValue().getJobKey()).retries(3).send().join();
95 | client.newResolveIncidentCommand(incident.getKey()).send().join();
96 |
97 | // wrap up
98 | awaitWorkflowCompletion(workflowInstanceKey);
99 | worker.close();
100 | publishEndMarker();
101 | }
102 |
103 | public List<Record<?>> getExpectedRecords(final Duration timeout) {
104 | final var records = new ArrayList<Record<?>>();
105 | assertThat(endMarkerKey).as("the end marker was published so it can be looked up").isPositive();
106 |
107 | Awaitility.await("until all expected records have been exported")
108 | .atMost(timeout)
109 | .pollInterval(Duration.ofMillis(250))
110 | .pollDelay(Duration.ZERO)
111 | .pollInSameThread()
112 | .untilAsserted(
113 | () -> {
114 | records.clear();
115 | records.addAll(exporterClient.streamRecords().collect(Collectors.toList()));
116 | assertEndMarkerExported(records);
117 | });
118 |
119 | return records;
120 | }
121 |
122 | private void assertEndMarkerExported(final ArrayList<Record<?>> records) {
123 | assertThat(records)
124 | .last()
125 | .as("exported records contain the last expected record")
126 | .satisfies(
127 | r -> RecordAssert.assertThat(r).hasKey(endMarkerKey).hasIntent(MessageIntent.EXPIRED));
128 | }
129 |
130 | private void publishEndMarker() {
131 | final var response =
132 | client
133 | .newPublishMessageCommand()
134 | .messageName("endMarker")
135 | .correlationKey("endMarker")
136 | .messageId("endMarker")
137 | .timeToLive(Duration.ZERO)
138 | .send()
139 | .join();
140 |
141 | endMarkerKey = response.getMessageKey();
142 | }
143 |
144 | private Record<IncidentRecordValue> awaitIncidentRaised(final long workflowInstanceKey) {
145 | return Awaitility.await("await incident to be raised")
146 | .pollInterval(Duration.ofMillis(200))
147 | .atMost(Duration.ofSeconds(30))
148 | .until(() -> findIncident(workflowInstanceKey), Optional::isPresent)
149 | .orElseThrow();
150 | }
151 |
152 | @SuppressWarnings({"unchecked", "java:S1905"})
153 | private Optional<Record<IncidentRecordValue>> findIncident(final long processInstanceKey) {
154 | return exporterClient
155 | .streamRecords()
156 | .filter(r -> r.getIntent() == IncidentIntent.CREATED)
157 | .map(r -> (Record<IncidentRecordValue>) r)
158 | .filter(r -> r.getValue().getProcessInstanceKey() == processInstanceKey)
159 | .filter(r -> r.getValue().getElementId().equals(TASK_NAME))
160 | .findFirst();
161 | }
162 |
163 | private void handleJob(
164 | final AtomicBoolean fail, final JobClient jobClient, final ActivatedJob job) {
165 | if (fail.getAndSet(false)) {
166 | jobClient.newFailCommand(job.getKey()).retries(0).errorMessage("failed").send().join();
167 | } else {
168 | jobClient.newCompleteCommand(job.getKey()).send().join();
169 | }
170 | }
171 |
172 | private void deployWorkflow() {
173 | client.newDeployCommand().addProcessModel(SAMPLE_PROCESS, PROCESS_FILE_NAME).send().join();
174 | }
175 |
176 | private long createWorkflowInstance(final Map variables) {
177 | return client
178 | .newCreateInstanceCommand()
179 | .bpmnProcessId(PROCESS_NAME)
180 | .latestVersion()
181 | .variables(variables)
182 | .send()
183 | .join()
184 | .getProcessInstanceKey();
185 | }
186 |
187 | private JobWorker createJobWorker(final JobHandler handler) {
188 | return client.newWorker().jobType(JOB_TYPE).handler(handler).open();
189 | }
190 |
191 | private void publishMessage() {
192 | client
193 | .newPublishMessageCommand()
194 | .messageName(MESSAGE_NAME)
195 | .correlationKey(CORRELATION_KEY)
196 | .send()
197 | .join();
198 | }
199 |
200 | private void awaitWorkflowCompletion(final long workflowInstanceKey) {
201 | Awaitility.await("await workflow " + workflowInstanceKey + " completion")
202 | .pollInterval(Duration.ofMillis(200))
203 | .atMost(Duration.ofSeconds(30))
204 | .untilAsserted(() -> assertThat(getProcessCompleted(workflowInstanceKey)).isPresent());
205 | }
206 |
207 | @SuppressWarnings({"unchecked", "java:S1905"})
208 | private Optional<Record<ProcessInstanceRecordValue>> getProcessCompleted(
209 | final long workflowInstanceKey) {
210 | return exporterClient
211 | .streamRecords()
212 | .filter(r -> r.getIntent() == ProcessInstanceIntent.ELEMENT_COMPLETED)
213 | .filter(r -> r.getKey() == workflowInstanceKey)
214 | .map(r -> (Record<ProcessInstanceRecordValue>) r)
215 | .filter(r -> r.getValue().getBpmnElementType() == BpmnElementType.PROCESS)
216 | .findFirst();
217 | }
218 | }
219 |
--------------------------------------------------------------------------------
/qa/src/test/resources/exporters.yml:
--------------------------------------------------------------------------------
1 | zeebe:
2 | broker:
3 | exporters:
4 | debug:
5 | className: io.camunda.zeebe.broker.exporter.debug.DebugHttpExporter
6 | args:
7 | port: 8000
8 | kafka:
9 | className: io.zeebe.exporters.kafka.KafkaExporter
10 | jarPath: /usr/local/zeebe/exporters/zeebe-kafka-exporter.jar
11 | args:
12 | maxBatchSize: 100
13 | maxBlockingTimeoutMs: 1000
14 | inFlightRecordCheckIntervalMs: 1000
15 |
16 | producer:
17 | requestTimeoutMs: 5000
18 | closeTimeoutMs: 5000
19 | clientId: zeebe
20 | maxConcurrentRequests: 3
21 |
22 | config: |
23 | linger.ms=5
24 | buffer.memory=8388608
25 | batch.size=32768
26 | max.block.ms=5000
27 |
28 | records:
29 | defaults: { type: "command,event,rejection", topic: zeebe }
30 |
--------------------------------------------------------------------------------
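The records.defaults entry above is what ultimately becomes a RecordConfig: an allow-list of record types plus a target topic. A sketch - not the exporter's actual parser - of how such a comma-separated type list could map onto the protocol enum:

import io.camunda.zeebe.protocol.record.RecordType;
import io.zeebe.exporters.kafka.config.RecordConfig;
import java.util.EnumSet;
import java.util.Locale;

final class RecordTypeListSketch {
  static RecordConfig parse(final String types, final String topic) {
    final EnumSet<RecordType> allowed = EnumSet.noneOf(RecordType.class);
    for (final String raw : types.split(",")) {
      final String name = raw.trim().toLowerCase(Locale.ROOT);
      // assumption: "rejection" is shorthand for RecordType.COMMAND_REJECTION
      allowed.add(
          "rejection".equals(name)
              ? RecordType.COMMAND_REJECTION
              : RecordType.valueOf(name.toUpperCase(Locale.ROOT)));
    }
    return new RecordConfig(allowed, topic);
  }
}

With that, parse("command,event,rejection", "zeebe") yields a RecordConfig allowing commands, events, and command rejections on the zeebe topic.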
/qa/src/test/resources/log4j2.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
--------------------------------------------------------------------------------
/qa/src/test/resources/simplelogger.properties:
--------------------------------------------------------------------------------
1 | org.slf4j.simpleLogger.logFile=System.out
2 | org.slf4j.simpleLogger.showShortLogName=true
3 | org.slf4j.simpleLogger.defaultLogLevel=info
4 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka.qa.KafkaExporterIT=debug
5 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka.qa.KafkaExporterIT.zeebeContainer=warn
6 | org.slf4j.simpleLogger.log.io.zeebe.exporters.kafka.qa.KafkaExporterIT.kafkaContainer=warn
7 | org.slf4j.simpleLogger.log.org.apache=warn
8 | org.slf4j.simpleLogger.log.kafka=warn
9 |
--------------------------------------------------------------------------------
/revapi.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "extension": "revapi.java",
4 | "configuration": {
5 | "reportUsesFor": "all-differences",
6 | "missing-classes": {
7 | "behavior": "ignore",
8 | "ignoreMissingAnnotations": true
9 | },
10 | "matchOverloads": false
11 | }
12 | },
13 | {
14 | "extension": "revapi.versions",
15 | "configuration": {
16 | "enabled": true,
17 | "passThroughDifferences": [
18 | "java.class.nonPublicPartOfAPI"
19 | ],
20 | "versionIncreaseAllows": {
21 | "major": {
22 | "severity": "BREAKING"
23 | },
24 | "minor": {
25 | "classification": {
26 | "BINARY": "NON_BREAKING",
27 | "SOURCE": "BREAKING",
28 | "SEMANTIC": "BREAKING",
29 | "OTHER": "BREAKING"
30 | }
31 | },
32 | "patch": {
33 | "classification": {
34 | "BINARY": "NON_BREAKING",
35 | "SOURCE": "BREAKING",
36 | "SEMANTIC": "BREAKING",
37 | "OTHER": "BREAKING"
38 | }
39 | }
40 | }
41 | }
42 | },
43 | {
44 | "extension": "revapi.filter",
45 | "justification": "Ignore everything not included in the module itself",
46 | "configuration": {
47 | "archives": {
48 | "include": [
49 | "io\\.zeebe:zeebe-kafka-exporter:.*"
50 | ]
51 | },
52 | "exclude": [
53 | {
54 | "matcher": "java",
55 | "match": "@org.apiguardian.api.API(status != org.apiguardian.api.API.Status.STABLE) ^*;"
56 | }
57 | ]
58 | }
59 | },
60 | {
61 | "extension": "revapi.differences",
62 | "id": "intentional-api-changes",
63 | "configuration": {
64 | "differences": []
65 | }
66 | }
67 | ]
68 |
--------------------------------------------------------------------------------
/serde/pom.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
3 |   xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
4 |
5 |   <modelVersion>4.0.0</modelVersion>
6 |   <name>Zeebe Kafka Exporter Serialization</name>
7 |   <artifactId>zeebe-kafka-exporter-serde</artifactId>
8 |   <packaging>jar</packaging>
9 |   <url>https://github.com/zeebe-io/zeebe-kafka-exporter/serde</url>
10 |
11 |   <parent>
12 |     <artifactId>zeebe-kafka-exporter-root</artifactId>
13 |     <groupId>io.zeebe</groupId>
14 |     <version>3.1.2-SNAPSHOT</version>
15 |     <relativePath>../pom.xml</relativePath>
16 |   </parent>
17 |
18 |
19 |   <properties>
20 |     <maven.compiler.release>8</maven.compiler.release>
21 |   </properties>
22 |
23 |
24 |   <dependencies>
25 |     <dependency>
26 |       <groupId>io.camunda</groupId>
27 |       <artifactId>zeebe-protocol-jackson</artifactId>
28 |     </dependency>
29 |
30 |     <dependency>
31 |       <groupId>com.fasterxml.jackson.core</groupId>
32 |       <artifactId>jackson-databind</artifactId>
33 |     </dependency>
34 |
35 |     <dependency>
36 |       <groupId>com.fasterxml.jackson.core</groupId>
37 |       <artifactId>jackson-annotations</artifactId>
38 |     </dependency>
39 |
40 |     <dependency>
41 |       <groupId>com.fasterxml.jackson.core</groupId>
42 |       <artifactId>jackson-core</artifactId>
43 |     </dependency>
44 |
45 |
46 |     <dependency>
47 |       <groupId>org.apache.kafka</groupId>
48 |       <artifactId>kafka-clients</artifactId>
49 |     </dependency>
50 |
51 |
52 |     <dependency>
53 |       <groupId>io.camunda</groupId>
54 |       <artifactId>zeebe-protocol</artifactId>
55 |     </dependency>
56 |
57 |     <dependency>
58 |       <groupId>org.junit.jupiter</groupId>
59 |       <artifactId>junit-jupiter-api</artifactId>
60 |       <scope>test</scope>
61 |     </dependency>
62 |
63 |     <dependency>
64 |       <groupId>org.assertj</groupId>
65 |       <artifactId>assertj-core</artifactId>
66 |       <scope>test</scope>
67 |     </dependency>
68 |
69 |     <dependency>
70 |       <groupId>com.fasterxml.jackson.dataformat</groupId>
71 |       <artifactId>jackson-dataformat-cbor</artifactId>
72 |       <scope>test</scope>
73 |     </dependency>
74 |   </dependencies>
75 |
76 |   <build>
77 |     <plugins>
78 |       <plugin>
79 |         <groupId>org.revapi</groupId>
80 |         <artifactId>revapi-maven-plugin</artifactId>
81 |         <configuration>
82 |           <analysisConfigurationFiles combine.children="append">
83 |             <configurationFile>
84 |               <path>revapi.json</path>
85 |             </configurationFile>
86 |           </analysisConfigurationFiles>
87 |         </configuration>
88 |       </plugin>
89 |     </plugins>
90 |   </build>
91 | </project>
92 |
--------------------------------------------------------------------------------
/serde/revapi.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "extension": "revapi.differences",
4 | "configuration": {
5 | "justification": "The serializers depend on Jackson, so exposing its types is fine",
6 | "ignore": true,
7 | "differences": [
8 | {
9 | "code": "java.class.externalClassExposedInAPI",
10 | "oldArchive": "com.fasterxml.jackson.core:*:jar"
11 | },
12 | {
13 | "code": "java.class.nonPublicPartOfAPI",
14 | "oldArchive": "com.fasterxml.jackson.core:*:jar"
15 | }
16 | ]
17 | }
18 | }
19 | ]
20 |
--------------------------------------------------------------------------------
/serde/src/main/java/io/zeebe/exporters/kafka/serde/JacksonDeserializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.databind.ObjectReader;
19 | import java.io.IOException;
20 | import java.util.Map;
21 | import org.apache.kafka.common.errors.SerializationException;
22 | import org.apache.kafka.common.serialization.Deserializer;
23 |
24 | /**
25 | * Deserializer implementation which reads an object from a pre-configured {@link ObjectReader}.
26 | *
27 |  * @param <T> the concrete type to deserialize
28 | */
29 | public abstract class JacksonDeserializer<T> implements Deserializer<T> {
30 | protected final ObjectReader reader;
31 |
32 | protected JacksonDeserializer(final ObjectReader reader) {
33 | this.reader = reader;
34 | }
35 |
36 | @Override
37 | public void configure(final Map<String, ?> configs, final boolean isKey) {}
38 |
39 | @Override
40 | public T deserialize(final String topic, final byte[] data) {
41 | try {
42 | return reader.readValue(data);
43 | } catch (final IOException e) {
44 | throw new SerializationException(
45 | String.format("Expected to deserialize data from topic [%s], but failed", topic), e);
46 | }
47 | }
48 | }
49 |
--------------------------------------------------------------------------------
/serde/src/main/java/io/zeebe/exporters/kafka/serde/JacksonSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.core.JsonProcessingException;
19 | import com.fasterxml.jackson.databind.ObjectWriter;
20 | import java.util.Map;
21 | import org.apache.kafka.common.errors.SerializationException;
22 | import org.apache.kafka.common.serialization.Serializer;
23 |
24 | /**
25 |  * Serializer implementation which writes an object using a pre-configured {@link ObjectWriter}.
26 |  *
27 |  * @param <T> the concrete type to serialize
28 | */
29 | public abstract class JacksonSerializer<T> implements Serializer<T> {
30 | protected final ObjectWriter writer;
31 |
32 | protected JacksonSerializer(final ObjectWriter writer) {
33 | this.writer = writer;
34 | }
35 |
36 | @Override
37 | public void configure(final Map<String, ?> configs, final boolean isKey) {}
38 |
39 | @Override
40 | public byte[] serialize(final String topic, final T data) {
41 | try {
42 | return writer.writeValueAsBytes(data);
43 | } catch (final JsonProcessingException e) {
44 | throw new SerializationException(
45 | String.format("Expected to serialize data for topic [%s], but failed", topic), e);
46 | }
47 | }
48 |
49 | @Override
50 | public void close() {}
51 | }
52 |
--------------------------------------------------------------------------------
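Since both base classes only need a pre-configured reader or writer, a serde for a new type is a couple of lines each. A sketch for a hypothetical Point payload:

import com.fasterxml.jackson.databind.ObjectMapper;

final class PointSerde {
  public static final class Point {
    public int x;
    public int y;
  }

  public static final class PointSerializer extends JacksonSerializer<Point> {
    public PointSerializer() {
      super(new ObjectMapper().writerFor(Point.class));
    }
  }

  public static final class PointDeserializer extends JacksonDeserializer<Point> {
    public PointDeserializer() {
      super(new ObjectMapper().readerFor(Point.class));
    }
  }
}

A round trip is then new PointDeserializer().deserialize(topic, new PointSerializer().serialize(topic, point)).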
/serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordDeserializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.core.type.TypeReference;
19 | import com.fasterxml.jackson.databind.ObjectMapper;
20 | import com.fasterxml.jackson.databind.ObjectReader;
21 | import io.camunda.zeebe.protocol.jackson.record.AbstractRecord;
22 | import io.camunda.zeebe.protocol.record.Record;
23 | import org.apache.kafka.common.serialization.Deserializer;
24 |
25 | /**
26 | * A {@link Deserializer} implementations for {@link Record} objects, which uses a pre-configured *
27 | * {@link ObjectReader} for that type, and {@link
28 | * io.camunda.zeebe.protocol.jackson.record.AbstractRecord} as the concrete {@link Record}
29 | * implementation.
30 | */
31 | public final class RecordDeserializer extends JacksonDeserializer<Record<?>> {
32 |
33 | public RecordDeserializer() {
34 | this(new ObjectMapper());
35 | }
36 |
37 | public RecordDeserializer(final ObjectMapper objectMapper) {
38 | this(objectMapper.readerFor(new TypeReference<AbstractRecord<?>>() {}));
39 | }
40 |
41 | public RecordDeserializer(final ObjectReader objectReader) {
42 | super(objectReader);
43 | }
44 | }
45 |
--------------------------------------------------------------------------------
/serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordId.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.annotation.JsonCreator;
19 | import com.fasterxml.jackson.annotation.JsonGetter;
20 | import com.fasterxml.jackson.annotation.JsonProperty;
21 | import io.camunda.zeebe.protocol.record.Record;
22 | import java.util.Objects;
23 |
24 | /**
25 | * {@link RecordId} represents a unique identifier for a given Zeebe {@link
26 |  * io.camunda.zeebe.protocol.record.Record}. On a single partition (identified via {@link
27 |  * Record#getPartitionId()}), every record has a unique position (identified via {@link
28 |  * Record#getPosition()}).
29 | */
30 | public final class RecordId {
31 | @JsonProperty("partitionId")
32 | private final int partitionId;
33 |
34 | @JsonProperty("position")
35 | private final long position;
36 |
37 | @JsonCreator
38 | public RecordId(
39 | final @JsonProperty("partitionId") int partitionId,
40 | final @JsonProperty("position") long position) {
41 | this.partitionId = partitionId;
42 | this.position = position;
43 | }
44 |
45 | @JsonGetter
46 | public int getPartitionId() {
47 | return partitionId;
48 | }
49 |
50 | @JsonGetter
51 | public long getPosition() {
52 | return position;
53 | }
54 |
55 | @Override
56 | public int hashCode() {
57 | return Objects.hash(getPartitionId(), getPosition());
58 | }
59 |
60 | @Override
61 | public boolean equals(final Object o) {
62 | if (this == o) {
63 | return true;
64 | }
65 | if (o == null || getClass() != o.getClass()) {
66 | return false;
67 | }
68 | final RecordId recordId = (RecordId) o;
69 | return getPartitionId() == recordId.getPartitionId() && getPosition() == recordId.getPosition();
70 | }
71 |
72 | @Override
73 | public String toString() {
74 | return "RecordId{" + "partitionId=" + partitionId + ", position=" + position + '}';
75 | }
76 | }
77 |
--------------------------------------------------------------------------------
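Because RecordId carries the Zeebe partition explicitly, a producer can route on it. A sketch of a custom Kafka Partitioner keyed on getPartitionId() - illustrative only, and not necessarily the partitioning strategy the exporter itself applies:

import java.util.Map;
import org.apache.kafka.clients.producer.Partitioner;
import org.apache.kafka.common.Cluster;

public final class RecordIdPartitioner implements Partitioner {
  @Override
  public int partition(
      final String topic,
      final Object key,
      final byte[] keyBytes,
      final Object value,
      final byte[] valueBytes,
      final Cluster cluster) {
    // pin every Zeebe partition to a stable Kafka partition to preserve per-partition ordering
    final int numPartitions = cluster.partitionsForTopic(topic).size();
    return ((RecordId) key).getPartitionId() % numPartitions;
  }

  @Override
  public void close() {}

  @Override
  public void configure(final Map<String, ?> configs) {}
}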
/serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordIdDeserializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.databind.ObjectMapper;
19 | import com.fasterxml.jackson.databind.ObjectReader;
20 | import org.apache.kafka.common.serialization.Deserializer;
21 |
22 | /**
23 |  * A {@link Deserializer} implementation for {@link RecordId} objects, which uses a
24 |  * pre-configured {@link ObjectReader} for that type.
25 | */
26 | public final class RecordIdDeserializer extends JacksonDeserializer {
27 | public RecordIdDeserializer() {
28 | this(new ObjectMapper());
29 | }
30 |
31 | public RecordIdDeserializer(final ObjectMapper objectMapper) {
32 | this(objectMapper.readerFor(RecordId.class));
33 | }
34 |
35 | public RecordIdDeserializer(final ObjectReader objectReader) {
36 | super(objectReader);
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordIdSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.databind.ObjectMapper;
19 | import com.fasterxml.jackson.databind.ObjectWriter;
20 | import org.apache.kafka.common.serialization.Serializer;
21 |
22 | /**
23 |  * A {@link Serializer} implementation for {@link RecordId} objects which uses a pre-configured
24 |  * {@link ObjectWriter} for that type.
25 | */
26 | public final class RecordIdSerializer extends JacksonSerializer {
27 |
28 | public RecordIdSerializer() {
29 | this(new ObjectMapper());
30 | }
31 |
32 | protected RecordIdSerializer(final ObjectMapper objectMapper) {
33 | this(objectMapper.writerFor(RecordId.class));
34 | }
35 |
36 | protected RecordIdSerializer(final ObjectWriter writer) {
37 | super(writer);
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/serde/src/main/java/io/zeebe/exporters/kafka/serde/RecordSerializer.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import com.fasterxml.jackson.core.type.TypeReference;
19 | import com.fasterxml.jackson.databind.ObjectMapper;
20 | import com.fasterxml.jackson.databind.ObjectWriter;
21 | import io.camunda.zeebe.protocol.jackson.record.AbstractRecord;
22 | import io.camunda.zeebe.protocol.record.Record;
23 | import org.apache.kafka.common.serialization.Serializer;
24 |
25 | /**
26 |  * A {@link Serializer} implementation for {@link Record} objects which uses a pre-configured
27 |  * {@link ObjectWriter} for that type.
28 |  *
29 |  * <p>NOTE: this serializer is not used by the exporter itself. The exporter uses a custom
30 |  * serializer which piggybacks on Zeebe's built-in {@link Record#toJson()} method, which does not
31 |  * allow customization of the underlying {@link ObjectWriter}. It's provided here for testing
32 |  * purposes, and potentially for users who would like to produce records to the same topics but
33 |  * separately.
34 | */
35 | public final class RecordSerializer extends JacksonSerializer<Record<?>> {
36 | public RecordSerializer() {
37 | this(new ObjectMapper());
38 | }
39 |
40 | protected RecordSerializer(final ObjectMapper objectMapper) {
41 | this(objectMapper.writerFor(new TypeReference<Record<?>>() {}));
42 | }
43 |
44 | protected RecordSerializer(final ObjectWriter writer) {
45 | super(writer);
46 | }
47 | }
48 |
--------------------------------------------------------------------------------
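As the note says, this serializer suits standalone producers that want to write records to the same topics as the exporter. A minimal sketch, with the broker address and topic as assumptions:

import io.camunda.zeebe.protocol.record.Record;
import java.util.Map;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;

final class StandaloneProducerSketch {
  static void send(final Record<?> record) {
    final Map<String, Object> config =
        Map.of(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
    try (final KafkaProducer<RecordId, Record<?>> producer =
        new KafkaProducer<>(config, new RecordIdSerializer(), new RecordSerializer())) {
      // key on (partitionId, position), matching the RecordId key the exporter produces
      producer.send(
          new ProducerRecord<>(
              "zeebe", new RecordId(record.getPartitionId(), record.getPosition()), record));
    }
  }
}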
/serde/src/test/java/io/zeebe/exporters/kafka/serde/RecordIdTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import com.fasterxml.jackson.databind.ObjectMapper;
21 | import com.fasterxml.jackson.dataformat.cbor.databind.CBORMapper;
22 | import org.junit.jupiter.api.Test;
23 | import org.junit.jupiter.api.parallel.Execution;
24 | import org.junit.jupiter.api.parallel.ExecutionMode;
25 |
26 | @Execution(ExecutionMode.CONCURRENT)
27 | final class RecordIdTest {
28 | private static final String TOPIC = "zeebe";
29 |
30 | @Test
31 | void shouldSerialize() {
32 | // given
33 | final RecordId id = new RecordId(1, 1);
34 | final RecordIdSerializer serializer = new RecordIdSerializer();
35 | final RecordIdDeserializer deserializer = new RecordIdDeserializer();
36 |
37 | // when
38 | final byte[] serialized = serializer.serialize(TOPIC, id);
39 | final RecordId deserialized = deserializer.deserialize(TOPIC, serialized);
40 |
41 | // then
42 | assertThat(deserialized).as("the deserialized ID is the same as the original").isEqualTo(id);
43 | }
44 |
45 | @Test
46 | void shouldSerializeOtherFormat() {
47 | // given
48 | final ObjectMapper cborMapper = new CBORMapper();
49 | final RecordId id = new RecordId(1, 1);
50 | final RecordIdSerializer serializer = new RecordIdSerializer(cborMapper);
51 | final RecordIdDeserializer deserializer = new RecordIdDeserializer(cborMapper);
52 |
53 | // when
54 | final byte[] serialized = serializer.serialize(TOPIC, id);
55 | final RecordId deserialized = deserializer.deserialize(TOPIC, serialized);
56 |
57 | // then
58 | assertThat(deserialized).as("the deserialized ID is the same as the original").isEqualTo(id);
59 | }
60 | }
61 |
--------------------------------------------------------------------------------
/serde/src/test/java/io/zeebe/exporters/kafka/serde/RecordTest.java:
--------------------------------------------------------------------------------
1 | /*
2 | * Copyright © 2019 camunda services GmbH (info@camunda.com)
3 | *
4 | * Licensed under the Apache License, Version 2.0 (the "License");
5 | * you may not use this file except in compliance with the License.
6 | * You may obtain a copy of the License at
7 | *
8 | * http://www.apache.org/licenses/LICENSE-2.0
9 | *
10 | * Unless required by applicable law or agreed to in writing, software
11 | * distributed under the License is distributed on an "AS IS" BASIS,
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and
14 | * limitations under the License.
15 | */
16 | package io.zeebe.exporters.kafka.serde;
17 |
18 | import static org.assertj.core.api.Assertions.assertThat;
19 |
20 | import com.fasterxml.jackson.databind.ObjectMapper;
21 | import com.fasterxml.jackson.dataformat.cbor.databind.CBORMapper;
22 | import io.camunda.zeebe.protocol.jackson.record.DeploymentRecordValueBuilder;
23 | import io.camunda.zeebe.protocol.jackson.record.RecordBuilder;
24 | import io.camunda.zeebe.protocol.record.Record;
25 | import io.camunda.zeebe.protocol.record.RecordType;
26 | import io.camunda.zeebe.protocol.record.ValueType;
27 | import io.camunda.zeebe.protocol.record.intent.DeploymentIntent;
28 | import io.camunda.zeebe.protocol.record.value.DeploymentRecordValue;
29 | import org.junit.jupiter.api.Test;
30 | import org.junit.jupiter.api.parallel.Execution;
31 | import org.junit.jupiter.api.parallel.ExecutionMode;
32 |
33 | @Execution(ExecutionMode.CONCURRENT)
34 | final class RecordTest {
35 | private static final String TOPIC = "zeebe";
36 |
37 | @Test
38 | void shouldSerialize() {
39 | // given
40 | final Record<DeploymentRecordValue> record =
41 | new RecordBuilder<DeploymentRecordValue>()
42 | .intent(DeploymentIntent.CREATED)
43 | .recordType(RecordType.EVENT)
44 | .valueType(ValueType.DEPLOYMENT)
45 | .value(new DeploymentRecordValueBuilder().build())
46 | .build();
47 | final RecordSerializer serializer = new RecordSerializer();
48 | final RecordDeserializer deserializer = new RecordDeserializer();
49 |
50 | // when
51 | final byte[] serialized = serializer.serialize(TOPIC, record);
52 | final Record<?> deserialized = deserializer.deserialize(TOPIC, serialized);
53 |
54 | // then
55 | assertThat(deserialized)
56 | .as("the deserialized record is the same as the original")
57 | .isEqualTo(record);
58 | }
59 |
60 | @Test
61 | void shouldSerializeOtherFormat() {
62 | // given
63 | final ObjectMapper cborMapper = new CBORMapper();
64 | final Record<DeploymentRecordValue> record =
65 | new RecordBuilder<DeploymentRecordValue>()
66 | .intent(DeploymentIntent.CREATED)
67 | .recordType(RecordType.EVENT)
68 | .valueType(ValueType.DEPLOYMENT)
69 | .value(new DeploymentRecordValueBuilder().build())
70 | .build();
71 | final RecordSerializer serializer = new RecordSerializer(cborMapper);
72 | final RecordDeserializer deserializer = new RecordDeserializer(cborMapper);
73 |
74 | // when
75 | final byte[] serialized = serializer.serialize(TOPIC, record);
76 | final Record<?> deserialized = deserializer.deserialize(TOPIC, serialized);
77 |
78 | // then
79 | assertThat(deserialized)
80 | .as("the deserialized record is the same as the original")
81 | .isEqualTo(record);
82 | }
83 | }
84 |
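On the consuming side, the deserializers round-tripped by these tests can be wired into a plain Kafka consumer. A minimal sketch, assuming a local broker, the topic `zeebe`, and a hypothetical group ID (none of these values come from the repository):

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;

    final Properties props = new Properties();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // assumed broker
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "zeebe-records"); // hypothetical group ID
    try (final Consumer<RecordId, Record<?>> consumer =
        new KafkaConsumer<>(props, new RecordIdDeserializer(), new RecordDeserializer())) {
      consumer.subscribe(Collections.singleton("zeebe"));
      // each polled value is a fully deserialized Zeebe Record, keyed by
      // partition ID and position via RecordId
      final ConsumerRecords<RecordId, Record<?>> records = consumer.poll(Duration.ofSeconds(1));
      records.forEach(consumed -> System.out.println(consumed.value()));
    }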
--------------------------------------------------------------------------------