├── LICENSE.md
├── META-INF
│   ├── application.xml
│   └── weblogic-application.xml
├── README.md
├── build.properties
├── build.xml
├── images
│   ├── kafka-transport-inbound.png
│   └── kafka-transport-outbound.png
├── install
│   └── install.py
├── l10n
│   └── oracle
│       └── ateam
│           └── sb
│               └── transports
│                   └── kafka
│                       └── KafkaUIBinding.properties
├── lib
│   └── appmarker.jar
├── offline
│   └── transport-kafka.xml
├── resources
│   └── kafka-config.xml
├── schemas
│   └── kafka-transport.xsd
└── src
    └── oracle
        └── ateam
            └── sb
                └── transports
                    └── kafka
                        ├── KafkaApplicationListener.java
                        ├── KafkaConstants.java
                        ├── KafkaEndpoint.java
                        ├── KafkaInboundMessageContext.java
                        ├── KafkaOutboundMessageContext.java
                        ├── KafkaRequestHeaders.java
                        ├── KafkaRequestMetadata.java
                        ├── KafkaResponseHeaders.java
                        ├── KafkaResponseMetadata.java
                        ├── KafkaTransportLogger.java
                        ├── KafkaTransportProvider.java
                        ├── KafkaTransportProviderFactory.java
                        ├── KafkaUIBinding.java
                        └── KafkaUtil.java
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | You may not use the identified files except in compliance with the
4 | Universal Permissive License (UPL), Version 1.0 (the "License.")
5 |
6 | You may obtain a copy of the License at
7 | https://opensource.org/licenses/UPL. A copy of the license is
8 | also reproduced below.
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13 | implied.
14 |
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 |
19 | ```
20 | Copyright (c) 2014, 2016 Oracle and/or its affiliates
21 | The Universal Permissive License (UPL), Version 1.0
22 |
23 | Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this software, associated documentation and/or data (collectively the "Software"), free of charge and under any and all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or (ii) the Larger Works (as defined below), to deal in both
24 |
25 | (a) the Software, and
26 |
27 | (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software (each a “Larger Work” to which the Software is contributed by such licensors),
28 |
29 | without restriction, including without limitation the rights to copy, create derivative works of, display, perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
30 |
31 | This license is subject to the following condition:
32 |
33 | The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must be included in all copies or substantial portions of the Software.
34 |
35 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36 |
37 | ```
38 |
--------------------------------------------------------------------------------
/META-INF/application.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <application xmlns="http://java.sun.com/xml/ns/javaee"
3 |              xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
4 |              xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
5 |                                  http://java.sun.com/xml/ns/javaee/application_5.xsd"
6 |              version="5">
7 | 
8 |   <display-name>Service Bus Kafka Transport Provider</display-name>
9 |   <description>Service Bus Kafka Transport Provider</description>
10 | 
11 |   <module>
12 |     <java>appmarker.jar</java>
13 |   </module>
14 | 
15 | 
16 | </application>
--------------------------------------------------------------------------------
/META-INF/weblogic-application.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0" encoding="UTF-8"?>
2 | <weblogic-application
3 |     xmlns="http://xmlns.oracle.com/weblogic/weblogic-application"
4 |     xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
5 |     xsi:schemaLocation="http://xmlns.oracle.com/weblogic/weblogic-application
6 |     http://xmlns.oracle.com/weblogic/weblogic-application/1.4/weblogic-application.xsd">
7 | 
8 | 
9 | 
10 |   <application-param>
11 |     <param-name>webapp.encoding.default</param-name>
12 |     <param-value>UTF-8</param-value>
13 |   </application-param>
14 | 
15 |   <listener>
16 |     <listener-class>oracle.ateam.sb.transports.kafka.KafkaApplicationListener</listener-class>
17 |   </listener>
18 | 
19 | 
20 | </weblogic-application>
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Oracle Service Bus Transport for Apache Kafka
2 |
3 | ## Introduction
4 | This sample provides a native transport for [OSB](http://www.oracle.com/technetwork/middleware/service-bus/overview/index-096326.html) (Oracle Service Bus) that allows connectivity with [Apache Kafka](https://kafka.apache.org/). By using native APIs, the transport allows resilient and high-speed access to Apache Kafka clusters. Integration developers can benefit from this transport in the implementation of use cases that require integration to/from Apache Kafka with applications (SaaS and on-premises) supported by OSB, as well as technologies such as JMS, HTTP, MSMQ, Coherence, Tuxedo, FTP, etc.
5 |
6 | #### Using the Kafka Transport for Inbound Processing (From Kafka to the World)
7 |
8 | ![Kafka Transport Inbound](./images/kafka-transport-inbound.png)
9 |
10 | #### Using the Kafka Transport for Outbound Processing (From the World to Kafka)
11 |
12 | ![Kafka Transport Outbound](./images/kafka-transport-outbound.png)
13 |
14 | This is an Open-Source project maintained by Oracle.
15 |
16 | ## Features and Benefits
17 | The OSB Transport for Apache Kafka provides inbound and outbound connectivity with Apache Kafka. But this is definitely an oversimplification of what this transport can really do. The list below summarizes the most important features found in this implementation.
18 |
19 | * Supports multiple Apache Kafka versions such as 0.9.X, 0.10.X and above.
20 | * It works with the enterprise version of Kafka (Confluent Platform) as well.
21 | * Designed to work with 12c versions of OSB. Compatible with 12.1.3 and 12.2.1.
22 | * Can be used both On-Premise and in Cloud deployments, via SOA Cloud Service.
23 | * Supports inbound (Proxy Service) and outbound (Business Service) use cases.
24 | * Allows both text/binary payload types to flow through Service Bus pipelines.
25 | * Allows inbound processing to be spread out over multiple concurrent threads.
26 | * Deeper integration with WebLogic lifecycle. It smartly starts the endpoints.
27 | * Allows sync/async commits when the option 'enable.auto.commit' is disabled.
28 | * Allows association with native WebLogic Work Managers for maximum work control.
29 | * Allows message level partitioning using Transport Headers for outbound scenarios.
30 | * Allows fine-tuning of delivery semantics by supporting multiple ack modes.
31 | * Provides native response headers during outbound scenarios for better control.
32 | * Allows the implementation of native Kafka properties using custom properties.
33 | * Allows the development of OSB projects using both the Console and JDeveloper.
34 | * Provides JVM properties that control certain behaviors and allow debug logging.
35 |
36 | ## Getting Started
37 | The very first thing you need to do to start playing with the transport is to build it from the sources. The build process of this transport has been completely based on the best practices described in the [product documentation section](https://docs.oracle.com/middleware/1221/osb/develop/GUID-F3574BDE-F053-4015-ACC2-4CE2473B39EA.htm#OSBDV1292) about custom transports development. Therefore, if you are familiar with the build process for custom transports, you should be OK following the steps below.
38 |
39 | In a nutshell, the build process is based on Ant. The [build.xml](./build.xml) script provided encapsulates all the necessary steps to generate the implementation files (kafka-transport.ear and kafka-transport.jar) needed to deploy the transport into your Service Bus domain. But in order to work, the script relies on information from the environment. Specifically, it needs to know where to find the Fusion Middleware JAR files necessary for code compilation. Thus, you will need to build the implementation files on a machine that has Oracle Service Bus installed.
40 |
41 | The quickest way to load all the Fusion Middleware information into the environment is to source the setDomainEnv.sh script from your domain:
42 |
43 | ```
44 | source $FMW_HOME/user_projects/domains/<domain-name>/bin/setDomainEnv.sh
45 | ```
46 |
47 | Next, you will need to specify in the [build.properties](./build.properties) file the location of the Kafka Clients API JAR file:
48 |
49 | ```
50 | ### Apache Kafka Clients API
51 | kafka.clients.api=/opt/kafka_2.11-0.10.1.0/libs/kafka-clients-0.10.1.0.jar
52 | ```
53 |
54 | Now you can simply execute the script by typing 'ant' in the command line. Once the build finishes, the implementation files will be generated under the newly created 'build' folder. The implementation files are also proactively copied into your Fusion Middleware installation.
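
For example, from the directory where you cloned the transport (shown here as 'osb-kafka-transport'; adjust to your checkout location), in the same shell where setDomainEnv.sh was sourced:

```
cd osb-kafka-transport
ant
```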
55 |
56 | The last step is the deployment of the implementation files into your Service Bus domain. To make things easier, the [install.py](./install/install.py) script encapsulates the details of connecting to the WebLogic domain, performing the deployment, and committing the changes. Therefore, change into the 'install' folder and type:
57 |
58 | ```
59 | java weblogic.WLST install.py
60 | ```
61 |
62 | The script will ask for information about the location of the implementation files and the connection details of the WebLogic domain. Run it from a shell where setDomainEnv.sh has been sourced, so that the WLST classes are on the classpath.
63 |
64 | For a deeper introduction to the Kafka transport, please read the series of [two blog posts](http://www.ateam-oracle.com/osb-transport-for-apache-kafka-part-1/) published on the Oracle A-Team Chronicles website. They provide details about how to use the transport and how to configure it to implement more complex scenarios.
65 |
66 | ## License
67 | Copyright (c) 2014, 2016 Oracle and/or its affiliates
68 | The Universal Permissive License (UPL), Version 1.0
69 |
--------------------------------------------------------------------------------
/build.properties:
--------------------------------------------------------------------------------
1 | # What we get from the env.
2 | root.dir=${basedir}
3 |
4 | # Compiler Options
5 | optimize=off
6 | debug=on
7 | deprecation=off
8 | build.compiler=modern
9 | javac.lint.flag=
10 |
11 | # WebLogic Information
12 | #mw.home=${root.dir}/bea
13 | #wl.home=${mw.home}/wlserver
14 | mw.home=${env.MW_HOME}
15 | wl.home=${env.WL_HOME}
16 | osb.install.dir=${env.ALSB_HOME}
17 |
18 | ### Apache Kafka Clients API
19 | kafka.clients.api=/opt/kafka_2.11-0.10.1.0/libs/kafka-clients-0.10.1.0.jar
20 |
21 | ### XMLBeans Module Version
22 | version.com.bea.core.xml.xmlbeans=1.0.0.0_2-6-0
23 |
--------------------------------------------------------------------------------
/build.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/images/kafka-transport-inbound.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/osb-kafka-transport/1e3a1382fa06bc2aae66a84f402e8ef8d11018af/images/kafka-transport-inbound.png
--------------------------------------------------------------------------------
/images/kafka-transport-outbound.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/osb-kafka-transport/1e3a1382fa06bc2aae66a84f402e8ef8d11018af/images/kafka-transport-outbound.png
--------------------------------------------------------------------------------
/install/install.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | libraryName = 'transport-kafka'
4 | libraryFile = 'kafka-transport.jar'
5 | appName = 'Service Bus Kafka Transport Provider'
6 | appFile = 'kafka-transport.ear'
7 |
8 | print '***** Service Bus Kafka Transport Install *****'
9 | print ''
10 |
11 | ############################## Connect and Undeploy ###############################
12 |
13 | connect()
14 |
15 | appDeployment = cmo.lookupAppDeployment(appName)
16 |
17 | if (appDeployment != None):
18 |
19 | stopApplication(appName)
20 | undeploy(appName)
21 |
22 | library = cmo.lookupLibrary(libraryName)
23 |
24 | if (library != None):
25 |
26 | undeploy(libraryName)
27 |
28 | ########################## Retrieve Targets Information ###########################
29 |
30 | targets = cmo.getAdminServerName()
31 | clusters = cmo.getClusters()
32 |
33 | for cluster in clusters:
34 |
35 | targets = targets + ',' + cluster.getName()
36 |
37 | servers = cmo.getServers()
38 |
39 | for server in servers:
40 |
41 | if (server.getCluster() == None): # only target servers that are not already covered by a cluster
42 |
43 | targets = targets + ',' + server.getName()
44 |
45 | ######################### Install and Start the Transport #########################
46 |
47 | if (not os.path.exists(libraryFile)):
48 |
49 | tmp = raw_input('Enter the location of the "kafka-transport.jar" file: ')
50 |
51 | if (tmp.endswith(libraryFile)):
52 |
53 | libraryFile = tmp
54 |
55 | else:
56 |
57 | libraryFile = os.path.join(tmp, libraryFile)
58 |
59 | if (not os.path.exists(appFile)):
60 |
61 | tmp = raw_input('Enter the location of the "kafka-transport.ear" file: ')
62 |
63 | if (tmp.endswith(appFile)):
64 |
65 | appFile = tmp
66 |
67 | else:
68 |
69 | appFile = os.path.join(tmp, appFile)
70 |
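# Deploy the transport JAR as a shared library first, then deploy the EAR that references it.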
71 | deploy(libraryName, libraryFile, targets=targets, libraryModule='true')
72 | deploy(appName, appFile, targets=targets)
73 |
74 | ################################## Disconnect ####################################
75 |
76 | disconnect()
77 |
--------------------------------------------------------------------------------
/l10n/oracle/ateam/sb/transports/kafka/KafkaUIBinding.properties:
--------------------------------------------------------------------------------
1 | # General Strings for the UI
2 | PROXY_URI_FORMAT=host1:port,host2:port
3 | BUSINESS_URI_FORMAT=host:port
4 | TOPIC_NAME_LABEL=Topic Name
5 | TOPIC_NAME_DESC=The name of the topic(s).
6 | CUSTOM_PROPS_LABEL=Custom Properties
7 | CUSTOM_PROPS_LABEL_CONSUMER=Consumer Configs
8 | CUSTOM_PROPS_LABEL_PRODUCER=Producer Configs
9 | CUSTOM_PROPS_DESC=Comma-separated list of properties in the key=value format. Use this field to specify any property that does not have an equivalent in this user interface.
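# Example of the expected custom-properties format (illustrative values): security.protocol=SSL,max.poll.records=100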
10 |
11 | # UI Fields (Inbound)
12 | CONSUMER_THREADS_LABEL=Consumer Threads
13 | CONSUMER_THREADS_DESC=The number of concurrent threads used to fetch messages from the topic(s). Each thread will create an internal consumer that binds to an available partition.
14 | GROUP_ID_LABEL=Group Identifier
15 | GROUP_ID_DESC=A unique string that identifies the consumer group this consumer belongs to. Using the same group identifier for multiple processes creates a cluster of consumers that work in a load-balancing fashion. This is the UI equivalent of the 'group.id' property.
16 | PARTITION_ASSIGNMENT_STRATEGY_LABEL=Partition Assignment Strategy
17 | PARTITION_ASSIGNMENT_STRATEGY_DESC=Select either the 'range' or the 'roundrobin' strategy for assigning partitions to consumer streams.
18 | SOCKET_TIMEOUT_MS_LABEL=Socket Timeout
19 | SOCKET_TIMEOUT_MS_DESC=The socket timeout, in milliseconds, for network requests.
20 | SOCKET_RECEIVE_BUFFER_BYTES_LABEL=Socket Receive Buffer Bytes
21 | SOCKET_RECEIVE_BUFFER_BYTES_DESC=The socket receive buffer, in bytes, for network requests.
22 | FETCH_MESSAGE_MAX_BYTES_LABEL=Maximum Fetch Message Bytes
23 | FETCH_MESSAGE_MAX_BYTES_DESC=The number of bytes of messages to attempt to fetch for each topic-partition in each fetch request. These bytes will be read into memory for each partition, so this helps control the memory used by the consumer. The fetch request size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch.
24 | NUM_CONSUMER_FETCHERS_LABEL=Number of Consumer Fetchers
25 | NUM_CONSUMER_FETCHERS_DESC=The number of fetcher threads used to fetch data.
26 | AUTO_COMMIT_ENABLE_LABEL=Auto Commit Enable
27 | AUTO_COMMIT_ENABLE_DESC=If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin.
28 | AUTO_COMMIT_INTERVAL_MS_LABEL=Auto Commit Interval
29 | AUTO_COMMIT_INTERVAL_MS_DESC=The frequency in milliseconds that the consumer offsets are committed to zookeeper.
30 | QUEUED_MAX_MESSAGE_CHUNKS_LABEL=Maximum Queued Message Chunks
31 | QUEUED_MAX_MESSAGE_CHUNKS_DESC=Max number of message chunks buffered for consumption.
32 | REBALANCE_MAX_RETRIES_LABEL=Maximum Rebalance Retries
33 | REBALANCE_MAX_RETRIES_DESC=When a new consumer joins a consumer group the set of consumers attempt to 'rebalance' the load to assign partitions to each consumer. If the set of consumers changes while this assignment is taking place the rebalance will fail and retry. This setting controls the maximum number of attempts before giving up.
34 | FETCH_MIN_BYTES_LABEL=Minimum Fetch Bytes
35 | FETCH_MIN_BYTES_DESC=The minimum amount of data, in bytes, the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.
36 | FETCH_WAIT_MAX_MS_LABEL=Maximum Fetch Wait
37 | FETCH_WAIT_MAX_MS_DESC=The maximum amount of time, in milliseconds, the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy minimum fetch bytes.
38 | REBALANCE_BACKOFF_MS_LABEL=Rebalance Backoff
39 | REBALANCE_BACKOFF_MS_DESC=Backoff time, in milliseconds, between retries during rebalance.
40 | REFRESH_LEADER_BACKOFF_MS_LABEL=Refresh Leader Backoff
41 | REFRESH_LEADER_BACKOFF_MS_DESC=Backoff time, in milliseconds, to wait before trying to determine the leader of a partition that has just lost its leader.
42 | AUTO_OFFSET_RESET_LABEL=Auto Offset Reset
43 | AUTO_OFFSET_RESET_DESC=What to do when there is no initial offset in ZooKeeper or if an offset is out of range.
44 | CONSUMER_TIMEOUT_MS_LABEL=Consumer Timeout
45 | CONSUMER_TIMEOUT_MS_DESC=Throw a timeout exception to the consumer if no message is available for consumption after the specified interval in milliseconds.
46 | EXCLUDE_INTERNAL_TOPICS_LABEL=Exclude Internal Topics
47 | EXCLUDE_INTERNAL_TOPICS_DESC=Whether messages from internal topics (such as offsets) should be exposed to the consumer.
48 | ZOOKEEPER_SESSION_TIMEOUT_MS_LABEL=Zookeeper Session Timeout
49 | ZOOKEEPER_SESSION_TIMEOUT_MS_DESC=ZooKeeper session timeout in milliseconds. If the consumer fails to heartbeat to ZooKeeper for this period of time it is considered dead and a rebalance will occur.
50 | ZOOKEEPER_CONNECTION_TIMEOUT_MS_LABEL=Zookeeper Connection Timeout
51 | ZOOKEEPER_CONNECTION_TIMEOUT_MS_DESC=The max time, in milliseconds, that the client waits while establishing a connection to Zookeeper.
52 | ZOOKEEPER_SYNC_TIME_MS_LABEL=Zookeeper Sync Time
53 | ZOOKEEPER_SYNC_TIME_MS_DESC=How far, in milliseconds, a ZK follower can be behind a ZK leader.
54 | OFFSETS_STORAGE_LABEL=Offsets Storage
55 | OFFSETS_STORAGE_DESC=Select where offsets should be stored: in ZooKeeper or in Kafka.
56 | OFFSETS_CHANNEL_BACKOFF_MS_LABEL=Offsets Channel Backoff
57 | OFFSETS_CHANNEL_BACKOFF_MS_DESC=The backoff period, in milliseconds, when reconnecting the offsets channel or retrying failed offset fetch/commit requests.
58 | OFFSETS_CHANNEL_SOCKET_TIMEOUT_MS_LABEL=Offsets Channel Socket Timeout
59 | OFFSETS_CHANNEL_SOCKET_TIMEOUT_MS_DESC=Socket timeout, in milliseconds, when reading responses for offset fetch/commit requests. This timeout is also used for ConsumerMetadata requests that are used to query for the offset manager.
60 | OFFSETS_COMMIT_MAX_RETRIES_LABEL=Maximum Offset Commit Retries
61 | OFFSETS_COMMIT_MAX_RETRIES_DESC=Retry the offset commit up to this many times on failure. This retry count only applies to offset commits during shut-down. It does not apply to commits originating from the auto-commit thread. It also does not apply to attempts to query for the offset coordinator before committing offsets. i.e., if a consumer metadata request fails for any reason, it will be retried and that retry does not count toward this limit.
62 | DUAL_COMMIT_ENABLED_LABEL=Dual Commit Enabled
63 | DUAL_COMMIT_ENABLED_DESC=If you are using 'kafka' as offsets storage, you can dual commit offsets to ZooKeeper (in addition to Kafka). This is required during migration from Zookeeper-based offset storage to Kafka-based offset storage. With respect to any given consumer group, it is safe to turn this off after all instances within that group have been migrated to the new version that commits offsets to the broker (instead of directly to ZooKeeper).
64 |
65 | # UI Fields (Outbound)
66 | ACKS_LABEL=Acknowledge
67 | ACKS_DESC=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The 'Without Acknowledge' option specifies that the record will be immediately added to the socket buffer and considered sent. The 'Leader Acknowledge' option specifies that the leader will write the record to its local log but will respond without awaiting full acknowledgement from all followers. The 'ISRs Acknowledge' option specifies that the leader will wait for the full set of in-sync replicas to acknowledge the record. This is the UI equivalent of the 'acks' property.
68 | TIMEOUT_MS_LABEL=Request Timeout
69 | TIMEOUT_MS_DESC=This configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses, the client will resend the request if necessary, or fail the request if retries are exhausted. This is the UI equivalent of the 'request.timeout.ms' property.
70 | BUFFER_MEMORY_LABEL=Buffer Memory
71 | BUFFER_MEMORY_DESC=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block on buffer full.
72 | COMPRESSION_TYPE_LABEL=Compression Type
73 | COMPRESSION_TYPE_DESC=The compression type for all data generated by the producer. Valid values are none, gzip, or snappy. Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression).
74 | BATCH_SIZE_LABEL=Batch Size
75 | BATCH_SIZE_DESC=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes.
76 | LINGER_MS_LABEL=Linger
77 | LINGER_MS_DESC=The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load, when records arrive faster than they can be sent out. However, in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay. Set this value to zero for no delay.
78 | MAX_REQUEST_SIZE_LABEL=Maximum Request Size
79 | MAX_REQUEST_SIZE_DESC=The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
80 | RECEIVE_BUFFER_BYTES_LABEL=Receive Buffer Bytes
81 | RECEIVE_BUFFER_BYTES_DESC=The size of the TCP receive buffer to use when reading data.
82 | SEND_BUFFER_BYTES_LABEL=Send Buffer Bytes
83 | SEND_BUFFER_BYTES_DESC=The size of the TCP send buffer to use when sending data.
84 | BLOCK_ON_BUFFER_FULL_LABEL=Block On Buffer Full
85 | BLOCK_ON_BUFFER_FULL_DESC=When our memory buffer is exhausted we must either stop accepting new records (block) or throw errors. By default this setting is true and we block, however in some scenarios blocking is not desirable and it is better to immediately give an error. Setting this to false will accomplish that: the producer will throw a BufferExhaustedException if a record is sent and the buffer space is full.
86 | METADATA_FETCH_TIMEOUT_MS_LABEL=Metadata Fetch Timeout
87 | METADATA_FETCH_TIMEOUT_MS_DESC=The first time data is sent to a topic we must fetch metadata about that topic to know which servers host the topic's partitions. This configuration controls the maximum amount of time we will block waiting for the metadata fetch to succeed before throwing an exception back to the client.
88 | METADATA_MAX_AGE_MS_LABEL=Maximum Metadata Age
89 | METADATA_MAX_AGE_MS_DESC=The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.
90 | RECONNECT_BACKOFF_MS_LABEL=Reconnect Backoff
91 | RECONNECT_BACKOFF_MS_DESC=The amount of time to wait before attempting to reconnect to a given host when a connection fails. This avoids a scenario where the client repeatedly attempts to connect to a host in a tight loop.
92 | CLIENT_ID_LABEL=Client Identifier
93 | CLIENT_ID_DESC=The string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included with the request.
94 |
95 | # Validation Error Messages
96 | INVALID_LOAD_BALANCING=Load balancing is managed by Kafka. Therefore, the load balancing algorithm option must be set to 'None'.
97 | ENDPOINT_INFO_MISSING=The endpoint information about Kafka is missing.
98 | ENDPOINT_INCORRECT=One of the endpoints has an invalid URI format.
99 | NUMBER_THREADS_INVALID=The number of threads must be greater than or equal to one.
100 | TIMEOUT_INVALID=The request timeout must be greater than or equal to one.
101 |
--------------------------------------------------------------------------------
/lib/appmarker.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/osb-kafka-transport/1e3a1382fa06bc2aae66a84f402e8ef8d11018af/lib/appmarker.jar
--------------------------------------------------------------------------------
/offline/transport-kafka.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/resources/kafka-config.xml:
--------------------------------------------------------------------------------
(ProviderConfiguration for the Kafka transport; the XML markup was lost during extraction. Recoverable values: two boolean flags set to 'true', consistent with inbound and outbound direction support, and the supported message pattern 'ONE_WAY'.)
--------------------------------------------------------------------------------
/schemas/kafka-transport.xsd:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaApplicationListener.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.util.logging.Level;
33 |
34 | import com.bea.wli.sb.transports.TransportManager;
35 | import com.bea.wli.sb.transports.TransportManagerHelper;
36 |
37 | import weblogic.application.ApplicationException;
38 | import weblogic.application.ApplicationLifecycleEvent;
39 | import weblogic.application.ApplicationLifecycleListener;
40 |
41 | /**
42 | * @author Ricardo Ferreira
43 | */
44 | public class KafkaApplicationListener extends ApplicationLifecycleListener {
45 |
46 | private boolean isDependenciesMissing() {
47 |
48 | boolean missing = true;
49 |
50 | try {
51 |
52 | // Check for the Kafka Clients API...
53 | Class.forName("org.apache.kafka.clients.consumer.KafkaConsumer");
54 | Class.forName("org.apache.kafka.clients.producer.KafkaProducer");
55 |
56 | // Check for third-party dependencies...
57 | Class.forName("org.slf4j.ILoggerFactory");
58 |
59 | missing = false;
60 |
61 | } catch (ClassNotFoundException cnfe) { /* expected when the libraries are absent; 'missing' stays true */ }
62 |
63 | return missing;
64 |
65 | }
66 |
67 | public void preStart(ApplicationLifecycleEvent appLifecycleEvent)
68 | throws ApplicationException {
69 |
70 | TransportManager transportManager = null;
71 |
72 | try {
73 |
74 | if (isDependenciesMissing()) {
75 |
76 | KafkaTransportLogger.log(Level.WARNING, "Kafka transport " +
77 | "could not be registered due to missing libraries. " +
78 | "For using the Kafka transport, its libraries must " +
79 | "be available in the classpath.");
80 |
81 | return;
82 |
83 | }
84 |
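// Register the Kafka transport provider with the Service Bus transport manager.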
85 | transportManager = TransportManagerHelper.getTransportManager();
86 | transportManager.registerProvider(KafkaTransportProvider.getInstance(), null);
87 |
88 | } catch (Exception ex) {
89 |
90 | throw new ApplicationException(ex);
91 |
92 | }
93 |
94 | }
95 |
96 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaConstants.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import com.bea.wli.sb.services.BindingTypeInfo;
33 |
34 | /**
35 | * @author Ricardo Ferreira
36 | */
37 | public class KafkaConstants {
38 |
39 | public static final String DEFAULT_WORK_MANAGER = "default";
40 |
41 | public static final String KAFKA_PROVIDER_ID = "kafka";
42 |
43 | public static final String TEXT_REQUEST_TYPE =
44 | BindingTypeInfo.MessageTypeEnum.TEXT.toString();
45 |
46 | public static final String BINARY_REQUEST_TYPE =
47 | BindingTypeInfo.MessageTypeEnum.BINARY.toString();
48 |
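// The next three constants name JVM system properties (prefixed with this class's
// package, i.e. "oracle.ateam.sb.transports.kafka") that control the endpoint startup
// check interval, the endpoint startup timeout, and whether endpoint properties are
// printed to the log when an endpoint is created.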
49 | public static final String CHECK_INTERVAL =
50 | KafkaConstants.class.getPackage().getName() +
51 | ".endpoint.startup.checkInterval";
52 |
53 | public static final String STARTUP_TIMEOUT =
54 | KafkaConstants.class.getPackage().getName() +
55 | ".endpoint.startup.timeout";
56 |
57 | public static final String PRINT_PROPERTIES =
58 | KafkaConstants.class.getPackage().getName() +
59 | ".endpoint.config.printProperties";
60 |
61 | public static final String MESSAGE_KEY = "message-key";
62 | public static final String PARTITION = "partition";
63 | public static final String OFFSET = "offset";
64 |
65 | public static final String EP_CONSUMER_COMMIT_ASYNC =
66 | "endpoint.consumer.commitAsync";
67 |
68 | public static final String ABNORMAL_CONFIRM_RECEIVED =
69 | "Abnormal confirmation received. Both record " +
70 | "metadata and exception objects came empty.";
71 |
72 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaEndpoint.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.lang.reflect.Method;
33 | import java.net.URI;
34 | import java.util.ArrayList;
35 | import java.util.Arrays;
36 | import java.util.Collection;
37 | import java.util.HashMap;
38 | import java.util.List;
39 | import java.util.Map;
40 | import java.util.Properties;
41 | import java.util.Set;
42 | import java.util.logging.Level;
43 |
44 | import org.apache.kafka.clients.consumer.ConsumerConfig;
45 | import org.apache.kafka.clients.consumer.ConsumerRecord;
46 | import org.apache.kafka.clients.consumer.ConsumerRecords;
47 | import org.apache.kafka.clients.consumer.KafkaConsumer;
48 | import org.apache.kafka.clients.producer.Callback;
49 | import org.apache.kafka.clients.producer.KafkaProducer;
50 | import org.apache.kafka.clients.producer.ProducerConfig;
51 | import org.apache.kafka.clients.producer.ProducerRecord;
52 | import org.apache.kafka.clients.producer.RecordMetadata;
53 | import org.apache.kafka.common.serialization.ByteArrayDeserializer;
54 | import org.apache.kafka.common.serialization.ByteArraySerializer;
55 | import org.apache.kafka.common.serialization.StringDeserializer;
56 | import org.apache.kafka.common.serialization.StringSerializer;
57 |
58 | import com.bea.wli.config.Ref;
59 | import com.bea.wli.sb.sources.ByteArraySource;
60 | import com.bea.wli.sb.sources.Source;
61 | import com.bea.wli.sb.sources.StringSource;
62 | import com.bea.wli.sb.sources.TransformException;
63 | import com.bea.wli.sb.transports.EndPointConfiguration;
64 | import com.bea.wli.sb.transports.OutboundTransportMessageContext;
65 | import com.bea.wli.sb.transports.RequestHeaders;
66 | import com.bea.wli.sb.transports.TransportException;
67 | import com.bea.wli.sb.transports.TransportManager;
68 | import com.bea.wli.sb.transports.TransportManagerHelper;
69 | import com.bea.wli.sb.transports.TransportOptions;
70 | import com.bea.wli.sb.transports.TransportProvider;
71 | import com.bea.wli.sb.transports.TransportSendListener;
72 | import com.bea.wli.sb.transports.TransportSender;
73 | import com.bea.wli.sb.transports.URIType;
74 | import com.bea.wli.sb.transports.util.AbstractTransportEndPoint;
75 |
76 | /**
77 | * @author Ricardo Ferreira
78 | */
79 | public class KafkaEndpoint extends AbstractTransportEndPoint {
80 |
81 | private final String textType =
82 | KafkaConstants.TEXT_REQUEST_TYPE;
83 |
84 | private final String binaryType =
85 | KafkaConstants.BINARY_REQUEST_TYPE;
86 |
87 | private final String responseWorkManager =
88 | TransportManagerHelper.DEFAULT_RESPONSE_WORKMANAGER;
89 |
90 | private TransportProvider transportProvider;
91 | private TransportManager transportManager;
92 |
93 | private String topicName;
94 | private String requestType;
95 | @SuppressWarnings("unused")
96 | private String responseType;
97 | private String customProps;
98 | private String dispatchPolicy;
99 | private short consumerThreads;
100 |
101 | private List internalConsumers;
102 | private Properties consumerProps;
103 | private Properties producerProps;
104 | @SuppressWarnings("rawtypes")
105 | private KafkaProducer producer;
106 |
107 | protected KafkaEndpoint(TransportProvider transportProvider,
108 | Ref serviceRef, EndPointConfiguration endpointConfig)
109 | throws TransportException {
110 |
111 | super(serviceRef, endpointConfig);
112 | KafkaEndPointConfiguration kafkaEndpointConfig = null;
113 |
114 | try {
115 |
116 | this.transportProvider = transportProvider;
117 | kafkaEndpointConfig = KafkaUtil.getConfig(endpointConfig);
118 |
119 | topicName = kafkaEndpointConfig.getTopicName();
120 | requestType = kafkaEndpointConfig.getRequestType();
121 | responseType = kafkaEndpointConfig.getResponseType();
122 | customProps = kafkaEndpointConfig.getCustomProps();
123 |
124 | if (isInbound()) {
125 |
126 | initConsumerProperties(endpointConfig, kafkaEndpointConfig);
127 |
128 | } else {
129 |
130 | initProducerProperties(endpointConfig, kafkaEndpointConfig);
131 |
132 | }
133 |
134 | } catch (Exception ex) {
135 |
136 | throw new TransportException(ex);
137 |
138 | }
139 |
140 | }
141 |
142 | private void initConsumerProperties(EndPointConfiguration endpointConfig,
143 | KafkaEndPointConfiguration kafkaEndpointConfig) {
144 |
145 | KafkaInboundPropertiesType inboundProps =
146 | kafkaEndpointConfig.getInboundProperties();
147 |
148 | dispatchPolicy = inboundProps.getDispatchPolicy();
149 | consumerThreads = inboundProps.getConsumerThreads();
150 | consumerProps = new Properties();
151 |
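// Inbound (proxy) endpoints have a single URI whose value is already a
// comma-separated broker list (see PROXY_URI_FORMAT=host1:port,host2:port).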
152 | consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, endpointConfig.getURIArray()[0].getValue());
153 | consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, inboundProps.getGroupId());
154 | consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
155 |
156 | if (requestType.equals(textType)) {
157 |
158 | consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
159 | StringDeserializer.class.getName());
160 |
161 | } else if (requestType.equals(binaryType)) {
162 |
163 | consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
164 | ByteArrayDeserializer.class.getName());
165 |
166 | }
167 |
168 | setCustomProperties(consumerProps);
169 |
170 | if (!consumerProps.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) {
171 |
172 | consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.TRUE.toString());
173 |
174 | }
175 |
176 | checkPrintProperties(consumerProps);
177 |
178 | }
179 |
180 | private void initProducerProperties(EndPointConfiguration endpointConfig,
181 | KafkaEndPointConfiguration kafkaEndpointConfig) {
182 |
183 | KafkaOutboundPropertiesType outboundProps = kafkaEndpointConfig.getOutboundProperties();
184 | short retryCount = endpointConfig.getOutboundProperties().getRetryCount();
185 | int retryInterval = endpointConfig.getOutboundProperties().getRetryInterval();
186 | producerProps = new Properties();
187 |
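// Besides the UI-configured options, map the OSB retry settings onto the
// native Kafka 'retries' and 'retry.backoff.ms' producer properties.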
188 | producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers(endpointConfig.getURIArray()));
189 | producerProps.put(ProducerConfig.ACKS_CONFIG, outboundProps.getAcks());
190 | producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(outboundProps.getTimeoutMs()));
191 | producerProps.put(ProducerConfig.RETRIES_CONFIG, String.valueOf(retryCount));
192 | producerProps.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, calculateRetryBackoff(retryCount, retryInterval));
193 | producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
194 |
195 | if (requestType.equals(textType)) {
196 |
197 | producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
198 | StringSerializer.class.getName());
199 |
200 | } else if (requestType.equals(binaryType)) {
201 |
202 | producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
203 | ByteArraySerializer.class.getName());
204 |
205 | }
206 |
207 | setCustomProperties(producerProps);
208 | checkPrintProperties(producerProps);
209 |
210 | }
211 |
212 | private void setCustomProperties(Properties properties) {
213 |
214 | if (customProps != null && customProps.length() > 0) {
215 |
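// Simple tokenization: entries are split on ',' and each entry on '=', so keys
// or values that themselves contain these characters are not supported.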
216 | String[] _customProps = customProps.split(",");
217 |
218 | for (String propEntry : _customProps) {
219 |
220 | String[] property = propEntry.split("=");
221 |
222 | if (property.length == 2) {
223 |
224 | properties.setProperty(property[0], property[1]);
225 |
226 | }
227 |
228 | }
229 |
230 | }
231 |
232 | }
233 |
234 | private void checkPrintProperties(Properties properties) {
235 |
236 | if (KafkaUtil.printProperties()) {
237 |
238 | KafkaTransportLogger.log(Level.INFO, "The endpoint '" + getServiceRef().getLocalName() +
239 | "' is being created using the following properties");
240 |
241 | StringBuilder output = new StringBuilder();
242 | Set