├── lib
└── appmarker.jar
├── images
├── kafka-transport-inbound.png
└── kafka-transport-outbound.png
├── offline
└── transport-kafka.xml
├── META-INF
├── application.xml
└── weblogic-application.xml
├── resources
└── kafka-config.xml
├── install
└── install.py
├── LICENSE.md
├── src
└── oracle
│ └── ateam
│ └── sb
│ └── transports
│ └── kafka
│ ├── KafkaTransportProviderFactory.java
│ ├── KafkaRequestMetadata.java
│ ├── KafkaResponseMetadata.java
│ ├── KafkaResponseHeaders.java
│ ├── KafkaRequestHeaders.java
│ ├── KafkaConstants.java
│ ├── KafkaOutboundMessageContext.java
│ ├── KafkaApplicationListener.java
│ ├── KafkaTransportLogger.java
│ ├── KafkaInboundMessageContext.java
│ ├── KafkaUtil.java
│ ├── KafkaTransportProvider.java
│ ├── KafkaUIBinding.java
│ └── KafkaEndpoint.java
├── schemas
└── kafka-transport.xsd
├── README.md
└── l10n
└── oracle
└── ateam
└── sb
└── transports
└── kafka
└── KafkaUIBinding.properties
/lib/appmarker.jar:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/osb-kafka-transport/HEAD/lib/appmarker.jar
--------------------------------------------------------------------------------
/images/kafka-transport-inbound.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/osb-kafka-transport/HEAD/images/kafka-transport-inbound.png
--------------------------------------------------------------------------------
/images/kafka-transport-outbound.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/oracle/osb-kafka-transport/HEAD/images/kafka-transport-outbound.png
--------------------------------------------------------------------------------
/offline/transport-kafka.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
--------------------------------------------------------------------------------
/META-INF/application.xml:
--------------------------------------------------------------------------------
1 |
2 |
7 |
8 | Service Bus Kafka Transport Provider
9 | Service Bus Kafka Transport Provider
10 |
11 |
12 | appmarker.jar
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/META-INF/weblogic-application.xml:
--------------------------------------------------------------------------------
1 |
2 |
9 |
10 |
11 | webapp.encoding.default
12 | UTF-8
13 |
14 |
15 |
16 | oracle.ateam.sb.transports.kafka.KafkaApplicationListener
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/resources/kafka-config.xml:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | true
6 | true
7 |
8 |
9 |
10 |
11 | ONE_WAY
12 |
13 |
14 |
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/install/install.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | libraryName = 'transport-kafka'
4 | libraryFile = 'kafka-transport.jar'
5 | appName = 'Service Bus Kafka Transport Provider'
6 | appFile = 'kafka-transport.ear'
7 |
8 | print '***** Service Bus Kafka Transport Install *****'
9 | print ''
10 |
11 | ############################## Connect and Undeploy ###############################
12 |
13 | connect()
14 |
15 | appDeployment = cmo.lookupAppDeployment(appName)
16 |
17 | if (appDeployment != None):
18 |
19 | stopApplication(appName)
20 | undeploy(appName)
21 |
22 | library = cmo.lookupLibrary(libraryName)
23 |
24 | if (library != None):
25 |
26 | undeploy(libraryName)
27 |
28 | ########################## Retrieve Targets Information ###########################
29 |
30 | targets = cmo.getAdminServerName()
31 | clusters = cmo.getClusters()
32 |
33 | for cluster in clusters:
34 |
35 | targets = targets + ',' + cluster.getName()
36 |
37 | servers = cmo.getServers()
38 |
39 | for server in servers:
40 |
41 | if (server.getCluster == None):
42 |
43 | targets = targets + ',' + server.getName()
44 |
45 | ######################### Install and Start the Transport #########################
46 |
47 | if (not os.path.exists(libraryFile)):
48 |
49 | tmp = raw_input('Enter the location of the "kafka-transport.jar" file: ')
50 |
51 | if (tmp.endswith(libraryFile)):
52 |
53 | libraryFile = tmp
54 |
55 | else:
56 |
57 | libraryFile = os.path.join(tmp, libraryFile)
58 |
59 | if (not os.path.exists(appFile)):
60 |
61 | tmp = raw_input('Enter the location of the "kafka-transport.ear" file: ')
62 |
63 | if (tmp.endswith(appFile)):
64 |
65 | appFile = tmp
66 |
67 | else:
68 |
69 | appFile = os.path.join(tmp, appFile)
70 |
71 | deploy(libraryName, libraryFile, targets=targets, libraryModule='true')
72 | deploy(appName, appFile, targets=targets)
73 |
74 | ################################## Disconnect ####################################
75 |
76 | disconnect()
77 |
--------------------------------------------------------------------------------
/LICENSE.md:
--------------------------------------------------------------------------------
1 | # License
2 |
3 | You may not use the identified files except in compliance with the
4 | Universal Permissive License (UPL), Version 1.0 (the "License.")
5 |
6 | You may obtain a copy of the License at
7 | https://opensource.org/licenses/UPL. A copy of the license is
8 | also reproduced below.
9 |
10 | Unless required by applicable law or agreed to in writing, software
11 | distributed under the License is distributed on an "AS IS" BASIS,
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13 | implied.
14 |
15 | See the License for the specific language governing permissions and
16 | limitations under the License.
17 |
18 |
19 | ```
20 | Copyright (c) 2014, 2016 Oracle and/or its affiliates
21 | The Universal Permissive License (UPL), Version 1.0
22 |
23 | Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this software, associated documentation and/or data (collectively the "Software"), free of charge and under any and all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or (ii) the Larger Works (as defined below), to deal in both
24 |
25 | (a) the Software, and
26 |
27 | (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software (each a “Larger Work” to which the Software is contributed by such licensors),
28 |
29 | without restriction, including without limitation the rights to copy, create derivative works of, display, perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
30 |
31 | This license is subject to the following condition:
32 |
33 | The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must be included in all copies or substantial portions of the Software.
34 |
35 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36 |
37 | ```
38 |
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaTransportProviderFactory.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import com.bea.wli.sb.transports.TransportException;
33 | import com.bea.wli.sb.transports.TransportManager;
34 | import com.bea.wli.sb.transports.TransportProviderFactory;
35 |
36 | /**
37 | * @author Ricardo Ferreira
38 | */
39 | public class KafkaTransportProviderFactory implements TransportProviderFactory {
40 |
41 | @Override
42 | public String getId() {
43 |
44 | return KafkaConstants.KAFKA_PROVIDER_ID;
45 |
46 | }
47 |
48 | @Override
49 | public void registerProvider(TransportManager transportManager)
50 | throws TransportException {
51 |
52 | transportManager.registerProvider(KafkaTransportProvider.getInstance(), null);
53 |
54 | }
55 |
56 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaRequestMetadata.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import com.bea.wli.sb.transports.DefaultRequestMetaData;
33 | import com.bea.wli.sb.transports.RequestHeaders;
34 | import com.bea.wli.sb.transports.RequestHeadersXML;
35 | import com.bea.wli.sb.transports.TransportException;
36 | import com.bea.wli.sb.transports.TransportProvider;
37 |
38 | /**
39 | * @author Ricardo Ferreira
40 | */
41 | public class KafkaRequestMetadata extends
42 | DefaultRequestMetaData {
43 |
44 | public KafkaRequestMetadata() throws TransportException {
45 |
46 | super(KafkaTransportProvider.getInstance());
47 |
48 | }
49 |
50 | @Override
51 | protected RequestHeaders createHeaders(TransportProvider provider,
52 | RequestHeadersXML headers) throws TransportException {
53 |
54 | return new KafkaRequestHeaders(headers);
55 |
56 | }
57 |
58 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaResponseMetadata.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import com.bea.wli.sb.transports.DefaultResponseMetaData;
33 | import com.bea.wli.sb.transports.ResponseHeaders;
34 | import com.bea.wli.sb.transports.ResponseHeadersXML;
35 | import com.bea.wli.sb.transports.TransportException;
36 | import com.bea.wli.sb.transports.TransportProvider;
37 |
38 | /**
39 | * @author Ricardo Ferreira
40 | */
41 | public class KafkaResponseMetadata extends
42 | DefaultResponseMetaData {
43 |
44 | public KafkaResponseMetadata() throws TransportException {
45 |
46 | super(KafkaTransportProvider.getInstance());
47 |
48 | }
49 |
50 | @Override
51 | protected ResponseHeaders createHeaders(TransportProvider provider,
52 | ResponseHeadersXML headersXML) throws TransportException {
53 |
54 | return new KafkaResponseHeaders(headersXML);
55 |
56 | }
57 |
58 | private static final long serialVersionUID = -1321185702686299442L;
59 |
60 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaResponseHeaders.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import com.bea.wli.sb.transports.DefaultResponseHeaders;
33 | import com.bea.wli.sb.transports.ResponseHeadersXML;
34 | import com.bea.wli.sb.transports.TransportException;
35 |
36 | /**
37 | * @author Ricardo Ferreira
38 | */
39 | public class KafkaResponseHeaders extends
40 | DefaultResponseHeaders {
41 |
42 | public KafkaResponseHeaders(ResponseHeadersXML headers)
43 | throws TransportException {
44 |
45 | super(KafkaTransportProvider.getInstance(), headers);
46 |
47 | }
48 |
49 | public void setTopicName(String topicName) {
50 |
51 | setHeader("topic-name", topicName);
52 |
53 | }
54 |
55 | public void setPartition(int partition) {
56 |
57 | setHeader("partition", partition);
58 |
59 | }
60 |
61 | public void setOffset(long offset) {
62 |
63 | setHeader("offset", offset);
64 |
65 | }
66 |
67 | private static final long serialVersionUID = 2961627464065220196L;
68 |
69 | }
--------------------------------------------------------------------------------
/schemas/kafka-transport.xsd:
--------------------------------------------------------------------------------
1 |
2 |
8 |
9 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaRequestHeaders.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.math.BigInteger;
33 |
34 | import com.bea.wli.sb.transports.DefaultRequestHeaders;
35 | import com.bea.wli.sb.transports.RequestHeadersXML;
36 | import com.bea.wli.sb.transports.TransportException;
37 |
38 | /**
39 | * @author Ricardo Ferreira
40 | */
41 | public class KafkaRequestHeaders extends
42 | DefaultRequestHeaders {
43 |
44 | public KafkaRequestHeaders(RequestHeadersXML headers)
45 | throws TransportException {
46 |
47 | super(KafkaTransportProvider.getInstance(), headers);
48 |
49 | }
50 |
51 | public Integer getPartition() {
52 |
53 | BigInteger value = (BigInteger) getHeader("partition");
54 |
55 | if (value != null) {
56 |
57 | return value.intValue();
58 |
59 | }
60 |
61 | return null;
62 |
63 | }
64 |
65 | public String getMessageKey() {
66 |
67 | return (String) getHeader("message-key");
68 |
69 | }
70 |
71 | public void setMessageKey(String messageKey) {
72 |
73 | setHeader("message-key", messageKey);
74 |
75 | }
76 |
77 | public void setPartition(int partition) {
78 |
79 | setHeader("partition", partition);
80 |
81 | }
82 |
83 | public void setOffset(long offset) {
84 |
85 | setHeader("offset", offset);
86 |
87 | }
88 |
89 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaConstants.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import com.bea.wli.sb.services.BindingTypeInfo;
33 |
34 | /**
35 | * @author Ricardo Ferreira
36 | */
37 | public class KafkaConstants {
38 |
39 | public static final String DEFAULT_WORK_MANAGER = "default";
40 |
41 | public static final String KAFKA_PROVIDER_ID = "kafka";
42 |
43 | public static final String TEXT_REQUEST_TYPE =
44 | BindingTypeInfo.MessageTypeEnum.TEXT.toString();
45 |
46 | public static final String BINARY_REQUEST_TYPE =
47 | BindingTypeInfo.MessageTypeEnum.BINARY.toString();
48 |
49 | public static final String CHECK_INTERVAL =
50 | KafkaConstants.class.getPackage().getName() +
51 | ".endpoint.startup.checkInterval";
52 |
53 | public static final String STARTUP_TIMEOUT =
54 | KafkaConstants.class.getPackage().getName() +
55 | ".endpoint.startup.timeout";
56 |
57 | public static final String PRINT_PROPERTIES =
58 | KafkaConstants.class.getPackage().getName() +
59 | ".endpoint.config.printProperties";
60 |
61 | public static final String MESSAGE_KEY = "message-key";
62 | public static final String PARTITION = "partition";
63 | public static final String OFFSET = "offset";
64 |
65 | public static final String EP_CONSUMER_COMMIT_ASYNC =
66 | "endpoint.consumer.commitAsync";
67 |
68 | public static final String ABNORMAL_CONFIRM_RECEIVED =
69 | "Abnormal confirmation received. Both record " +
70 | "metadata and exception objects came empty.";
71 |
72 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaOutboundMessageContext.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.net.URI;
33 | import java.util.UUID;
34 |
35 | import com.bea.wli.sb.sources.Source;
36 | import com.bea.wli.sb.transports.OutboundTransportMessageContext;
37 | import com.bea.wli.sb.transports.ResponseMetaData;
38 | import com.bea.wli.sb.transports.TransportException;
39 |
40 | /**
41 | * @author Ricardo Ferreira
42 | */
43 | public class KafkaOutboundMessageContext implements
44 | OutboundTransportMessageContext {
45 |
46 | private URI uri;
47 | @SuppressWarnings("rawtypes")
48 | private ResponseMetaData responseMetadata;
49 |
50 | public KafkaOutboundMessageContext(URI uri) throws TransportException {
51 |
52 | this.uri = uri;
53 | this.responseMetadata = new KafkaResponseMetadata();
54 |
55 | }
56 |
57 | public KafkaOutboundMessageContext(URI uri, String topicName,
58 | int partition, long offset) throws TransportException {
59 |
60 | this(uri);
61 |
62 | ((KafkaResponseHeaders) responseMetadata.getHeaders()).setTopicName(topicName);
63 | ((KafkaResponseHeaders) responseMetadata.getHeaders()).setPartition(partition);
64 | ((KafkaResponseHeaders) responseMetadata.getHeaders()).setOffset(offset);
65 |
66 | }
67 |
68 | @Override
69 | public String getMessageId() {
70 |
71 | return UUID.randomUUID().toString();
72 |
73 | }
74 |
75 | @Override
76 | public URI getURI() {
77 |
78 | return uri;
79 |
80 | }
81 |
82 | @Override
83 | @SuppressWarnings("rawtypes")
84 | public ResponseMetaData getResponseMetaData() throws TransportException {
85 |
86 | return responseMetadata;
87 |
88 | }
89 |
90 | @Override
91 | public Source getResponsePayload() throws TransportException {
92 |
93 | return null;
94 |
95 | }
96 |
97 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaApplicationListener.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.util.logging.Level;
33 |
34 | import com.bea.wli.sb.transports.TransportManager;
35 | import com.bea.wli.sb.transports.TransportManagerHelper;
36 |
37 | import weblogic.application.ApplicationException;
38 | import weblogic.application.ApplicationLifecycleEvent;
39 | import weblogic.application.ApplicationLifecycleListener;
40 |
41 | /**
42 | * @author Ricardo Ferreira
43 | */
44 | public class KafkaApplicationListener extends ApplicationLifecycleListener {
45 |
46 | private boolean isDependenciesMissing() {
47 |
48 | boolean missing = true;
49 |
50 | try {
51 |
52 | // Checkings for Kafka Clients API...
53 | Class.forName("org.apache.kafka.clients.consumer.KafkaConsumer");
54 | Class.forName("org.apache.kafka.clients.producer.KafkaProducer");
55 |
56 | // Checkings for Third-party...
57 | Class.forName("org.slf4j.ILoggerFactory");
58 |
59 | missing = false;
60 |
61 | } catch (ClassNotFoundException cnfe) {}
62 |
63 | return missing;
64 |
65 | }
66 |
67 | public void preStart(ApplicationLifecycleEvent appLifecycleEvent)
68 | throws ApplicationException {
69 |
70 | TransportManager transportManager = null;
71 |
72 | try {
73 |
74 | if (isDependenciesMissing()) {
75 |
76 | KafkaTransportLogger.log(Level.WARNING, "Kafka transport " +
77 | "could not be registered due to missing libraries. " +
78 | "For using the Kafka transport, its libraries must " +
79 | "be available in the classpath.");
80 |
81 | return;
82 |
83 | }
84 |
85 | transportManager = TransportManagerHelper.getTransportManager();
86 | transportManager.registerProvider(KafkaTransportProvider.getInstance(), null);
87 |
88 | } catch (Exception ex) {
89 |
90 | throw new ApplicationException(ex);
91 |
92 | }
93 |
94 | }
95 |
96 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaTransportLogger.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.text.SimpleDateFormat;
33 | import java.util.Date;
34 | import java.util.Map;
35 | import java.util.concurrent.ConcurrentHashMap;
36 | import java.util.logging.Level;
37 |
38 | /**
39 | *
40 | * This helper class is intended to be a placeholder only.
41 | * One might want to integrate with WebLogic's logging
42 | * system instead. Therefore, this class is the perfect
43 | * API hook to link that up since it is used throughout
44 | * the implemented code.
45 | *
46 | * @author Ricardo Ferreira
47 | */
/**
 *
 * This helper class is intended to be a placeholder only.
 * One might want to integrate with WebLogic's logging
 * system instead. Therefore, this class is the perfect
 * API hook to link that up since it is used throughout
 * the implemented code.
 *
 * Output goes to standard out in a WebLogic-like format:
 * {@code <timestamp> <Level> <subsystem>  <message>}.
 *
 * @author Ricardo Ferreira
 */
public class KafkaTransportLogger {

	// Cache of pretty-printed level names, e.g. WARNING -> "Warning".
	private static final Map<Level, String> cache =
			new ConcurrentHashMap<Level, String>();

	// SimpleDateFormat is NOT thread-safe; all access to this shared
	// instance is synchronized on it (loggers are called concurrently).
	private static final SimpleDateFormat sdf =
			new SimpleDateFormat("MMM dd, yyyy h:mm:ss aaa z");

	private static final String subsystem = resolveSubsystem();

	// Utility class: not meant to be instantiated.
	private KafkaTransportLogger() {
	}

	/**
	 * Resolves the subsystem name from this class' package. Guards
	 * against getPackage() returning null, which happens when the
	 * class is loaded from the unnamed (default) package.
	 */
	private static String resolveSubsystem() {

		Package pkg = KafkaTransportLogger.class.getPackage();
		return (pkg != null) ? pkg.getName() : "";

	}

	/**
	 * Returns the level name with only its first letter capitalized
	 * (e.g. "Warning"), caching the result per level.
	 */
	private static String getName(Level level) {

		String name = cache.get(level);

		if (name == null) {

			String raw = level.getName();
			name = raw.substring(0, 1).toUpperCase()
					+ raw.substring(1).toLowerCase();

			cache.put(level, name);

		}

		return name;

	}

	/**
	 * Formats and prints a single log record to standard out.
	 * Shared by {@link #log(Level, String)} and
	 * {@link #error(String, Throwable)} to keep the format in one place.
	 */
	private static void write(String levelName, String message) {

		String timestamp;

		synchronized (sdf) {
			timestamp = sdf.format(new Date());
		}

		StringBuilder sb = new StringBuilder();

		sb.append("<").append(timestamp).append("> ");
		sb.append("<").append(levelName).append("> ");
		sb.append("<").append(subsystem).append("> ");
		sb.append(" ");
		sb.append("<").append(message).append(">");

		System.out.println(sb.toString());

	}

	/**
	 * Logs a message at the given level.
	 *
	 * @param level the logging level.
	 * @param message the message to print.
	 */
	public static void log(Level level, String message) {

		write(getName(level), message);

	}

	/**
	 * Logs an error message, followed by the stack trace of the
	 * given throwable when one is provided.
	 *
	 * @param message the message to print.
	 * @param throwable optional cause; its stack trace is printed if non-null.
	 */
	public static void error(String message, Throwable throwable) {

		write("Error", message);

		if (throwable != null) {

			throwable.printStackTrace();

		}

	}

}
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Oracle Service Bus Transport for Apache Kafka
2 |
3 | ## Introduction
4 | This sample provides a native transport for [OSB](http://www.oracle.com/technetwork/middleware/service-bus/overview/index-096326.html) (Oracle Service Bus) that allows connectivity with [Apache Kafka](https://kafka.apache.org/). By using native APIs, the transport allows resilient and high speed access to Apache Kafka clusters. Integration developers can benefit from this transport in the implementation of use cases that requires the integration to/from Apache Kafka with applications (SaaS and On-Premise) supported by OSB, as well as technologies such as JMS, HTTP, MSMQ, Coherence, Tuxedo, FTP, etc.
5 |
6 | #### Using the Kafka Transport for Inbound Processing (From Kafka to the World)
7 |
8 | 
9 |
10 | #### Using the Kafka Transport for Outbound Processing (From the World to Kafka)
11 |
12 | 
13 |
14 | This is an Open-Source project maintained by Oracle.
15 |
16 | ## Features and Benefits:
The OSB Transport for Apache Kafka provides inbound and outbound connectivity with Apache Kafka. But this is definitely an oversimplification of what this transport can really do. The list below summarizes the most important features found in this implementation.
18 |
19 | * Supports multiple Apache Kafka versions such as 0.9.X, 0.10.X and above.
20 | * It works with the enterprise version of Kafka (Confluent Platform) as well.
21 | * Designed to work with 12c versions of OSB. Compatible with 12.1.3 and 12.2.1.
22 | * Can be used both On-Premise and in Cloud deployments, via SOA Cloud Service.
23 | * Supports inbound (Proxy Service) and outbound (Business Service) use cases.
24 | * Allows both text/binary payload types to flow through Service Bus pipelines.
25 | * Allows inbound processing to be spread out over multiple concurrent threads.
26 | * Deeper integration with WebLogic lifecycle. It smartly starts the endpoints.
27 | * Allows sync/async commits when the option 'enable.auto.commit' is disabled.
28 | * Allows association with native WebLogic Work Managers for maximum work control.
29 | * Allows message level partitioning using Transport Headers for outbound scenarios.
30 | * Allows fine tuning over delivery semantics by supporting multiple ack modes.
31 | * Provides native response headers during outbound scenarios for better control.
32 | * Allows the implementation of native Kafka properties using custom properties.
33 | * Allows the development of OSB projects using both the Console and JDeveloper.
34 | * Provides JVM properties that controls some behaviors and allows log debugging.
35 |
## Getting Started
37 | The very first thing you need to do to start playing with the transport is building it from the sources. The build process of this transport has been completely based on the best practices described in the [product documentation section](https://docs.oracle.com/middleware/1221/osb/develop/GUID-F3574BDE-F053-4015-ACC2-4CE2473B39EA.htm#OSBDV1292) about custom transports development. Therefore, if you are familiar with the build process for custom transports then you should be OK following the steps below.
38 |
In a nutshell, the build process is based on Ant. The [build.xml](./build.xml) script provided encapsulates all the necessary steps to generate the implementation files (kafka-transport.ear and kafka-transport.jar) needed to deploy the transport into your Service Bus domain. But in order to work, the script relies on information from the environment. Specifically, information about where to find the Fusion Middleware JAR files necessary for the code compilation. Thus, you will need to build the implementation files in a machine that has Oracle Service Bus.
40 |
41 | The quickest way to load all the Fusion Middleware information into the environment is sourcing the setDomainEnv.sh script from your domain:
42 |
43 | ```
source $FMW_HOME/user-projects/domains/<domain-name>/bin/setDomainEnv.sh
45 | ```
46 |
47 | Next, you will need to specify in the [build.properties](./build.properties) file the location of the Kafka Clients API JAR file:
48 |
49 | ```
50 | ### Apache Kafka Clients API
51 | kafka.clients.api=/opt/kafka_2.11-0.10.0.1/libs/kafka-clients-0.10.1.0.jar
52 | ```
53 |
54 | Now you can simply execute the script by typing 'ant' in the command-line. Once the build finishes, the implementation files will be generated under the newly created 'build' folder. Alternatively, the implementation files will also be proactively copied into your Fusion Middleware installation.
55 |
The last step is the deployment of the implementation files into your Service Bus domain. To make things easier, the [install.py](./install/install.py) script encapsulates the details about how to connect to the WebLogic domain, performing the deployment and committing the changes. Therefore, get into the 'install' folder and type:
57 |
58 | ```
59 | java weblogic.WLST install.py
60 | ```
61 |
62 | The script will ask information about the location of the implementation files and connection details of the WebLogic domain.
63 |
64 | For a deeper introduction into the Kafka transport, please read a series of [two blogs](http://www.ateam-oracle.com/osb-transport-for-apache-kafka-part-1/) written in the Oracle A-Team chronicles website. They will provide details about how to use it and how to configure it to implement more complex scenarios.
65 |
66 | ## License
67 | Copyright (c) 2014, 2016 Oracle and/or its affiliates
68 | The Universal Permissive License (UPL), Version 1.0
69 |
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaInboundMessageContext.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.net.URI;
33 | import java.util.Map;
34 | import java.util.UUID;
35 |
36 | import org.apache.xmlbeans.XmlObject;
37 |
38 | import com.bea.wli.sb.sources.Source;
39 | import com.bea.wli.sb.transports.InboundTransportMessageContext;
40 | import com.bea.wli.sb.transports.RequestMetaData;
41 | import com.bea.wli.sb.transports.ResponseMetaData;
42 | import com.bea.wli.sb.transports.TransportEndPoint;
43 | import com.bea.wli.sb.transports.TransportException;
44 | import com.bea.wli.sb.transports.TransportOptions;
45 |
46 | /**
47 | * @author Ricardo Ferreira
48 | */
49 | public class KafkaInboundMessageContext implements
50 | InboundTransportMessageContext {
51 |
52 | @SuppressWarnings("rawtypes")
53 | private RequestMetaData requestMetadata;
54 | private Source requestPayload;
55 | private KafkaEndpoint endpoint;
56 |
57 | @SuppressWarnings("rawtypes")
58 | public KafkaInboundMessageContext(KafkaEndpoint endpoint,
59 | RequestMetaData requestMetadata, Source requestPayload) {
60 |
61 | this.endpoint = endpoint;
62 | this.requestMetadata = requestMetadata;
63 | this.requestPayload = requestPayload;
64 |
65 | }
66 |
67 | public KafkaInboundMessageContext(KafkaEndpoint endpoint,
68 | String messageKey, int partition, long offset,
69 | Source requestPayload) throws TransportException {
70 |
71 | this.endpoint = endpoint;
72 | this.requestPayload = requestPayload;
73 | this.requestMetadata = new KafkaRequestMetadata();
74 |
75 | ((KafkaRequestHeaders) requestMetadata.getHeaders()).setMessageKey(messageKey);
76 | ((KafkaRequestHeaders) requestMetadata.getHeaders()).setPartition(partition);
77 | ((KafkaRequestHeaders) requestMetadata.getHeaders()).setOffset(offset);
78 |
79 | }
80 |
81 | public KafkaInboundMessageContext(KafkaEndpoint endpoint,
82 | Map headers, Source requestPayload)
83 | throws TransportException {
84 |
85 | this.endpoint = endpoint;
86 | this.requestPayload = requestPayload;
87 | this.requestMetadata = new KafkaRequestMetadata();
88 |
89 | if (headers.containsKey(KafkaConstants.MESSAGE_KEY)) {
90 |
91 | String messageKey = (String) headers.get(KafkaConstants.MESSAGE_KEY);
92 | ((KafkaRequestHeaders) requestMetadata.getHeaders()).setMessageKey(messageKey);
93 |
94 | }
95 |
96 | if (headers.containsKey(KafkaConstants.PARTITION)) {
97 |
98 | int partition = (Integer) headers.get(KafkaConstants.PARTITION);
99 | ((KafkaRequestHeaders) requestMetadata.getHeaders()).setPartition(partition);
100 |
101 | }
102 |
103 | if (headers.containsKey(KafkaConstants.OFFSET)) {
104 |
105 | long offset = (Long) headers.get(KafkaConstants.OFFSET);
106 | ((KafkaRequestHeaders) requestMetadata.getHeaders()).setOffset(offset);
107 |
108 | }
109 |
110 | }
111 |
112 | @Override
113 | public TransportEndPoint getEndPoint() throws TransportException {
114 |
115 | return endpoint;
116 |
117 | }
118 |
119 | @Override
120 | public Source getRequestPayload() throws TransportException {
121 |
122 | return requestPayload;
123 |
124 | }
125 |
126 | @Override
127 | public String getMessageId() {
128 |
129 | return UUID.randomUUID().toString();
130 |
131 | }
132 |
133 | @Override
134 | public URI getURI() {
135 |
136 | return endpoint.getURI()[0];
137 |
138 | }
139 |
140 | @Override
141 | @SuppressWarnings("rawtypes")
142 | public RequestMetaData getRequestMetaData() throws TransportException {
143 |
144 | return requestMetadata;
145 |
146 | }
147 |
148 | @Override
149 | @SuppressWarnings("rawtypes")
150 | public ResponseMetaData createResponseMetaData() throws TransportException {
151 |
152 | return null;
153 |
154 | }
155 |
156 | @Override
157 | @SuppressWarnings("rawtypes")
158 | public ResponseMetaData createResponseMetaData(XmlObject xmlData)
159 | throws TransportException {
160 |
161 | return null;
162 |
163 | }
164 |
165 | @Override
166 | @SuppressWarnings("rawtypes")
167 | public void setResponseMetaData(ResponseMetaData responseMetadata)
168 | throws TransportException {
169 | }
170 |
171 | @Override
172 | public void setResponsePayload(Source responsePayload)
173 | throws TransportException {
174 | }
175 |
176 | @Override
177 | public void close(TransportOptions transportOptions) {
178 | }
179 |
180 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaUtil.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import org.apache.xmlbeans.XmlObject;
33 |
34 | import com.bea.alsb.platform.PlatformFactory;
35 | import com.bea.alsb.platform.ServerConfiguration;
36 | import com.bea.wli.sb.sources.ByteArraySource;
37 | import com.bea.wli.sb.sources.Source;
38 | import com.bea.wli.sb.sources.StringSource;
39 | import com.bea.wli.sb.sources.TransformException;
40 | import com.bea.wli.sb.sources.TransformOptions;
41 | import com.bea.wli.sb.sources.Transformer;
42 | import com.bea.wli.sb.transports.ALSBTransportManager;
43 | import com.bea.wli.sb.transports.EndPointConfiguration;
44 | import com.bea.wli.sb.transports.TransportException;
45 | import com.bea.wli.sb.transports.TransportManagerHelper;
46 |
47 | /**
48 | * @author Ricardo Ferreira
49 | */
50 | public class KafkaUtil {
51 |
52 | private static ServerConfiguration serverConfig;
53 | private static boolean printProperties = false;
54 | private static Transformer transformer = null;
55 | private static int startupTimeout = 300000;
56 | private static int checkInterval = 5000;
57 |
58 | static {
59 |
60 | String startupTimeoutProp = System.getProperty(KafkaConstants.STARTUP_TIMEOUT);
61 |
62 | if (startupTimeoutProp != null) {
63 |
64 | startupTimeout = Integer.parseInt(startupTimeoutProp);
65 |
66 | }
67 |
68 | String checkIntervalProp = System.getProperty(KafkaConstants.CHECK_INTERVAL);
69 |
70 | if (checkIntervalProp != null) {
71 |
72 | checkInterval = Integer.parseInt(checkIntervalProp);
73 |
74 | if (checkInterval > 60000) {
75 |
76 | // Any Service Bus start up should be measured in terms
77 | // of minutes, tops. Anything beyond that can be harmful
78 | // to the transport health since the idea is to have the
79 | // endpoints listening Kafka topics as soon the start up
80 | // finishes and WebLogic reaches the 'RUNNING' state.
81 |
82 | // In order to ensure the transport health, we set the
83 | // parameter back to its maximum, which is one minute.
84 | // That will ensure that no user (carrying good or bad
85 | // intentions) will hassle the bootstrap using longer
86 | // checks.
87 |
88 | checkInterval = 60000;
89 |
90 | }
91 |
92 | }
93 |
94 | String printPropsProp = System.getProperty(KafkaConstants.PRINT_PROPERTIES);
95 |
96 | if (printPropsProp != null) {
97 |
98 | printProperties = Boolean.parseBoolean(printPropsProp);
99 |
100 | }
101 |
102 | }
103 |
104 | public static int getStartupTimeout() {
105 |
106 | return startupTimeout;
107 |
108 | }
109 |
110 | public static int getCheckInterval() {
111 |
112 | return checkInterval;
113 |
114 | }
115 |
116 | public static boolean printProperties() {
117 |
118 | return printProperties;
119 |
120 | }
121 |
122 | @SuppressWarnings({ "rawtypes", "unchecked" })
123 | private static Source transform(Source payload, Class classType,
124 | String encoding) throws TransportException, TransformException {
125 |
126 | if (transformer == null) {
127 |
128 | transformer = ALSBTransportManager.getInstance().getTransformer();
129 |
130 | }
131 |
132 | TransformOptions options = new TransformOptions();
133 | options.setCharacterEncoding(encoding);
134 |
135 | return transformer.transform(payload, classType, options);
136 |
137 | }
138 |
139 | public static boolean isValidDispatchPolicy(String dispatchPolicy) {
140 |
141 | return dispatchPolicy != null && !dispatchPolicy.equals(
142 | KafkaConstants.DEFAULT_WORK_MANAGER);
143 |
144 | }
145 |
146 | public static void schedule(Runnable runnable, String dispatchPolicy)
147 | throws TransportException {
148 |
149 | TransportManagerHelper.schedule(runnable, dispatchPolicy);
150 |
151 | }
152 |
153 | public static KafkaEndPointConfiguration getConfig(
154 | EndPointConfiguration endpointConfig) throws TransportException {
155 |
156 | XmlObject xbean = endpointConfig.getProviderSpecific();
157 |
158 | if (xbean instanceof KafkaEndPointConfiguration) {
159 |
160 | return (KafkaEndPointConfiguration) xbean;
161 |
162 | } else {
163 |
164 | try {
165 |
166 | return KafkaEndPointConfiguration.Factory.parse(xbean.newInputStream());
167 |
168 | } catch (Exception ex) {
169 |
170 | throw new TransportException(ex);
171 |
172 | }
173 |
174 | }
175 |
176 | }
177 |
178 | public static Source getAsText(Source payload, String encoding)
179 | throws TransportException, TransformException {
180 |
181 | return transform(payload, StringSource.class, encoding);
182 |
183 | }
184 |
185 | public static Source getAsBinary(Source payload, String encoding)
186 | throws TransportException, TransformException {
187 |
188 | return transform(payload, ByteArraySource.class, encoding);
189 |
190 | }
191 |
192 | public static boolean isServerRunning() throws Exception {
193 |
194 | if (serverConfig == null) {
195 |
196 | serverConfig = PlatformFactory.get().getServerConfiguration();
197 |
198 | }
199 |
200 | return serverConfig.isRunning();
201 |
202 | }
203 |
204 | @SuppressWarnings("rawtypes")
205 | public static Class loadClass(String className) {
206 |
207 | Class classImpl = null;
208 |
209 | try {
210 |
211 | classImpl = Class.forName(className);
212 |
213 | } catch (ClassNotFoundException cnfe) {}
214 |
215 | return classImpl;
216 |
217 | }
218 |
219 | }
--------------------------------------------------------------------------------
/l10n/oracle/ateam/sb/transports/kafka/KafkaUIBinding.properties:
--------------------------------------------------------------------------------
1 | # General Strings for the UI
2 | PROXY_URI_FORMAT=host1:port,host2:port
3 | BUSINESS_URI_FORMAT=host:port
4 | TOPIC_NAME_LABEL=Topic Name
5 | TOPIC_NAME_DESC=The name of the topic(s).
6 | CUSTOM_PROPS_LABEL=Custom Properties
7 | CUSTOM_PROPS_LABEL_CONSUMER=Consumer Configs
8 | CUSTOM_PROPS_LABEL_PRODUCER=Producer Configs
9 | CUSTOM_PROPS_DESC=Comma-separated list of properties in the key=value format. Use this field to specify any property that does not have an equivalent in this user interface.
10 |
11 | # UI Fields (Inbound)
12 | CONSUMER_THREADS_LABEL=Consumer Threads
CONSUMER_THREADS_DESC=The number of concurrent threads used to fetch messages from the topic(s). Each thread will create an internal consumer that binds to an available partition.
14 | GROUP_ID_LABEL=Group Identifier
15 | GROUP_ID_DESC=A unique string that identifies the consumer group this consumer belongs to. Using the same group identifier for multiple processes creates a cluster of consumers that works in a load balancing fashion. This is the UI equivalent to the 'group.id' property.
16 | PARTITION_ASSIGNMENT_STRATEGY_LABEL=Partition Assignment Strategy
17 | PARTITION_ASSIGNMENT_STRATEGY_DESC=Select between the 'range' or 'roundrobin' strategy for assigning partitions to consumer streams.
18 | SOCKET_TIMEOUT_MS_LABEL=Socket Timeout
19 | SOCKET_TIMEOUT_MS_DESC=The socket timeout, in milliseconds, for network requests.
20 | SOCKET_RECEIVE_BUFFER_BYTES_LABEL=Socket Receive Buffer Bytes
21 | SOCKET_RECEIVE_BUFFER_BYTES_DESC=The socket receive buffer, in bytes, for network requests.
22 | FETCH_MESSAGE_MAX_BYTES_LABEL=Maximum Fetch Message Bytes
23 | FETCH_MESSAGE_MAX_BYTES_DESC=The number of bytes of messages to attempt to fetch for each topic-partition in each fetch request. These bytes will be read into memory for each partition, so this helps control the memory used by the consumer. The fetch request size must be at least as large as the maximum message size the server allows or else it is possible for the producer to send messages larger than the consumer can fetch.
24 | NUM_CONSUMER_FETCHERS_LABEL=Number of Consumer Fetchers
25 | NUM_CONSUMER_FETCHERS_DESC=The number of fetcher threads used to fetch data.
26 | AUTO_COMMIT_ENABLE_LABEL=Auto Commit Enable
27 | AUTO_COMMIT_ENABLE_DESC=If true, periodically commit to ZooKeeper the offset of messages already fetched by the consumer. This committed offset will be used when the process fails as the position from which the new consumer will begin.
28 | AUTO_COMMIT_INTERVAL_MS_LABEL=Auto Commit Interval
29 | AUTO_COMMIT_INTERVAL_MS_DESC=The frequency in milliseconds that the consumer offsets are committed to zookeeper.
30 | QUEUED_MAX_MESSAGE_CHUNKS_LABEL=Maximum Queued Message Chunks
31 | QUEUED_MAX_MESSAGE_CHUNKS_DESC=Max number of message chunks buffered for consumption.
32 | REBALANCE_MAX_RETRIES_LABEL=Maximum Rebalance Retries
33 | REBALANCE_MAX_RETRIES_DESC=When a new consumer joins a consumer group the set of consumers attempt to 'rebalance' the load to assign partitions to each consumer. If the set of consumers changes while this assignment is taking place the rebalance will fail and retry. This setting controls the maximum number of attempts before giving up.
34 | FETCH_MIN_BYTES_LABEL=Minimum Fetch Bytes
35 | FETCH_MIN_BYTES_DESC=The minimum amount of data, in bytes, the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request.
36 | FETCH_WAIT_MAX_MS_LABEL=Maximum Fetch Wait
37 | FETCH_WAIT_MAX_MS_DESC=The maximum amount of time, in milliseconds, the server will block before answering the fetch request if there isn't sufficient data to immediately satisfy minimum fetch bytes.
38 | REBALANCE_BACKOFF_MS_LABEL=Rebalance Backoff
39 | REBALANCE_BACKOFF_MS_DESC=Backoff time, in milliseconds, between retries during rebalance.
40 | REFRESH_LEADER_BACKOFF_MS_LABEL=Refresh Leader Backoff
41 | REFRESH_LEADER_BACKOFF_MS_DESC=Backoff time, in milliseconds, to wait before trying to determine the leader of a partition that has just lost its leader.
42 | AUTO_OFFSET_RESET_LABEL=Auto Offset Reset
43 | AUTO_OFFSET_RESET_DESC=What to do when there is no initial offset in ZooKeeper or if an offset is out of range.
44 | CONSUMER_TIMEOUT_MS_LABEL=Consumer Timeout
45 | CONSUMER_TIMEOUT_MS_DESC=Throw a timeout exception to the consumer if no message is available for consumption after the specified interval in milliseconds.
46 | EXCLUDE_INTERNAL_TOPICS_LABEL=Exclude Internal Topics
47 | EXCLUDE_INTERNAL_TOPICS_DESC=Whether messages from internal topics (such as offsets) should be exposed to the consumer.
48 | ZOOKEEPER_SESSION_TIMEOUT_MS_LABEL=Zookeeper Session Timeout
49 | ZOOKEEPER_SESSION_TIMEOUT_MS_DESC=ZooKeeper session timeout in milliseconds. If the consumer fails to heartbeat to ZooKeeper for this period of time it is considered dead and a rebalance will occur.
50 | ZOOKEEPER_CONNECTION_TIMEOUT_MS_LABEL=Zookeeper Connection Timeout
51 | ZOOKEEPER_CONNECTION_TIMEOUT_MS_DESC=The max time, in milliseconds, that the client waits while establishing a connection to Zookeeper.
52 | ZOOKEEPER_SYNC_TIME_MS_LABEL=Zookeeper Sync Time
53 | ZOOKEEPER_SYNC_TIME_MS_DESC=How far, in milliseconds, a ZK follower can be behind a ZK leader.
54 | OFFSETS_STORAGE_LABEL=Offsets Storage
OFFSETS_STORAGE_DESC=Select where offsets should be stored, whether in ZooKeeper or Kafka.
56 | OFFSETS_CHANNEL_BACKOFF_MS_LABEL=Offsets Channel Backoff
57 | OFFSETS_CHANNEL_BACKOFF_MS_DESC=The backoff period, in milliseconds, when reconnecting the offsets channel or retrying failed offset fetch/commit requests.
58 | OFFSETS_CHANNEL_SOCKET_TIMEOUT_MS_LABEL=Offsets Channel Socket Timeout
59 | OFFSETS_CHANNEL_SOCKET_TIMEOUT_MS_DESC=Socket timeout, in milliseconds, when reading responses for offset fetch/commit requests. This timeout is also used for ConsumerMetadata requests that are used to query for the offset manager.
60 | OFFSETS_COMMIT_MAX_RETRIES_LABEL=Maximum Offset Commit Retries
61 | OFFSETS_COMMIT_MAX_RETRIES_DESC=Retry the offset commit up to this many times on failure. This retry count only applies to offset commits during shut-down. It does not apply to commits originating from the auto-commit thread. It also does not apply to attempts to query for the offset coordinator before committing offsets. i.e., if a consumer metadata request fails for any reason, it will be retried and that retry does not count toward this limit.
62 | DUAL_COMMIT_ENABLED_LABEL=Dual Commit Enabled
63 | DUAL_COMMIT_ENABLED_DESC=If you are using 'kafka' as offsets storage, you can dual commit offsets to ZooKeeper (in addition to Kafka). This is required during migration from Zookeeper-based offset storage to Kafka-based offset storage. With respect to any given consumer group, it is safe to turn this off after all instances within that group have been migrated to the new version that commits offsets to the broker (instead of directly to ZooKeeper).
64 |
65 | # UI Fields (Outbound)
66 | ACKS_LABEL=Acknowledge
ACKS_DESC=The number of acknowledgments the producer requires the leader to have received before considering a request complete. This controls the durability of records that are sent. The 'Without Acknowledge' option specifies that the record will be immediately added to the socket buffer and considered sent. The 'Leader Acknowledge' option specifies that the leader will write the record to its local log but will respond without awaiting full acknowledgement from all followers. The 'ISRs Acknowledge' option specifies that the leader will wait for the full set of in-sync replicas to acknowledge the record. This is the UI equivalent to the 'acks' property.
68 | TIMEOUT_MS_LABEL=Request Timeout
69 | TIMEOUT_MS_DESC=The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted. This is the UI equivalent of the property 'request.timeout.ms' property.
70 | BUFFER_MEMORY_LABEL=Buffer Memory
71 | BUFFER_MEMORY_DESC=The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are sent faster than they can be delivered to the server the producer will either block or throw an exception based on the preference specified by block on buffer full.
72 | COMPRESSION_TYPE_LABEL=Compression Type
73 | COMPRESSION_TYPE_DESC=The compression type for all data generated by the producer. Valid values are none, gzip, or snappy. Compression is of full batches of data, so the efficacy of batching will also impact the compression ratio (more batching means better compression).
74 | BATCH_SIZE_LABEL=Batch Size
75 | BATCH_SIZE_DESC=The producer will attempt to batch records together into fewer requests whenever multiple records are being sent to the same partition. This helps performance on both the client and the server. This configuration controls the default batch size in bytes.
76 | LINGER_MS_LABEL=Linger
LINGER_MS_DESC=The producer groups together any records that arrive in between request transmissions into a single batched request. Normally this occurs only under load when records arrive faster than they can be sent out. However in some circumstances the client may want to reduce the number of requests even under moderate load. This setting accomplishes this by adding a small amount of artificial delay. Set this value to zero for no delay.
78 | MAX_REQUEST_SIZE_LABEL=Maximum Request Size
79 | MAX_REQUEST_SIZE_DESC=The maximum size of a request. This is also effectively a cap on the maximum record size. Note that the server has its own cap on record size which may be different from this. This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
80 | RECEIVE_BUFFER_BYTES_LABEL=Receive Buffer Bytes
81 | RECEIVE_BUFFER_BYTES_DESC=The size of the TCP receive buffer to use when reading data.
82 | SEND_BUFFER_BYTES_LABEL=Send Buffer Bytes
83 | SEND_BUFFER_BYTES_DESC=The size of the TCP send buffer to use when sending data.
84 | BLOCK_ON_BUFFER_FULL_LABEL=Block On Buffer Full
85 | BLOCK_ON_BUFFER_FULL_DESC=When our memory buffer is exhausted we must either stop accepting new records (block) or throw errors. By default this setting is true and we block, however in some scenarios blocking is not desirable and it is better to immediately give an error. Setting this to false will accomplish that: the producer will throw a BufferExhaustedException if a record is sent and the buffer space is full.
86 | METADATA_FETCH_TIMEOUT_MS_LABEL=Metadata Fetch Timeout
87 | METADATA_FETCH_TIMEOUT_MS_DESC=The first time data is sent to a topic we must fetch metadata about that topic to know which servers host the topic's partitions. This configuration controls the maximum amount of time we will block waiting for the metadata fetch to succeed before throwing an exception back to the client.
88 | METADATA_MAX_AGE_MS_LABEL=Maximum Metadata Age
89 | METADATA_MAX_AGE_MS_DESC=The period of time in milliseconds after which we force a refresh of metadata even if we haven't seen any partition leadership changes to proactively discover any new brokers or partitions.
90 | RECONNECT_BACKOFF_MS_LABEL=Reconnect Backoff
91 | RECONNECT_BACKOFF_MS_DESC=The amount of time to wait before attempting to reconnect to a given host when a connection fails. This avoids a scenario where the client repeatedly attempts to connect to a host in a tight loop.
92 | CLIENT_ID_LABEL=Client Identifier
93 | CLIENT_ID_DESC=The string to pass to the server when making requests. The purpose of this is to be able to track the source of requests beyond just ip/port by allowing a logical application name to be included with the request.
94 |
95 | # Validation Error Messages
96 | INVALID_LOAD_BALANCING=Load balancing is managed by Kafka. Therefore, the load balancing algorithm option must be set to 'None'.
97 | ENDPOINT_INFO_MISSING=The endpoint information about Kafka is missing.
98 | ENDPOINT_INCORRECT=One of the endpoints has an invalid URI format.
NUMBER_THREADS_INVALID=The number of threads must be greater than or equal to one.
TIMEOUT_INVALID=The request timeout must be greater than or equal to one.
101 |
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaTransportProvider.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.net.URL;
33 | import java.util.ArrayList;
34 | import java.util.Collection;
35 | import java.util.Collections;
36 | import java.util.Map;
37 | import java.util.Timer;
38 | import java.util.TimerTask;
39 | import java.util.concurrent.ConcurrentHashMap;
40 |
41 | import org.apache.xmlbeans.SchemaType;
42 | import org.apache.xmlbeans.XmlObject;
43 |
44 | import com.bea.wli.config.Ref;
45 | import com.bea.wli.config.env.NonQualifiedEnvValue;
46 | import com.bea.wli.sb.transports.EndPointConfiguration;
47 | import com.bea.wli.sb.transports.EndPointOperations;
48 | import com.bea.wli.sb.transports.EndPointOperations.CommonOperation;
49 | import com.bea.wli.sb.transports.EndPointOperations.Create;
50 | import com.bea.wli.sb.transports.EndPointOperations.Delete;
51 | import com.bea.wli.sb.transports.EndPointOperations.Resume;
52 | import com.bea.wli.sb.transports.EndPointOperations.Suspend;
53 | import com.bea.wli.sb.transports.EndPointOperations.Update;
54 | import com.bea.wli.sb.transports.ProviderConfigurationDocument;
55 | import com.bea.wli.sb.transports.ServiceTransportSender;
56 | import com.bea.wli.sb.transports.TransportEndPoint;
57 | import com.bea.wli.sb.transports.TransportException;
58 | import com.bea.wli.sb.transports.TransportManagerHelper;
59 | import com.bea.wli.sb.transports.TransportOptions;
60 | import com.bea.wli.sb.transports.TransportProvider;
61 | import com.bea.wli.sb.transports.TransportProviderConfiguration;
62 | import com.bea.wli.sb.transports.TransportSendListener;
63 | import com.bea.wli.sb.transports.TransportSender;
64 | import com.bea.wli.sb.transports.TransportValidationContext;
65 | import com.bea.wli.sb.transports.ui.TransportUIBinding;
66 | import com.bea.wli.sb.transports.ui.TransportUIContext;
67 |
68 | /**
69 | * @author Ricardo Ferreira
70 | */
71 | public class KafkaTransportProvider implements TransportProvider {
72 |
73 | private Timer startupTimer;
74 | private Map endpoints;
75 |
76 | private KafkaTransportProvider() {
77 |
78 | endpoints = new ConcurrentHashMap();
79 |
80 | }
81 |
82 | private void scheduleEndpointsStartup() {
83 |
84 | if (startupTimer == null) {
85 |
86 | int checkInterval = KafkaUtil.getCheckInterval();
87 |
88 | startupTimer = new Timer("Kafka Endpoints Startup");
89 | startupTimer.schedule(new EndpointsStartupTask(),
90 | checkInterval, checkInterval);
91 |
92 | }
93 |
94 | }
95 |
96 | public TransportEndPoint createEndPoint(Create context)
97 | throws TransportException {
98 |
99 | Ref serviceRef = context.getRef();
100 |
101 | KafkaEndpoint endpoint = new KafkaEndpoint(this,
102 | serviceRef, context.getEndPointConfiguration());
103 |
104 | endpoints.put(serviceRef, endpoint);
105 |
106 | return endpoint;
107 |
108 | }
109 |
110 | public TransportEndPoint updateEndPoint(Update context)
111 | throws TransportException {
112 |
113 | deleteEndPoint(EndPointOperations.getDeleteFromUpdate(context));
114 | return createEndPoint(EndPointOperations.getCreateFromUpdate(context));
115 |
116 | }
117 |
118 | public void deleteEndPoint(Delete context) throws TransportException {
119 |
120 | Ref serviceRef = context.getRef();
121 | KafkaEndpoint endpoint = endpoints.get(serviceRef);
122 |
123 | if (endpoint != null) {
124 |
125 | endpoint.stop();
126 | endpoints.remove(serviceRef);
127 |
128 | }
129 |
130 | }
131 |
132 | public void suspendEndPoint(Suspend context) throws TransportException {
133 |
134 | // Nothing to do...
135 |
136 | }
137 |
138 | public void resumeEndPoint(Resume context) throws TransportException {
139 |
140 | // Nothing to do...
141 |
142 | }
143 |
144 | public void activationComplete(CommonOperation context) {
145 |
146 | Ref serviceRef = context.getRef();
147 | EndPointOperations.EndPointOperationTypeEnum type = context.getType();
148 | KafkaEndpoint endpoint = endpoints.get(serviceRef);
149 |
150 | if (TransportManagerHelper.isRuntimeEnabled() && endpoint != null) {
151 |
152 | if (EndPointOperations.EndPointOperationTypeEnum.CREATE.equals(type) ||
153 | EndPointOperations.EndPointOperationTypeEnum.UPDATE.equals(type) ||
154 | EndPointOperations.EndPointOperationTypeEnum.RESUME.equals(type)) {
155 |
156 | try {
157 |
158 | // Before start this endpoint, first we need
159 | // to make sure the server is running. This
160 | // verification is necessary because if the
161 | // endpoint is started during WebLogic bootstrap;
162 | // some of the Work Managers may be on shutdown
163 | // state, causing thread scheduling issues.
164 |
165 | if (KafkaUtil.isServerRunning()) {
166 |
167 | endpoint.start();
168 |
169 | } else {
170 |
171 | // Delay the endpoints start up until the
172 | // reaches the 'RUNNING' state. The method
173 | // below schedules a timer that keep polling
174 | // the server about it's state. The interval
175 | // between check's defaults to five seconds,
176 | // but it can be changed via the following
177 | // JVM property:
178 |
179 | // -Doracle.ateam.sb.transports.kafka.endpoint.startup.checkInterval
180 |
181 | // The timer is scheduled when the first
182 | // endpoint is created. Subsequent calls
183 | // to this method has no effect since the
184 | // timer has been created already.
185 |
186 | scheduleEndpointsStartup();
187 |
188 | }
189 |
190 | } catch (Exception ex) {
191 |
192 | KafkaTransportLogger.error("Error while starting a Kafka endpoint.", ex);
193 |
194 | }
195 |
196 | } else if (EndPointOperations.EndPointOperationTypeEnum.SUSPEND.equals(type)) {
197 |
198 | try {
199 |
200 | endpoint.stop();
201 |
202 | } catch (TransportException te) {
203 |
204 | KafkaTransportLogger.error("Error while stopping a Kafka endpoint.", te);
205 |
206 | }
207 |
208 | }
209 |
210 | }
211 |
212 | }
213 |
214 | public TransportEndPoint getEndPoint(Ref serviceRef) throws TransportException {
215 |
216 | return endpoints.get(serviceRef);
217 |
218 | }
219 |
220 | public Collection getEndPoints()
221 | throws TransportException {
222 |
223 | return Collections.unmodifiableCollection(endpoints.values());
224 |
225 | }
226 |
227 | public TransportProviderConfiguration getProviderConfiguration()
228 | throws TransportException {
229 |
230 | URL configUrl = null;
231 | ProviderConfigurationDocument providerConfigDoc = null;
232 | TransportProviderConfiguration providerConfiguration = null;
233 |
234 | try {
235 |
236 | configUrl = this.getClass().getClassLoader().getResource("kafka-config.xml");
237 | providerConfigDoc = ProviderConfigurationDocument.Factory.parse(configUrl);
238 | providerConfiguration = providerConfigDoc.getProviderConfiguration();
239 |
240 | } catch (Exception ex) {
241 |
242 | throw new TransportException(ex);
243 |
244 | }
245 |
246 | return providerConfiguration;
247 |
248 | }
249 |
250 | @SuppressWarnings({ "rawtypes", "unchecked" })
251 | public Map getBusinessServicePropertiesForProxy(Ref serviceRef)
252 | throws TransportException {
253 |
254 | throw new UnsupportedOperationException();
255 |
256 | }
257 |
258 | public XmlObject getProviderSpecificConfiguration(Ref serviceRef,
259 | Map props) throws TransportException {
260 |
261 | throw new UnsupportedOperationException();
262 |
263 | }
264 |
265 | public SchemaType getEndPointConfigurationSchemaType()
266 | throws TransportException {
267 |
268 | return KafkaEndPointConfiguration.type;
269 |
270 | }
271 |
272 | public SchemaType getRequestMetaDataSchemaType() throws TransportException {
273 |
274 | return KafkaRequestMetaDataXML.type;
275 |
276 | }
277 |
278 | public SchemaType getRequestHeadersSchemaType() throws TransportException {
279 |
280 | return KafkaRequestHeadersXML.type;
281 |
282 | }
283 |
284 | public SchemaType getResponseMetaDataSchemaType() throws TransportException {
285 |
286 | return KafkaResponseMetaDataXML.type;
287 |
288 | }
289 |
290 | public SchemaType getResponseHeadersSchemaType() throws TransportException {
291 |
292 | return KafkaResponseHeadersXML.type;
293 |
294 | }
295 |
296 | public Collection getEnvValues(Ref serviceRef,
297 | EndPointConfiguration endpointConfiguration)
298 | throws TransportException {
299 |
300 | return new ArrayList();
301 |
302 | }
303 |
304 | public void setEnvValues(Ref serviceRef,
305 | EndPointConfiguration endpointConfiguration,
306 | Collection envValues)
307 | throws TransportException {
308 |
309 | }
310 |
311 | public void setExternalReferences(Map props,
312 | EndPointConfiguration endpointConfiguration)
313 | throws TransportException {
314 |
315 | }
316 |
317 | public Collection getExternalReferences(
318 | EndPointConfiguration endpointConfiguration)
319 | throws TransportException {
320 |
321 | return new ArrayList();
322 |
323 | }
324 |
325 | public String getId() {
326 |
327 | return KafkaConstants.KAFKA_PROVIDER_ID;
328 |
329 | }
330 |
331 | public TransportUIBinding getUIBinding(TransportUIContext uiContext)
332 | throws TransportException {
333 |
334 | return new KafkaUIBinding(uiContext);
335 |
336 | }
337 |
338 | public void sendMessageAsync(TransportSender transportSender,
339 | TransportSendListener listener, TransportOptions options)
340 | throws TransportException {
341 |
342 | if (transportSender instanceof ServiceTransportSender) {
343 |
344 | ServiceTransportSender sts = (ServiceTransportSender) transportSender;
345 | KafkaEndpoint endpoint = (KafkaEndpoint) sts.getEndPoint();
346 | endpoint.sendMessageAsync(transportSender, listener, options);
347 |
348 | }
349 |
350 | }
351 |
352 | public void shutdown() throws TransportException {
353 |
354 | if (TransportManagerHelper.isRuntimeEnabled()) {
355 |
356 | Collection endpoints = getEndPoints();
357 |
358 | if (endpoints != null && !endpoints.isEmpty()) {
359 |
360 | for (KafkaEndpoint endpoint : endpoints) {
361 |
362 | endpoint.stop();
363 |
364 | }
365 |
366 | }
367 |
368 | }
369 |
370 | }
371 |
372 | public void validateEndPointConfiguration(TransportValidationContext tvc) {
373 | }
374 |
375 | private class EndpointsStartupTask extends TimerTask {
376 |
377 | private long firstExecution = -1;
378 |
379 | @Override
380 | public void run() {
381 |
382 | long elapsedTime = 0;
383 | Collection endpoints = null;
384 |
385 | try {
386 |
387 | if (firstExecution == -1) {
388 |
389 | firstExecution = System.currentTimeMillis();
390 |
391 | }
392 |
393 | if (KafkaUtil.isServerRunning()) {
394 |
395 | endpoints = getEndPoints();
396 |
397 | for (KafkaEndpoint endpoint : endpoints) {
398 |
399 | endpoint.start();
400 |
401 | }
402 |
403 | // Job done here. There is no need anymore
404 | // to continue with the polling process on
405 | // the server.
406 |
407 | startupTimer.cancel();
408 |
409 | }
410 |
411 | // This thread cannot keep running for ever.
412 | // It's assumed that eventually the server will
413 | // finish it's bootstrap and change it's state to
414 | // 'RUNNING'. If for some reason this does not
415 | // occur or takes too long; we need to ensure that
416 | // this timer thread will timeout. Default timeout
417 | // is five minutes, but it can be changed via the
418 | // following JVM property:
419 |
420 | // -Doracle.ateam.sb.transports.kafka.endpoint.startup.timeout
421 |
422 | elapsedTime = System.currentTimeMillis() - firstExecution;
423 |
424 | if (elapsedTime >= KafkaUtil.getStartupTimeout()) {
425 |
426 | startupTimer.cancel();
427 |
428 | }
429 |
430 | } catch (Exception ex) {
431 |
432 | KafkaTransportLogger.error(ex.getMessage(), ex);
433 |
434 | }
435 |
436 | }
437 |
438 | }
439 |
440 | private static KafkaTransportProvider instance;
441 |
442 | public static KafkaTransportProvider getInstance() {
443 |
444 | if (instance == null) {
445 |
446 | instance = new KafkaTransportProvider();
447 |
448 | }
449 |
450 | return instance;
451 |
452 | }
453 |
454 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaUIBinding.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.math.BigInteger;
33 | import java.util.ArrayList;
34 | import java.util.List;
35 | import java.util.Map;
36 | import java.util.ResourceBundle;
37 |
38 | import org.apache.xmlbeans.XmlObject;
39 |
40 | import com.bea.wli.sb.services.BindingTypeInfo;
41 | import com.bea.wli.sb.transports.EndPointConfiguration;
42 | import com.bea.wli.sb.transports.LoadBalancingAlgorithmEnum.Enum;
43 | import com.bea.wli.sb.transports.TransportException;
44 | import com.bea.wli.sb.transports.TransportManagerHelper;
45 | import com.bea.wli.sb.transports.ui.TransportEditField;
46 | import com.bea.wli.sb.transports.ui.TransportUIBinding;
47 | import com.bea.wli.sb.transports.ui.TransportUIContext;
48 | import com.bea.wli.sb.transports.ui.TransportUIError;
49 | import com.bea.wli.sb.transports.ui.TransportUIFactory;
50 | import com.bea.wli.sb.transports.ui.TransportUIGenericInfo;
51 | import com.bea.wli.sb.transports.ui.TransportViewField;
52 | import com.bea.wli.sb.transports.util.UIBindingUtils;
53 |
54 | /**
55 | * @author Ricardo Ferreira
56 | */
57 | public class KafkaUIBinding implements TransportUIBinding {
58 |
	private TransportUIContext uiContext; // UI context supplied by the console/IDE for this service.
	private ResourceBundle bundle; // Localized labels and descriptions for the transport's UI fields.
61 |
62 | public KafkaUIBinding(TransportUIContext uiContext) {
63 |
64 | this.uiContext = uiContext;
65 |
66 | this.bundle = ResourceBundle.getBundle(
67 | getClass().getName(),
68 | uiContext.getLocale());
69 |
70 | }
71 |
72 | @Override
73 | public TransportEditField[] getEditPage(
74 | EndPointConfiguration endpointConfig,
75 | BindingTypeInfo bindingTypeInfo) throws TransportException {
76 |
77 | List fields = null;
78 | KafkaEndPointConfiguration kafkaEndpointConfig = null;
79 | String topicName = "test";
80 | String customProps = "";
81 | String customPropsLabel = null;
82 |
83 | // Consumer Properties...
84 | short consumerThreads = 1;
85 | String dispatchPolicy = UIBindingUtils.DEFAULT_PROXY_WORK_MANAGER;
86 | String groupId = uiContext.getServiceRef().getLocalName();
87 |
88 | // Producer properties
89 | String acks = "1";
90 | long timeoutMs = 30000;
91 |
92 | try {
93 |
94 | fields = new ArrayList();
95 | uiContext.put("request-type", bindingTypeInfo.getRequestMessageType());
96 |
97 | // Capture all the data previously stored in the service endpoint
98 | // configuration. If this is the first time its being created then
99 | // set default values for some built-in UI fields.
100 |
101 | if (endpointConfig != null && endpointConfig.isSetProviderSpecific()) {
102 |
103 | kafkaEndpointConfig = KafkaUtil.getConfig(endpointConfig);
104 | topicName = kafkaEndpointConfig.getTopicName();
105 | customProps = kafkaEndpointConfig.getCustomProps();
106 |
107 | if (uiContext.isProxy()) {
108 |
109 | dispatchPolicy = kafkaEndpointConfig.getInboundProperties().getDispatchPolicy();
110 | consumerThreads = kafkaEndpointConfig.getInboundProperties().getConsumerThreads();
111 | groupId = kafkaEndpointConfig.getInboundProperties().getGroupId();
112 |
113 | } else {
114 |
115 | acks = kafkaEndpointConfig.getOutboundProperties().getAcks();
116 | timeoutMs = kafkaEndpointConfig.getOutboundProperties().getTimeoutMs().intValue();
117 |
118 | }
119 |
120 | } else {
121 |
122 | if (!uiContext.isProxy()) {
123 |
124 | endpointConfig.getOutboundProperties().setLoadBalancingAlgorithm(Enum.forInt(4));
125 | endpointConfig.getOutboundProperties().setRetryCount((short) 0);
126 | endpointConfig.getOutboundProperties().setRetryInterval(0);
127 |
128 | }
129 |
130 | }
131 |
132 | // Now we are going to create all the UI fields...
133 |
134 | if (uiContext.isProxy()) {
135 |
136 | // Dispatch Policy
137 |
138 | TransportEditField dispatchPolicyField = UIBindingUtils.createDispatchPolicyField(
139 | dispatchPolicy, uiContext, TransportManagerHelper.isOffline());
140 |
141 | dispatchPolicyField.setRequired(true);
142 | fields.add(dispatchPolicyField);
143 |
144 | // Consumer Threads
145 |
146 | TransportUIFactory.TextBoxObject consumerThreadsTxt =
147 | TransportUIFactory.createTextBox(String.valueOf(consumerThreads), 14);
148 |
149 | TransportEditField consumerThreadsField = TransportUIFactory.createEditField("consumer-threads",
150 | bundle.getString("CONSUMER_THREADS_LABEL"), bundle.getString("CONSUMER_THREADS_DESC"),
151 | consumerThreadsTxt);
152 |
153 | consumerThreadsField.setRequired(true);
154 | fields.add(consumerThreadsField);
155 |
156 | // Topic Name
157 |
158 | TransportUIFactory.TextBoxObject topicNameTxt =
159 | TransportUIFactory.createTextBox(String.valueOf(topicName), 14);
160 |
161 | TransportEditField topicNameField = TransportUIFactory.createEditField("topic-name",
162 | bundle.getString("TOPIC_NAME_LABEL"), bundle.getString("TOPIC_NAME_DESC"),
163 | topicNameTxt);
164 |
165 | topicNameField.setRequired(true);
166 | fields.add(topicNameField);
167 |
168 | // Group ID
169 |
170 | TransportUIFactory.TextBoxObject groupIdTxt =
171 | TransportUIFactory.createTextBox(String.valueOf(groupId), 14);
172 |
173 | TransportEditField groupIdField = TransportUIFactory.createEditField("group-id",
174 | bundle.getString("GROUP_ID_LABEL"), bundle.getString("GROUP_ID_DESC"),
175 | groupIdTxt);
176 |
177 | groupIdField.setRequired(true);
178 | fields.add(groupIdField);
179 |
180 | } else {
181 |
182 | // Topic Name
183 |
184 | TransportUIFactory.TextBoxObject topicNameTxt =
185 | TransportUIFactory.createTextBox(topicName, 14);
186 |
187 | TransportEditField topicNameField = TransportUIFactory.createEditField("topic-name",
188 | bundle.getString("TOPIC_NAME_LABEL"), bundle.getString("TOPIC_NAME_DESC"),
189 | topicNameTxt);
190 |
191 | topicNameField.setRequired(true);
192 | fields.add(topicNameField);
193 |
194 | // Acks
195 |
196 | TransportUIFactory.SelectObject acksTxt = TransportUIFactory.createSelectObject(
197 | new String[]{"0", "1", "all"}, new String[]{"Without Acknowledge", "Leader Acknowledge", "ISRs Acknowledge"},
198 | String.valueOf(acks), TransportUIFactory.SelectObject.DISPLAY_LIST, false);
199 |
200 | TransportEditField acksField = TransportUIFactory.createEditField("acks",
201 | bundle.getString("ACKS_LABEL"), bundle.getString("ACKS_DESC"), acksTxt);
202 |
203 | acksField.setRequired(true);
204 | fields.add(acksField);
205 |
206 | // Timeout
207 |
208 | TransportUIFactory.TextBoxObject timeoutMsTxt =
209 | TransportUIFactory.createTextBox(String.valueOf(timeoutMs), 14);
210 |
211 | TransportEditField timeoutMsField = TransportUIFactory.createEditField("timeout-ms",
212 | bundle.getString("TIMEOUT_MS_LABEL"), bundle.getString("TIMEOUT_MS_DESC"),
213 | timeoutMsTxt);
214 |
215 | timeoutMsField.setRequired(true);
216 | fields.add(timeoutMsField);
217 |
218 | }
219 |
220 | // Custom Properties
221 |
222 | TransportUIFactory.TextBoxObject customPropsJDevTxt = null;
223 | TransportUIFactory.TextAreaObject customPropsWebTxt = null;
224 | TransportEditField customPropsField = null;
225 |
226 | if (uiContext.isProxy()) {
227 |
228 | customPropsLabel = bundle.getString("CUSTOM_PROPS_LABEL_CONSUMER");
229 |
230 | } else {
231 |
232 | customPropsLabel = bundle.getString("CUSTOM_PROPS_LABEL_PRODUCER");
233 |
234 | }
235 |
236 | // Current JDeveloper plugin for Service Bus does not render
237 | // correctly text area objects, causing them to be too small
238 | // for the UI. In this case, we render a standard text box
239 | // that has a default size.
240 |
241 | if (TransportManagerHelper.isOffline()) {
242 |
243 | customPropsJDevTxt = TransportUIFactory.createTextBox(customProps, 14);
244 |
245 | customPropsField = TransportUIFactory.createEditField("custom-props",
246 | customPropsLabel, bundle.getString("CUSTOM_PROPS_DESC"), customPropsJDevTxt);
247 |
248 | } else {
249 |
250 | customPropsWebTxt = TransportUIFactory.createTextArea(customProps, 70, 5, false);
251 |
252 | customPropsField = TransportUIFactory.createEditField("custom-props",
253 | customPropsLabel, bundle.getString("CUSTOM_PROPS_DESC"), customPropsWebTxt);
254 |
255 | }
256 |
257 | customPropsField.setAdvanced(true);
258 | fields.add(customPropsField);
259 |
260 | } catch (Exception ex) {
261 |
262 | throw new TransportException(ex);
263 |
264 | }
265 |
266 | return fields.toArray(new TransportEditField[fields.size()]);
267 |
268 | }
269 |
270 | @Override
271 | public TransportUIGenericInfo getGenericInfo() {
272 |
273 | TransportUIGenericInfo uiGenericInfo = new TransportUIGenericInfo();
274 |
275 | if (uiContext.isProxy()) {
276 |
277 | uiGenericInfo.setUriFormat(bundle.getString("PROXY_URI_FORMAT"));
278 | uiGenericInfo.setUriAutofill(bundle.getString("PROXY_URI_FORMAT"));
279 |
280 | } else {
281 |
282 | uiGenericInfo.setUriFormat(bundle.getString("BUSINESS_URI_FORMAT"));
283 |
284 | }
285 |
286 | return uiGenericInfo;
287 |
288 | }
289 |
	/**
	 * Serializes the values captured on the provider-specific edit page into
	 * the transport's XMLBeans configuration object, which Service Bus
	 * persists as part of the service definition.
	 *
	 * @param fields the provider-specific edit fields as currently filled in.
	 * @return the populated Kafka endpoint configuration.
	 * @throws TransportException if any field value cannot be read or set.
	 */
	@Override
	public XmlObject getProviderSpecificConfiguration(
			TransportEditField[] fields) throws TransportException {

		KafkaEndPointConfiguration kafkaEndpointConfig = null;
		KafkaInboundPropertiesType kafkaInboundProps = null;
		KafkaOutboundPropertiesType kafkaOutboundProps = null;
		Map map = null;

		try {

			map = TransportEditField.getObjectMap(fields);

			kafkaEndpointConfig = KafkaEndPointConfiguration.Factory.newInstance();
			kafkaEndpointConfig.setTopicName(TransportUIFactory.getStringValue(map, "topic-name"));
			// The request payload type (text/binary) comes from the UI context,
			// not from an edit field. NOTE(review): assumes "request-type" is
			// always present in the context — a missing entry would NPE here
			// (caught below and rethrown as TransportException).
			kafkaEndpointConfig.setRequestType(uiContext.get("request-type").toString());
			// The response type is hard-wired to "NONE": messaging through
			// this transport is one-way from the service's point of view.
			kafkaEndpointConfig.setResponseType("NONE");
			kafkaEndpointConfig.setCustomProps(TransportUIFactory.getStringValue(map, "custom-props"));

			if (uiContext.isProxy()) {

				// Inbound (proxy) services carry consumer-side settings.
				kafkaInboundProps = kafkaEndpointConfig.addNewInboundProperties();

				kafkaInboundProps.setDispatchPolicy(TransportUIFactory.getStringValue(map, UIBindingUtils.DISPATCH_POLICY));
				kafkaInboundProps.setConsumerThreads((short) TransportUIFactory.getIntValue(map, "consumer-threads"));
				kafkaInboundProps.setGroupId(TransportUIFactory.getStringValue(map, "group-id"));

				// NOTE(review): addNewInboundProperties() already attaches the
				// child element, so this set call looks redundant — confirm
				// against the XMLBeans-generated API before removing it.
				kafkaEndpointConfig.setInboundProperties(kafkaInboundProps);

			} else {

				// Outbound (business) services carry producer-side settings.
				kafkaOutboundProps = kafkaEndpointConfig.addNewOutboundProperties();

				kafkaOutboundProps.setAcks(TransportUIFactory.getStringValue(map, "acks"));
				kafkaOutboundProps.setTimeoutMs(BigInteger.valueOf(TransportUIFactory.getIntValue(map, "timeout-ms")));

				kafkaEndpointConfig.setOutboundProperties(kafkaOutboundProps);

			}

		} catch (Exception ex) {

			throw new TransportException(ex);

		}

		return kafkaEndpointConfig;

	}
339 |
	/**
	 * Supplies the read-only "view" page for a configured endpoint.
	 *
	 * <p>Returning {@code null} means this transport provides no custom view
	 * rendering. NOTE(review): presumably the Service Bus console then falls
	 * back to its default display of the endpoint configuration — confirm
	 * against the Transport SDK documentation.
	 */
	@Override
	public TransportViewField[] getViewPage(EndPointConfiguration endpointConfig)
			throws TransportException {

		return null;

	}
347 |
348 | @Override
349 | public boolean isServiceTypeSupported(BindingTypeInfo bindingTypeInfo) {
350 |
351 | BindingTypeInfo.BindingTypeEnum bindingTypeEnum = null;
352 | BindingTypeInfo.MessageTypeEnum reqMsgType = null;
353 | BindingTypeInfo.MessageTypeEnum resMsgType = null;
354 |
355 | try {
356 |
357 | bindingTypeEnum = bindingTypeInfo.getType();
358 | reqMsgType = bindingTypeInfo.getRequestMessageType();
359 | resMsgType = bindingTypeInfo.getResponseMessageType();
360 |
361 | if (bindingTypeEnum.equals(BindingTypeInfo.BindingTypeEnum.MIXED)) {
362 |
363 | if (reqMsgType != null) {
364 |
365 | return !reqMsgType.equals(BindingTypeInfo.MessageTypeEnum.XML) &&
366 | !reqMsgType.equals(BindingTypeInfo.MessageTypeEnum.MFL) &&
367 | !reqMsgType.equals(BindingTypeInfo.MessageTypeEnum.JAVA) &&
368 | resMsgType == null;
369 |
370 | }
371 |
372 | }
373 |
374 | return !bindingTypeEnum.equals(BindingTypeInfo.BindingTypeEnum.SOAP) &&
375 | !bindingTypeEnum.equals(BindingTypeInfo.BindingTypeEnum.ABSTRACT_SOAP) &&
376 | !bindingTypeEnum.equals(BindingTypeInfo.BindingTypeEnum.ABSTRACT_XML) &&
377 | !bindingTypeEnum.equals(BindingTypeInfo.BindingTypeEnum.XML) &&
378 | !bindingTypeEnum.equals(BindingTypeInfo.BindingTypeEnum.REST);
379 |
380 | } catch (Exception ex) {
381 |
382 | KafkaTransportLogger.error("Error while detecting the service types supported.", ex);
383 |
384 | }
385 |
386 | return false;
387 | }
388 |
	/**
	 * Hook that lets a provider update dependent edit fields when the field
	 * named {@code name} changes. This transport has no inter-field
	 * dependencies, so the fields are returned unchanged.
	 */
	@Override
	public TransportEditField[] updateEditPage(TransportEditField[] fields,
			String name) throws TransportException {

		return fields;

	}
396 |
397 | @Override
398 | public TransportUIError[] validateMainForm(TransportEditField[] fields) {
399 |
400 | List errors = new ArrayList();
401 | Map map = TransportEditField.getObjectMap(fields);
402 |
403 | String endpoints = null;
404 |
405 | if (uiContext.isProxy()) {
406 |
407 | endpoints = TransportUIFactory.getStringValue(map, PARAM_URI);
408 |
409 | if (endpoints == null || endpoints.length() == 0) {
410 |
411 | errors.add(new TransportUIError(PARAM_URI, bundle.getString("ENDPOINT_INFO_MISSING")));
412 |
413 | }
414 |
415 | String[] _endpoints = endpoints.split(",");
416 |
417 | if (_endpoints == null || _endpoints.length == 0) {
418 |
419 | errors.add(new TransportUIError(PARAM_URI, bundle.getString("ENDPOINT_INFO_MISSING")));
420 |
421 | }
422 |
423 | for (String endpoint : _endpoints) {
424 |
425 | String[] parts = endpoint.split(":");
426 |
427 | if (parts == null || parts.length < 2) {
428 |
429 | errors.add(new TransportUIError(PARAM_URI, bundle.getString("ENDPOINT_INCORRECT")));
430 |
431 | }
432 |
433 | }
434 |
435 | } else {
436 |
437 | String loadBalacing = TransportUIFactory.getStringValue(map, PARAM_LOAD_BALANCING);
438 |
439 | if (!loadBalacing.equals("4")) {
440 |
441 | errors.add(new TransportUIError(PARAM_LOAD_BALANCING,
442 | bundle.getString("INVALID_LOAD_BALANCING")));
443 |
444 | }
445 |
446 | List uris = TransportUIFactory.getStringValues(map, PARAM_URI);
447 |
448 | for (String[] uriStr : uris) {
449 |
450 | for (String _endpointUri : uriStr) {
451 |
452 | String[] _endpoints = _endpointUri.split(",");
453 |
454 | if (_endpoints == null || _endpoints.length == 0) {
455 |
456 | errors.add(new TransportUIError(PARAM_URI, bundle.getString("ENDPOINT_INFO_MISSING")));
457 |
458 | }
459 |
460 | for (String endpoint : _endpoints) {
461 |
462 | String[] parts = endpoint.split(":");
463 |
464 | if (parts == null || parts.length < 2) {
465 |
466 | errors.add(new TransportUIError(PARAM_URI, bundle.getString("ENDPOINT_INCORRECT")));
467 |
468 | }
469 |
470 | }
471 |
472 | }
473 |
474 | }
475 |
476 | }
477 |
478 | return errors.isEmpty() ? null : errors.toArray(
479 | new TransportUIError[errors.size()]);
480 |
481 | }
482 |
483 | @Override
484 | public TransportUIError[] validateProviderSpecificForm(
485 | TransportEditField[] fields) {
486 |
487 | List errors = new ArrayList();
488 | Map map = TransportEditField.getObjectMap(fields);
489 |
490 | if (uiContext.isProxy()) {
491 |
492 | int consumerThreads = TransportUIFactory.getIntValue(map, "consumer-threads");
493 |
494 | if (consumerThreads <= 0) {
495 |
496 | errors.add(new TransportUIError("consumer-threads", bundle.getString("NUMBER_THREADS_INVALID")));
497 |
498 | }
499 |
500 | } else {
501 |
502 | int requestTimeoutMs = TransportUIFactory.getIntValue(map, "timeout-ms");
503 |
504 | if (requestTimeoutMs < 1) {
505 |
506 | errors.add(new TransportUIError("timeout-ms", bundle.getString("TIMEOUT_INVALID")));
507 |
508 | }
509 |
510 | }
511 |
512 | return errors.isEmpty() ? null : errors.toArray(
513 | new TransportUIError[errors.size()]);
514 |
515 | }
516 |
517 | }
--------------------------------------------------------------------------------
/src/oracle/ateam/sb/transports/kafka/KafkaEndpoint.java:
--------------------------------------------------------------------------------
1 | /*
2 | ** Copyright (c) 2014, 2016 Oracle and/or its affiliates
3 | ** The Universal Permissive License (UPL), Version 1.0
4 | *
5 | ** Subject to the condition set forth below, permission is hereby granted to any person obtaining a copy of this
6 | ** software, associated documentation and/or data (collectively the "Software"), free of charge and under any and
7 | ** all copyright rights in the Software, and any and all patent rights owned or freely licensable by each licensor
8 | ** hereunder covering either (i) the unmodified Software as contributed to or provided by such licensor, or
9 | ** (ii) the Larger Works (as defined below), to deal in both
10 | **
11 | ** (a) the Software, and
12 | ** (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if one is included with the Software
13 | ** (each a “Larger Work” to which the Software is contributed by such licensors),
14 | **
15 | ** without restriction, including without limitation the rights to copy, create derivative works of, display,
16 | ** perform, and distribute the Software and make, use, sell, offer for sale, import, export, have made, and have
17 | ** sold the Software and the Larger Work(s), and to sublicense the foregoing rights on either these or other terms.
18 | **
19 | ** This license is subject to the following condition:
20 | ** The above copyright notice and either this complete permission notice or at a minimum a reference to the UPL must
21 | ** be included in all copies or substantial portions of the Software.
22 | **
23 | ** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
24 | ** THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 | ** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
26 | ** CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 | ** IN THE SOFTWARE.
28 | */
29 |
30 | package oracle.ateam.sb.transports.kafka;
31 |
32 | import java.lang.reflect.Method;
33 | import java.net.URI;
34 | import java.util.ArrayList;
35 | import java.util.Arrays;
36 | import java.util.Collection;
37 | import java.util.HashMap;
38 | import java.util.List;
39 | import java.util.Map;
40 | import java.util.Properties;
41 | import java.util.Set;
42 | import java.util.logging.Level;
43 |
44 | import org.apache.kafka.clients.consumer.ConsumerConfig;
45 | import org.apache.kafka.clients.consumer.ConsumerRecord;
46 | import org.apache.kafka.clients.consumer.ConsumerRecords;
47 | import org.apache.kafka.clients.consumer.KafkaConsumer;
48 | import org.apache.kafka.clients.producer.Callback;
49 | import org.apache.kafka.clients.producer.KafkaProducer;
50 | import org.apache.kafka.clients.producer.ProducerConfig;
51 | import org.apache.kafka.clients.producer.ProducerRecord;
52 | import org.apache.kafka.clients.producer.RecordMetadata;
53 | import org.apache.kafka.common.serialization.ByteArrayDeserializer;
54 | import org.apache.kafka.common.serialization.ByteArraySerializer;
55 | import org.apache.kafka.common.serialization.StringDeserializer;
56 | import org.apache.kafka.common.serialization.StringSerializer;
57 |
58 | import com.bea.wli.config.Ref;
59 | import com.bea.wli.sb.sources.ByteArraySource;
60 | import com.bea.wli.sb.sources.Source;
61 | import com.bea.wli.sb.sources.StringSource;
62 | import com.bea.wli.sb.sources.TransformException;
63 | import com.bea.wli.sb.transports.EndPointConfiguration;
64 | import com.bea.wli.sb.transports.OutboundTransportMessageContext;
65 | import com.bea.wli.sb.transports.RequestHeaders;
66 | import com.bea.wli.sb.transports.TransportException;
67 | import com.bea.wli.sb.transports.TransportManager;
68 | import com.bea.wli.sb.transports.TransportManagerHelper;
69 | import com.bea.wli.sb.transports.TransportOptions;
70 | import com.bea.wli.sb.transports.TransportProvider;
71 | import com.bea.wli.sb.transports.TransportSendListener;
72 | import com.bea.wli.sb.transports.TransportSender;
73 | import com.bea.wli.sb.transports.URIType;
74 | import com.bea.wli.sb.transports.util.AbstractTransportEndPoint;
75 |
76 | /**
77 | * @author Ricardo Ferreira
78 | */
79 | public class KafkaEndpoint extends AbstractTransportEndPoint {
80 |
	// Payload-handling modes the endpoint configuration may declare as its
	// request type (text vs. binary).
	private final String textType =
			KafkaConstants.TEXT_REQUEST_TYPE;

	private final String binaryType =
			KafkaConstants.BINARY_REQUEST_TYPE;

	// Default response work manager name from the Transport SDK.
	// NOTE(review): its use is not visible in this portion of the file —
	// presumably for scheduling response processing; confirm further down.
	private final String responseWorkManager =
			TransportManagerHelper.DEFAULT_RESPONSE_WORKMANAGER;

	private TransportProvider transportProvider;
	private TransportManager transportManager;

	// Values copied from the Kafka-specific endpoint configuration in the
	// constructor.
	private String topicName;
	private String requestType;
	@SuppressWarnings("unused")
	private String responseType; // read from config but not used elsewhere here
	private String customProps;

	// Inbound (proxy) settings populated by initConsumerProperties().
	private String dispatchPolicy;
	private short consumerThreads;

	// Kafka client state. NOTE(review): generic type parameters appear to
	// have been stripped by the rendering of this file — confirm the element
	// type of internalConsumers against the original source.
	private List internalConsumers;
	private Properties consumerProps;
	private Properties producerProps;
	@SuppressWarnings("rawtypes")
	private KafkaProducer producer;
106 |
107 | protected KafkaEndpoint(TransportProvider transportProvider,
108 | Ref serviceRef, EndPointConfiguration endpointConfig)
109 | throws TransportException {
110 |
111 | super(serviceRef, endpointConfig);
112 | KafkaEndPointConfiguration kafkaEndpointConfig = null;
113 |
114 | try {
115 |
116 | this.transportProvider = transportProvider;
117 | kafkaEndpointConfig = KafkaUtil.getConfig(endpointConfig);
118 |
119 | topicName = kafkaEndpointConfig.getTopicName();
120 | requestType = kafkaEndpointConfig.getRequestType();
121 | responseType = kafkaEndpointConfig.getResponseType();
122 | customProps = kafkaEndpointConfig.getCustomProps();
123 |
124 | if (isInbound()) {
125 |
126 | initConsumerProperties(endpointConfig, kafkaEndpointConfig);
127 |
128 | } else {
129 |
130 | initProducerProperties(endpointConfig, kafkaEndpointConfig);
131 |
132 | }
133 |
134 | } catch (Exception ex) {
135 |
136 | throw new TransportException(ex);
137 |
138 | }
139 |
140 | }
141 |
142 | private void initConsumerProperties(EndPointConfiguration endpointConfig,
143 | KafkaEndPointConfiguration kafkaEndpointConfig) {
144 |
145 | KafkaInboundPropertiesType inboundProps =
146 | kafkaEndpointConfig.getInboundProperties();
147 |
148 | dispatchPolicy = inboundProps.getDispatchPolicy();
149 | consumerThreads = inboundProps.getConsumerThreads();
150 | consumerProps = new Properties();
151 |
152 | consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, endpointConfig.getURIArray()[0].getValue());
153 | consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, inboundProps.getGroupId());
154 | consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
155 |
156 | if (requestType.equals(textType)) {
157 |
158 | consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
159 | StringDeserializer.class.getName());
160 |
161 | } else if (requestType.equals(binaryType)) {
162 |
163 | consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
164 | ByteArrayDeserializer.class.getName());
165 |
166 | }
167 |
168 | setCustomProperties(consumerProps);
169 |
170 | if (!consumerProps.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) {
171 |
172 | consumerProps.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, Boolean.TRUE.toString());
173 |
174 | }
175 |
176 | checkPrintProperties(consumerProps);
177 |
178 | }
179 |
180 | private void initProducerProperties(EndPointConfiguration endpointConfig,
181 | KafkaEndPointConfiguration kafkaEndpointConfig) {
182 |
183 | KafkaOutboundPropertiesType outboundProps = kafkaEndpointConfig.getOutboundProperties();
184 | short retryCount = endpointConfig.getOutboundProperties().getRetryCount();
185 | int retryInterval = endpointConfig.getOutboundProperties().getRetryInterval();
186 | producerProps = new Properties();
187 |
188 | producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, getBootstrapServers(endpointConfig.getURIArray()));
189 | producerProps.put(ProducerConfig.ACKS_CONFIG, outboundProps.getAcks());
190 | producerProps.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(outboundProps.getTimeoutMs()));
191 | producerProps.put(ProducerConfig.RETRIES_CONFIG, String.valueOf(retryCount));
192 | producerProps.put(ProducerConfig.RETRY_BACKOFF_MS_CONFIG, calculateRetryBackoff(retryCount, retryInterval));
193 | producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
194 |
195 | if (requestType.equals(textType)) {
196 |
197 | producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
198 | StringSerializer.class.getName());
199 |
200 | } else if (requestType.equals(binaryType)) {
201 |
202 | producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
203 | ByteArraySerializer.class.getName());
204 |
205 | }
206 |
207 | setCustomProperties(producerProps);
208 | checkPrintProperties(producerProps);
209 |
210 | }
211 |
212 | private void setCustomProperties(Properties properties) {
213 |
214 | if (customProps != null && customProps.length() > 0) {
215 |
216 | String[] _customProps = customProps.split(",");
217 |
218 | for (String propEntry : _customProps) {
219 |
220 | String[] property = propEntry.split("=");
221 |
222 | if (property.length == 2) {
223 |
224 | properties.setProperty(property[0], property[1]);
225 |
226 | }
227 |
228 | }
229 |
230 | }
231 |
232 | }
233 |
234 | private void checkPrintProperties(Properties properties) {
235 |
236 | if (KafkaUtil.printProperties()) {
237 |
238 | KafkaTransportLogger.log(Level.INFO, "The endpoint '" + getServiceRef().getLocalName() +
239 | "' is being created using the following properties");
240 |
241 | StringBuilder output = new StringBuilder();
242 | Set