├── .gitattributes ├── .gitignore ├── LICENSE.txt ├── README.md ├── SECURITY.md ├── THIRD_PARTY_LICENSE.txt ├── build.gradle ├── clients ├── okafka.pom ├── pom.xml └── src │ ├── main │ └── java │ │ └── org │ │ └── oracle │ │ └── okafka │ │ ├── clients │ │ ├── ClusterConnectionStates.java │ │ ├── CommonClientConfigs.java │ │ ├── KafkaClient.java │ │ ├── Metadata.java │ │ ├── NetworkClient.java │ │ ├── TopicTeqParameters.java │ │ ├── admin │ │ │ ├── Admin.java │ │ │ ├── AdminClient.java │ │ │ ├── AdminClientConfig.java │ │ │ ├── CreateTopicsResult.java │ │ │ ├── DeleteTopicsOptions.java │ │ │ ├── DeleteTopicsResult.java │ │ │ ├── DescribeTopicsResult.java │ │ │ ├── KafkaAdminClient.java │ │ │ ├── TopicDescription.java │ │ │ └── internals │ │ │ │ └── AQKafkaAdmin.java │ │ ├── consumer │ │ │ ├── ConsumerConfig.java │ │ │ ├── KafkaConsumer.java │ │ │ ├── TxEQAssignor.java │ │ │ └── internals │ │ │ │ ├── AQKafkaConsumer.java │ │ │ │ ├── ConsumerNetworkClient.java │ │ │ │ ├── FetchMetricsRegistry.java │ │ │ │ ├── NoOpConsumerRebalanceListener.java │ │ │ │ ├── OkafkaConsumerMetrics.java │ │ │ │ ├── SubscriptionState.java │ │ │ │ └── TopicMetadataFetcher.java │ │ └── producer │ │ │ ├── KafkaProducer.java │ │ │ ├── ProducerConfig.java │ │ │ └── internals │ │ │ ├── AQKafkaProducer.java │ │ │ ├── FutureRecordMetadata.java │ │ │ ├── IncompleteBatches.java │ │ │ ├── OkafkaProducerMetrics.java │ │ │ ├── OracleTransactionManager.java │ │ │ ├── ProduceRequestResult.java │ │ │ ├── ProducerBatch.java │ │ │ ├── ProducerIdAndEpoch.java │ │ │ ├── RecordAccumulator.java │ │ │ ├── SenderMetricsRegistry.java │ │ │ └── SenderThread.java │ │ └── common │ │ ├── AQException.java │ │ ├── Node.java │ │ ├── config │ │ └── SslConfigs.java │ │ ├── errors │ │ ├── ConnectionException.java │ │ ├── FeatureNotSupportedException.java │ │ ├── InvalidLoginCredentialsException.java │ │ ├── InvalidMessageIdException.java │ │ └── RecordNotFoundSQLException.java │ │ ├── internals │ │ ├── PartitionData.java │ │ 
├── QPATInfo.java │ │ ├── QPATInfoList.java │ │ ├── QPIMInfo.java │ │ ├── QPIMInfoList.java │ │ └── SessionData.java │ │ ├── network │ │ ├── AQClient.java │ │ └── SelectorMetrics.java │ │ ├── protocol │ │ └── ApiKeys.java │ │ ├── record │ │ ├── BaseRecords.java │ │ └── BufferSupplier.java │ │ ├── requests │ │ ├── AbstractRequest.java │ │ ├── AbstractResponse.java │ │ ├── CommitRequest.java │ │ ├── CommitResponse.java │ │ ├── ConnectMeRequest.java │ │ ├── ConnectMeResponse.java │ │ ├── CreateTopicsRequest.java │ │ ├── CreateTopicsResponse.java │ │ ├── DeleteGroupsRequest.java │ │ ├── DeleteGroupsResponse.java │ │ ├── DeleteTopicsRequest.java │ │ ├── DeleteTopicsResponse.java │ │ ├── FetchRequest.java │ │ ├── FetchResponse.java │ │ ├── IsolationLevel.java │ │ ├── JoinGroupRequest.java │ │ ├── JoinGroupResponse.java │ │ ├── ListGroupsRequest.java │ │ ├── ListGroupsResponse.java │ │ ├── ListOffsetsRequest.java │ │ ├── ListOffsetsResponse.java │ │ ├── MetadataRequest.java │ │ ├── MetadataResponse.java │ │ ├── OffsetFetchRequest.java │ │ ├── OffsetFetchResponse.java │ │ ├── OffsetResetRequest.java │ │ ├── OffsetResetResponse.java │ │ ├── ProduceRequest.java │ │ ├── ProduceResponse.java │ │ ├── RequestHeader.java │ │ ├── ResponseHeader.java │ │ ├── SubscribeRequest.java │ │ ├── SubscribeResponse.java │ │ ├── SyncGroupRequest.java │ │ ├── SyncGroupResponse.java │ │ ├── UnsubscribeRequest.java │ │ └── UnsubscribeResponse.java │ │ └── utils │ │ ├── ConnectionUtils.java │ │ ├── CreateTopics.java │ │ ├── FetchOffsets.java │ │ ├── MessageIdConverter.java │ │ ├── ReflectionUtil.java │ │ └── TNSParser.java │ └── test │ └── java │ ├── ojdbc.properties │ ├── org │ └── oracle │ │ └── okafka │ │ └── tests │ │ ├── ConsumerMetricsTest.java │ │ ├── DeleteConsumerGroups.java │ │ ├── ListConsumerGroupOffsets.java │ │ ├── ListConsumerGroups.java │ │ ├── OkafkaAutoOffsetReset.java │ │ ├── OkafkaDeleteTopic.java │ │ ├── OkafkaDeleteTopicById.java │ │ ├── OkafkaDescribeTopics.java │ │ ├── 
OkafkaDescribeTopicsById.java │ │ ├── OkafkaFetchCommittedOffset.java │ │ ├── OkafkaListOffsets.java │ │ ├── OkafkaListTopics.java │ │ ├── OkafkaSeekToBeginning.java │ │ ├── OkafkaSeekToEnd.java │ │ ├── OkafkaSetup.java │ │ ├── OkafkaUnsubscribe.java │ │ ├── ProducerMetricsTest.java │ │ ├── SimpleOkafkaAdmin.java │ │ ├── SimpleOkafkaConsumer.java │ │ ├── SimpleOkafkaProducer.java │ │ └── TestRunner.java │ └── test.config ├── connectors ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── THIRD_PARTY_LICENSE.txt ├── pom.xml ├── samples │ └── ConsumerOfJmsSchema.java └── src │ └── main │ ├── java │ └── oracle │ │ └── jdbc │ │ └── txeventq │ │ └── kafka │ │ └── connect │ │ ├── common │ │ └── utils │ │ │ ├── AppInfoParser.java │ │ │ ├── Constants.java │ │ │ ├── JmsUtils.java │ │ │ └── Node.java │ │ ├── schema │ │ ├── JmsDestination.java │ │ ├── JmsMessage.java │ │ ├── Key.java │ │ └── PropertyValue.java │ │ ├── sink │ │ ├── TxEventQSinkConnector.java │ │ ├── task │ │ │ └── TxEventQSinkTask.java │ │ └── utils │ │ │ ├── TxEventQProducer.java │ │ │ └── TxEventQSinkConfig.java │ │ └── source │ │ ├── TxEventQSourceConnector.java │ │ ├── task │ │ └── TxEventQSourceTask.java │ │ └── utils │ │ ├── TxEventQConnectorConfig.java │ │ ├── TxEventQConsumer.java │ │ └── TxEventQSourceRecord.java │ └── resources │ ├── connect-txeventq-sink.properties │ ├── connect-txeventq-source.properties │ ├── kafka-connect-oracle-version.properties │ └── logback.xml ├── examples ├── consumer │ └── src │ │ └── main │ │ ├── java │ │ └── org │ │ │ └── oracle │ │ │ └── okafka │ │ │ └── examples │ │ │ └── ConsumerOKafka.java │ │ └── resources │ │ └── config.properties ├── ojdbc.properties └── producer │ └── src │ └── main │ ├── java │ └── org │ │ └── oracle │ │ └── okafka │ │ └── examples │ │ └── ProducerOKafka.java │ └── resources │ └── config.properties ├── gradle └── wrapper │ ├── gradle-wrapper.jar │ └── gradle-wrapper.properties ├── gradlew ├── 
gradlew.bat ├── sbom_generation.yaml └── settings.gradle /.gitattributes: -------------------------------------------------------------------------------- 1 | # 2 | # https://help.github.com/articles/dealing-with-line-endings/ 3 | # 4 | # These are explicitly windows files and should use crlf 5 | *.bat text eol=crlf 6 | 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .vscode 3 | .gradle 4 | 5 | # Maven assets 6 | .mvn 7 | mvnw 8 | mvnw.cmd 9 | 10 | .java-version 11 | 12 | target/ 13 | build/ 14 | bin/ 15 | 16 | .dccache 17 | .DS_Store 18 | 19 | doc/ 20 | /.metadata/ 21 | 22 | .classpath 23 | .project 24 | .settings/ 25 | clients/.classpath 26 | clients/.project 27 | clients/.settings/ 28 | examples/.project 29 | examples/.settings/ 30 | examples/ojdbc.properties 31 | examples/consumer/.classpath 32 | examples/consumer/.project 33 | examples/consumer/.settings/ 34 | examples/producer/.classpath 35 | examples/producer/.project 36 | examples/producer/.settings/ 37 | okafka-github/ 38 | clients/config.properties 39 | clients/ojdbc.properties 40 | clientsafterConsumingOkafka.csv 41 | clientsafterProducingOkafka.csv 42 | examples/.externalToolBuilders/ 43 | /.gitignore/ 44 | -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Reporting security vulnerabilities 2 | 3 | Oracle values the independent security research community and believes that 4 | responsible disclosure of security vulnerabilities helps us ensure the security 5 | and privacy of all our users. 6 | 7 | Please do NOT raise a GitHub Issue to report a security vulnerability. If you 8 | believe you have found a security vulnerability, please submit a report to 9 | [secalert_us@oracle.com][1] preferably with a proof of concept. 
Please review 10 | some additional information on [how to report security vulnerabilities to Oracle][2]. 11 | We encourage people who contact Oracle Security to use email encryption using 12 | [our encryption key][3]. 13 | 14 | We ask that you do not use other channels or contact the project maintainers 15 | directly. 16 | 17 | Non-vulnerability related security issues including ideas for new or improved 18 | security features are welcome on GitHub Issues. 19 | 20 | ## Security updates, alerts and bulletins 21 | 22 | Security updates will be released on a regular cadence. Many of our projects 23 | will typically release security fixes in conjunction with the 24 | Oracle Critical Patch Update program. Additional 25 | information, including past advisories, is available on our [security alerts][4] 26 | page. 27 | 28 | ## Security-related information 29 | 30 | We will provide security related information such as a threat model, considerations 31 | for secure use, or any known security issues in our documentation. Please note 32 | that labs and sample code are intended to demonstrate a concept and may not be 33 | sufficiently hardened for production use. 
34 | 35 | [1]: mailto:secalert_us@oracle.com 36 | [2]: https://www.oracle.com/corporate/security-practices/assurance/vulnerability/reporting.html 37 | [3]: https://www.oracle.com/security-alerts/encryptionkey.html 38 | [4]: https://www.oracle.com/security-alerts/ 39 | -------------------------------------------------------------------------------- /clients/okafka.pom: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4.0.0 4 | com.oracle.database.messaging 5 | okafka 6 | 23.4.0.0 7 | 8 | 9 | okafka 10 | Oracle's implementation of Kafka Java Client for Oracle Transactional Event Queues 11 | https://docs.oracle.com/en/database/oracle/oracle-database/23/okjdc/index.html 12 | 13 | 14 | 15 | 16 | Oracle Free Use Terms and Conditions (FUTC) 17 | 18 | https://www.oracle.com/downloads/licenses/oracle-free-license.html 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | Oracle America, Inc. 28 | http://www.oracle.com 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | UTF-8 37 | UTF-8 38 | 11 39 | ${java.version} 40 | ${java.version} 41 | 23.4.0.24.05 42 | 23.3.0.0 43 | 2.0.1 44 | 1.3 45 | 46 | 47 | 48 | 49 | com.oracle.database.jdbc 50 | ojdbc11 51 | ${oracle-jdbc.version} 52 | 53 | 54 | com.oracle.database.jdbc 55 | ucp 56 | ${oracle-jdbc.version} 57 | 58 | 59 | com.oracle.database.security 60 | oraclepki 61 | ${oracle-jdbc.version} 62 | 63 | 64 | javax.jms 65 | javax.jms-api 66 | ${jms.version} 67 | 68 | 69 | javax.transaction 70 | javax.transaction-api 71 | ${javax-transaction.version} 72 | 73 | 74 | javax.transaction 75 | jta 76 | 1.1 77 | 78 | 79 | com.oracle.database.messaging 80 | aqapi 81 | ${oracle-db-messaging.version} 82 | 83 | 84 | 85 | org.apache.kafka 86 | kafka-clients 87 | 3.7.1 88 | 89 | 90 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/clients/TopicTeqParameters.java: -------------------------------------------------------------------------------- 1 | /* 2 
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

package org.oracle.okafka.clients;

import java.util.Objects;

/**
 * Holds the Transactional Event Queue (TEQ) parameters of a topic as read from
 * the database: key-based enqueue, sticky dequeue mode, shard count and the
 * database version. The message version is derived lazily from the sticky
 * dequeue setting in {@link #getMsgVersion()}.
 */
public class TopicTeqParameters {

    int keyBased;         // key-based enqueue flag
    int stickyDeq;        // sticky dequeue mode; value 2 implies message version 2
    int shardNum;         // number of shards (partitions) of the queue
    int dbMajorVersion;   // database major version
    int dbMinorVersion;   // database minor version
    int msgVersion;       // derived from stickyDeq; populated by getMsgVersion()

    public void setKeyBased(int keyBased)
    {
        this.keyBased = keyBased;
    }

    public void setStickyDeq(int stickyDeq)
    {
        this.stickyDeq = stickyDeq;
    }

    public void setShardNum(int shardNum)
    {
        this.shardNum = shardNum;
    }

    private void setMsgVersion(int msgVersion)
    {
        this.msgVersion = msgVersion;
    }

    public int getKeyBased()
    {
        return this.keyBased;
    }

    public int getStickyDeq()
    {
        return this.stickyDeq;
    }

    public int getShardNum()
    {
        return this.shardNum;
    }

    /**
     * Returns the message version for this topic, deriving (and caching) it
     * from the sticky dequeue mode: mode 2 maps to version 2, anything else
     * to version 1.
     */
    public int getMsgVersion()
    {
        if(getStickyDeq()!=2) {
            this.msgVersion = 1;
        }
        else {
            this.msgVersion = 2;
        }
        return this.msgVersion;
    }

    @Override
    public boolean equals(final Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        final TopicTeqParameters that = (TopicTeqParameters) o;
        return this.keyBased == that.keyBased &&
                this.stickyDeq == that.stickyDeq &&
                this.shardNum == that.shardNum &&
                this.dbMajorVersion == that.dbMajorVersion &&
                this.dbMinorVersion == that.dbMinorVersion &&
                this.msgVersion == that.msgVersion;
    }

    // Bug fix: equals() was overridden without hashCode(), breaking the
    // Object contract (equal instances could land in different hash buckets).
    @Override
    public int hashCode() {
        return Objects.hash(keyBased, stickyDeq, shardNum,
                dbMajorVersion, dbMinorVersion, msgVersion);
    }

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.oracle.okafka.clients.admin;

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.common.annotation.InterfaceStability;

/**
 * The administrative client for Transactional Event Queues (TXEQ), which supports
 * managing and inspecting topics. For this release only creation of topic(s) and
 * deletion of topic(s) is supported. A topic can be created by invoking
 * {@code #createTopics(Collection)} and deleted by invoking
 * {@code #deleteTopics(Collection)}.
 * <p>
 * A topic can be created with the following configuration:
 * <p>
 * retention.ms: amount of time in milliseconds for which records stay in the topic
 * and are available for consumption. Internally the value is rounded to the second.
 * The default is 7 days.
 */
@InterfaceStability.Evolving
public abstract class AdminClient implements Admin {

    // Placeholder bootstrap address, used when the real endpoint is resolved
    // from an Oracle Wallet / tnsnames.ora alias instead of a host:port pair.
    final static String DUMMY_BOOTSTRAP = "localhost:1521";

    /**
     * Create a new AdminClient with the given configuration.
     *
     * @param props The configuration.
     * @return The new KafkaAdminClient.
     */
    public static AdminClient create(Properties props) {
        String bootstrap = (String) props.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG);
        if (bootstrap == null) {
            String protocol = props.getProperty(AdminClientConfig.SECURITY_PROTOCOL_CONFIG);
            // With SSL the connection goes through an Oracle Wallet and tnsnames.ora,
            // so the caller does not need to know the database host and port.
            if (protocol != null && protocol.equalsIgnoreCase("SSL")) {
                props.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, DUMMY_BOOTSTRAP);
            }
        }
        return KafkaAdminClient.createInternal(
                new org.oracle.okafka.clients.admin.AdminClientConfig(props),
                new KafkaAdminClient.TimeoutProcessorFactory());
    }

    /**
     * Create a new AdminClient with the given configuration.
     *
     * @param conf The configuration.
     * @return The new KafkaAdminClient.
     */
    public static AdminClient create(Map<String, Object> conf) {
        String bootstrap = (String) conf.get(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG);
        if (bootstrap == null) {
            String protocol = (String) conf.get(AdminClientConfig.SECURITY_PROTOCOL_CONFIG);
            // Same wallet/tnsnames.ora shortcut as in create(Properties).
            if (protocol != null && protocol.equalsIgnoreCase("SSL")) {
                conf.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, DUMMY_BOOTSTRAP);
            }
        }
        // NOTE(review): unlike create(Properties), this overload passes a null
        // TimeoutProcessorFactory — presumably createInternal supplies a default; confirm.
        return KafkaAdminClient.createInternal(new AdminClientConfig(conf), null);
    }

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.oracle.okafka.clients.admin;

import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.annotation.InterfaceStability;
import java.util.Collection;
import java.util.Map;

/**
 * The result of {@link Admin#createTopics(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class CreateTopicsResult extends org.apache.kafka.clients.admin.CreateTopicsResult {

    // Thin OKafka wrapper: simply forwards the per-topic futures to the Kafka base class.
    // NOTE(review): generic parameters were reconstructed from the Kafka 3.7 base-class
    // constructor signature — confirm against the original source.
    public CreateTopicsResult(Map<String, KafkaFuture<TopicMetadataAndConfig>> futures)
    {
        super(futures);
    }

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.oracle.okafka.clients.admin;

import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.Collection;

/**
 * Options for {@link Admin#deleteTopics(Collection)}.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteTopicsOptions extends org.apache.kafka.clients.admin.DeleteTopicsOptions {
    // Marker subclass: all behavior is inherited from the Kafka base class.
}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.oracle.okafka.clients.admin;

import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.annotation.InterfaceStability;

import java.util.Collection;
import java.util.Map;

/**
 * The result of the {@link Admin#deleteTopics(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
@InterfaceStability.Evolving
public class DeleteTopicsResult extends org.apache.kafka.clients.admin.DeleteTopicsResult {

    // Package-private: constructed by the OKafka admin client, not by users.
    // Forwards the id-keyed and name-keyed futures to the Kafka base class.
    DeleteTopicsResult(Map<Uuid, KafkaFuture<Void>> topicIdFutures,
                       Map<String, KafkaFuture<Void>> nameFutures) {
        super(topicIdFutures, nameFutures);
    }

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.oracle.okafka.clients.admin;

import java.util.Map;

import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.Uuid;

/**
 * The result of the {@link Admin#describeTopics(Collection)} call.
 *
 * The API of this class is evolving, see {@link Admin} for details.
 */
public class DescribeTopicsResult extends org.apache.kafka.clients.admin.DescribeTopicsResult {

    // Forwards the id-keyed and name-keyed description futures to the Kafka base class.
    protected DescribeTopicsResult(Map<Uuid, KafkaFuture<TopicDescription>> topicIdFutures,
                                   Map<String, KafkaFuture<TopicDescription>> nameFutures) {
        super(topicIdFutures, nameFutures);
    }
}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.oracle.okafka.clients.admin;

import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;

import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartitionInfo;
import org.apache.kafka.common.Uuid;
import org.oracle.okafka.clients.TopicTeqParameters;

/**
 * A detailed description of a single topic in the cluster, extended with the
 * TxEventQ (TEQ) specific parameters of the backing queue.
 */
public class TopicDescription extends org.apache.kafka.clients.admin.TopicDescription {

    private final TopicTeqParameters topicParameters;

    /**
     * @param name               the topic name
     * @param internal           whether the topic is internal
     * @param partitions         per-partition metadata
     * @param topicTeqParameters TEQ parameters of the backing queue (may be null)
     * @param topicId            the topic's unique id
     */
    public TopicDescription(String name, boolean internal, List<TopicPartitionInfo> partitions,
            TopicTeqParameters topicTeqParameters, Uuid topicId) {
        super(name, internal, partitions, Collections.emptySet(), topicId);
        this.topicParameters = topicTeqParameters;
    }

    @Override
    public boolean equals(final Object o) {
        // The superclass performs the null and getClass() checks, so the cast
        // below is safe whenever super.equals returns true.
        if (!super.equals(o)) {
            return false;
        }
        final TopicDescription that = (TopicDescription) o;
        // Bug fix: Objects.equals is null-safe; the original dereferenced
        // topicParameters directly and threw NPE when it was null.
        return Objects.equals(this.topicParameters, that.topicParameters);
    }

    @Override
    public int hashCode() {
        return Objects.hash(this.name(), this.isInternal(), this.partitions(), topicParameters);
    }

    /**
     * Returns the TEQ-specific parameters of this topic.
     * Widened from private (where it was uncallable) to public; existing callers
     * are unaffected.
     */
    public TopicTeqParameters topicTeqParameters() {
        return this.topicParameters;
    }

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

package org.oracle.okafka.clients.consumer.internals;

import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.common.TopicPartition;
import java.util.Collection;

/**
 * A {@link ConsumerRebalanceListener} that deliberately ignores all rebalance
 * callbacks. Used as the default when the caller supplies no listener.
 */
public class NoOpConsumerRebalanceListener implements ConsumerRebalanceListener {

    @Override
    public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
        // Intentionally empty.
    }

    @Override
    public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
        // Intentionally empty.
    }

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

package org.oracle.okafka.clients.consumer.internals;

import org.apache.kafka.common.MetricName;
import org.apache.kafka.common.metrics.Measurable;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.metrics.Sensor;
import org.apache.kafka.common.metrics.stats.Avg;
import org.apache.kafka.common.metrics.stats.CumulativeSum;
import org.apache.kafka.common.metrics.stats.Max;

import java.util.concurrent.TimeUnit;

/**
 * Consumer-level metrics for OKafka: seconds since the last poll(), the delay
 * between poll() invocations, the poll idle ratio, and cumulative commitSync
 * time. Close the instance to deregister every metric and sensor it created.
 */
public class OkafkaConsumerMetrics implements AutoCloseable {
    private final MetricName lastPollMetricName;
    private final Sensor timeBetweenPollSensor;
    private final Sensor pollIdleSensor;
    private final Sensor commitSyncSensor;
    private final Metrics metrics;
    private long lastPollMs;          // wall-clock ms of the most recent poll() start; 0 = never polled
    private long pollStartMs;         // start time of the poll currently in flight
    private long timeSinceLastPollMs; // gap between the two most recent poll() starts

    public OkafkaConsumerMetrics(Metrics metrics, String metricGrpPrefix) {
        this.metrics = metrics;
        String metricGroupName = metricGrpPrefix + "-metrics";

        // Gauge: seconds elapsed since the last poll(), or -1 before the first poll.
        Measurable secondsSinceLastPoll = (mConfig, now) -> {
            if (lastPollMs == 0L)
                return -1d;
            return TimeUnit.SECONDS.convert(now - lastPollMs, TimeUnit.MILLISECONDS);
        };
        this.lastPollMetricName = metrics.metricName("last-poll-seconds-ago",
                metricGroupName, "The number of seconds since the last poll() invocation.");
        metrics.addMetric(lastPollMetricName, secondsSinceLastPoll);

        this.timeBetweenPollSensor = metrics.sensor("time-between-poll");
        this.timeBetweenPollSensor.add(
                metrics.metricName("time-between-poll-avg",
                        metricGroupName,
                        "The average delay between invocations of poll() in milliseconds."),
                new Avg());
        this.timeBetweenPollSensor.add(
                metrics.metricName("time-between-poll-max",
                        metricGroupName,
                        "The max delay between invocations of poll() in milliseconds."),
                new Max());

        this.pollIdleSensor = metrics.sensor("poll-idle-ratio-avg");
        this.pollIdleSensor.add(
                metrics.metricName("poll-idle-ratio-avg",
                        metricGroupName,
                        "The average fraction of time the consumer's poll() is idle as opposed to waiting for the user code to process records."),
                new Avg());

        this.commitSyncSensor = metrics.sensor("commit-sync-time-ns-total");
        this.commitSyncSensor.add(
                metrics.metricName(
                        "commit-sync-time-ns-total",
                        metricGroupName,
                        "The total time the consumer has spent in commitSync in nanoseconds"),
                new CumulativeSum());
    }

    /** Records the start of a poll() and the delay since the previous one. */
    public void recordPollStart(long pollStartMs) {
        this.pollStartMs = pollStartMs;
        if (lastPollMs != 0L) {
            this.timeSinceLastPollMs = pollStartMs - lastPollMs;
        } else {
            this.timeSinceLastPollMs = 0;
        }
        this.timeBetweenPollSensor.record(timeSinceLastPollMs);
        this.lastPollMs = pollStartMs;
    }

    /** Records the end of a poll() and the resulting idle ratio for this interval. */
    public void recordPollEnd(long pollEndMs) {
        long pollTimeMs = pollEndMs - pollStartMs;
        double pollIdleRatio = pollTimeMs * 1.0 / (pollTimeMs + timeSinceLastPollMs);
        this.pollIdleSensor.record(pollIdleRatio);
    }

    /** Adds one commitSync duration (nanoseconds) to the cumulative total. */
    public void recordCommitSync(long duration) {
        this.commitSyncSensor.record(duration);
    }

    @Override
    public void close() {
        // Deregister everything this instance added so the Metrics registry stays clean.
        metrics.removeMetric(lastPollMetricName);
        metrics.removeSensor(timeBetweenPollSensor.name());
        metrics.removeSensor(pollIdleSensor.name());
        metrics.removeSensor(commitSyncSensor.name());
    }
}
27 | } 28 | 29 | public Map> getAllTopicMetadata(Timer timer) { 30 | MetadataRequest.Builder request = MetadataRequest.Builder.listAllTopics(true); 31 | return getTopicMetadata(request, timer); 32 | } 33 | 34 | private Map> getTopicMetadata(MetadataRequest.Builder builder, Timer timer) { 35 | boolean retry = false; 36 | 37 | do { 38 | retry = false; 39 | ClientResponse response = client.sendMetadataRequest(builder); 40 | MetadataResponse metadataResponse = (MetadataResponse) response.responseBody(); 41 | 42 | if (metadataResponse.getException() == null && !response.wasDisconnected()) { 43 | Map> listTopicsMap = new HashMap<>(); 44 | List partitionInfoList = metadataResponse.partitions(); 45 | for (int i = 0; i < partitionInfoList.size(); i++) { 46 | String topic = partitionInfoList.get(i).topic(); 47 | if (listTopicsMap.containsKey(topic)) { 48 | listTopicsMap.get(topic).add(partitionInfoList.get(i)); 49 | } else { 50 | listTopicsMap.put(topic, new ArrayList<>(Arrays.asList(partitionInfoList.get(i)))); 51 | } 52 | } 53 | return listTopicsMap; 54 | } else if (response.wasDisconnected()) 55 | retry = true; 56 | else { 57 | log.error("Exception Caught: ", metadataResponse.getException()); 58 | throw new KafkaException("Unexpected error listing topics", metadataResponse.getException()); 59 | } 60 | } while (retry && timer.notExpired()); 61 | 62 | throw new TimeoutException("Timeout expired while fetching topic metadata"); 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/clients/producer/internals/IncompleteBatches.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | package org.oracle.okafka.clients.producer.internals; 25 | 26 | import java.util.ArrayList; 27 | import java.util.HashSet; 28 | import java.util.Set; 29 | 30 | /* 31 | * A thread-safe helper class to hold batches that haven't been acknowledged yet (including those 32 | * which have and have not been sent). 33 | */ 34 | class IncompleteBatches { 35 | private final Set incomplete; 36 | 37 | public IncompleteBatches() { 38 | this.incomplete = new HashSet<>(); 39 | } 40 | 41 | public void add(ProducerBatch batch) { 42 | synchronized (incomplete) { 43 | this.incomplete.add(batch); 44 | } 45 | } 46 | 47 | public void remove(ProducerBatch batch) { 48 | synchronized (incomplete) { 49 | boolean removed = this.incomplete.remove(batch); 50 | if (!removed) 51 | throw new IllegalStateException("Remove from the incomplete set failed. 
This should be impossible."); 52 | } 53 | } 54 | 55 | public Iterable copyAll() { 56 | synchronized (incomplete) { 57 | return new ArrayList<>(this.incomplete); 58 | } 59 | } 60 | 61 | public boolean isEmpty() { 62 | synchronized (incomplete) { 63 | return incomplete.isEmpty(); 64 | } 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProduceRequestResult.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
27 | * 28 | */ 29 | 30 | package org.oracle.okafka.clients.producer.internals; 31 | 32 | import java.util.List; 33 | import java.util.function.Function; 34 | 35 | import org.apache.kafka.clients.producer.RecordMetadata; 36 | import org.apache.kafka.common.TopicPartition; 37 | import org.oracle.okafka.common.utils.MessageIdConverter.OKafkaOffset; 38 | 39 | /** 40 | * A class that models the future completion of a produce request for a single 41 | * partition. There is one of these per partition in a produce request and it is 42 | * shared by all the {@link RecordMetadata} instances that are batched together 43 | * for the same partition in the request. 44 | */ 45 | public class ProduceRequestResult extends org.apache.kafka.clients.producer.internals.ProduceRequestResult { 46 | 47 | private volatile List msgIds = null; 48 | 49 | /** 50 | * Create an instance of this class. 51 | * 52 | * @param topicPartition The topic and partition to which this record set was 53 | * sent was sent 54 | */ 55 | public ProduceRequestResult(TopicPartition topicPartition) { 56 | super(topicPartition); 57 | } 58 | 59 | /** 60 | * Set the result of the produce request. 
61 | * 62 | * @param baseOffset The base offset assigned to the record 63 | * @param logAppendTime The log append time or -1 if CreateTime is being used 64 | * @param error The error that occurred if there was one, or null 65 | */ 66 | public void set(long baseOffset, long logAppendTime, List msgIds, 67 | Function errorsByIndex) { 68 | set(baseOffset, logAppendTime, errorsByIndex); 69 | this.msgIds = msgIds; 70 | } 71 | 72 | public void set(long baseOffset, long logAppendTime, List msgIds, RuntimeException errorsByIndex) { 73 | set(baseOffset, logAppendTime, batchIndex -> errorsByIndex); 74 | this.msgIds = msgIds; 75 | } 76 | 77 | /** 78 | * The base offset for the request (the first offset in the record set) 79 | */ 80 | public List msgIds() { 81 | return msgIds; 82 | } 83 | 84 | } 85 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/clients/producer/internals/ProducerIdAndEpoch.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. 
You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | package org.oracle.okafka.clients.producer.internals; 25 | 26 | import static org.apache.kafka.common.record.RecordBatch.NO_PRODUCER_EPOCH; 27 | import static org.apache.kafka.common.record.RecordBatch.NO_PRODUCER_ID; 28 | 29 | class ProducerIdAndEpoch { 30 | static final ProducerIdAndEpoch NONE = new ProducerIdAndEpoch(NO_PRODUCER_ID, NO_PRODUCER_EPOCH); 31 | 32 | public final long producerId; 33 | public final short epoch; 34 | 35 | ProducerIdAndEpoch(long producerId, short epoch) { 36 | this.producerId = producerId; 37 | this.epoch = epoch; 38 | } 39 | 40 | public boolean isValid() { 41 | return NO_PRODUCER_ID < producerId; 42 | } 43 | 44 | @Override 45 | public String toString() { 46 | return "(producerId=" + producerId + ", epoch=" + epoch + ")"; 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/AQException.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.common; 9 | 10 | /** 11 | * Base class of all other TEQ exceptions. 
/**
 * Base class of all other TEQ exceptions.
 *
 * @author srkarre
 */
public class AQException extends RuntimeException {
    private static final long serialVersionUID = 1L;

    public AQException() {
        super();
    }

    public AQException(String message) {
        super(message);
    }

    public AQException(Throwable cause) {
        super(cause);
    }

    public AQException(String message, Throwable cause) {
        super(message, cause);
    }
}
27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.config; 31 | 32 | import org.apache.kafka.common.config.ConfigDef; 33 | import org.apache.kafka.common.config.ConfigDef.Importance; 34 | 35 | public class SslConfigs extends org.apache.kafka.common.config.SslConfigs { 36 | /* 37 | * NOTE: DO NOT CHANGE EITHER CONFIG NAMES AS THESE ARE PART OF THE PUBLIC API AND CHANGE WILL BREAK USER CODE. 38 | */ 39 | 40 | public static final String TNS_ALIAS = "tns.alias"; 41 | public static final String TNS_ALIAS_DOC = "alias of connection string in tnsnames.ora. This connection is used for connecting to database instance"; 42 | 43 | public static void addClientSslSupport(ConfigDef config) { 44 | org.apache.kafka.common.config.SslConfigs.addClientSslSupport(config); 45 | config.define(SslConfigs.TNS_ALIAS, ConfigDef.Type.STRING, null, Importance.MEDIUM, SslConfigs.TNS_ALIAS_DOC); 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/errors/ConnectionException.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.common.errors; 9 | 10 | import org.apache.kafka.common.KafkaException; 11 | 12 | /** 13 | * Thrown when OKafka application fails to connect with the Oracle Database. 
14 | */ 15 | public class ConnectionException extends KafkaException { 16 | private static final long serialVersionUID = 1L; 17 | 18 | public ConnectionException(Throwable cause) { 19 | super(cause); 20 | } 21 | public ConnectionException(String msg) { 22 | super(msg); 23 | } 24 | 25 | public ConnectionException(String msg, Throwable cause) { 26 | super(msg, cause); 27 | } 28 | 29 | public ConnectionException() { 30 | super(); 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/errors/FeatureNotSupportedException.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.common.errors; 9 | 10 | import org.oracle.okafka.common.AQException; 11 | 12 | /** 13 | * If a method/api is not supported then this exception is thrown. 14 | * 15 | * @author srkarre 16 | * 17 | */ 18 | public class FeatureNotSupportedException extends AQException { 19 | private static final long serialVersionUID = 1L; 20 | 21 | public FeatureNotSupportedException(String message, Throwable cause) { 22 | super(message, cause); 23 | } 24 | 25 | public FeatureNotSupportedException(String message) { 26 | super(message); 27 | } 28 | 29 | public FeatureNotSupportedException(Throwable cause) { 30 | super(cause); 31 | } 32 | 33 | public FeatureNotSupportedException() { 34 | super(); 35 | } 36 | 37 | } 38 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/errors/InvalidLoginCredentialsException.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 
3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | 9 | package org.oracle.okafka.common.errors; 10 | 11 | import org.oracle.okafka.common.AQException; 12 | 13 | /** 14 | * This exception indicates that user provided invalid login details. 15 | * 16 | * @author srkarre 17 | * 18 | */ 19 | public class InvalidLoginCredentialsException extends AQException { 20 | private final static long serialVersionUID = 1L; 21 | 22 | public InvalidLoginCredentialsException(String message, Throwable cause) { 23 | super(message, cause); 24 | } 25 | 26 | public InvalidLoginCredentialsException(String message) { 27 | super(message); 28 | } 29 | 30 | public InvalidLoginCredentialsException(Throwable cause) { 31 | super(cause); 32 | } 33 | 34 | public InvalidLoginCredentialsException() { 35 | super(); 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/errors/InvalidMessageIdException.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.common.errors; 9 | 10 | import org.oracle.okafka.common.AQException; 11 | 12 | /** 13 | * This exception indicates that client has received invalid message id from server. 
14 | * 15 | * @author srkarre 16 | * 17 | */ 18 | public class InvalidMessageIdException extends AQException { 19 | 20 | private static final long serialVersionUID = 1L; 21 | 22 | public InvalidMessageIdException(String message, Throwable cause) { 23 | super(message, cause); 24 | } 25 | 26 | public InvalidMessageIdException(String message) { 27 | super(message); 28 | } 29 | 30 | public InvalidMessageIdException(Throwable cause) { 31 | super(cause); 32 | } 33 | 34 | public InvalidMessageIdException() { 35 | super(); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/errors/RecordNotFoundSQLException.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.errors; 2 | 3 | import java.sql.SQLException; 4 | 5 | /** 6 | * Exception indicates that either specified topic name/id in describeTopics()/deleteTopic() call is not found. 7 | */ 8 | public class RecordNotFoundSQLException extends SQLException { 9 | public RecordNotFoundSQLException(String message) { 10 | super(message); 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/internals/PartitionData.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.internals; 2 | 3 | import org.apache.kafka.common.TopicPartition; 4 | 5 | public class PartitionData { 6 | private TopicPartition topicPartition; 7 | private int queueId; 8 | private String subscriberName; 9 | private int subscriberId; 10 | private int ownerInstanceId; 11 | private boolean local; 12 | 13 | public PartitionData(String queueName, int queueId, int partitionId, 14 | String subName, int subId, int ownerInstanceId, boolean local) { 15 | this.topicPartition = new TopicPartition(queueName, partitionId); 16 | this.queueId = queueId; 17 | 
this.subscriberName = subName; 18 | this.subscriberId = subId; 19 | this.ownerInstanceId = ownerInstanceId; 20 | this.local = local; 21 | } 22 | public String toString() 23 | { 24 | if(topicPartition == null) 25 | return "NULL"; 26 | 27 | return "{Topic:"+topicPartition.topic()+",ConsumerGroupID:"+subscriberName+ 28 | ",Partition:"+topicPartition.partition()+",OwnerInstance:"+ownerInstanceId+",}"; 29 | } 30 | 31 | public TopicPartition getTopicPartition() { 32 | return this.topicPartition; 33 | } 34 | public int getOwnerInstanceId() { 35 | return this.ownerInstanceId; 36 | } 37 | 38 | public void setOwnerInstanceId(int instId) 39 | { 40 | this.ownerInstanceId = instId; 41 | } 42 | 43 | public int getQueueId() { 44 | return this.queueId; 45 | } 46 | 47 | public String getSubName() { 48 | return this.subscriberName; 49 | } 50 | 51 | public int getSubId() { 52 | return this.subscriberId; 53 | } 54 | 55 | public void setLocal(boolean _local) 56 | { 57 | local = _local; 58 | } 59 | public boolean getLocal() 60 | { 61 | return local; 62 | } 63 | 64 | public boolean equals(Object obj) 65 | { 66 | if(!(obj instanceof PartitionData)) 67 | return false; 68 | 69 | PartitionData tPart = (PartitionData)obj; 70 | return this.topicPartition.equals(tPart.topicPartition); 71 | } 72 | 73 | } 74 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/internals/QPATInfoList.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.internals; 2 | import java.sql.Connection; 3 | import java.sql.SQLException; 4 | import org.oracle.okafka.common.internals.QPATInfo; 5 | import oracle.jdbc.OracleArray; 6 | import oracle.jdbc.OracleData; 7 | import oracle.jdbc.OracleDataFactory; 8 | import oracle.jdbc.internal.OracleTypes; 9 | import oracle.jpub.runtime.OracleDataMutableArray; 10 | public class QPATInfoList implements OracleData, OracleDataFactory { 11 
| public static final String _SQL_NAME = "SYS.AQ$_QPAT_INFO_LIST"; 12 | public static final int _SQL_TYPECODE = OracleTypes.ARRAY; 13 | OracleDataMutableArray _array; 14 | private static final QPATInfoList _QPATInfoList_Factory = new QPATInfoList(); 15 | 16 | public static OracleDataFactory getOracleDataFactory() { 17 | return _QPATInfoList_Factory; 18 | } 19 | public QPATInfoList() 20 | { 21 | this((QPATInfo[])null); 22 | } 23 | public QPATInfoList(QPATInfo[] a) 24 | { 25 | _array = new OracleDataMutableArray(2002, a, QPATInfo.getFactory()); 26 | } 27 | @Override 28 | public OracleData create(Object d, int sqlType) throws SQLException { 29 | if (d == null) return null; 30 | QPATInfoList a = new QPATInfoList(); 31 | a._array = new OracleDataMutableArray(2002, (OracleArray) d, QPATInfo.getFactory()); 32 | return a; 33 | } 34 | @Override 35 | public Object toJDBCObject(Connection con) throws SQLException { 36 | return _array.toJDBCObject(con, _SQL_NAME); 37 | } 38 | 39 | public int length() throws SQLException { 40 | return _array.length(); 41 | } 42 | public int getBaseType() throws SQLException{ 43 | return _array.getBaseType(); 44 | } 45 | public String getBaseTypeName() throws SQLException 46 | { 47 | return _array.getBaseTypeName(); 48 | } 49 | public QPATInfo[] getArray() throws SQLException 50 | { 51 | return (QPATInfo[]) _array.getObjectArray( 52 | new QPATInfo[_array.length()]); 53 | } 54 | public void setArray(QPATInfo[] a) throws SQLException 55 | { 56 | _array.setObjectArray(a); 57 | } 58 | public QPATInfo[] getArray(long index, int count) throws SQLException 59 | { 60 | return (QPATInfo[]) _array.getObjectArray(index, 61 | new QPATInfo[_array.sliceLength(index, count)]); 62 | } 63 | public void setArray(QPATInfo[] a, long index) throws SQLException 64 | { 65 | _array.setObjectArray(a, index); 66 | } 67 | public QPATInfo getElement(long index) throws SQLException 68 | { 69 | return (QPATInfo) _array.getObjectElement(index); 70 | } 71 | public void 
setElement(QPATInfo a, long index) throws SQLException 72 | { 73 | _array.setObjectElement(a, index); 74 | } 75 | } -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfo.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.internals; 2 | import java.math.BigDecimal; 3 | import java.sql.Connection; 4 | import java.sql.SQLException; 5 | import oracle.jdbc.OracleConnection; 6 | import oracle.jdbc.OracleData; 7 | import oracle.jdbc.OracleDataFactory; 8 | import oracle.jdbc.OracleStruct; 9 | import oracle.jdbc.internal.ObjectData; 10 | import oracle.jdbc.internal.OracleTypes; 11 | import oracle.jpub.runtime.OracleDataMutableStruct; 12 | public class QPIMInfo implements OracleData, OracleDataFactory, ObjectData { 13 | public static final String _SQL_NAME = "SYS.AQ$_QPIM_INFO"; 14 | public static final int _SQL_TYPECODE = OracleTypes.STRUCT; 15 | static int[] _sqlType = 16 | { 17 | 12, 12, 4, 4 18 | }; 19 | static OracleDataFactory[] _factory = new OracleDataFactory[4]; 20 | public static QPIMInfo _QPIMInfo_Factory = new QPIMInfo(); 21 | 22 | OracleDataMutableStruct _struct; 23 | 24 | private OracleConnection con = null; 25 | public static OracleDataFactory getFactory() { 26 | return _QPIMInfo_Factory; 27 | } 28 | 29 | public QPIMInfo() { 30 | _struct = new OracleDataMutableStruct(new Object[4], _sqlType, _factory); 31 | } 32 | @Override 33 | public OracleData create(Object d, int sqlType) throws SQLException { 34 | if (d == null) return null; 35 | QPIMInfo o = new QPIMInfo(); 36 | if( d instanceof QPIMInfo){ 37 | o.shallowCopy((QPIMInfo)d); 38 | }else{ 39 | o._struct = new OracleDataMutableStruct((OracleStruct) d, _sqlType, _factory); 40 | } 41 | return o; 42 | } 43 | @Override 44 | public Object toJDBCObject(Connection con) throws SQLException { 45 | 46 | Object[] attrbs = new Object[13]; 47 | attrbs[0] = 
getOwner(); 48 | attrbs[1] = getQueueName(); 49 | attrbs[2] = getPartitionId(); 50 | attrbs[3] = getOwnerInstId(); 51 | return con.createStruct(_SQL_NAME, attrbs); 52 | } 53 | 54 | public String toString() 55 | { 56 | if(_struct == null) 57 | return null; 58 | try 59 | { 60 | String str = "{OwnerInstance:"+getOwner()+",TopicName:"+getQueueName()+",Partition:"+getPartitionId()+",OwnerInstanceID:"+getOwnerInstId()+"}"; 61 | return str; 62 | }catch(Exception e) 63 | { 64 | return "Null " +e.getMessage(); 65 | } 66 | } 67 | 68 | void shallowCopy(QPIMInfo d) throws SQLException { 69 | _struct = d._struct; 70 | } 71 | public void setOwner(String owner) throws SQLException{ 72 | _struct.setAttribute(0, owner); 73 | } 74 | 75 | public String getOwner() throws SQLException{ 76 | return (String)_struct.getAttribute(0); 77 | } 78 | 79 | public void setQueueName(String name) throws SQLException{ 80 | _struct.setAttribute(1, name); 81 | } 82 | 83 | public String getQueueName() throws SQLException{ 84 | return (String)_struct.getAttribute(1); 85 | } 86 | public void setPartitionId(int partition) throws SQLException{ 87 | _struct.setAttribute(2, new BigDecimal(partition)); 88 | } 89 | 90 | public Integer getPartitionId() throws SQLException{ 91 | return (Integer)((BigDecimal)_struct.getAttribute(2)).intValue(); 92 | } 93 | 94 | public void setOwnerInstId(int inst) throws SQLException{ 95 | _struct.setAttribute(3, new BigDecimal(inst)); 96 | } 97 | 98 | public Integer getOwnerInstId() throws SQLException{ 99 | return (Integer)((BigDecimal)_struct.getAttribute(3)).intValue(); 100 | } 101 | } -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/internals/QPIMInfoList.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.internals; 2 | import java.sql.Connection; 3 | import java.sql.SQLException; 4 | import 
org.oracle.okafka.common.internals.QPIMInfo; 5 | import oracle.jdbc.OracleArray; 6 | import oracle.jdbc.OracleData; 7 | import oracle.jdbc.OracleDataFactory; 8 | import oracle.jdbc.internal.OracleTypes; 9 | import oracle.jpub.runtime.OracleDataMutableArray; 10 | public class QPIMInfoList implements OracleData, OracleDataFactory { 11 | public static final String _SQL_NAME = "SYS.AQ$_QPIM_INFO_LIST"; 12 | public static final int _SQL_TYPECODE = OracleTypes.ARRAY; 13 | OracleDataMutableArray _array; 14 | private static final QPIMInfoList _QPIMInfoList_Factory = new QPIMInfoList(); 15 | 16 | public static OracleDataFactory getOracleDataFactory() { 17 | return _QPIMInfoList_Factory; 18 | } 19 | public QPIMInfoList() 20 | { 21 | this((QPIMInfo[])null); 22 | } 23 | public QPIMInfoList(QPIMInfo[] a) 24 | { 25 | _array = new OracleDataMutableArray(2002, a, QPIMInfo.getFactory()); 26 | } 27 | @Override 28 | public OracleData create(Object d, int sqlType) throws SQLException { 29 | if (d == null) return null; 30 | QPIMInfoList a = new QPIMInfoList(); 31 | a._array = new OracleDataMutableArray(2002, (OracleArray) d, QPIMInfo.getFactory()); 32 | return a; 33 | } 34 | @Override 35 | public Object toJDBCObject(Connection con) throws SQLException { 36 | return _array.toJDBCObject(con, _SQL_NAME); 37 | } 38 | 39 | public int length() throws SQLException { 40 | return _array.length(); 41 | } 42 | public int getBaseType() throws SQLException{ 43 | return _array.getBaseType(); 44 | } 45 | public String getBaseTypeName() throws SQLException 46 | { 47 | return _array.getBaseTypeName(); 48 | } 49 | public QPIMInfo[] getArray() throws SQLException 50 | { 51 | return (QPIMInfo[]) _array.getObjectArray( 52 | new QPIMInfo[_array.length()]); 53 | } 54 | public void setArray(QPIMInfo[] a) throws SQLException 55 | { 56 | _array.setObjectArray(a); 57 | } 58 | public QPIMInfo[] getArray(long index, int count) throws SQLException 59 | { 60 | return (QPIMInfo[]) _array.getObjectArray(index, 61 | 
/**
 * Base interface for accessing records which could be contained in the log, or an
 * in-memory materialization of log records.
 */
public interface BaseRecords {
    /**
     * The size of these records in bytes.
     * @return The size in bytes of the records
     */
    int sizeInBytes();

    /**
     * Encapsulate this {@link BaseRecords} object into {@link RecordsSend}
     * @return Initialized {@link RecordsSend} object
     */
    //void toSend(String destination);
}
 *
 */

package org.oracle.okafka.common.requests;

import org.oracle.okafka.common.protocol.ApiKeys;

/**
 * Base class for all OKafka request types. Bridges the OKafka {@link ApiKeys}
 * enumeration into the Apache Kafka request hierarchy by converting the OKafka
 * key to its Apache Kafka equivalent before delegating to the superclass.
 */
public abstract class AbstractRequest extends org.apache.kafka.common.requests.AbstractRequest{

	public AbstractRequest(ApiKeys apiKey, short version)
	{
		super(ApiKeys.convertToApacheKafkaKey(apiKey), version);
	}

	/**
	 * Builder for OKafka requests. Retains the original OKafka {@link ApiKeys}
	 * value so it can still be queried via {@link #apiKeyOKafka()} even though
	 * the superclass only sees the converted Apache Kafka key.
	 */
	public static abstract class Builder extends org.apache.kafka.common.requests.AbstractRequest.Builder
	{
		private final ApiKeys apiKey; // OKafka-specific key; the superclass stores only the converted key


		public Builder(ApiKeys apiKey) {
			// Version is fixed at 1; OKafka does not negotiate protocol versions.
			super(ApiKeys.convertToApacheKafkaKey(apiKey), (short)1);
			this.apiKey = apiKey;
		}

		/** @return the OKafka-specific API key for this request */
		public ApiKeys apiKeyOKafka() {
			return apiKey;
		}

	}
}

/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues.
 *
 */

package org.oracle.okafka.common.requests;

import org.oracle.okafka.common.protocol.ApiKeys;

/**
 * Base class for all OKafka response types. Stores the OKafka {@link ApiKeys}
 * value and passes its Apache Kafka equivalent to the superclass constructor.
 */
public abstract class AbstractResponse extends org.apache.kafka.common.requests.AbstractResponse {
	ApiKeys apiKey; // OKafka-specific key identifying which request this answers

	protected AbstractResponse(ApiKeys apiKey) {
		super(ApiKeys.convertToApacheKafkaKey(apiKey));
		this.apiKey = apiKey;
	}
}

/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import java.util.List; 11 | import java.util.Map; 12 | 13 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 14 | import org.oracle.okafka.common.Node; 15 | import org.apache.kafka.common.TopicPartition; 16 | import org.apache.kafka.common.protocol.ApiMessage; 17 | import org.apache.kafka.common.requests.AbstractResponse; 18 | import org.oracle.okafka.common.protocol.ApiKeys; 19 | 20 | public class CommitRequest extends AbstractRequest { 21 | public static class Builder extends AbstractRequest.Builder { 22 | 23 | private final Map> nodeTPMap; 24 | private final Map offsetAndMetadata; 25 | 26 | public Builder(Map> _nodeTPMap, Map offsetAndMetadata) { 27 | super(ApiKeys.COMMIT); 28 | this.nodeTPMap = _nodeTPMap; 29 | this.offsetAndMetadata = offsetAndMetadata; 30 | } 31 | 32 | @Override 33 | public CommitRequest build() { 34 | return new CommitRequest(nodeTPMap, offsetAndMetadata); 35 | } 36 | 37 | @Override 38 | public String toString() { 39 | StringBuilder bld = new StringBuilder(); 40 | bld.append("(type=commitRequest"). 
41 | append(")"); 42 | return bld.toString(); 43 | } 44 | 45 | @Override 46 | public CommitRequest build(short version) { 47 | return new CommitRequest(nodeTPMap, offsetAndMetadata); 48 | } 49 | } 50 | 51 | private final Map> nodeTPMap; 52 | private final Map offsetAndMetadata; 53 | private CommitRequest(Map> _nodeTPMap, Map offsetAndMetadata) { 54 | super(ApiKeys.COMMIT,(short)1); 55 | this.nodeTPMap = _nodeTPMap; 56 | this.offsetAndMetadata = offsetAndMetadata; 57 | } 58 | 59 | public Map> nodes() { 60 | return this.nodeTPMap; 61 | } 62 | 63 | public Map offsets() { 64 | return this.offsetAndMetadata; 65 | } 66 | 67 | @Override 68 | public ApiMessage data() { 69 | // TODO Auto-generated method stub 70 | return null; 71 | } 72 | 73 | @Override 74 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 75 | // TODO Auto-generated method stub 76 | return null; 77 | } 78 | 79 | 80 | } 81 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/CommitResponse.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
*/

package org.oracle.okafka.common.requests;

import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.oracle.okafka.common.Node;
import org.oracle.okafka.common.errors.FeatureNotSupportedException;
import org.oracle.okafka.common.protocol.ApiKeys;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Errors;

/**
 * Response for {@link CommitRequest}: carries per-node commit outcomes, the
 * node-to-partition grouping and offsets echoed from the request, and a flag
 * indicating whether any commit failed.
 */
public class CommitResponse extends AbstractResponse {

    // Per-node outcome of the commit attempt.
    private final Map result;
    // Topic-partitions grouped by node, echoed from the request.
    private final Map nodes;
    // Offsets that were submitted for commit.
    private final Map offsets;
    // True when at least one commit failed.
    private final boolean error;

    public CommitResponse(Map result, Map nodes,
            Map offsets, boolean error) {
        super(ApiKeys.COMMIT);
        this.result = result;
        this.nodes = nodes;
        this.offsets = offsets;
        this.error = error;
    }

    /** @return per-node commit outcome */
    public Map getResult() {
        return result;
    }

    /** @return topic-partitions grouped by node */
    public Map getNodes() {
        return nodes;
    }

    /** @return offsets submitted for commit */
    public Map offsets() {
        return offsets;
    }

    /** @return true when at least one commit failed */
    public boolean error() {
        return error;
    }

    @Override
    public ApiMessage data() {
        // Not backed by a Kafka wire-protocol message.
        return null;
    }

    @Override
    public Map errorCounts() {
        // Not tracked by OKafka.
        return null;
    }

    @Override
    public int throttleTimeMs() {
        // Throttling is not supported by OKafka.
        return 0;
    }

    @Override
    public void maybeSetThrottleTimeMs(int arg0) {
        throw new FeatureNotSupportedException("This feature is not suported for this release.");
    }

}

package
org.oracle.okafka.common.requests;

import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.requests.AbstractResponse;
import org.oracle.okafka.common.protocol.ApiKeys;

/**
 * Request used to discover which database instance the client should connect
 * to for the given schema, topic and consumer group.
 */
public class ConnectMeRequest extends AbstractRequest {

	private String schemaName;
	private String topicName;
	private String groupId;

	/** Builder for {@link ConnectMeRequest}. */
	public static class Builder extends AbstractRequest.Builder
	{
		private String schemaName;
		private String topicName;
		private String groupId;

		public Builder(String _schemaName , String _topicName, String _groupId)
		{
			super(ApiKeys.CONNECT_ME);
			this.schemaName = _schemaName;
			this.topicName = _topicName;
			this.groupId = _groupId;
		}

		public ConnectMeRequest build()
		{
			return new ConnectMeRequest(this.schemaName,this.topicName,this.groupId);
		}

		@Override
		public ConnectMeRequest build(short version) {
			// Version is ignored; OKafka uses a single protocol version.
			return new ConnectMeRequest(this.schemaName,this.topicName,this.groupId);
		}
	}

	public ConnectMeRequest(String _schemaName , String _topicName, String _groupId)
	{
		super(ApiKeys.CONNECT_ME,(short)1);
		this.schemaName = _schemaName;
		this.topicName = _topicName;
		this.groupId = _groupId;
	}

	/** @return the database schema name */
	public String getSchemaName()
	{
		return schemaName;
	}

	/** @return the topic name */
	public String getTopicName()
	{
		return topicName;
	}

	/**
	 * @return the topic name
	 * @deprecated misspelled accessor kept for backward compatibility;
	 *             use {@link #getTopicName()} instead.
	 */
	@Deprecated
	public String getToipcName()
	{
		return getTopicName();
	}

	/** @return the consumer group id */
	public String getGroupId()
	{
		return groupId;
	}

	@Override
	public ApiMessage data() {
		// Not backed by a Kafka wire-protocol message.
		return null;
	}

	@Override
	public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
		// Error responses are constructed by the caller instead.
		return null;
	}

}
/*
** OKafka Java Client version 23.4.
**
** Copyright (c) 2019, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues.
 *
 */

package org.oracle.okafka.common.requests;

import java.util.Map;

import org.apache.kafka.common.Uuid;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Errors;
import org.oracle.okafka.common.errors.FeatureNotSupportedException;
import org.oracle.okafka.common.protocol.ApiKeys;

/**
 * Response for a create-topics request: per-topic errors, the ids assigned to
 * the created topics, and an optional request-level failure.
 */
public class CreateTopicsResponse extends AbstractResponse {
	final Map errors;           // per-topic creation outcome
	final Map topicIdMap;       // id assigned to each created topic
	private Exception requestResult; // first request-level failure; null on success

	public CreateTopicsResponse(Map errors, Map topicIdMap) {
		super(ApiKeys.CREATE_TOPICS);
		this.errors = errors;
		this.topicIdMap=topicIdMap;
		this.requestResult = null;
	}

	/** @return per-topic creation outcome */
	public Map errors() {
		return errors;
	}

	/**
	 * Record a request-level failure. Only the first failure is kept.
	 * BUG FIX: the guard was previously inverted ({@code requestResult != null}),
	 * so the result was never stored and {@link #getResult()} always returned null.
	 */
	public void setResult(Exception ex) {
		if(requestResult == null) {
			requestResult = ex;
		}
	}

	/** @return the first request-level failure, or null if none was recorded */
	public Exception getResult() {
		return requestResult;
	}

	/** @return id assigned to each created topic */
	public Map topicIdMap(){
		return topicIdMap;
	}

	@Override
	public ApiMessage data() {
		// Not backed by a Kafka wire-protocol message.
		return null;
	}

	@Override
	public Map errorCounts() {
		// Not tracked by OKafka.
		return null;
	}

	@Override
	public int throttleTimeMs() {
		// Throttling is not supported by OKafka.
		return 0;
	}

	@Override
	public void maybeSetThrottleTimeMs(int arg0) {
		throw new FeatureNotSupportedException("This feature is not suported for this release.");
	}

}

package org.oracle.okafka.common.requests;

import java.util.List;

import org.apache.kafka.common.protocol.ApiMessage;
import org.oracle.okafka.common.protocol.ApiKeys; 7 | 8 | public class DeleteGroupsRequest extends AbstractRequest { 9 | private final List groups; 10 | 11 | public static class Builder extends AbstractRequest.Builder { 12 | private final List groups; 13 | 14 | public Builder(List groups) { 15 | super(ApiKeys.DELETE_GROUPS); 16 | this.groups = groups; 17 | } 18 | 19 | @Override 20 | public DeleteGroupsRequest build(short version) { 21 | return new DeleteGroupsRequest(groups); 22 | } 23 | 24 | @Override 25 | public String toString() { 26 | return groups.toString(); 27 | } 28 | } 29 | 30 | public DeleteGroupsRequest(List groups) { 31 | super(ApiKeys.DELETE_GROUPS, (short)1); 32 | this.groups = groups; 33 | } 34 | 35 | public List groups() { 36 | return this.groups; 37 | } 38 | 39 | @Override 40 | public ApiMessage data() { 41 | // TODO Auto-generated method stub 42 | return null; 43 | } 44 | 45 | @Override 46 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 47 | // TODO Auto-generated method stub 48 | return null; 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/DeleteGroupsResponse.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.Map; 4 | 5 | import org.apache.kafka.common.protocol.ApiMessage; 6 | import org.apache.kafka.common.protocol.Errors; 7 | import org.oracle.okafka.common.protocol.ApiKeys; 8 | 9 | public class DeleteGroupsResponse extends AbstractResponse { 10 | 11 | private final Map errorMap; 12 | private Exception exception; 13 | 14 | public DeleteGroupsResponse(Map errors) { 15 | super(ApiKeys.DELETE_GROUPS); 16 | this.errorMap = errors; 17 | } 18 | 19 | public Map errors(){ 20 | return errorMap; 21 | } 22 | 23 | public void setException(Exception exception) { 24 | this.exception = exception; 25 | } 26 | 27 | 
public Exception getException() { 28 | return exception; 29 | } 30 | 31 | @Override 32 | public ApiMessage data() { 33 | // TODO Auto-generated method stub 34 | return null; 35 | } 36 | 37 | @Override 38 | public Map errorCounts() { 39 | // TODO Auto-generated method stub 40 | return null; 41 | } 42 | 43 | @Override 44 | public int throttleTimeMs() { 45 | // TODO Auto-generated method stub 46 | return 0; 47 | } 48 | 49 | @Override 50 | public void maybeSetThrottleTimeMs(int throttleTimeMs) { 51 | // TODO Auto-generated method stub 52 | 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsRequest.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 
23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.requests; 31 | 32 | import java.util.Set; 33 | 34 | import org.oracle.okafka.common.protocol.ApiKeys; 35 | import org.apache.kafka.common.Uuid; 36 | import org.apache.kafka.common.protocol.ApiMessage; 37 | import org.apache.kafka.common.requests.AbstractResponse; 38 | import org.apache.kafka.common.utils.Utils; 39 | 40 | public class DeleteTopicsRequest extends AbstractRequest { 41 | 42 | private final Set topics; 43 | private final Set topicIds; 44 | private final Integer timeout; 45 | 46 | public static class Builder extends AbstractRequest.Builder { 47 | private final Set topics; 48 | private final Set topicIds; 49 | private final Integer timeout; 50 | 51 | public Builder(Set topics, Set topicIds, Integer timeout) { 52 | super(ApiKeys.DELETE_TOPICS); 53 | this.topics = topics; 54 | this.topicIds=topicIds; 55 | this.timeout = timeout; 56 | } 57 | 58 | @Override 59 | public DeleteTopicsRequest build() { 60 | return new DeleteTopicsRequest(topics, topicIds, timeout); 61 | } 62 | 63 | @Override 64 | public DeleteTopicsRequest build(short version) { 65 | return new DeleteTopicsRequest(topics,topicIds , timeout); 66 | } 67 | 68 | @Override 69 | public String toString() { 70 | StringBuilder bld = new StringBuilder(); 71 | bld.append("(type=DeleteTopicsRequest"). 72 | append(topics!=null ? ", topics=(" : ", topic Ids=("). 73 | append(topics!=null ? Utils.join(topics, ", ") : Utils.join(topicIds, ", ")).append(")"). 74 | append(", timeout=").append(timeout). 
75 | append(")"); 76 | return bld.toString(); 77 | } 78 | 79 | 80 | } 81 | 82 | private DeleteTopicsRequest(Set topics, Set topicIds, Integer timeout) { 83 | super(ApiKeys.DELETE_TOPICS, (short)1); 84 | this.topics = topics; 85 | this.topicIds=topicIds; 86 | this.timeout = timeout; 87 | } 88 | 89 | public Set topics() { 90 | return topics; 91 | } 92 | 93 | public Integer timeout() { 94 | return this.timeout; 95 | } 96 | 97 | public Set topicIds(){ 98 | return topicIds; 99 | } 100 | 101 | @Override 102 | public ApiMessage data() { 103 | // TODO Auto-generated method stub 104 | return null; 105 | } 106 | 107 | @Override 108 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 109 | // TODO Auto-generated method stub 110 | return null; 111 | } 112 | 113 | } 114 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/DeleteTopicsResponse.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. 
You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.requests; 31 | 32 | import java.sql.SQLException; 33 | import java.util.Map; 34 | 35 | import org.apache.kafka.common.Uuid; 36 | import org.apache.kafka.common.protocol.ApiMessage; 37 | import org.apache.kafka.common.protocol.Errors; 38 | import org.oracle.okafka.common.errors.FeatureNotSupportedException; 39 | import org.oracle.okafka.common.protocol.ApiKeys; 40 | 41 | 42 | public class DeleteTopicsResponse extends AbstractResponse { 43 | private final Map topicNameErrorMap; 44 | private final Map topicIdErrorMap; 45 | private Exception requestResult; 46 | 47 | public DeleteTopicsResponse(Map topicNameErrorMap, Map topicIdErrorMap) { 48 | super(ApiKeys.DELETE_TOPICS); 49 | this.topicNameErrorMap = topicNameErrorMap; 50 | this.topicIdErrorMap=topicIdErrorMap; 51 | this.requestResult = null; 52 | } 53 | 54 | public Map topicErrormap(){ 55 | return this.topicNameErrorMap; 56 | } 57 | 58 | public Map topicIdErrorMap(){ 59 | return this.topicIdErrorMap; 60 | } 61 | 62 | public void setResult(Exception ex) { 63 | if(requestResult != null) { 64 | requestResult = ex; 65 | } 66 | } 67 | 68 | public Exception getResult() { 69 | return requestResult; 70 | } 71 | 72 | @Override 73 | public ApiMessage data() { 74 | // TODO Auto-generated method stub 75 | return null; 76 | } 77 | 78 | @Override 79 | public Map errorCounts() { 80 | // TODO 
Auto-generated method stub 81 | return null; 82 | } 83 | 84 | @Override 85 | public int throttleTimeMs() { 86 | // TODO Auto-generated method stub 87 | return 0; 88 | } 89 | 90 | @Override 91 | public void maybeSetThrottleTimeMs(int arg0) { 92 | throw new FeatureNotSupportedException("This feature is not suported for this release."); 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/FetchRequest.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.requests; 31 | 32 | import org.apache.kafka.common.protocol.ApiMessage; 33 | import org.apache.kafka.common.requests.AbstractResponse; 34 | import org.oracle.okafka.common.protocol.ApiKeys; 35 | 36 | public class FetchRequest extends AbstractRequest { 37 | 38 | public static class Builder extends AbstractRequest.Builder { 39 | 40 | private final String topic; 41 | private final long pollTimeoutMs; 42 | 43 | public Builder(String topic, long pollTimeoutMs) { 44 | super(ApiKeys.FETCH); 45 | this.topic = topic; 46 | this.pollTimeoutMs = pollTimeoutMs; 47 | } 48 | 49 | @Override 50 | public FetchRequest build() { 51 | return new FetchRequest(topic, pollTimeoutMs); 52 | } 53 | 54 | @Override 55 | public String toString() { 56 | StringBuilder bld = new StringBuilder(); 57 | bld.append("(type=fetchRequest"). 58 | append(", topics=").append(topic). 59 | append(")"); 60 | return bld.toString(); 61 | } 62 | 63 | @Override 64 | public FetchRequest build(short version) { 65 | return build(); 66 | } 67 | } 68 | 69 | private final String topic; 70 | private final long pollTimeoutMs; 71 | private FetchRequest(String topic, long pollTimeoutMs) { 72 | super(ApiKeys.FETCH, (short)1); 73 | this.topic = topic; 74 | this.pollTimeoutMs = pollTimeoutMs; 75 | } 76 | 77 | public String topic() { 78 | return this.topic; 79 | } 80 | 81 | public long pollTimeout() { 82 | return this.pollTimeoutMs; 83 | } 84 | 85 | @Override 86 | public ApiMessage data() { 87 | // TODO Auto-generated method stub 88 | return null; 89 | } 90 | 91 | @Override 92 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 93 | // TODO Auto-generated method stub 94 | return null; 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/FetchResponse.java: -------------------------------------------------------------------------------- 1 | /* 
2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.requests; 31 | 32 | import oracle.jms.AQjmsBytesMessage; 33 | 34 | import java.util.List; 35 | import java.util.Map; 36 | 37 | import org.apache.kafka.common.protocol.ApiMessage; 38 | import org.apache.kafka.common.protocol.Errors; 39 | import org.oracle.okafka.common.errors.FeatureNotSupportedException; 40 | import org.oracle.okafka.common.protocol.ApiKeys; 41 | 42 | public class FetchResponse extends AbstractResponse { 43 | private final String topic; 44 | private final List messages; 45 | private final Exception exception; 46 | 47 | public FetchResponse(String topic, List messages, Exception exception) { 48 | super(ApiKeys.FETCH); 49 | this.topic = topic; 50 | this.messages = messages; 51 | this.exception = exception; 52 | } 53 | 54 | public String topic() { 55 | return topic; 56 | } 57 | 58 | public List getMessages() { 59 | return this.messages; 60 | } 61 | 62 | public Exception getException() { 63 | return this.exception; 64 | } 65 | 66 | @Override 67 | public ApiMessage data() { 68 | // TODO Auto-generated method stub 69 | return null; 70 | } 71 | 72 | @Override 73 | public Map errorCounts() { 74 | // TODO Auto-generated method stub 75 | return null; 76 | } 77 | 78 | @Override 79 | public int throttleTimeMs() { 80 | // TODO Auto-generated method stub 81 | return 0; 82 | } 83 | 84 | @Override 85 | public void maybeSetThrottleTimeMs(int arg0) { 86 | throw new FeatureNotSupportedException("This feature is not suported for this release."); 87 | } 88 | 89 | } 90 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/IsolationLevel.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.oracle.okafka.common.requests;

/**
 * Transaction isolation levels, mirroring the Apache Kafka wire values:
 * 0 = READ_UNCOMMITTED, 1 = READ_COMMITTED.
 */
public enum IsolationLevel {
    READ_UNCOMMITTED((byte) 0), READ_COMMITTED((byte) 1);

    // Wire-protocol identifier for this level.
    private final byte id;

    IsolationLevel(byte id) {
        this.id = id;
    }

    /** @return the wire-protocol id of this isolation level */
    public byte id() {
        return id;
    }

    /**
     * Look up the isolation level for a wire-protocol id.
     *
     * @param id wire value (0 or 1)
     * @return the matching isolation level
     * @throws IllegalArgumentException if the id is unknown
     */
    public static IsolationLevel forId(byte id) {
        for (IsolationLevel level : values()) {
            if (level.id == id) {
                return level;
            }
        }
        throw new IllegalArgumentException("Unknown isolation level " + id);
    }
}
org.oracle.okafka.common.internals.SessionData;
import org.oracle.okafka.common.protocol.ApiKeys;

/**
 * Request to join a consumer group, carrying this member's session data.
 */
public class JoinGroupRequest extends AbstractRequest {

	/** Builder for {@link JoinGroupRequest}. */
	public static class Builder extends AbstractRequest.Builder {
		private SessionData sessionData;

		public Builder(SessionData sessionData) {
			super(ApiKeys.JOIN_GROUP);
			this.sessionData = sessionData;
		}

		@Override
		public JoinGroupRequest build() {
			return new JoinGroupRequest(sessionData);
		}

		@Override
		public JoinGroupRequest build(short version) {
			// Version is ignored; OKafka uses a single protocol version.
			return build();
		}

		@Override
		public String toString() {
			return "(type=joinGroupRequest)";
		}

	}

	// Session data describing this member's group membership.
	private SessionData sessionData;

	public JoinGroupRequest(SessionData sessionData ) {
		super(ApiKeys.JOIN_GROUP, (short)1);
		this.sessionData = sessionData;
	}

	/** @return this member's session data */
	public SessionData getSessionData() {
		return this.sessionData;
	}

	@Override
	public ApiMessage data() {
		// Not backed by a Kafka wire-protocol message.
		return null;
	}

	@Override
	public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
		// Error responses are constructed by the caller instead.
		return null;
	}

}

package org.oracle.okafka.common.requests;

import java.util.List;
import java.util.Map;

import org.oracle.okafka.common.errors.FeatureNotSupportedException;
import org.oracle.okafka.common.internals.PartitionData;
import org.oracle.okafka.common.internals.SessionData;
import org.oracle.okafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.protocol.Errors;
import org.apache.kafka.common.utils.LogContext;
import org.slf4j.Logger;

/**
 * JOIN_GROUP response. Carries the per-member session data map, the partition
 * data list, the leader id, the protocol version and any exception raised
 * while processing the request.
 */
public class JoinGroupResponse extends AbstractResponse {
    private Map<String, SessionData> sessionData;   // keyed by member id string
    private List<PartitionData> partitions;
    private int leader;
    private int version;
    private Exception exception;
    protected final Logger log;

    public JoinGroupResponse(Map<String, SessionData> sessionData, List<PartitionData> partitions,
            int leader, int version, Exception exception) {
        super(ApiKeys.JOIN_GROUP);
        this.sessionData = sessionData;
        this.partitions = partitions;
        this.leader = leader;
        this.version = version;
        this.exception = exception;
        LogContext logContext = new LogContext("[AQ$_JOIN_GROUP:]");
        this.log = logContext.logger(JoinGroupResponse.class);

        log.debug("QPAT:");
        // Fix: 'partitions' was null-checked below but 'sessionData' was
        // dereferenced unconditionally, throwing NullPointerException when the
        // coordinator returned no session data. Guard it the same way.
        if (sessionData != null) {
            for (String mapSessionDataKeyNow : sessionData.keySet()) {
                log.debug("MapSessionDataKey " + mapSessionDataKeyNow);
                SessionData sessionDataNow = sessionData.get(mapSessionDataKeyNow);
                log.debug("Session Data Now: " + sessionDataNow.toString());
            }
        }

        if (partitions != null) {
            log.debug("QPIM:");
            for (PartitionData pData : partitions) {
                log.debug("PData: " + pData);
            }
        } else {
            log.debug("QPIM: NULL");
        }
        log.debug("Leader = " + leader + ", Verssion: " + version);
    }

    /** @return per-member session data, keyed by member id; may be null */
    public Map<String, SessionData> getSessionData() {
        return this.sessionData;
    }

    /** @return partition data for the assignment; may be null */
    public List<PartitionData> partitions() {
        return this.partitions;
    }

    /** @return id of the elected group leader */
    public int leader() {
        return this.leader;
    }

    /** @return protocol version negotiated for the group */
    public int version() {
        return this.version;
    }

    /** @return exception raised while joining, or null on success */
    public Exception getException() {
        return this.exception;
    }

    @Override
    public ApiMessage data() {
        // Not implemented; always null.
        return null;
    }

    @Override
    public Map errorCounts() {
        // Not implemented; always null.
        return null;
    }

    @Override
    public int throttleTimeMs() {
        // Throttling is not tracked; always 0.
        return 0;
    }

    @Override
    public void maybeSetThrottleTimeMs(int arg0) {
        throw new FeatureNotSupportedException("This feature is not suported for this release.");
    }
}
--------------------------------------------------------------------------------
/clients/src/main/java/org/oracle/okafka/common/requests/ListGroupsRequest.java:
--------------------------------------------------------------------------------
package org.oracle.okafka.common.requests;

import org.oracle.okafka.common.protocol.ApiKeys;
import org.apache.kafka.common.protocol.ApiMessage;
import org.apache.kafka.common.requests.AbstractResponse;

/**
 * LIST_GROUPS request. Carries no payload; the API key alone identifies it.
 */
public class ListGroupsRequest extends AbstractRequest {

    /** Stateless builder for {@link ListGroupsRequest}. */
    public static class Builder extends AbstractRequest.Builder {
        public Builder() {
            super(ApiKeys.LIST_GROUPS);
        }

        @Override
        public ListGroupsRequest build(short version) {
            return new ListGroupsRequest(version);
        }

        @Override
        public String toString() {
            return "(type = ListGroupsRequest)";
        }
    }

    public ListGroupsRequest(short version) {
        super(ApiKeys.LIST_GROUPS, version);
    }

    @Override
    public ApiMessage data() {
        // Not implemented; always null.
        return null;
    }

    @Override
    public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) {
        // Not implemented; always null.
        return null;
    }
}
--------------------------------------------------------------------------------
/clients/src/main/java/org/oracle/okafka/common/requests/ListGroupsResponse.java:
--------------------------------------------------------------------------------
package org.oracle.okafka.common.requests;

import java.util.List;
import
java.util.Map; 5 | 6 | import org.oracle.okafka.common.protocol.ApiKeys; 7 | import org.apache.kafka.common.protocol.ApiMessage; 8 | import org.apache.kafka.common.protocol.Errors; 9 | 10 | public class ListGroupsResponse extends AbstractResponse{ 11 | 12 | private final List groupNames; 13 | private Exception exception; 14 | 15 | public ListGroupsResponse(List groups ) { 16 | super(ApiKeys.LIST_GROUPS); 17 | this.groupNames = groups; 18 | } 19 | 20 | public List groups(){ 21 | return groupNames; 22 | } 23 | 24 | public void setException(Exception ex) { 25 | this.exception = ex; 26 | } 27 | 28 | public Exception getException() { 29 | return exception; 30 | } 31 | 32 | @Override 33 | public ApiMessage data() { 34 | // TODO Auto-generated method stub 35 | return null; 36 | } 37 | @Override 38 | public Map errorCounts() { 39 | // TODO Auto-generated method stub 40 | return null; 41 | } 42 | @Override 43 | public int throttleTimeMs() { 44 | // TODO Auto-generated method stub 45 | return 0; 46 | } 47 | @Override 48 | public void maybeSetThrottleTimeMs(int throttleTimeMs) { 49 | // TODO Auto-generated method stub 50 | 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/ListOffsetsRequest.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.List; 4 | import java.util.Map; 5 | 6 | import org.apache.kafka.common.protocol.ApiMessage; 7 | import org.apache.kafka.common.requests.AbstractResponse; 8 | import org.oracle.okafka.common.protocol.ApiKeys; 9 | 10 | public class ListOffsetsRequest extends AbstractRequest { 11 | 12 | public static final long EARLIEST_TIMESTAMP = -2L; 13 | public static final long LATEST_TIMESTAMP = -1L; 14 | public static final long MAX_TIMESTAMP = -3L; 15 | 16 | private final Map> topicoffsetPartitionMap; 17 | 18 | public static class Builder 
extends AbstractRequest.Builder { 19 | private final Map> topicoffsetPartitionMap; 20 | 21 | public Builder(Map> topicoffsetPartitionMap) { 22 | super(ApiKeys.LIST_OFFSETS); 23 | this.topicoffsetPartitionMap = topicoffsetPartitionMap; 24 | } 25 | 26 | @Override 27 | public ListOffsetsRequest build(short version) { 28 | return new ListOffsetsRequest(topicoffsetPartitionMap, version); 29 | } 30 | 31 | @Override 32 | public String toString() { 33 | return "(type=ListOffsetsRequest, " + topicoffsetPartitionMap.toString(); 34 | } 35 | } 36 | 37 | private ListOffsetsRequest(Map> topicoffsetPartitionMap, short version) { 38 | super(ApiKeys.LIST_OFFSETS, version); 39 | this.topicoffsetPartitionMap = topicoffsetPartitionMap; 40 | } 41 | 42 | public Map> getOffsetPartitionMap() { 43 | return topicoffsetPartitionMap; 44 | } 45 | 46 | @Override 47 | public ApiMessage data() { 48 | // TODO Auto-generated method stub 49 | return null; 50 | } 51 | 52 | @Override 53 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 54 | // TODO Auto-generated method stub 55 | return null; 56 | } 57 | 58 | public static class ListOffsetsPartition { 59 | int partitionIndex; 60 | long timestamp; 61 | 62 | public ListOffsetsPartition() { 63 | this.partitionIndex = 0; 64 | this.timestamp = 0L; 65 | } 66 | 67 | @Override 68 | public boolean equals(Object obj) { 69 | if (!(obj instanceof ListOffsetsPartition)) 70 | return false; 71 | ListOffsetsPartition other = (ListOffsetsPartition) obj; 72 | if (partitionIndex != other.partitionIndex) 73 | return false; 74 | if (timestamp != other.timestamp) 75 | return false; 76 | return true; 77 | } 78 | 79 | @Override 80 | public int hashCode() { 81 | int hashCode = 0; 82 | hashCode = 31 * hashCode + partitionIndex; 83 | hashCode = 31 * hashCode + ((int) (timestamp >> 32) ^ (int) timestamp); 84 | return hashCode; 85 | } 86 | 87 | @Override 88 | public String toString() { 89 | return "ListOffsetsPartition(" + "partitionIndex=" + 
partitionIndex + ", timestamp=" + timestamp + ")"; 90 | } 91 | 92 | public int partitionIndex() { 93 | return this.partitionIndex; 94 | } 95 | 96 | public long timestamp() { 97 | return this.timestamp; 98 | } 99 | 100 | public ListOffsetsPartition setPartitionIndex(int v) { 101 | this.partitionIndex = v; 102 | return this; 103 | } 104 | 105 | public ListOffsetsPartition setTimestamp(long v) { 106 | this.timestamp = v; 107 | return this; 108 | } 109 | 110 | } 111 | 112 | } 113 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/ListOffsetsResponse.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.List; 4 | import java.util.Map; 5 | 6 | import org.apache.kafka.common.protocol.ApiMessage; 7 | import org.apache.kafka.common.protocol.Errors; 8 | import org.oracle.okafka.common.errors.FeatureNotSupportedException; 9 | import org.oracle.okafka.common.protocol.ApiKeys; 10 | 11 | public class ListOffsetsResponse extends AbstractResponse { 12 | final Map> offsetPartitionResponseMap; 13 | private Exception exception; 14 | 15 | public ListOffsetsResponse(Map> offsetPartitionResponseMap) { 16 | super(ApiKeys.LIST_OFFSETS); 17 | this.offsetPartitionResponseMap = offsetPartitionResponseMap; 18 | exception = null; 19 | } 20 | 21 | public Map> getOffsetPartitionResponseMap() { 22 | return offsetPartitionResponseMap; 23 | } 24 | 25 | public void setException(Exception ex) { 26 | if (exception != null) { 27 | exception = ex; 28 | } 29 | } 30 | 31 | public Exception getException() { 32 | return exception; 33 | } 34 | 35 | public static class ListOffsetsPartitionResponse { 36 | int partitionIndex; 37 | Exception error; 38 | long timestamp; 39 | long offset; 40 | 41 | public ListOffsetsPartitionResponse() { 42 | this.partitionIndex = 0; 43 | this.error = null; 44 | this.timestamp = -1L; 45 | 
this.offset = -1L; 46 | } 47 | 48 | @Override 49 | public boolean equals(Object obj) { 50 | if (!(obj instanceof ListOffsetsPartitionResponse)) 51 | return false; 52 | ListOffsetsPartitionResponse other = (ListOffsetsPartitionResponse) obj; 53 | if (partitionIndex != other.partitionIndex) 54 | return false; 55 | if (error != other.error) 56 | return false; 57 | if (timestamp != other.timestamp) 58 | return false; 59 | if (offset != other.offset) 60 | return false; 61 | return true; 62 | } 63 | 64 | @Override 65 | public int hashCode() { 66 | int hashCode = 0; 67 | hashCode = 31 * hashCode + partitionIndex; 68 | hashCode = 31 * hashCode + ((int) (timestamp >> 32) ^ (int) timestamp); 69 | hashCode = 31 * hashCode + ((int) (offset >> 32) ^ (int) offset); 70 | return hashCode; 71 | } 72 | 73 | @Override 74 | public String toString() { 75 | return "ListOffsetsPartitionResponse(" + "partitionIndex=" + partitionIndex + ", error=" + error 76 | + ", timestamp=" + timestamp + ", offset=" + offset + ")"; 77 | } 78 | 79 | public int partitionIndex() { 80 | return this.partitionIndex; 81 | } 82 | 83 | public Exception getError() { 84 | return this.error; 85 | } 86 | 87 | public long timestamp() { 88 | return this.timestamp; 89 | } 90 | 91 | public long offset() { 92 | return this.offset; 93 | } 94 | 95 | public ListOffsetsPartitionResponse setPartitionIndex(int v) { 96 | this.partitionIndex = v; 97 | return this; 98 | } 99 | 100 | public ListOffsetsPartitionResponse setError(Exception v) { 101 | this.error = v; 102 | return this; 103 | } 104 | 105 | public ListOffsetsPartitionResponse setTimestamp(long v) { 106 | this.timestamp = v; 107 | return this; 108 | } 109 | 110 | public ListOffsetsPartitionResponse setOffset(long v) { 111 | this.offset = v; 112 | return this; 113 | } 114 | } 115 | 116 | @Override 117 | public ApiMessage data() { 118 | // TODO Auto-generated method stub 119 | return null; 120 | } 121 | 122 | @Override 123 | public Map errorCounts() { 124 | // TODO 
Auto-generated method stub 125 | return null; 126 | } 127 | 128 | @Override 129 | public int throttleTimeMs() { 130 | // TODO Auto-generated method stub 131 | return 0; 132 | } 133 | 134 | @Override 135 | public void maybeSetThrottleTimeMs(int throttleTimeMs) { 136 | throw new FeatureNotSupportedException("This feature is not suported for this release."); 137 | } 138 | 139 | } 140 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/OffsetFetchRequest.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.List; 4 | import java.util.Map; 5 | 6 | import org.apache.kafka.common.TopicPartition; 7 | import org.apache.kafka.common.protocol.ApiMessage; 8 | import org.apache.kafka.common.requests.AbstractResponse; 9 | import org.oracle.okafka.common.protocol.ApiKeys; 10 | 11 | public class OffsetFetchRequest extends AbstractRequest{ 12 | 13 | public static final List ALL_TOPIC_PARTITIONS = null; 14 | private final Map> perGroupTopicPartitions; 15 | 16 | public static class Builder extends AbstractRequest.Builder { 17 | private final Map> perGroupTopicPartitions; 18 | 19 | public Builder(Map> perGroupTopicPartitions) { 20 | super(ApiKeys.OFFSET_FETCH); 21 | this.perGroupTopicPartitions = perGroupTopicPartitions; 22 | } 23 | 24 | @Override 25 | public OffsetFetchRequest build(short version) { 26 | return new OffsetFetchRequest(perGroupTopicPartitions, version); 27 | } 28 | 29 | @Override 30 | public String toString() { 31 | return "(type=ListOffsetsRequest, " + ", " + "GroupIdTopicPartitions:" + perGroupTopicPartitions.toString(); 32 | } 33 | } 34 | 35 | private OffsetFetchRequest(Map> perGroupTopicPartitions, short version) { 36 | super(ApiKeys.OFFSET_FETCH, version); 37 | this.perGroupTopicPartitions = perGroupTopicPartitions; 38 | } 39 | 40 | public Map> perGroupTopicpartitions(){ 41 
| return perGroupTopicPartitions; 42 | } 43 | 44 | @Override 45 | public ApiMessage data() { 46 | // TODO Auto-generated method stub 47 | return null; 48 | } 49 | 50 | @Override 51 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 52 | // TODO Auto-generated method stub 53 | return null; 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/OffsetFetchResponse.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.Map; 4 | import java.util.Objects; 5 | 6 | import org.apache.kafka.common.TopicPartition; 7 | import org.apache.kafka.common.protocol.ApiMessage; 8 | import org.apache.kafka.common.protocol.Errors; 9 | import org.oracle.okafka.common.protocol.ApiKeys; 10 | 11 | public class OffsetFetchResponse extends AbstractResponse { 12 | 13 | private final Map> offsetFetchResponseMap; 14 | private Exception exception; 15 | 16 | public static final class PartitionOffsetData { 17 | public final long offset; 18 | public final Exception error; 19 | 20 | public PartitionOffsetData(long offset, Exception error) { 21 | this.offset = offset; 22 | this.error = error; 23 | } 24 | 25 | @Override 26 | public boolean equals(Object other) { 27 | if (!(other instanceof PartitionOffsetData)) 28 | return false; 29 | PartitionOffsetData otherPartition = (PartitionOffsetData) other; 30 | return Objects.equals(this.offset, otherPartition.offset) 31 | && Objects.equals(this.error, otherPartition.error); 32 | } 33 | 34 | @Override 35 | public String toString() { 36 | return "PartitionData(" + "offset=" + offset + ", error='" + error != null ? 
error.toString() : null + ")"; 37 | } 38 | 39 | @Override 40 | public int hashCode() { 41 | return Objects.hash(offset, error); 42 | } 43 | } 44 | 45 | public OffsetFetchResponse(Map> offsetFetchResponseMap) { 46 | super(ApiKeys.OFFSET_FETCH); 47 | this.offsetFetchResponseMap = offsetFetchResponseMap; 48 | exception = null; 49 | } 50 | 51 | public void setException(Exception ex) { 52 | this.exception=ex; 53 | } 54 | 55 | public Map> getOffsetFetchResponseMap(){ 56 | return offsetFetchResponseMap; 57 | } 58 | 59 | public Exception getException() { 60 | return exception; 61 | } 62 | 63 | @Override 64 | public ApiMessage data() { 65 | // TODO Auto-generated method stub 66 | return null; 67 | } 68 | 69 | @Override 70 | public Map errorCounts() { 71 | // TODO Auto-generated method stub 72 | return null; 73 | } 74 | 75 | @Override 76 | public int throttleTimeMs() { 77 | // TODO Auto-generated method stub 78 | return 0; 79 | } 80 | 81 | @Override 82 | public void maybeSetThrottleTimeMs(int throttleTimeMs) { 83 | // TODO Auto-generated method stub 84 | 85 | } 86 | 87 | } 88 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetRequest.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import java.util.Map; 11 | 12 | import org.apache.kafka.common.TopicPartition; 13 | import org.apache.kafka.common.protocol.ApiMessage; 14 | import org.apache.kafka.common.requests.AbstractResponse; 15 | import org.oracle.okafka.common.protocol.ApiKeys; 16 | 17 | public class OffsetResetRequest extends AbstractRequest { 18 | 19 | public static class Builder extends AbstractRequest.Builder { 20 | 21 | private final Map offsetResetTimestamps; 22 | private final long pollTimeoutMs; 23 | 24 | public Builder(Map offsetResetTimestamps, long pollTimeoutMs) { 25 | super(ApiKeys.OFFSETRESET); 26 | this.offsetResetTimestamps = offsetResetTimestamps; 27 | this.pollTimeoutMs = pollTimeoutMs; 28 | } 29 | 30 | @Override 31 | public OffsetResetRequest build() { 32 | return new OffsetResetRequest(offsetResetTimestamps, pollTimeoutMs); 33 | } 34 | 35 | @Override 36 | public String toString() { 37 | StringBuilder bld = new StringBuilder(); 38 | bld.append("(type=OffsetResetRequest"). 39 | append(", offsetResetTimestampss=").append(offsetResetTimestamps). 
40 | append(")"); 41 | return bld.toString(); 42 | } 43 | 44 | @Override 45 | public OffsetResetRequest build(short version) { 46 | return build(); 47 | } 48 | } 49 | 50 | private final Map offsetResetTimestamps; 51 | private final long pollTimeoutMs; 52 | private OffsetResetRequest(Map offsetResetTimestamps, long pollTimeoutMs) { 53 | super(ApiKeys.OFFSETRESET, (short)0); 54 | this.offsetResetTimestamps = offsetResetTimestamps; 55 | this.pollTimeoutMs = pollTimeoutMs; 56 | } 57 | 58 | public Map offsetResetTimestamps() { 59 | return this.offsetResetTimestamps; 60 | } 61 | 62 | public long pollTimeout() { 63 | return this.pollTimeoutMs; 64 | } 65 | 66 | @Override 67 | public ApiMessage data() { 68 | // TODO Auto-generated method stub 69 | return null; 70 | } 71 | 72 | @Override 73 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 74 | // TODO Auto-generated method stub 75 | return null; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/OffsetResetResponse.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import java.util.Map; 11 | import org.apache.kafka.common.TopicPartition; 12 | import org.apache.kafka.common.protocol.ApiMessage; 13 | import org.apache.kafka.common.protocol.Errors; 14 | import org.oracle.okafka.common.errors.FeatureNotSupportedException; 15 | import org.oracle.okafka.common.protocol.ApiKeys; 16 | 17 | public class OffsetResetResponse extends AbstractResponse { 18 | private final Map offsetResetResponse; 19 | private final Exception exception ; 20 | public OffsetResetResponse(Map offsetResetResponse, Exception exception) { 21 | super(ApiKeys.OFFSETRESET); 22 | this.offsetResetResponse = offsetResetResponse; 23 | this.exception = exception; 24 | } 25 | 26 | public Map offsetResetResponse() { 27 | return offsetResetResponse; 28 | } 29 | 30 | public Exception getException() { 31 | return exception; 32 | } 33 | 34 | @Override 35 | public ApiMessage data() { 36 | // TODO Auto-generated method stub 37 | return null; 38 | } 39 | 40 | @Override 41 | public Map errorCounts() { 42 | // TODO Auto-generated method stub 43 | return null; 44 | } 45 | 46 | @Override 47 | public int throttleTimeMs() { 48 | // TODO Auto-generated method stub 49 | return 0; 50 | } 51 | 52 | @Override 53 | public void maybeSetThrottleTimeMs(int arg0) { 54 | throw new FeatureNotSupportedException("This feature is not suported for this release."); 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/RequestHeader.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 
27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.requests; 31 | 32 | import static java.util.Objects.requireNonNull; 33 | 34 | import org.apache.kafka.common.message.RequestHeaderData; 35 | import org.oracle.okafka.common.protocol.ApiKeys; 36 | 37 | /** 38 | * The header for a request in the Kafka protocol 39 | */ 40 | public class RequestHeader {// extends org.apache.kafka.common.requests.RequestHeader{ 41 | 42 | private final RequestHeaderData data; 43 | private final ApiKeys apiKey; 44 | private final String clientId; 45 | private final int correlationId; 46 | 47 | public RequestHeader(ApiKeys apiKey, String clientId, int correlation) { 48 | data = new RequestHeaderData().setClientId(clientId).setCorrelationId(correlation).setRequestApiKey(apiKey.id); 49 | this.apiKey = requireNonNull(apiKey); 50 | this.clientId = clientId; 51 | this.correlationId = correlation; 52 | } 53 | 54 | public ApiKeys apiKey() { 55 | return apiKey; 56 | } 57 | 58 | public String clientId() { 59 | return clientId; 60 | } 61 | 62 | public int correlationId() { 63 | return correlationId; 64 | } 65 | 66 | public ResponseHeader toResponseHeader() { 67 | return new ResponseHeader(correlationId); 68 | } 69 | 70 | @Override 71 | public String toString() { 72 | return "RequestHeader(apiKey=" + apiKey + 73 | ", clientId=" + clientId + 74 | ", correlationId=" + correlationId + 75 | ")"; 76 | } 77 | 78 | @Override 79 | public boolean equals(Object o) { 80 | if (this == o) return true; 81 | if (o == null || getClass() != o.getClass()) return false; 82 | 83 | RequestHeader that = (RequestHeader) o; 84 | return apiKey == that.apiKey && 85 | correlationId == that.correlationId && 86 | (clientId == null ? that.clientId == null : clientId.equals(that.clientId)); 87 | } 88 | 89 | @Override 90 | public int hashCode() { 91 | int result = apiKey.hashCode(); 92 | result = 31 * result + (clientId != null ? 
clientId.hashCode() : 0); 93 | result = 31 * result + correlationId; 94 | return result; 95 | } 96 | } 97 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/ResponseHeader.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | /* 26 | * 04/20/2020: This file is modified to support Kafka Java Client compatability to Oracle Transactional Event Queues. 27 | * 28 | */ 29 | 30 | package org.oracle.okafka.common.requests; 31 | 32 | /** 33 | * A response header in the kafka protocol. 
34 | */ 35 | public class ResponseHeader { 36 | private final int correlationId; 37 | 38 | public ResponseHeader(int correlationId) { 39 | this.correlationId = correlationId; 40 | } 41 | 42 | public int correlationId() { 43 | return correlationId; 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/SubscribeRequest.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import org.apache.kafka.common.protocol.ApiMessage; 11 | import org.apache.kafka.common.requests.AbstractResponse; 12 | import org.oracle.okafka.common.protocol.ApiKeys; 13 | 14 | public class SubscribeRequest extends AbstractRequest { 15 | public static class Builder extends AbstractRequest.Builder { 16 | private final String topic; 17 | public Builder(String topic) { 18 | super(ApiKeys.SUBSCRIBE); 19 | this.topic = topic; 20 | } 21 | 22 | @Override 23 | public SubscribeRequest build() { 24 | return new SubscribeRequest(topic); 25 | } 26 | 27 | @Override 28 | public String toString() { 29 | StringBuilder bld = new StringBuilder(); 30 | bld.append("(type=subscribeRequest"). 31 | append(", topics=").append(topic). 
32 | append(")"); 33 | return bld.toString(); 34 | } 35 | 36 | @Override 37 | public SubscribeRequest build(short version) { 38 | return build(); 39 | } 40 | } 41 | private final String topic; 42 | public SubscribeRequest(String topic) { 43 | super(ApiKeys.SUBSCRIBE,(short)1); 44 | this.topic = topic; 45 | } 46 | 47 | public String getTopic() { 48 | return this.topic; 49 | } 50 | 51 | @Override 52 | public ApiMessage data() { 53 | // TODO Auto-generated method stub 54 | return null; 55 | } 56 | 57 | @Override 58 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 59 | // TODO Auto-generated method stub 60 | return null; 61 | } 62 | 63 | } 64 | 65 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/SubscribeResponse.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import java.util.Map; 11 | 12 | import javax.jms.JMSException; 13 | 14 | import org.apache.kafka.common.protocol.ApiMessage; 15 | import org.apache.kafka.common.protocol.Errors; 16 | import org.oracle.okafka.common.errors.FeatureNotSupportedException; 17 | import org.oracle.okafka.common.protocol.ApiKeys; 18 | 19 | public class SubscribeResponse extends AbstractResponse { 20 | private final JMSException exception; 21 | private final String topic; 22 | 23 | public SubscribeResponse(String topic, JMSException exception) { 24 | super(ApiKeys.SUBSCRIBE); 25 | this.topic = topic; 26 | this.exception = exception; 27 | } 28 | 29 | public String getTopic() { 30 | return this.topic; 31 | 32 | } 33 | 34 | public JMSException getException() { 35 | return this.exception; 36 | } 37 | 38 | @Override 39 | public ApiMessage data() { 40 | // TODO Auto-generated method stub 41 | return null; 42 | } 43 | 44 | @Override 45 | public Map errorCounts() { 46 | // TODO Auto-generated method stub 47 | return null; 48 | } 49 | 50 | @Override 51 | public int throttleTimeMs() { 52 | // TODO Auto-generated method stub 53 | return 0; 54 | } 55 | 56 | @Override 57 | public void maybeSetThrottleTimeMs(int arg0) { 58 | throw new FeatureNotSupportedException("This feature is not suported for this release."); 59 | } 60 | 61 | } 62 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupRequest.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.List; 4 | 5 | import org.apache.kafka.common.protocol.ApiMessage; 6 | import org.apache.kafka.common.requests.AbstractResponse; 7 | import org.oracle.okafka.common.internals.SessionData; 8 | import org.oracle.okafka.common.protocol.ApiKeys; 9 | 10 | public class SyncGroupRequest extends 
AbstractRequest { 11 | public static class Builder extends AbstractRequest.Builder { 12 | private List sessionData; 13 | private final int version; 14 | 15 | public Builder(List sessionData, int version) { 16 | super(ApiKeys.SYNC_GROUP); 17 | this.sessionData = sessionData; 18 | this.version = version; 19 | } 20 | 21 | @Override 22 | public SyncGroupRequest build() { 23 | return new SyncGroupRequest(sessionData, version); 24 | } 25 | 26 | @Override 27 | public String toString() { 28 | StringBuilder bld = new StringBuilder(); 29 | bld.append("(type=SyncGroupRequest") 30 | .append(")"); 31 | return bld.toString(); 32 | } 33 | 34 | @Override 35 | public SyncGroupRequest build(short version) { 36 | return build(); 37 | } 38 | 39 | } 40 | 41 | private List sessionData; 42 | private int version; 43 | public SyncGroupRequest(List sessionData, int version) { 44 | super(ApiKeys.SYNC_GROUP,(short)1); 45 | this.sessionData = sessionData; 46 | this.version = version; 47 | } 48 | 49 | public List getSessionData() { 50 | return this.sessionData; 51 | } 52 | 53 | public int getVersion() { 54 | return this.version; 55 | } 56 | 57 | @Override 58 | public ApiMessage data() { 59 | // TODO Auto-generated method stub 60 | return null; 61 | } 62 | 63 | @Override 64 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 65 | // TODO Auto-generated method stub 66 | return null; 67 | } 68 | 69 | } -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/SyncGroupResponse.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.requests; 2 | 3 | import java.util.Map; 4 | 5 | import org.apache.kafka.common.protocol.ApiMessage; 6 | import org.apache.kafka.common.protocol.Errors; 7 | import org.oracle.okafka.common.internals.SessionData; 8 | import org.oracle.okafka.common.protocol.ApiKeys; 9 | 10 | public class 
SyncGroupResponse extends AbstractResponse { 11 | private SessionData sessionData; 12 | private int version; 13 | private Exception exception; 14 | 15 | public SyncGroupResponse(SessionData sessionData, int version, Exception exception) { 16 | super(ApiKeys.SYNC_GROUP); 17 | this.sessionData = sessionData; 18 | this.version = version; 19 | this.exception = exception; 20 | } 21 | 22 | public SessionData getSessionData() { 23 | return this.sessionData; 24 | } 25 | 26 | public int getVersion() { 27 | return this.version; 28 | } 29 | 30 | public Exception getException() { 31 | return this.exception; 32 | } 33 | 34 | @Override 35 | public ApiMessage data() { 36 | // TODO Auto-generated method stub 37 | return null; 38 | } 39 | 40 | @Override 41 | public Map errorCounts() { 42 | // TODO Auto-generated method stub 43 | return null; 44 | } 45 | 46 | @Override 47 | public int throttleTimeMs() { 48 | // TODO Auto-generated method stub 49 | return 0; 50 | } 51 | 52 | @Override 53 | public void maybeSetThrottleTimeMs(int arg0) { 54 | // TODO Auto-generated method stub 55 | 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeRequest.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import org.apache.kafka.common.protocol.ApiMessage; 11 | import org.apache.kafka.common.requests.AbstractResponse; 12 | import org.oracle.okafka.common.protocol.ApiKeys; 13 | 14 | public class UnsubscribeRequest extends AbstractRequest { 15 | public static class Builder extends AbstractRequest.Builder { 16 | 17 | public Builder() { 18 | super(ApiKeys.UNSUBSCRIBE); 19 | } 20 | 21 | @Override 22 | public UnsubscribeRequest build() { 23 | return new UnsubscribeRequest(); 24 | } 25 | 26 | @Override 27 | public String toString() { 28 | StringBuilder bld = new StringBuilder(); 29 | bld.append("(type=unsubscribeRequest"). 30 | append(")"); 31 | return bld.toString(); 32 | } 33 | 34 | @Override 35 | public UnsubscribeRequest build(short version) { 36 | return build(); 37 | } 38 | } 39 | public UnsubscribeRequest() { 40 | super(ApiKeys.UNSUBSCRIBE,(short)1); 41 | } 42 | @Override 43 | public ApiMessage data() { 44 | // TODO Auto-generated method stub 45 | return null; 46 | } 47 | @Override 48 | public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { 49 | // TODO Auto-generated method stub 50 | return null; 51 | } 52 | 53 | } 54 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/requests/UnsubscribeResponse.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | package org.oracle.okafka.common.requests; 9 | 10 | import java.util.Map; 11 | 12 | import org.apache.kafka.common.protocol.ApiMessage; 13 | import org.apache.kafka.common.protocol.Errors; 14 | import org.oracle.okafka.common.errors.FeatureNotSupportedException; 15 | import org.oracle.okafka.common.protocol.ApiKeys; 16 | 17 | public class UnsubscribeResponse extends AbstractResponse { 18 | private final Map response; 19 | 20 | public UnsubscribeResponse(Map response) { 21 | super(ApiKeys.UNSUBSCRIBE); 22 | this.response = response; 23 | } 24 | 25 | public Map response() { 26 | return this.response; 27 | 28 | } 29 | 30 | @Override 31 | public ApiMessage data() { 32 | // TODO Auto-generated method stub 33 | return null; 34 | } 35 | 36 | @Override 37 | public Map errorCounts() { 38 | // TODO Auto-generated method stub 39 | return null; 40 | } 41 | 42 | @Override 43 | public int throttleTimeMs() { 44 | // TODO Auto-generated method stub 45 | return 0; 46 | } 47 | 48 | @Override 49 | public void maybeSetThrottleTimeMs(int arg0) { 50 | throw new FeatureNotSupportedException("This feature is not suported for this release."); 51 | } 52 | 53 | } 54 | 55 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/utils/ReflectionUtil.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.common.utils; 2 | 3 | import java.lang.reflect.Constructor; 4 | import java.lang.reflect.InvocationTargetException; 5 | 6 | public class ReflectionUtil { 7 | public static T createInstance(Class clazz, Class[] paramTypes, Object... 
args) { 8 | T instance = null; 9 | try { 10 | Constructor constructor = clazz.getDeclaredConstructor(paramTypes); 11 | constructor.setAccessible(true); 12 | instance = constructor.newInstance(args); 13 | } catch (NoSuchMethodException | SecurityException | InstantiationException | IllegalAccessException 14 | | IllegalArgumentException | InvocationTargetException e) { 15 | throw new RuntimeException("Failed to create instance via reflection", e); 16 | } 17 | return instance; 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /clients/src/main/java/org/oracle/okafka/common/utils/TNSParser.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.common.utils; 9 | 10 | import org.oracle.okafka.clients.CommonClientConfigs; 11 | import org.apache.kafka.common.config.AbstractConfig; 12 | 13 | import java.io.File; 14 | import java.io.FileNotFoundException; 15 | import java.io.FileReader; 16 | import java.io.IOException; 17 | import java.util.Stack; 18 | import java.util.StringTokenizer; 19 | public class TNSParser { 20 | private final AbstractConfig configs; 21 | private String fileStr ; 22 | private final String hashChar = "#"; 23 | private final String eol = "\r\n"; 24 | 25 | public TNSParser( AbstractConfig configs) { 26 | this.configs = configs; 27 | } 28 | 29 | public static String getProperty(String connStr, String property) { 30 | int index = connStr.indexOf(property); 31 | if(index == -1) 32 | return null; 33 | int index1 = connStr.indexOf("=", index); 34 | if(index1 == -1) 35 | return null; 36 | int index2 = connStr.indexOf(")", index1); 37 | if(index2 == -1) 38 | return null; 39 | return connStr.substring(index1 + 1, 
index2).trim(); 40 | } 41 | public String getConnectionString(String alias) { 42 | String aliasTmp = alias.trim().toUpperCase(); 43 | Stack stack = new Stack<>(); 44 | int index = -1; 45 | boolean found = false; 46 | while((index = fileStr.indexOf(aliasTmp, index + 1)) != -1 ) { 47 | if( fileStr.indexOf("=(DESCRIPTION", index) == index + aliasTmp.length()) { 48 | found = true; 49 | break; 50 | } 51 | } 52 | if ( found ) { 53 | for(int ind = index; ind < fileStr.length() ; ind++) { 54 | if(fileStr.charAt(ind) == '(') 55 | {stack.push("("); } 56 | else if(fileStr.charAt(ind) == ')'){ 57 | if(stack.empty()) 58 | return null; 59 | stack.pop(); 60 | if(stack.empty()) 61 | return fileStr.substring(index, ind + 1); 62 | //if( ind + 1 < fileStr.length() && (fileStr.charAt(ind + 1) != '(' || fileStr.charAt(ind + 1) != ')')) 63 | //return null; 64 | } 65 | 66 | } 67 | } 68 | return null; 69 | } 70 | private String removeUnwanted(String fileStr) { 71 | 72 | StringBuilder sb = new StringBuilder(); 73 | for(int ind = 0 ; ind < fileStr.length(); ind++) { 74 | if( fileStr.charAt(ind) != ' ' ) 75 | sb.append(fileStr.charAt(ind)); 76 | } 77 | String strtmp = new String (sb.toString()); 78 | String filestr = ""; 79 | String tokenstr = new String (); 80 | StringTokenizer st = new StringTokenizer(strtmp, eol); 81 | while(st.hasMoreTokens()) { 82 | tokenstr = st.nextToken().trim(); 83 | if (!tokenstr.contains(hashChar)) 84 | filestr = filestr + tokenstr + eol; 85 | else { 86 | if(tokenstr.indexOf(hashChar) != 0) 87 | filestr = filestr + tokenstr.substring(0, tokenstr.indexOf(hashChar)) + eol; 88 | } 89 | } 90 | return filestr; 91 | 92 | } 93 | public void readFile() throws FileNotFoundException, IOException { 94 | char[] buf = null; 95 | FileReader fr = null; 96 | try { 97 | File f = new File(configs.getString(CommonClientConfigs.ORACLE_NET_TNS_ADMIN) + "/tnsnames.ora"); 98 | fr = new FileReader(f); 99 | int length = (int)f.length(); 100 | buf = new char[length]; 101 | fr.read(buf, 0, 
length); 102 | 103 | 104 | String fileStr = new String(buf); 105 | fileStr = fileStr.toUpperCase(); 106 | this.fileStr = removeUnwanted(fileStr); 107 | } finally { 108 | if(fr != null) 109 | fr.close(); 110 | } 111 | 112 | 113 | 114 | } 115 | 116 | } 117 | -------------------------------------------------------------------------------- /clients/src/test/java/ojdbc.properties: -------------------------------------------------------------------------------- 1 | user=OKafka 2 | password=Welcome_123# -------------------------------------------------------------------------------- /clients/src/test/java/org/oracle/okafka/tests/DeleteConsumerGroups.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.tests; 2 | 3 | import java.util.ArrayList; 4 | import java.util.Arrays; 5 | import java.util.concurrent.ExecutionException; 6 | 7 | import org.apache.kafka.clients.admin.Admin; 8 | import org.apache.kafka.clients.admin.DeleteConsumerGroupsResult; 9 | import org.apache.kafka.common.KafkaFuture; 10 | import org.junit.Test; 11 | import org.oracle.okafka.clients.admin.AdminClient; 12 | 13 | public class DeleteConsumerGroups { 14 | 15 | @Test 16 | public void DeleteGroupsTest() { 17 | 18 | try (Admin admin = AdminClient.create(OkafkaSetup.setup())) { 19 | 20 | DeleteConsumerGroupsResult delResult = admin.deleteConsumerGroups(new ArrayList<>(Arrays.asList("S1"))); 21 | try { 22 | KafkaFuture groupFutures = delResult.all(); 23 | groupFutures.get(); 24 | System.out.println("Main Thread Out of wait now"); 25 | } catch (InterruptedException | ExecutionException e) { 26 | 27 | throw new IllegalStateException(e); 28 | } 29 | System.out.println("Auto Closing admin now"); 30 | } catch (Exception e) { 31 | System.out.println("Exception while deleting groups " + e); 32 | e.printStackTrace(); 33 | } 34 | System.out.println("Test: DeleteConsumerGroups completed"); 35 | } 36 | 37 | } 38 | 
// ===== clients/src/test/java/org/oracle/okafka/tests/ListConsumerGroupOffsets.java =====

package org.oracle.okafka.tests;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsResult;
import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;

/** Lists committed offsets of group "S1" for partitions 0-4 of topic "TEQ". */
public class ListConsumerGroupOffsets {

	@Test
	public void ListConsumerGroupOffsetsTest() {

		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {
			String groupId = "S1";
			List<TopicPartition> topicPartitions = new ArrayList<>();
			// Partitions 0..4 of topic "TEQ".
			for (int p = 0; p <= 4; p++)
				topicPartitions.add(new TopicPartition("TEQ", p));

			ListConsumerGroupOffsetsSpec spec = new ListConsumerGroupOffsetsSpec().topicPartitions(topicPartitions);
			ListConsumerGroupOffsetsResult result = admin
					.listConsumerGroupOffsets(Collections.singletonMap(groupId, spec));
			try {
				KafkaFuture<Map<String, Map<TopicPartition, OffsetAndMetadata>>> ftr = result.all();
				System.out.println(ftr.get());
				System.out.println("Main Thread Out of wait now");
			} catch (Exception e) {
				System.out.println(e);
			}
			System.out.println("Auto Closing admin now");
		} catch (Exception e) {
			System.out.println("Exception while listing Consumer Group Offsets " + e);
			e.printStackTrace();
		}
		System.out.println("Test: ListConsumerGroupOffsets completed");
	}

}

// ===== clients/src/test/java/org/oracle/okafka/tests/ListConsumerGroups.java =====

package org.oracle.okafka.tests;

import java.util.Collection;
import java.util.stream.Collectors;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ConsumerGroupListing;
import org.apache.kafka.clients.admin.ListConsumerGroupsResult;
import org.oracle.okafka.clients.admin.AdminClient;

import org.junit.Test;

/** Lists all consumer groups visible to the OKafka admin client. */
public class ListConsumerGroups {

	@Test
	public void ListConsumerGroupsTest() {

		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {

			ListConsumerGroupsResult result = admin.listConsumerGroups();
			try {
				Collection<ConsumerGroupListing> consumerGroups = result.all().get();
				String groupNames = consumerGroups.stream().map(ConsumerGroupListing::groupId)
						.collect(Collectors.joining(", "));
				System.out.println("Consumer Groups: " + groupNames);
				System.out.println("Main Thread Out of wait now");
			} catch (Exception e) {
				System.out.println(e);
			}
			System.out.println("Auto Closing admin now");
		} catch (Exception e) {
			System.out.println("Exception while listing Consumer Groups " + e);
			e.printStackTrace();
		}
		System.out.println("Test: ListConsumerGroups completed");
	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaAutoOffsetReset.java =====

package org.oracle.okafka.tests;

import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.junit.Test;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

/**
 * Consumes from topic "TEQ" with auto.offset.reset=earliest until 1000
 * records have been received or a 120-second budget is exhausted.
 */
public class OkafkaAutoOffsetReset {

	@Test
	public void autoOffsetSeekTest() throws IOException {
		Properties prop = OkafkaSetup.setup();
		prop.put("group.id", "S1");
		prop.put("max.poll.records", 1000);
		prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		prop.put("auto.offset.reset", "earliest");

		Consumer<String, String> consumer = new KafkaConsumer<>(prop);

		consumer.subscribe(Arrays.asList("TEQ"));

		int expectedMsgCnt = 1000;
		int msgCnt = 0;
		try {
			Instant startTime = Instant.now();
			long runtime = 0;
			// Bounded loop (same 120 s budget as OkafkaSeekToEnd) so the test
			// cannot hang forever when no records arrive.
			while (runtime <= 120) {
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));

				for (ConsumerRecord<String, String> record : records)
					System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(),
							record.offset(), record.key(), record.value());

				if (records != null && records.count() > 0) {
					msgCnt += records.count();
					System.out.println("Committing records " + records.count());
					consumer.commitSync();

					if (msgCnt >= expectedMsgCnt) {
						System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt + ". Exiting Now.");
						break;
					}
				} else {
					System.out.println("No Record Fetched. Retrying in 1 second");
					Thread.sleep(1000);
				}
				runtime = Duration.between(startTime, Instant.now()).toSeconds();
			}
		} catch (Exception e) {
			System.out.println("Exception from consumer " + e);
			e.printStackTrace();
		} finally {
			System.out.println("Closing Consumer");
			consumer.close();
		}
	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaDeleteTopic.java =====

package org.oracle.okafka.tests;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.concurrent.ExecutionException;
import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DeleteTopicsResult;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;

/** Deletes topic "TEQ" by name via the OKafka admin client. */
public class OkafkaDeleteTopic {

	@Test
	public void DeleteTopicTest() {

		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {
			DeleteTopicsResult delResult = admin.deleteTopics(
					TopicCollection.TopicNameCollection.ofTopicNames(new ArrayList<>(Arrays.asList("TEQ"))));
			try {
				KafkaFuture<Void> ftr = delResult.all();
				ftr.get();
				System.out.println("Main Thread Out of wait now");
			} catch (InterruptedException | ExecutionException e) {
				throw new IllegalStateException(e);
			}
			System.out.println("Auto Closing admin now");
		} catch (Exception e) {
			System.out.println("Exception while deleting topic " + e);
			e.printStackTrace();
		}
		System.out.println("Test: OkfakaDeleteTopic completed");
	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaDeleteTopicById.java =====
// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaDeleteTopicById.java =====

package org.oracle.okafka.tests;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.DeleteTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;

/** Creates topic "TEQ", then deletes it by its topic id (Uuid). */
public class OkafkaDeleteTopicById {

	@Test
	public void DeleteTopicByIdTest() {
		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {

			CreateTopicsResult result = admin.createTopics(Arrays.asList(new NewTopic("TEQ", 5, (short) 1)));
			Uuid createdTopicId = result.topicId("TEQ").get();

			DeleteTopicsResult delResult = admin.deleteTopics(
					TopicCollection.TopicIdCollection.ofTopicIds(new ArrayList<>(Arrays.asList(createdTopicId))));
			try {
				KafkaFuture<Void> ftr = delResult.all();
				ftr.get();
				System.out.println("Main Thread Out of wait now");
			} catch (InterruptedException | ExecutionException e) {
				throw new IllegalStateException(e);
			}
			System.out.println("Auto Closing admin now");
		} catch (Exception e) {
			System.out.println("Exception while deleting topic " + e);
			e.printStackTrace();
		}
		System.out.println("Test: OkfakaDeleteTopicById completed");
	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaDescribeTopics.java =====

package org.oracle.okafka.tests;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;
import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;

/** Describes topic "TEQ" by name and prints each description. */
public class OkafkaDescribeTopics {

	@Test
	public void AdminTest() {
		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {
			DescribeTopicsResult res = admin.describeTopics(
					TopicCollection.TopicNameCollection.ofTopicNames(new ArrayList<>(Arrays.asList("TEQ"))));

			Map<String, KafkaFuture<TopicDescription>> description = res.topicNameValues();

			for (Map.Entry<String, KafkaFuture<TopicDescription>> entry : description.entrySet()) {
				System.out.println("Description - " + entry.getValue().get());
			}
		} catch (Exception e) {
			System.out.println("Exception while Describing topic " + e);
			e.printStackTrace();
		}

		System.out.println("Test: OkafkaDescribeTopic Complete");

	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaDescribeTopicsById.java =====

package org.oracle.okafka.tests;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.DescribeTopicsResult;
import org.apache.kafka.clients.admin.TopicDescription;

import org.apache.kafka.common.KafkaFuture;
import org.apache.kafka.common.TopicCollection;
import org.apache.kafka.common.Uuid;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;

/**
 * Describes "TEQ" by name to discover its topic id, then describes it again
 * by that id and prints each description.
 */
public class OkafkaDescribeTopicsById {

	@Test
	public void AdminTest() {
		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {

			DescribeTopicsResult res1 = admin.describeTopics(
					TopicCollection.TopicNameCollection.ofTopicNames(new ArrayList<>(Arrays.asList("TEQ"))));

			Map<String, KafkaFuture<TopicDescription>> description1 = res1.topicNameValues();

			Uuid topicId = description1.get("TEQ").get().topicId();

			DescribeTopicsResult res2 = admin.describeTopics(
					TopicCollection.TopicIdCollection.ofTopicIds(new ArrayList<>(Arrays.asList(topicId))));

			Map<Uuid, KafkaFuture<TopicDescription>> descriptionById = res2.topicIdValues();

			for (Map.Entry<Uuid, KafkaFuture<TopicDescription>> entry : descriptionById.entrySet()) {
				System.out.println("Description - " + entry.getValue().get());
			}

		} catch (Exception e) {
			System.out.println("Exception while Describing topic " + e);
			e.printStackTrace();
		}

		System.out.println("Test: OkafkaDescribeTopicsById Complete");

	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaFetchCommittedOffset.java =====

package org.oracle.okafka.tests;

import java.util.HashSet;
import java.util.Map;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

/** Fetches the committed offsets of group "S1" for partitions 0-4 of "TEQ". */
public class OkafkaFetchCommittedOffset {

	@Test
	public void FetchCommittedOffsetTest() {
		Properties prop = OkafkaSetup.setup();
		prop.put("group.id", "S1");
		prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

		Consumer<String, String> consumer = new KafkaConsumer<>(prop);
		try {
			Set<TopicPartition> topicPartitons = new HashSet<>();
			// Partitions 0..4 of topic "TEQ".
			for (int p = 0; p <= 4; p++)
				topicPartitons.add(new TopicPartition("TEQ", p));

			Map<TopicPartition, OffsetAndMetadata> committedMap = consumer.committed(topicPartitons);
			System.out.println(committedMap);

		} catch (Exception e) {
			System.out.println("Exception while Fetching Committed Offset " + e);
			e.printStackTrace();
		} finally {
			System.out.println("Test: OkafkaFetchCommittedOffset complete");
			System.out.println("Closing Consumer");
			consumer.close();
		}
	}

}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaListOffsets.java =====

package org.oracle.okafka.tests;

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.admin.Admin;
import org.apache.kafka.clients.admin.ListOffsetsResult;
import org.apache.kafka.clients.admin.OffsetSpec;
import org.apache.kafka.clients.admin.ListOffsetsResult.ListOffsetsResultInfo;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;

/**
 * Lists offsets for three partitions of "TEQ" using three different specs
 * (earliest, latest, maxTimestamp).
 */
public class OkafkaListOffsets {

	@Test
	public void ListOffsetTest() {

		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {
			TopicPartition tp1 = new TopicPartition("TEQ", 0);
			TopicPartition tp2 = new TopicPartition("TEQ", 1);
			TopicPartition tp3 = new TopicPartition("TEQ", 2);

			Map<TopicPartition, OffsetSpec> topicOffsetSpecMap = new HashMap<>();
			topicOffsetSpecMap.put(tp1, OffsetSpec.earliest());
			topicOffsetSpecMap.put(tp2, OffsetSpec.latest());
			topicOffsetSpecMap.put(tp3, OffsetSpec.maxTimestamp());

			ListOffsetsResult result = admin.listOffsets(topicOffsetSpecMap);

			for (TopicPartition tp : topicOffsetSpecMap.keySet()) {
				try {
					ListOffsetsResultInfo resInfo = result.partitionResult(tp).get();
					System.out.println(resInfo);
				} catch (Exception e) {
					System.out.println(e);
				}
			}

			System.out.println("Auto Closing admin now");
		} catch (Exception e) {
			System.out.println("Exception while listing offsets " + e);
			e.printStackTrace();
		}
		System.out.println("Test: OkafkaListOffsets completed");
	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaListTopics.java =====

package org.oracle.okafka.tests;

import org.apache.kafka.clients.admin.Admin;
import org.junit.Test;
import org.oracle.okafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.ListTopicsOptions;
import org.apache.kafka.clients.admin.ListTopicsResult;

/** Lists all topic names visible to the OKafka admin client. */
public class OkafkaListTopics {

	@Test
	public void AdminTest() {
		try (Admin admin = AdminClient.create(OkafkaSetup.setup())) {

			ListTopicsResult res = admin.listTopics(new ListTopicsOptions());

			System.out.println(res.names().get());

		} catch (Exception e) {
			System.out.println("Exception while Listing Topics " + e);
			e.printStackTrace();
		}

		System.out.println("Test: OkafkaListTopics complete");

	}
}

// ===== clients/src/test/java/org/oracle/okafka/tests/OkafkaSeekToBeginning.java =====
-------------------------------------------------------------------------------- 1 | package org.oracle.okafka.tests; 2 | 3 | import java.io.IOException; 4 | import java.time.Duration; 5 | import java.util.Arrays; 6 | import java.util.Collection; 7 | import java.util.Properties; 8 | import org.apache.kafka.clients.consumer.Consumer; 9 | import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; 10 | import org.apache.kafka.clients.consumer.ConsumerRecord; 11 | import org.apache.kafka.clients.consumer.ConsumerRecords; 12 | import org.apache.kafka.common.TopicPartition; 13 | import org.junit.Test; 14 | import org.oracle.okafka.clients.consumer.KafkaConsumer; 15 | 16 | public class OkafkaSeekToBeginning { 17 | @Test 18 | public void SeekBeginningTest() throws IOException { 19 | Properties prop = new Properties(); 20 | prop = OkafkaSetup.setup(); 21 | prop.put("group.id" , "S1"); 22 | prop.put("max.poll.records", 1000); 23 | prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 24 | prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 25 | 26 | Consumer consumer = new KafkaConsumer(prop); 27 | try { 28 | 29 | consumer.subscribe(Arrays.asList("TEQ"), new ConsumerRebalanceListener() { 30 | @Override 31 | public synchronized void onPartitionsRevoked(Collection partitions) { 32 | System.out.println("Partitions revoked for rebalance."); 33 | } 34 | @Override 35 | public synchronized void onPartitionsAssigned(Collection partitions) { 36 | System.out.println("New Partitions assigned after rebalance"); 37 | try { 38 | consumer.seekToBeginning(partitions); 39 | } 40 | catch (Exception e) { 41 | e.printStackTrace(); 42 | } 43 | } 44 | }); 45 | } 46 | catch(Exception e) { 47 | System.out.println(e); 48 | e.printStackTrace(); 49 | } 50 | int expectedMsgCnt = 1000; 51 | int msgCnt = 0; 52 | try { 53 | 54 | while(true) { 55 | try { 56 | ConsumerRecords records = 
package org.oracle.okafka.tests;

import java.io.IOException;
import java.time.Duration;
import java.time.Instant;
import java.util.Arrays;
import java.util.Collection;
import java.util.Properties;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.junit.Test;
import org.oracle.okafka.clients.consumer.KafkaConsumer;

/**
 * Verifies that a consumer can call seekToEnd() on its partitions from inside
 * a rebalance listener and then consume records produced after that point.
 *
 * Fixes over the original:
 *  - the null check on {@code records} happened AFTER the records were
 *    iterated, so it could never catch anything; poll() never returns null,
 *    and the dead check is removed,
 *  - {@code while (true && runtime <= 120)} had a redundant {@code true &&},
 *  - the inner {@code catch (Exception e) { throw e; }} was a no-op and is
 *    removed,
 *  - raw types are re-parameterized (the original source had lost its
 *    generics).
 */
public class OkafkaSeekToEnd {

	@Test
	public void SeekEndTest() throws IOException {
		Properties prop = OkafkaSetup.setup();
		prop.put("group.id", "S1");
		prop.put("max.poll.records", 1000);
		prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

		Consumer<String, String> consumer = new KafkaConsumer<>(prop);
		try {
			consumer.subscribe(Arrays.asList("TEQ"), new ConsumerRebalanceListener() {
				@Override
				public synchronized void onPartitionsRevoked(Collection<TopicPartition> partitions) {
					System.out.println("Partitions revoked for rebalance.");
				}

				@Override
				public synchronized void onPartitionsAssigned(Collection<TopicPartition> partitions) {
					System.out.println("New Partitions assigned after rebalance");
					try {
						// Jump past any backlog so the test observes only
						// records produced after this assignment.
						consumer.seekToEnd(partitions);
					} catch (Exception e) {
						e.printStackTrace();
					}
				}
			});
		} catch (Exception e) {
			System.out.println(e);
			e.printStackTrace();
		}

		int expectedMsgCnt = 1000;
		int msgCnt = 0;
		try {
			Instant startTime = Instant.now();
			long runtimeSeconds = 0;
			// Poll until the expected count is reached or the 120 s budget expires.
			while (runtimeSeconds <= 120) {
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));

				for (ConsumerRecord<String, String> record : records)
					System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ",
							record.partition(), record.offset(), record.key(), record.value());

				if (records.count() > 0) {
					msgCnt += records.count();
					System.out.println("Committing records " + records.count());
					consumer.commitSync();

					if (msgCnt >= expectedMsgCnt) {
						System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt + ". Exiting Now.");
						break;
					}
				} else {
					System.out.println("No Record Fetched. Retrying in 1 second");
					Thread.sleep(1000);
				}
				runtimeSeconds = Duration.between(startTime, Instant.now()).toSeconds();
			}
		} catch (Exception e) {
			System.out.println("Exception from consumer " + e);
			e.printStackTrace();
		} finally {
			System.out.println("Closing Consumer");
			consumer.close();
		}
	}
}
package org.oracle.okafka.tests;

import org.junit.Test;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.time.Duration;
import java.util.*;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.oracle.okafka.clients.consumer.KafkaConsumer;
import org.oracle.okafka.clients.consumer.internals.SubscriptionState;

/**
 * Consumes the expected number of records, unsubscribes, and then polls a
 * bounded number of times to confirm the consumer no longer receives records.
 *
 * Bug fixed: after unsubscribe() the second poll loop's only exit was
 * "received more records" — but an unsubscribed consumer fetches nothing, so
 * the original looped forever on "No Record Fetched". The post-unsubscribe
 * loop is now bounded by a small empty-poll budget. Also removed: the dead
 * {@code records != null} check performed after iterating records, and the
 * no-op {@code catch (Exception e) { throw e; }} wrappers.
 */
public class OkafkaUnsubscribe {

	@Test
	public void UnsubscribeTest() throws IOException {
		Properties prop = OkafkaSetup.setup();
		prop.put("group.id", "S1");
		prop.put("max.poll.records", 1000);
		prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
		prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

		Consumer<String, String> consumer = new KafkaConsumer<>(prop);
		consumer.subscribe(Arrays.asList("TEQ"));

		int expectedMsgCnt = 1000;
		int msgCnt = 0;
		try {
			// Phase 1: consume until the expected number of records is seen.
			while (true) {
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
				for (ConsumerRecord<String, String> record : records)
					System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ",
							record.partition(), record.offset(), record.key(), record.value());

				if (records.count() > 0) {
					msgCnt += records.count();
					System.out.println("Committing records " + records.count());
					consumer.commitSync();

					if (msgCnt >= expectedMsgCnt) {
						System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt + ". Exiting Now.");
						break;
					}
				} else {
					System.out.println("No Record Fetched. Retrying in 1 second");
					Thread.sleep(1000);
				}
			}

			try {
				consumer.unsubscribe();
			} catch (Exception e) {
				System.out.println("Exception while unsubscribe" + e);
				e.printStackTrace();
			}

			// Phase 2: no records are expected after unsubscribe. Bound the
			// retries so an empty poll cannot spin the test forever.
			int emptyPolls = 0;
			while (emptyPolls < 3) {
				ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(10000));
				for (ConsumerRecord<String, String> record : records)
					System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ",
							record.partition(), record.offset(), record.key(), record.value());

				if (records.count() > 0) {
					msgCnt += records.count();
					System.out.println("Committing records " + records.count());
					consumer.commitSync();

					if (msgCnt >= expectedMsgCnt) {
						System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt + ". Exiting Now.");
						break;
					}
				} else {
					emptyPolls++;
					System.out.println("No Record Fetched. Retrying in 1 second");
					Thread.sleep(1000);
				}
			}

		} catch (Exception e) {
			System.out.println("Exception from consumer " + e);
			e.printStackTrace();
		} finally {
			System.out.println("Closing Consumer");
			consumer.close();
		}
	}
}
45 | line.append(','); 46 | line.append(a.description()); 47 | if(a.tags().containsKey("node-id") || a.tags().containsKey("topic")) { 48 | if(a.tags().containsKey("node-id")) { 49 | line.append(','); 50 | line.append(a.tags().get("node-id")); 51 | } 52 | if(a.tags().containsKey("topic")) { 53 | line.append(','); 54 | line.append("topic-"+a.tags().get("topic")); 55 | } 56 | }else{ 57 | line.append(','); 58 | line.append(""); 59 | 60 | } 61 | 62 | 63 | 64 | line.append(','); 65 | line.append(b.metricValue().toString()); 66 | 67 | line.append("\n"); 68 | fileWriter.write(line.toString()); 69 | 70 | } catch (IOException e) { 71 | e.printStackTrace(); 72 | } 73 | 74 | }); 75 | fileWriter.close(); 76 | } catch(IOException e) { 77 | e.printStackTrace(); 78 | } 79 | } 80 | 81 | @Test 82 | public void ProducerTest() { 83 | try { 84 | 85 | Properties prop = new Properties(); 86 | prop = OkafkaSetup.setup(); 87 | prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 88 | prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 89 | Producer producer = new KafkaProducer(prop); 90 | 91 | int msgCnt = 100; 92 | for(int i=0;i producerRecord = new ProducerRecord("TEQ", i+"", "Test message # " + i); 94 | producer.send(producerRecord); 95 | } 96 | System.out.println("Produced "+ msgCnt +" messages."); 97 | 98 | Thread.sleep(9000); 99 | ProducerMetricsTest.getMetricData(producer,"afterProducingOkafka"); 100 | 101 | producer.close(); 102 | System.out.println("producer closed"); 103 | } 104 | catch(Exception e) 105 | { 106 | System.out.println("Exception in Main " + e ); 107 | e.printStackTrace(); 108 | } 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaAdmin.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.tests; 2 | 3 | import java.util.Arrays; 
4 | import java.util.concurrent.ExecutionException; 5 | 6 | import org.apache.kafka.clients.admin.Admin; 7 | import org.apache.kafka.clients.admin.CreateTopicsResult; 8 | import org.apache.kafka.clients.admin.NewTopic; 9 | import org.apache.kafka.common.KafkaFuture; 10 | import org.junit.Test; 11 | import org.oracle.okafka.clients.admin.AdminClient; 12 | 13 | public class SimpleOkafkaAdmin { 14 | 15 | @Test 16 | public void AdminTest() { 17 | 18 | try (Admin admin = AdminClient.create(OkafkaSetup.setup())) { 19 | CreateTopicsResult result = admin.createTopics(Arrays.asList(new NewTopic("TEQ", 5, (short) 1))); 20 | try { 21 | KafkaFuture ftr = result.all(); 22 | ftr.get(); 23 | System.out.println("Main Thread Out of wait now"); 24 | } catch (InterruptedException | ExecutionException e) { 25 | 26 | throw new IllegalStateException(e); 27 | } 28 | System.out.println("Auto Closing admin now"); 29 | 30 | } catch (Exception e) { 31 | System.out.println("Exception while creating topic " + e); 32 | e.printStackTrace(); 33 | } 34 | 35 | System.out.println("Main thread complete "); 36 | 37 | } 38 | } -------------------------------------------------------------------------------- /clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaConsumer.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.tests; 2 | 3 | import org.junit.Test; 4 | import java.time.Duration; 5 | import java.util.*; 6 | import org.apache.kafka.clients.consumer.Consumer; 7 | import org.apache.kafka.clients.consumer.ConsumerRecord; 8 | import org.apache.kafka.clients.consumer.ConsumerRecords; 9 | import org.apache.kafka.common.TopicPartition; 10 | import org.oracle.okafka.clients.consumer.KafkaConsumer; 11 | 12 | public class SimpleOkafkaConsumer { 13 | 14 | @Test 15 | public void ConsumerTest() { 16 | Properties prop = new Properties(); 17 | prop = OkafkaSetup.setup(); 18 | prop.put("group.id", "S1"); 19 | prop.put("max.poll.records", 
1000); 20 | prop.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 21 | prop.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); 22 | Consumer consumer = new KafkaConsumer(prop); 23 | consumer.subscribe(Arrays.asList("TEQ")); 24 | int expectedMsgCnt = 1000; 25 | int msgCnt = 0; 26 | try { 27 | while (true) { 28 | try { 29 | ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); 30 | Collection partitions = records.partitions(); 31 | for (ConsumerRecord record : records) 32 | System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), 33 | record.offset(), record.key(), record.value()); 34 | 35 | if (records != null && records.count() > 0) { 36 | msgCnt += records.count(); 37 | System.out.println("Committing records " + records.count()); 38 | consumer.commitSync(); 39 | 40 | if (msgCnt >= expectedMsgCnt) { 41 | System.out.println("Received " + msgCnt + " Expected " + expectedMsgCnt + ". Exiting Now."); 42 | break; 43 | } 44 | } else { 45 | System.out.println("No Record Fetched. 
Retrying in 1 second"); 46 | Thread.sleep(1000); 47 | } 48 | } catch (Exception e) { 49 | throw e; 50 | } 51 | } 52 | } catch (Exception e) { 53 | System.out.println("Exception from consumer " + e); 54 | e.printStackTrace(); 55 | } finally { 56 | System.out.println("Closing Consumer"); 57 | consumer.close(); 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /clients/src/test/java/org/oracle/okafka/tests/SimpleOkafkaProducer.java: -------------------------------------------------------------------------------- 1 | package org.oracle.okafka.tests; 2 | 3 | import org.junit.Test; 4 | import org.oracle.okafka.clients.producer.KafkaProducer; 5 | import java.util.Properties; 6 | import java.util.concurrent.Future; 7 | import org.apache.kafka.clients.producer.Producer; 8 | import org.apache.kafka.clients.producer.ProducerRecord; 9 | import org.apache.kafka.clients.producer.RecordMetadata; 10 | 11 | public class SimpleOkafkaProducer { 12 | 13 | @Test 14 | public void ProducerTest() { 15 | try { 16 | Properties prop = OkafkaSetup.setup(); 17 | prop.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 18 | prop.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer"); 19 | Producer producer = new KafkaProducer(prop); 20 | Future lastFuture = null; 21 | int msgCnt = 1000; 22 | for (int i = 0; i < msgCnt; i++) { 23 | ProducerRecord producerRecord = new ProducerRecord("TEQ", i + "", 24 | "Test message # " + i); 25 | lastFuture = producer.send(producerRecord); 26 | } 27 | System.out.println("Produced " + msgCnt + " messages."); 28 | lastFuture.get(); 29 | producer.close(); 30 | } catch (Exception e) { 31 | System.out.println("Exception in Main " + e); 32 | e.printStackTrace(); 33 | } 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /clients/src/test/java/org/oracle/okafka/tests/TestRunner.java: 
-------------------------------------------------------------------------------- 1 | package org.oracle.okafka.tests; 2 | 3 | import org.junit.runner.JUnitCore; 4 | import org.junit.runner.Result; 5 | import org.junit.runner.notification.Failure; 6 | 7 | class TestRunner { 8 | public static void main(String[] args) { 9 | 10 | Result result = new Result(); 11 | 12 | result = JUnitCore.runClasses(SimpleOkafkaAdmin.class, SimpleOkafkaProducer.class, OkafkaListOffsets.class, 13 | OkafkaAutoOffsetReset.class, OkafkaFetchCommittedOffset.class, ListConsumerGroups.class, 14 | ListConsumerGroupOffsets.class, SimpleOkafkaProducer.class, OkafkaSeekToEnd.class, 15 | OkafkaSeekToBeginning.class, SimpleOkafkaProducer.class, OkafkaUnsubscribe.class, 16 | ProducerMetricsTest.class, ConsumerMetricsTest.class, DeleteConsumerGroups.class, 17 | OkafkaDescribeTopics.class, OkafkaListTopics.class, OkafkaDescribeTopicsById.class, 18 | OkafkaDeleteTopic.class, OkafkaDeleteTopicById.class); 19 | 20 | for (Failure failure : result.getFailures()) { 21 | System.out.println("Test failure : " + failure.toString()); 22 | } 23 | System.out.println("Tests ran succesfully: " + result.wasSuccessful()); 24 | } 25 | } -------------------------------------------------------------------------------- /clients/src/test/java/test.config: -------------------------------------------------------------------------------- 1 | #OKafka common properties for common examples 2 | 3 | #Properties to connect to Oracle Database 4 | #Option 1: Connect to Oracle database using plaintext 5 | 6 | security.protocol=PLAINTEXT 7 | bootstrap.servers=localhost:1521 8 | oracle.service.name=FREEPDB1 9 | oracle.net.tns_admin=./src/test/java 10 | 11 | #Option 2: Connect to Oracle Database deployed in Oracle Autonomous Cloud using Wallet 12 | #security.protocol=SSL 13 | #oracle.net.tns_admin= 14 | #tns.alias= -------------------------------------------------------------------------------- /connectors/.gitignore: 
-------------------------------------------------------------------------------- 1 | target/ 2 | .settings 3 | .classpath 4 | .project 5 | ../.metadata/ 6 | /.metadata/ 7 | -------------------------------------------------------------------------------- /connectors/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | ## 0.1.0 (2022-08-11) 4 | 5 | - design connect source 6 | - use of aqjms 7 | -------------------------------------------------------------------------------- /connectors/SECURITY.md: -------------------------------------------------------------------------------- 1 | ### Reporting Security Vulnerabilities ### 2 | The Oracle Database team values the independent security research community and believes that responsible disclosure of security vulnerabilities in okafka.jar helps us ensure the security and privacy of all our users. 3 | If you believe you have found a security vulnerability, please submit a report to secalert_us@oracle.com preferably with a proof of concept. Please refer to Reporting Vulnerabilities for additional information including our public encryption key for secure email. We ask that you do not contact project contributors directly or through other channels about a report. 4 | 5 | ### Security Updates, Alerts and Bulletins ### 6 | okafka.jar security updates will be released on a quarterly basis in conjunction with Oracle TxEventQ security updates that are part of the Oracle Critical Patch Update program. Security updates are released on the Tuesday closest to the 17th day of January, April, July and October. A pre-release announcement will be published on the Thursday preceding each Critical Patch Update release. For additional information including past advisories, please refer to Security Alerts. 
7 | 8 | ### Security-Related Information ### 9 | Non-vulnerability related security issues may be discussed on GitHub Issues or the Security channel in the Oracle Database community forums. 10 | -------------------------------------------------------------------------------- /connectors/src/main/java/oracle/jdbc/txeventq/kafka/connect/common/utils/Constants.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** Kafka Connect for TxEventQ. 3 | ** 4 | ** Copyright (c) 2023, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | package oracle.jdbc.txeventq.kafka.connect.common.utils; 26 | 27 | /** 28 | * Contains a collection of defined constants. 
29 | */ 30 | public final class Constants { 31 | 32 | private Constants() { 33 | } 34 | 35 | /** 36 | * Retriable Ora Errors 37 | */ 38 | // Timeout or End-of-Fetch Error When Dequeuing Messages 39 | public static final int ORA_25228 = 25228; 40 | 41 | // IO Error: Connection reset 42 | public static final int ORA_17002 = 17002; 43 | 44 | // Closed Connection. 45 | public static final int ORA_17008 = 17008; 46 | 47 | // TNS no listener. 48 | public static final int ORA_12541 = 12541; 49 | 50 | // Unknown host specified 51 | public static final int ORA_17868 = 17868; 52 | 53 | // No more data to read from socket 54 | public static final int ORA_17410 = 17410; 55 | 56 | // ORACLE initialization or shutdown in progress 57 | public static final int ORA_01033 = 1033; 58 | 59 | // The Oracle instance is not available for use. Start the instance. 60 | public static final int ORA_01034 = 1034; 61 | 62 | // Immediate shutdown or close in progress 63 | public static final int ORA_01089 = 1089; 64 | 65 | // Cannot connect to event stream owner instance {} of database {} 66 | public static final int ORA_24221 = 24221; 67 | 68 | // Cannot connect to shard owner instance {} of database {} 69 | public static final int ORA_25348 = 25348; 70 | 71 | // Session is closed. 
72 | public static final int JMS_131 = 131; 73 | 74 | // Database not open 75 | public static final int ORA_01109 = 1109; 76 | 77 | // Closed statement 78 | public static final int ORA_17009 = 17009; 79 | 80 | // Got minus one from a read call 81 | public static final int ORA_17800 = 17800; 82 | 83 | // Timeout occurred while waiting for lock to flush object string data object string 84 | public static final int ORA_62187 = 62187; 85 | 86 | // Invalid credential or not authorized; logon denied 87 | public static final int ORA_01017 = 1017; 88 | 89 | // Interrupted IO error.: Socket read interrupted 90 | public static final int ORA_18730 = 18730; 91 | 92 | // Database connection closed by peer 93 | public static final int ORA_03113 = 3113; 94 | 95 | // Cannot connect to database. Instance ... 96 | public static final int ORA_12521 = 12521; 97 | } 98 | -------------------------------------------------------------------------------- /connectors/src/main/java/oracle/jdbc/txeventq/kafka/connect/common/utils/Node.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** Kafka Connect for TxEventQ. 3 | ** 4 | ** Copyright (c) 2024, 2025 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. 
You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | package oracle.jdbc.txeventq.kafka.connect.common.utils; 26 | 27 | public class Node { 28 | 29 | private final int id; 30 | private final String instanceName; 31 | 32 | public Node(int id, String instanceName) { 33 | if (id <= 0) 34 | id = 0; 35 | this.id = id; 36 | this.instanceName = instanceName; 37 | } 38 | 39 | public int getId() { 40 | return id; 41 | } 42 | 43 | public String getInstanceName() { 44 | return instanceName; 45 | } 46 | 47 | @Override 48 | public String toString() { 49 | return "Node [id=" + id + ", instanceName=" + instanceName; 50 | } 51 | 52 | } 53 | -------------------------------------------------------------------------------- /connectors/src/main/java/oracle/jdbc/txeventq/kafka/connect/schema/JmsDestination.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** Kafka Connect for TxEventQ. 3 | ** 4 | ** Copyright (c) 2024, 2025 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. 
You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | package oracle.jdbc.txeventq.kafka.connect.schema; 26 | 27 | import java.sql.SQLException; 28 | 29 | import javax.jms.Destination; 30 | import javax.jms.JMSException; 31 | 32 | import org.apache.kafka.connect.data.Schema; 33 | import org.apache.kafka.connect.data.SchemaBuilder; 34 | import org.apache.kafka.connect.data.Struct; 35 | import org.slf4j.Logger; 36 | import org.slf4j.LoggerFactory; 37 | 38 | import oracle.jdbc.txeventq.kafka.connect.common.utils.JmsUtils; 39 | 40 | public class JmsDestination { 41 | protected static final Logger log = LoggerFactory.getLogger(JmsDestination.class); 42 | 43 | public static final Schema SCHEMA_JMSDESTINATION_V1 = SchemaBuilder.struct() 44 | .name("JMSDestination").version(1).field("type", Schema.STRING_SCHEMA) 45 | .field("name", Schema.STRING_SCHEMA).field("owner", Schema.OPTIONAL_STRING_SCHEMA) 46 | .field("completeName", Schema.OPTIONAL_STRING_SCHEMA) 47 | .field("completeTableName", Schema.OPTIONAL_STRING_SCHEMA).optional().build(); 48 | 49 | private final String type; 50 | private final String name; 51 | private final String owner; 52 | private final String completeName; 53 | private final String completeTableName; 54 | 55 | /** 56 | * Constructs a JmsDestination object. 57 | * 58 | * @param destination The Destination object to get information from. 
59 | * @throws JMSException 60 | * @throws SQLException 61 | */ 62 | public JmsDestination(Destination destination) throws JMSException, SQLException { 63 | log.trace("Entry {}.JmsDestination", this.getClass().getName()); 64 | this.type = JmsUtils.destinationType(destination); 65 | this.name = JmsUtils.destinationName(destination); 66 | this.owner = JmsUtils.destinationOwner(destination); 67 | this.completeName = JmsUtils.destinationCompleteName(destination); 68 | this.completeTableName = JmsUtils.destinationCompleteTableName(destination); 69 | log.trace("Exit {}.JmsDestination", this.getClass().getName()); 70 | } 71 | 72 | /** 73 | * Creates a structured record for a JmsDestination. 74 | * 75 | * @return A structured record containing a set of named fields with values, each field using an 76 | * independent Schema. 77 | */ 78 | public Struct toJmsDestinationStructV1() { 79 | log.trace("Entry {}.toJmsDestinationStructV1", this.getClass().getName()); 80 | log.trace("Exit {}.toJmsDestinationStructV1", this.getClass().getName()); 81 | return new Struct(SCHEMA_JMSDESTINATION_V1).put("type", this.type).put("name", this.name) 82 | .put("owner", this.owner).put("completeName", this.completeName) 83 | .put("completeTableName", this.completeTableName); 84 | } 85 | 86 | } 87 | -------------------------------------------------------------------------------- /connectors/src/main/java/oracle/jdbc/txeventq/kafka/connect/schema/Key.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** Kafka Connect for TxEventQ. 3 | ** 4 | ** Copyright (c) 2024, 2025 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 
12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | package oracle.jdbc.txeventq.kafka.connect.schema; 26 | 27 | import org.apache.kafka.connect.data.Schema; 28 | import org.apache.kafka.connect.data.SchemaBuilder; 29 | import org.apache.kafka.connect.data.Struct; 30 | import org.slf4j.Logger; 31 | import org.slf4j.LoggerFactory; 32 | 33 | /** 34 | * This schema is used to store the incoming correlation value on the message interface. If the 35 | * correlation is specified it will be used as the key for the Kafka topic. 36 | * 37 | */ 38 | public class Key { 39 | 40 | protected static final Logger log = LoggerFactory.getLogger(Key.class); 41 | public static final Schema SCHEMA_KEY_V1 = SchemaBuilder.struct().name("Key").version(1) 42 | .field("correlation", Schema.STRING_SCHEMA).optional().build(); 43 | 44 | private final String correlation; 45 | 46 | /** 47 | * Creates a Key with the specified key value. 48 | * 49 | * @param keyValue The key value that will be used by Kafka which will be the correlation id of 50 | * the JMSMessage. 51 | */ 52 | public Key(String keyValue) { 53 | log.trace("Entry {}.Key", this.getClass().getName()); 54 | this.correlation = keyValue; 55 | log.trace("Exit {}.Key", this.getClass().getName()); 56 | } 57 | 58 | /** 59 | * Creates a structured record for Key. 
/*
** Kafka Connect for TxEventQ.
**
** Copyright (c) 2023, 2024 Oracle and/or its affiliates.
** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
*/

/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package oracle.jdbc.txeventq.kafka.connect.sink;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.sink.SinkConnector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import oracle.jdbc.txeventq.kafka.connect.common.utils.AppInfoParser;
import oracle.jdbc.txeventq.kafka.connect.sink.task.TxEventQSinkTask;
import oracle.jdbc.txeventq.kafka.connect.sink.utils.TxEventQSinkConfig;

/**
 * Kafka Connect SinkConnector that forwards records from Kafka topics into an
 * Oracle TxEventQ queue.
 *
 * Fix: the source had lost its generic type parameters (raw {@code Map},
 * {@code List>}, and a raw {@code (Class)} cast on taskClass()); they are
 * restored here to match the {@link SinkConnector} contract, which also
 * removes the unchecked cast.
 */
public class TxEventQSinkConnector extends SinkConnector {
    private static final Logger log = LoggerFactory.getLogger(TxEventQSinkConnector.class);

    // Connector configuration as supplied by the Connect framework; handed
    // verbatim to every task in taskConfigs().
    private Map<String, String> configProperties;

    @Override
    public String version() {
        return AppInfoParser.getVersion();
    }

    @Override
    public void start(Map<String, String> originalProps) {
        log.trace("Entry {}.start", this.getClass().getName());
        this.configProperties = originalProps;
        log.trace("Exit {}.start", this.getClass().getName());
    }

    @Override
    public Class<? extends Task> taskClass() {
        // Parameterized return type makes the previous raw cast unnecessary.
        return TxEventQSinkTask.class;
    }

    @Override
    public List<Map<String, String>> taskConfigs(int maxTasks) {
        log.debug("Setting task configurations for {} workers.", maxTasks);
        // Every worker receives an identical copy of the connector config.
        final List<Map<String, String>> configs = new ArrayList<>(maxTasks);
        for (int i = 0; i < maxTasks; ++i) {
            configs.add(configProperties);
        }
        return configs;
    }

    @Override
    public void stop() {
        log.debug("[{}] Stopping Oracle TxEventQ Sink Connector", Thread.currentThread().getId());
    }

    @Override
    public ConfigDef config() {
        return TxEventQSinkConfig.getConfig();
    }

}
/connectors/src/main/java/oracle/jdbc/txeventq/kafka/connect/source/TxEventQSourceConnector.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** Kafka Connect for TxEventQ. 3 | ** 4 | ** Copyright (c) 2023, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 
23 | */ 24 | 25 | package oracle.jdbc.txeventq.kafka.connect.source; 26 | 27 | import java.util.ArrayList; 28 | import java.util.List; 29 | import java.util.Map; 30 | 31 | import org.apache.kafka.common.config.ConfigDef; 32 | import org.apache.kafka.connect.connector.Connector; 33 | import org.apache.kafka.connect.connector.Task; 34 | import org.apache.kafka.connect.source.SourceConnector; 35 | import org.slf4j.Logger; 36 | import org.slf4j.LoggerFactory; 37 | 38 | import oracle.jdbc.txeventq.kafka.connect.common.utils.AppInfoParser; 39 | import oracle.jdbc.txeventq.kafka.connect.source.task.TxEventQSourceTask; 40 | import oracle.jdbc.txeventq.kafka.connect.source.utils.TxEventQConnectorConfig; 41 | 42 | /** 43 | * TxEventQSourceConnector is a connector interface that will pull data from an ORACLE TxEventQ and 44 | * send it to Kafka. 45 | */ 46 | public class TxEventQSourceConnector extends SourceConnector { 47 | private static final Logger log = LoggerFactory.getLogger(TxEventQSourceConnector.class); 48 | 49 | private Map configProperties; 50 | 51 | /** 52 | * Get the version of this task. Usually this should be the same as the corresponding 53 | * {@link Connector} class's version. 54 | * 55 | * @return the version, formatted as a String 56 | */ 57 | @Override 58 | public String version() { 59 | return AppInfoParser.getVersion(); 60 | } 61 | 62 | /** 63 | * Returns the Task implementation for this Connector. 64 | */ 65 | @Override 66 | public Class taskClass() { 67 | return (Class) TxEventQSourceTask.class; 68 | } 69 | 70 | /** 71 | * Returns a set of configurations for Tasks based on the current configuration, producing at 72 | * most count configurations. 
73 | * 74 | * @param maxTasks maximum number of configurations to generate 75 | * @return configurations for Tasks 76 | */ 77 | @Override 78 | public List> taskConfigs(int maxTasks) { 79 | log.debug("Setting task configurations for {} workers.", maxTasks); 80 | final List> configs = new ArrayList<>(maxTasks); 81 | for (int i = 0; i < maxTasks; ++i) { 82 | configs.add(configProperties); 83 | } 84 | return configs; 85 | } 86 | 87 | @Override 88 | public void start(Map originalProps) { 89 | log.trace("Entry {}.start,", this.getClass().getName()); 90 | this.configProperties = originalProps; 91 | log.trace("Exit {}.start,", this.getClass().getName()); 92 | } 93 | 94 | @Override 95 | public void stop() { 96 | log.trace("Entry {}.stop,", this.getClass().getName()); 97 | log.trace("Exit {}.stop,", this.getClass().getName()); 98 | } 99 | 100 | /** 101 | * Define the configuration for the connector. 102 | * 103 | * @return The ConfigDef for this connector. 104 | */ 105 | @Override 106 | public ConfigDef config() { 107 | return TxEventQConnectorConfig.getConfig(); 108 | } 109 | } 110 | -------------------------------------------------------------------------------- /connectors/src/main/java/oracle/jdbc/txeventq/kafka/connect/source/utils/TxEventQSourceRecord.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** Kafka Connect for TxEventQ. 3 | ** 4 | ** Copyright (c) 2023, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | /* 9 | * Licensed to the Apache Software Foundation (ASF) under one or more 10 | * contributor license agreements. See the NOTICE file distributed with 11 | * this work for additional information regarding copyright ownership. 12 | * The ASF licenses this file to You under the Apache License, Version 2.0 13 | * (the "License"); you may not use this file except in compliance with 14 | * the License. 
You may obtain a copy of the License at 15 | * 16 | * http://www.apache.org/licenses/LICENSE-2.0 17 | * 18 | * Unless required by applicable law or agreed to in writing, software 19 | * distributed under the License is distributed on an "AS IS" BASIS, 20 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 21 | * See the License for the specific language governing permissions and 22 | * limitations under the License. 23 | */ 24 | 25 | package oracle.jdbc.txeventq.kafka.connect.source.utils; 26 | 27 | import java.util.Map; 28 | 29 | import org.apache.kafka.connect.data.Schema; 30 | import org.apache.kafka.connect.source.SourceRecord; 31 | 32 | /** 33 | * A message is the unit that is enqueued or dequeued. An TEQ Message object holds both its content, 34 | * or payload, and its properties. This class provides methods to get and set message properties and 35 | * the payload. 36 | * 37 | * @param 38 | */ 39 | 40 | public class TxEventQSourceRecord extends SourceRecord { 41 | 42 | // the id (16 bytes) of this message. 
43 | private byte[] messageId = new byte[0]; 44 | 45 | public enum PayloadType { 46 | RAW, JSON, JMS_BYTES, JMS_TEXT, JMS_MAP 47 | } 48 | 49 | private final PayloadType payloadType; 50 | 51 | public TxEventQSourceRecord(Map sourcePartition, Map sourceOffset, 52 | String topic, Integer partition, Schema valueSchema, Object value, PayloadType type, 53 | byte[] msgId) { 54 | super(sourcePartition, sourceOffset, topic, partition, valueSchema, value); 55 | this.payloadType = type; 56 | this.messageId = msgId; 57 | } 58 | 59 | public TxEventQSourceRecord(Map sourcePartition, Map sourceOffset, 60 | String topic, Integer partition, Schema keySchema, java.lang.Object key, 61 | Schema valueSchema, Object value, PayloadType type, byte[] msgId) { 62 | super(sourcePartition, sourceOffset, topic, partition, keySchema, key, valueSchema, value); 63 | this.payloadType = type; 64 | this.messageId = msgId; 65 | } 66 | 67 | public TxEventQSourceRecord(Map sourcePartition, Map sourceOffset, 68 | String topic, Schema valueSchema, Object value, PayloadType type, byte[] msgId) { 69 | super(sourcePartition, sourceOffset, topic, valueSchema, value); 70 | this.payloadType = type; 71 | this.messageId = msgId; 72 | } 73 | 74 | public TxEventQSourceRecord(Map sourcePartition, Map sourceOffset, 75 | String topic, Schema keySchema, java.lang.Object key, Schema valueSchema, Object value, 76 | PayloadType type, byte[] msgId) { 77 | super(sourcePartition, sourceOffset, topic, keySchema, key, valueSchema, value); 78 | this.payloadType = type; 79 | this.messageId = msgId; 80 | } 81 | 82 | public PayloadType getPayloadType() { 83 | return payloadType; 84 | } 85 | 86 | public String getMessageId() { 87 | return byteArrayToHex(messageId); 88 | } 89 | 90 | private static String byteArrayToHex(byte[] a) { 91 | StringBuilder sb = new StringBuilder(a.length * 2); 92 | for (byte b : a) 93 | sb.append(String.format("%02x", b)); 94 | return sb.toString(); 95 | } 96 | } 97 | 
-------------------------------------------------------------------------------- /connectors/src/main/resources/connect-txeventq-sink.properties: -------------------------------------------------------------------------------- 1 | # 2 | ## Kafka Connect for TxEventQ. 3 | ## 4 | ## Copyright (c) 2023, 2024 Oracle and/or its affiliates. 5 | ## Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | # 7 | 8 | # Licensed to the Apache Software Foundation (ASF) under one or more 9 | # contributor license agreements. See the NOTICE file distributed with 10 | # this work for additional information regarding copyright ownership. 11 | # The ASF licenses this file to You under the Apache License, Version 2.0 12 | # (the "License"); you may not use this file except in compliance with 13 | # the License. You may obtain a copy of the License at 14 | # 15 | # http://www.apache.org/licenses/LICENSE-2.0 16 | # 17 | # Unless required by applicable law or agreed to in writing, software 18 | # distributed under the License is distributed on an "AS IS" BASIS, 19 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 20 | # See the License for the specific language governing permissions and 21 | # limitations under the License. 22 | 23 | name=TxEventQ-sink 24 | connector.class=oracle.jdbc.txeventq.kafka.connect.sink.TxEventQSinkConnector 25 | 26 | # Maximum number of tasks to use for this connector. 27 | tasks.max=1 28 | 29 | # The Kafka topic to read the data from. 30 | # Note: This property will need to be updated before the Sink Connector can connect. 31 | topic= 32 | 33 | # Indicate the directory location of where the Oracle wallet is place i.e. C:/tmp/wallet. 34 | # The cwallet.sso and ewallet.p12 files should be placed into this directory. 35 | # Oracle Wallet provides a simple and easy method to manage database credentials across multiple domains. 
36 | # We will be using the Oracle TNS (Transport Network Substrate) administrative file to hide the details 37 | # of the database connection string (host name, port number, and service name) from the datasource definition 38 | # and instead us an alias. 39 | # Note: This property will need to be updated before the Sink Connector can connect. 40 | wallet.path= 41 | 42 | # Indicate the directory location of the where the tnsnames.ora location is located i.e C:/tmp/tnsnames. 43 | # The entry in the tnsnames.ora should have the following format: 44 | # = (DESCRIPTION =(ADDRESS_LIST =(ADDRESS = (PROTOCOL = TCP)(Host = )(Port = )))(CONNECT_DATA =(SERVICE_NAME = ))) 45 | # Note: This property will need to be updated before the Sink Connector can connect. 46 | tnsnames.path= 47 | 48 | # The TNS alias name for the database to connect to stored in the tnsnames.ora. 49 | # An Oracle Wallet must be created and will be used to connect to the database. 50 | # Note: This property will need to be updated before the Sink Connector can connect. 51 | db_tns_alias= 52 | 53 | # The TxEventQ to put the Kafka data into. 54 | # Note: This property will need to be updated before the Sink Connector can connect. 55 | txeventq.queue.name= 56 | 57 | # The name of the schema for the txEventQ queue specified in the txeventq.queue.name property. 58 | # Note: This property will need to be updated to ensure exactly-once delivery. 59 | txeventq.queue.schema= 60 | 61 | # List of Kafka brokers used for bootstrapping 62 | # format: host1:port1,host2:port2 ... 63 | # Note: This property will need to be updated before the Sink Connector can connect. 64 | bootstrap.servers= 65 | 66 | # Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. 67 | # This controls the format of the keys in messages written to or read from Kafka, and since this is independent 68 | # of connectors it allows any connector to work with any serialization format. 
69 | key.converter=org.apache.kafka.connect.storage.StringConverter 70 | 71 | # Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. 72 | # This controls the format of the values in messages written to or read from Kafka, and since this is independent 73 | # of connectors it allows any connector to work with any serialization format. 74 | value.converter=org.apache.kafka.connect.storage.StringConverter 75 | -------------------------------------------------------------------------------- /connectors/src/main/resources/kafka-connect-oracle-version.properties: -------------------------------------------------------------------------------- 1 | name="Kafka Connect for Oracle" 2 | version=23.8.0.25.06 3 | commitId=1 -------------------------------------------------------------------------------- /connectors/src/main/resources/logback.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | %d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n 11 | 12 | 13 | 14 | 15 | 16 | 17 | -------------------------------------------------------------------------------- /examples/consumer/src/main/java/org/oracle/okafka/examples/ConsumerOKafka.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 
6 | */ 7 | 8 | package org.oracle.okafka.examples; 9 | 10 | import java.io.FileNotFoundException; 11 | import java.io.IOException; 12 | import java.io.InputStream; 13 | import java.util.Properties; 14 | import java.time.Duration; 15 | import java.util.Arrays; 16 | 17 | import org.oracle.okafka.clients.consumer.KafkaConsumer; 18 | 19 | import org.apache.kafka.clients.consumer.Consumer; 20 | import org.apache.kafka.clients.consumer.ConsumerRecords; 21 | import org.apache.kafka.clients.consumer.ConsumerRecord; 22 | import org.apache.kafka.common.TopicPartition; 23 | 24 | public class ConsumerOKafka { 25 | public static void main(String[] args) { 26 | System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "DEBUG"); 27 | 28 | // Get application properties 29 | Properties appProperties = null; 30 | try { 31 | appProperties = getProperties(); 32 | if (appProperties == null) { 33 | System.out.println("Application properties not found!"); 34 | System.exit(-1); 35 | } 36 | } catch (Exception e) { 37 | System.out.println("Application properties not found!"); 38 | System.out.println("Exception: " + e); 39 | System.exit(-1); 40 | } 41 | 42 | String topic = appProperties.getProperty("topic.name", "TXEQ"); 43 | appProperties.remove("topic.name"); // Pass props to build OKafkaProducer 44 | 45 | KafkaConsumer consumer = new KafkaConsumer<>(appProperties); 46 | consumer.subscribe(Arrays.asList(topic)); 47 | 48 | 49 | try { 50 | while(true) { 51 | ConsumerRecords records = consumer.poll(Duration.ofMillis(10000)); 52 | 53 | for (ConsumerRecord record : records) 54 | System.out.printf("partition = %d, offset = %d, key = %s, value =%s\n ", record.partition(), record.offset(), record.key(), record.value()); 55 | 56 | if (records != null && records.count() > 0) { 57 | System.out.println("Committing records" + records.count()); 58 | consumer.commitSync(); 59 | } else { 60 | System.out.println("No Record Fetched. 
Retrying in 1 second"); 61 | Thread.sleep(1000); 62 | } 63 | } 64 | }catch(Exception e) 65 | { 66 | System.out.println("Exception from consumer " + e); 67 | e.printStackTrace(); 68 | } 69 | finally { 70 | consumer.close(); 71 | } 72 | 73 | } 74 | 75 | private static java.util.Properties getProperties() throws IOException { 76 | InputStream inputStream = null; 77 | Properties appProperties = null; 78 | 79 | try { 80 | Properties prop = new Properties(); 81 | String propFileName = "config.properties"; 82 | inputStream = ConsumerOKafka.class.getClassLoader().getResourceAsStream(propFileName); 83 | if (inputStream != null) { 84 | prop.load(inputStream); 85 | } else { 86 | throw new FileNotFoundException("property file '" + propFileName + "' not found."); 87 | } 88 | appProperties = prop; 89 | 90 | } catch (Exception e) { 91 | System.out.println("Exception: " + e); 92 | throw e; 93 | } finally { 94 | inputStream.close(); 95 | } 96 | return appProperties; 97 | } 98 | } -------------------------------------------------------------------------------- /examples/consumer/src/main/resources/config.properties: -------------------------------------------------------------------------------- 1 | # OKafka Consumer example properties 2 | 3 | #Properties to connect to Oracle Database 4 | #Option 1: Connect to Oracle database using plaintext 5 | bootstrap.servers= 6 | oracle.service.name= 7 | oracle.net.tns_admin= 8 | 9 | 10 | #Option 2: Connect to Oracle Database deployed in Oracle Autonomous Cloud using Wallet 11 | #security.protocol=SSL 12 | #oracle.net.tns_admin= 13 | #tns.alias= 14 | 15 | # Application specific OKafka consumer properties 16 | topic.name= 17 | group.id= 18 | 19 | enable.auto.commit=true 20 | max.poll.records=1000 21 | default.api.timeout.ms=180000 22 | 23 | key.deserializer=org.apache.kafka.common.serialization.StringDeserializer 24 | value.deserializer=org.apache.kafka.common.serialization.StringDeserializer 25 | 26 | 
-------------------------------------------------------------------------------- /examples/ojdbc.properties: -------------------------------------------------------------------------------- 1 | user= 2 | password= -------------------------------------------------------------------------------- /examples/producer/src/main/java/org/oracle/okafka/examples/ProducerOKafka.java: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | package org.oracle.okafka.examples; 9 | 10 | import org.oracle.okafka.clients.producer.KafkaProducer; 11 | 12 | import org.apache.kafka.common.header.internals.RecordHeader; 13 | import org.apache.kafka.clients.producer.Producer; 14 | import org.apache.kafka.clients.producer.ProducerRecord; 15 | import org.apache.kafka.clients.producer.RecordMetadata; 16 | 17 | import java.io.FileNotFoundException; 18 | import java.io.IOException; 19 | import java.io.InputStream; 20 | import java.util.ArrayList; 21 | import java.util.Properties; 22 | import java.util.concurrent.Future; 23 | 24 | public class ProducerOKafka { 25 | 26 | public static void main(String[] args) { 27 | System.setProperty("org.slf4j.simpleLogger.defaultLogLevel", "DEBUG"); 28 | 29 | // Get application properties 30 | Properties appProperties = null; 31 | try { 32 | appProperties = getProperties(); 33 | if (appProperties == null) { 34 | System.out.println("Application properties not found!"); 35 | System.exit(-1); 36 | } 37 | } catch (Exception e) { 38 | System.out.println("Application properties not found!"); 39 | System.out.println("Exception: " + e); 40 | System.exit(-1); 41 | } 42 | 43 | String topic = appProperties.getProperty("topic.name", "TXEQ"); 44 | appProperties.remove("topic.name"); // Pass props to build 
OKafkaProducer 45 | 46 | Producer producer = new KafkaProducer<>(appProperties); 47 | 48 | String baseMsg = "This is test with 128 characters Payload used to test Oracle Kafka. "+ 49 | "Read https://github.com/oracle/okafka/blob/master/README.md"; 50 | 51 | Future lastFuture = null; 52 | int msgCnt = 10; 53 | String key = "Just some key for OKafka"; 54 | ArrayList> metadataList = new ArrayList<>(); 55 | 56 | try { 57 | for(int i=0;i producerRecord = 61 | new ProducerRecord<>(topic, key+i, i+ baseMsg); 62 | producerRecord.headers().add(rH1).add(rH2); 63 | lastFuture = producer.send(producerRecord); 64 | metadataList.add(lastFuture); 65 | } 66 | RecordMetadata rd = lastFuture.get(); 67 | System.out.println("Last record placed in " + rd.partition() + " Offset " + rd.offset()); 68 | } 69 | catch(Exception e) { 70 | System.out.println("Failed to send messages:"); 71 | e.printStackTrace(); 72 | } 73 | finally { 74 | System.out.println("Initiating close"); 75 | producer.close(); 76 | } 77 | 78 | } 79 | 80 | private static java.util.Properties getProperties() throws IOException { 81 | InputStream inputStream = null; 82 | Properties appProperties; 83 | 84 | try { 85 | Properties prop = new Properties(); 86 | String propFileName = "config.properties"; 87 | inputStream = ProducerOKafka.class.getClassLoader().getResourceAsStream(propFileName); 88 | if (inputStream != null) { 89 | prop.load(inputStream); 90 | } else { 91 | throw new FileNotFoundException("property file '" + propFileName + "' not found."); 92 | } 93 | appProperties = prop; 94 | 95 | } catch (Exception e) { 96 | System.out.println("Exception: " + e); 97 | throw e; 98 | } finally { 99 | if (inputStream != null) 100 | inputStream.close(); 101 | } 102 | return appProperties; 103 | } 104 | 105 | } 106 | -------------------------------------------------------------------------------- /examples/producer/src/main/resources/config.properties: 
-------------------------------------------------------------------------------- 1 | # OKafka Producer example properties 2 | 3 | #Properties to connect to Oracle Database 4 | #Option 1: Connect to Oracle database using plaintext 5 | bootstrap.servers= 6 | oracle.service.name= 7 | oracle.net.tns_admin= 8 | 9 | 10 | #Option 2: Connect to Oracle Database deployed in Oracle Autonomous Cloud using Wallet 11 | #security.protocol=SSL 12 | #oracle.net.tns_admin= 13 | #tns.alias= 14 | 15 | #Application specific OKafka Producer properties 16 | topic.name= 17 | 18 | batch.size=200 19 | linger.ms=100 20 | buffer.memory=335544 21 | 22 | enable.idempotence=true 23 | key.serializer=org.apache.kafka.common.serialization.StringSerializer 24 | value.serializer=org.apache.kafka.common.serialization.StringSerializer 25 | 26 | -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oracle/okafka/0a5e80fac0b208093b68a2e2d00a760bad6ad02a/gradle/wrapper/gradle-wrapper.jar -------------------------------------------------------------------------------- /gradle/wrapper/gradle-wrapper.properties: -------------------------------------------------------------------------------- 1 | distributionBase=GRADLE_USER_HOME 2 | distributionPath=wrapper/dists 3 | distributionUrl=https\://services.gradle.org/distributions/gradle-7.3-bin.zip 4 | zipStoreBase=GRADLE_USER_HOME 5 | zipStorePath=wrapper/dists 6 | -------------------------------------------------------------------------------- /gradlew.bat: -------------------------------------------------------------------------------- 1 | @rem 2 | @rem Copyright 2015 the original author or authors. 3 | @rem 4 | @rem Licensed under the Apache License, Version 2.0 (the "License"); 5 | @rem you may not use this file except in compliance with the License. 
6 | @rem You may obtain a copy of the License at 7 | @rem 8 | @rem https://www.apache.org/licenses/LICENSE-2.0 9 | @rem 10 | @rem Unless required by applicable law or agreed to in writing, software 11 | @rem distributed under the License is distributed on an "AS IS" BASIS, 12 | @rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | @rem See the License for the specific language governing permissions and 14 | @rem limitations under the License. 15 | @rem 16 | 17 | @if "%DEBUG%" == "" @echo off 18 | @rem ########################################################################## 19 | @rem 20 | @rem Gradle startup script for Windows 21 | @rem 22 | @rem ########################################################################## 23 | 24 | @rem Set local scope for the variables with windows NT shell 25 | if "%OS%"=="Windows_NT" setlocal 26 | 27 | set DIRNAME=%~dp0 28 | if "%DIRNAME%" == "" set DIRNAME=. 29 | set APP_BASE_NAME=%~n0 30 | set APP_HOME=%DIRNAME% 31 | 32 | @rem Resolve any "." and ".." in APP_HOME to make it shorter. 33 | for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi 34 | 35 | @rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. 36 | set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m" 37 | 38 | @rem Find java.exe 39 | if defined JAVA_HOME goto findJavaFromJavaHome 40 | 41 | set JAVA_EXE=java.exe 42 | %JAVA_EXE% -version >NUL 2>&1 43 | if "%ERRORLEVEL%" == "0" goto execute 44 | 45 | echo. 46 | echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. 47 | echo. 48 | echo Please set the JAVA_HOME variable in your environment to match the 49 | echo location of your Java installation. 50 | 51 | goto fail 52 | 53 | :findJavaFromJavaHome 54 | set JAVA_HOME=%JAVA_HOME:"=% 55 | set JAVA_EXE=%JAVA_HOME%/bin/java.exe 56 | 57 | if exist "%JAVA_EXE%" goto execute 58 | 59 | echo. 60 | echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% 61 | echo. 
62 | echo Please set the JAVA_HOME variable in your environment to match the 63 | echo location of your Java installation. 64 | 65 | goto fail 66 | 67 | :execute 68 | @rem Setup the command line 69 | 70 | set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar 71 | 72 | 73 | @rem Execute Gradle 74 | "%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* 75 | 76 | :end 77 | @rem End local scope for the variables with windows NT shell 78 | if "%ERRORLEVEL%"=="0" goto mainEnd 79 | 80 | :fail 81 | rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of 82 | rem the _cmd.exe /c_ return code! 83 | if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 84 | exit /b 1 85 | 86 | :mainEnd 87 | if "%OS%"=="Windows_NT" endlocal 88 | 89 | :omega 90 | -------------------------------------------------------------------------------- /sbom_generation.yaml: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved. 2 | 3 | # This OCI DevOps build specification file [1] generates a Software Bill of Materials (SBOM) of the repository. 4 | # The file is needed to run checks for third-party vulnerabilities and business approval according to Oracle’s GitHub policies. 
5 | # [1] https://docs.oracle.com/en-us/iaas/Content/devops/using/build_specs.htm 6 | 7 | version: 0.1 8 | component: build 9 | timeoutInSeconds: 1000 10 | shell: bash 11 | 12 | steps: 13 | - type: Command 14 | name: "Run Gradle cyclonedxBom command" 15 | command: | 16 | # For more details, visit https://github.com/CycloneDX/cyclonedx-gradle-plugin/blob/master/README.md 17 | cat <> init.gradle 18 | initscript { 19 | repositories { 20 | maven { 21 | url "https://plugins.gradle.org/m2/" 22 | } 23 | } 24 | dependencies { 25 | classpath "org.cyclonedx:cyclonedx-gradle-plugin:1.7.4" 26 | } 27 | } 28 | allprojects{ 29 | apply plugin:org.cyclonedx.gradle.CycloneDxPlugin 30 | cyclonedxBom { 31 | includeConfigs = ["runtimeClasspath", "compileClasspath"] 32 | skipConfigs = ["testCompileClasspath"] 33 | projectType = "application" 34 | destination = file(".") 35 | outputName = "artifactSBOM" 36 | outputFormat = "json" 37 | schemaVersion = "1.4" 38 | } 39 | } 40 | EOF 41 | gradle --init-script init.gradle cyclonedxBom -info 42 | outputArtifacts: 43 | - name: artifactSBOM 44 | type: BINARY 45 | location: ${OCI_PRIMARY_SOURCE_DIR}/artifactSBOM.json 46 | -------------------------------------------------------------------------------- /settings.gradle: -------------------------------------------------------------------------------- 1 | /* 2 | ** OKafka Java Client version 23.4. 3 | ** 4 | ** Copyright (c) 2019, 2024 Oracle and/or its affiliates. 5 | ** Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl. 6 | */ 7 | 8 | rootProject.name = 'okafka' 9 | include(':clients', 'examples:consumer', 'examples:producer') 10 | 11 | --------------------------------------------------------------------------------