├── .editorconfig
├── .gitignore
├── LICENSE
├── README.md
├── bin
│   └── debug.sh
├── config
│   ├── AwsLambdaSinkConnector.properties
│   ├── connect-avro-docker.properties
│   └── connect-json-docker.properties
├── docker-compose.yml
├── pom.xml
└── src
    ├── main
    │   ├── assembly
    │   │   └── package.xml
    │   ├── java
    │   │   └── com
    │   │       └── tm
    │   │           └── kafka
    │   │               └── connect
    │   │                   └── aws
    │   │                       └── lambda
    │   │                           ├── AwsLambdaSinkConnector.java
    │   │                           ├── AwsLambdaSinkConnectorConfig.java
    │   │                           ├── AwsLambdaSinkTask.java
    │   │                           ├── ConfigurationAWSCredentialsProvider.java
    │   │                           ├── VersionUtil.java
    │   │                           └── converter
    │   │                               ├── DefaultPayloadConverter.java
    │   │                               ├── JsonPayloadConverter.java
    │   │                               └── SinkRecordToPayloadConverter.java
    │   └── resources
    │       └── logback.xml
    └── test
        ├── java
        │   └── com
        │       └── tm
        │           └── kafka
        │               └── connect
        │                   └── aws
        │                       └── lambda
        │                           ├── AwsLambdaSinkConnectorConfigTest.java
        │                           ├── AwsLambdaSinkConnectorTest.java
        │                           └── AwsLambdaSinkTaskTest.java
        └── resources
            └── logback.xml

--------------------------------------------------------------------------------
/.editorconfig:
--------------------------------------------------------------------------------
# top-most EditorConfig file
root = true

# Unix-style newlines with a newline ending every file
[*]
end_of_line = lf
insert_final_newline = true
indent_style = space
indent_size = 2

# Matches multiple files with brace expansion notation
# Set default charset
[*.{js,py}]
charset = utf-8

# 2 space indentation
[*.py]
indent_style = space
indent_size = 2

# Space indentation (no size specified)
[Makefile]
indent_style = space

# Indentation override for all JS under lib directory
[lib/**.js]
indent_style = space
indent_size = 2

# Matches the exact files either package.json or .travis.yml
[{package.json,.travis.yml}]
indent_style = space
indent_size = 2

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*~

# Build products
target/
build/

# IntelliJ data
*.iml
.idea/
.ipr

# Eclipse
.classpath
.project
.settings/

# Documentation build output
/docs/_build

.DS_Store

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.
      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
Kafka Connect AWS Lambda connector
==================================

# Running in development

The [docker-compose.yml](docker-compose.yml) that is included in this repository is based on the Confluent Platform Docker
images. Take a look at the [quickstart](http://docs.confluent.io/3.0.1/cp-docker-images/docs/quickstart.html#getting-started-with-docker-client)
for the Docker images.

To try the example payload converters, first bring up the stack:

```
docker-compose up -d
```

With plain JSON messages:

```
# docker exec -it kafka bash
# cd /data
# ./bin/debug.sh config/connect-json-docker.properties config/AwsLambdaSinkConnector.properties
```

```
# docker exec -it kafka bash
# kafka-console-producer --broker-list kafka:9092 --topic aws-lambda-topic \
    --property parse.key='true' --property key.separator=':'
> K2:{"f1":"A7"}
```

With JsonPayloadConverter the Lambda function sees:

```
{ schema: null, payload: { f1: 'A7' } }
```

With DefaultPayloadConverter the Lambda function sees:

```
{ kafkaOffset: 34,
  timestampType: 'CREATE_TIME',
  topic: 'aws-lambda-topic',
  kafkaPartition: 0,
  keySchema: { type: 'STRING', optional: true },
  key: 'K2',
  value: { f1: 'A7' },
  timestamp: 1518315606220 }
```

With Schema Registry and Avro messages:

```
# docker exec -it kafka bash
# cd /data
# ./bin/debug.sh config/connect-avro-docker.properties config/AwsLambdaSinkConnector.properties
```

```
# docker exec -it schema-registry bash
# kafka-avro-console-producer --broker-list kafka:9092 --topic aws-lambda-topic \
    --property value.schema='{"type":"record","name":"test","fields":[{"name":"f1","type":"string"}]}' \
    --property schema.registry.url='http://schema-registry:8081/'
{"f1":"AZ"}
```

With JsonPayloadConverter the Lambda function sees:

```
{ schema:
   { type: 'struct',
     fields: [ [Object] ],
     optional: false,
     name: 'test',
     version: 1 },
  payload: { f1: 'AZ' } }
```

With DefaultPayloadConverter the Lambda function sees:

```
{ kafkaOffset: 35,
  timestampType: 'CREATE_TIME',
  topic: 'aws-lambda-topic',
  kafkaPartition: 0,
  keySchema: { type: 'STRING', optional: true },
  valueSchema:
   { type: 'STRUCT',
     optional: false,
     fields: [ [Object] ],
     fieldsByName: { f1: [Object] },
     name: 'test',
     version: 1 },
  value:
   { schema:
      { type: 'STRUCT',
        optional: false,
        fields: [Object],
        fieldsByName: [Object],
        name: 'test',
        version: 1 },
     values: [ 'AZ' ] },
  timestamp: 1518315749655 }
```
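The dumps above are simply what a Lambda function that logs its event prints to CloudWatch. A minimal Java handler in the same spirit (the class name and the use of the `aws-lambda-java-core` library are illustrative; any runtime that logs its event will do) could look like:

```
package com.example;

import com.amazonaws.services.lambda.runtime.Context;
import com.amazonaws.services.lambda.runtime.RequestHandler;

import java.util.Map;

// Logs the JSON payload sent by the connector and returns an empty result.
public class EchoPayloadHandler implements RequestHandler<Map<String, Object>, String> {
  @Override
  public String handleRequest(Map<String, Object> event, Context context) {
    context.getLogger().log("Received: " + event);
    return "";
  }
}
```

Deploy it under the name configured in `aws.function.name` and the connector's invocations will show up in the function's log stream.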
106 | 107 | ``` 108 | export SUSPEND='y' 109 | ./bin/debug.sh config/connect-json-docker.properties config/AwsLambdaSinkConnector.properties 110 | ``` 111 | 112 | -------------------------------------------------------------------------------- /bin/debug.sh: -------------------------------------------------------------------------------- 1 | 2 | #!/usr/bin/env bash 3 | 4 | : ${SUSPEND:='n'} 5 | 6 | set -e 7 | 8 | export KAFKA_JMX_OPTS="-Xdebug -agentlib:jdwp=transport=dt_socket,server=y,suspend=${SUSPEND},address=5005" 9 | export CLASSPATH="$(find target/kafka-connect-aws-lambda-1.0-package/share/java -type f -name '*.jar' | tr '\n' ':')" 10 | 11 | # connect-standalone config/connect-json-docker.properties config/AwsLambdaSinkConnector.properties 12 | 13 | connect-standalone $1 $2 14 | -------------------------------------------------------------------------------- /config/AwsLambdaSinkConnector.properties: -------------------------------------------------------------------------------- 1 | 2 | name=AwsLambdaSinkConnector 3 | topics=aws-lambda-topic 4 | tasks.max=1 5 | connector.class=com.tm.kafka.connect.aws.lambda.AwsLambdaSinkConnector 6 | 7 | aws.region=us-west-2 8 | aws.function.name=kafka-aws-lambda-test 9 | aws.lambda.payload.converter.class=com.tm.kafka.connect.aws.lambda.converter.JsonPayloadConverter 10 | # aws.lambda.payload.converter.class=com.tm.kafka.connect.aws.lambda.converter.DefaultPayloadConverter 11 | # retry.backoff.ms=5000 12 | # aws.lambda.invoke.async=RequestResponse 13 | # aws.lambda.invoke.async=Event 14 | # aws.lambda.invoke.async=DryRun 15 | 16 | # aws.credentials.provider.class=com.amazonaws.auth.DefaultAWSCredentialsProviderChain 17 | aws.credentials.provider.class=com.tm.kafka.connect.aws.lambda.ConfigurationAWSCredentialsProvider 18 | aws.credentials.provider.aws.access.key.id=${file:/root/.aws/credentials:aws_access_key_id} 19 | aws.credentials.provider.aws.secret.access.key=${file:/root/.aws/credentials:aws_secret_access_key} 20 | -------------------------------------------------------------------------------- /config/connect-avro-docker.properties: -------------------------------------------------------------------------------- 1 | # Sample configuration for a standalone Kafka Connect worker that uses Avro serialization and 2 | # integrates the the SchemaConfig Registry. This sample configuration assumes a local installation of 3 | # Confluent Platform with all services running on their default ports. 4 | # Bootstrap Kafka servers. If multiple servers are specified, they should be comma-separated. 5 | bootstrap.servers=kafka:9092 6 | # The converters specify the format of data in Kafka and how to translate it into Connect data. 7 | # Every Connect user will need to configure these based on the format they want their data in 8 | # when loaded from or stored into Kafka 9 | key.converter=org.apache.kafka.connect.storage.StringConverter 10 | key.converter.schemas.enable=false 11 | key.converter.schema.registry.url=http://schema_registry:8081/ 12 | value.converter=io.confluent.connect.avro.AvroConverter 13 | value.converter.schemas.enable=true 14 | value.converter.schema.registry.url=http://schema_registry:8081/ 15 | 16 | # The internal converter used for offsets and config data is configurable and must be specified, 17 | # but most users will always want to use the built-in default. Offset and config data is never 18 | # visible outside of Connect in this format. 
--------------------------------------------------------------------------------
/config/connect-avro-docker.properties:
--------------------------------------------------------------------------------
# Sample configuration for a standalone Kafka Connect worker that uses Avro serialization and
# integrates with the Schema Registry. This sample configuration assumes a local installation of
# Confluent Platform with all services running on their default ports.

# Bootstrap Kafka servers. If multiple servers are specified, they should be comma-separated.
bootstrap.servers=kafka:9092

# The converters specify the format of data in Kafka and how to translate it into Connect data.
# Every Connect user will need to configure these based on the format they want their data in
# when loaded from or stored into Kafka
key.converter=org.apache.kafka.connect.storage.StringConverter
key.converter.schemas.enable=false
key.converter.schema.registry.url=http://schema-registry:8081/
value.converter=io.confluent.connect.avro.AvroConverter
value.converter.schemas.enable=true
value.converter.schema.registry.url=http://schema-registry:8081/

# The internal converter used for offsets and config data is configurable and must be specified,
# but most users will always want to use the built-in default. Offset and config data is never
# visible outside of Connect in this format.
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=true
internal.value.converter.schemas.enable=true

# Local storage file for offset data
offset.storage.file.filename=/tmp/connect.offsets

# Confluent Control Center Integration -- uncomment these lines to enable Kafka client interceptors
# that will report audit data that can be displayed and analyzed in Confluent Control Center
# producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor
# consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor

--------------------------------------------------------------------------------
/config/connect-json-docker.properties:
--------------------------------------------------------------------------------
# Sample configuration for a standalone Kafka Connect worker that uses JSON serialization.
# This sample configuration assumes a local installation of Confluent Platform with all
# services running on their default ports.

# Bootstrap Kafka servers. If multiple servers are specified, they should be comma-separated.
bootstrap.servers=kafka:9092

# The converters specify the format of data in Kafka and how to translate it into Connect data.
# Every Connect user will need to configure these based on the format they want their data in
# when loaded from or stored into Kafka
key.converter=org.apache.kafka.connect.storage.StringConverter
key.converter.schemas.enable=false
value.converter=org.apache.kafka.connect.json.JsonConverter
value.converter.schemas.enable=false

# The internal converter used for offsets and config data is configurable and must be specified,
# but most users will always want to use the built-in default. Offset and config data is never
# visible outside of Connect in this format.
internal.key.converter=org.apache.kafka.connect.json.JsonConverter
internal.value.converter=org.apache.kafka.connect.json.JsonConverter
internal.key.converter.schemas.enable=false
internal.value.converter.schemas.enable=false

# Local storage file for offset data
offset.storage.file.filename=/tmp/connect.offsets

# Confluent Control Center Integration -- uncomment these lines to enable Kafka client interceptors
# that will report audit data that can be displayed and analyzed in Confluent Control Center
# producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor
# consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor

config.providers=file
config.providers.file.class=org.apache.kafka.common.config.provider.FileConfigProvider
config.providers.file.param.secrets=/root/.aws/credentials
config.reload.action=restart

--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: "2"
services:

  zookeeper:
    image: confluentinc/cp-zookeeper:5.0.1
    container_name: zookeeper
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      zk_id: "1"

  kafka:
    hostname: kafka
    image: confluentinc/cp-kafka:5.0.1
    container_name: kafka
    links:
      - zookeeper
    ports:
      - "9092:9092"
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://:9092"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
    volumes:
      - ./:/data
      - ~/.aws:/root/.aws

  schema-registry:
    hostname: schema-registry
    image: confluentinc/cp-schema-registry:5.0.1
    container_name: schema-registry
    links:
      - kafka
      - zookeeper
    ports:
      - "8081:8081"
    environment:
      SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL: "zookeeper:2181"
      SCHEMA_REGISTRY_HOST_NAME: schema-registry

#  connect:
#    hostname: connect
#    image: confluentinc/cp-kafka-connect:5.0.1
#    container_name: connect
#    depends_on:
#      - zookeeper
#      - kafka
#      - schema-registry
#    ports:
#      - "8083:8083"
#    environment:
#      CONNECT_BOOTSTRAP_SERVERS: 'kafka:9092'
#      CONNECT_REST_ADVERTISED_HOST_NAME: connect
#      CONNECT_REST_PORT: 8083
#      CONNECT_GROUP_ID: compose-connect-group
#      CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs
#      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1
#      CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000
#      CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets
#      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1
#      CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status
#      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1
#      CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter
#      CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
#      CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter
#      CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
#      CONNECT_INTERNAL_KEY_CONVERTER: org.apache.kafka.connect.json.JsonConverter
#      CONNECT_INTERNAL_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
#      CONNECT_ZOOKEEPER_CONNECT: 'zookeeper:2181'
#      CONNECT_PLUGIN_PATH: /usr/share/java
#    volumes:
#      - ./:/data
#      - ~/.aws:/root/.aws
--------------------------------------------------------------------------------
/pom.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
  <modelVersion>4.0.0</modelVersion>

  <groupId>com.tm.kafka</groupId>
  <artifactId>kafka-connect-aws-lambda</artifactId>
  <version>1.0</version>
  <packaging>jar</packaging>

  <name>kafka-connect-aws-lambda</name>
  <description>A Kafka Connect Connector for kafka-connect-aws-lambda</description>

  <properties>
    <kafka.version>2.0.1</kafka.version>
    <jackson.version>2.12.1</jackson.version>
    <confluent.version>5.0.1</confluent.version>
    <confluent.maven.repo>http://packages.confluent.io/maven/</confluent.maven.repo>
  </properties>

  <dependencies>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>connect-api</artifactId>
      <version>${kafka.version}</version>
      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.kafka</groupId>
      <artifactId>connect-json</artifactId>
      <version>${kafka.version}</version>
      <scope>provided</scope>
    </dependency>

    <dependency>
      <groupId>io.confluent</groupId>
      <artifactId>kafka-connect-avro-converter</artifactId>
      <version>${confluent.version}</version>
      <exclusions>
        <exclusion>
          <groupId>org.slf4j</groupId>
          <artifactId>slf4j-log4j12</artifactId>
        </exclusion>
      </exclusions>
    </dependency>

    <dependency>
      <groupId>com.amazonaws</groupId>
      <artifactId>aws-java-sdk-lambda</artifactId>
      <version>1.11.452</version>
    </dependency>

    <dependency>
      <groupId>org.apache.avro</groupId>
      <artifactId>avro</artifactId>
      <version>1.8.2</version>
    </dependency>

    <dependency>
      <groupId>com.google.code.gson</groupId>
      <artifactId>gson</artifactId>
      <version>2.8.5</version>
    </dependency>

    <dependency>
      <groupId>junit</groupId>
      <artifactId>junit</artifactId>
      <version>4.13.1</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>ch.qos.logback</groupId>
      <artifactId>logback-classic</artifactId>
      <version>1.2.3</version>
      <scope>test</scope>
    </dependency>
  </dependencies>

  <dependencyManagement>
    <dependencies>
      <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-core</artifactId>
        <version>${jackson.version}</version>
      </dependency>
      <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-annotations</artifactId>
        <version>${jackson.version}</version>
      </dependency>
      <dependency>
        <groupId>com.fasterxml.jackson.core</groupId>
        <artifactId>jackson-databind</artifactId>
        <version>${jackson.version}</version>
      </dependency>
      <dependency>
        <groupId>com.fasterxml.jackson.dataformat</groupId>
        <artifactId>jackson-dataformat-cbor</artifactId>
        <version>${jackson.version}</version>
      </dependency>
    </dependencies>
  </dependencyManagement>

  <repositories>
    <repository>
      <id>confluent</id>
      <name>Confluent</name>
      <url>${confluent.maven.repo}</url>
    </repository>
  </repositories>

  <build>
    <plugins>
      <plugin>
        <groupId>org.codehaus.mojo</groupId>
        <artifactId>versions-maven-plugin</artifactId>
        <version>2.7</version>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-enforcer-plugin</artifactId>
        <version>3.0.0-M2</version>
        <executions>
          <execution>
            <id>enforce-versions</id>
            <goals>
              <goal>enforce</goal>
            </goals>
            <configuration>
              <rules>
                <requireMavenVersion>
                  <version>3.0.5</version>
                </requireMavenVersion>
                <requireJavaVersion>
                  <version>1.8</version>
                </requireJavaVersion>
              </rules>
            </configuration>
          </execution>
        </executions>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-jar-plugin</artifactId>
        <version>3.1.0</version>
        <configuration>
          <archive>
            <manifest>
              <addDefaultImplementationEntries>true</addDefaultImplementationEntries>
              <addDefaultSpecificationEntries>true</addDefaultSpecificationEntries>
            </manifest>
          </archive>
        </configuration>
      </plugin>
      <plugin>
        <groupId>org.apache.maven.plugins</groupId>
        <artifactId>maven-compiler-plugin</artifactId>
        <version>3.8.0</version>
        <inherited>true</inherited>
        <configuration>
          <source>1.8</source>
          <target>1.8</target>
        </configuration>
      </plugin>
      <plugin>
        <artifactId>maven-assembly-plugin</artifactId>
        <version>3.1.0</version>
        <configuration>
          <descriptors>
            <descriptor>src/main/assembly/package.xml</descriptor>
          </descriptors>
        </configuration>
        <executions>
          <execution>
            <id>make-assembly</id>
            <phase>package</phase>
            <goals>
              <goal>single</goal>
            </goals>
          </execution>
        </executions>
      </plugin>
    </plugins>
    <resources>
      <resource>
        <directory>src/main/resources</directory>
        <filtering>true</filtering>
      </resource>
    </resources>
  </build>
</project>

--------------------------------------------------------------------------------
/src/main/assembly/package.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2"
          xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
          xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.2
          http://maven.apache.org/xsd/assembly-1.1.2.xsd">
  <id>package</id>
  <formats>
    <format>dir</format>
  </formats>
  <includeBaseDirectory>false</includeBaseDirectory>
  <fileSets>
    <fileSet>
      <directory>${project.basedir}</directory>
      <outputDirectory>share/doc/${project.name}/</outputDirectory>
      <includes>
        <include>README*</include>
        <include>LICENSE*</include>
        <include>NOTICE*</include>
        <include>licenses/</include>
      </includes>
    </fileSet>
    <fileSet>
      <directory>${project.basedir}/config</directory>
      <outputDirectory>etc/${project.name}</outputDirectory>
      <includes>
        <include>*</include>
      </includes>
    </fileSet>
  </fileSets>
  <dependencySets>
    <dependencySet>
      <outputDirectory>share/java/${project.name}</outputDirectory>
      <useProjectArtifact>true</useProjectArtifact>
      <useTransitiveFiltering>true</useTransitiveFiltering>
      <excludes>
        <exclude>org.apache.kafka:connect-api</exclude>
      </excludes>
    </dependencySet>
  </dependencySets>
</assembly>

--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/AwsLambdaSinkConnector.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.connect.connector.Task;
import org.apache.kafka.connect.sink.SinkConnector;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class AwsLambdaSinkConnector extends SinkConnector {
  private static Logger log = LoggerFactory.getLogger(AwsLambdaSinkConnector.class);
  private AwsLambdaSinkConnectorConfig config;

  @Override
  public String version() {
    return VersionUtil.getVersion();
  }

  @Override
  public void start(Map<String, String> map) {
    config = new AwsLambdaSinkConnectorConfig(map);
  }

  @Override
  public Class<? extends Task> taskClass() {
    return AwsLambdaSinkTask.class;
  }

  @Override
  public List<Map<String, String>> taskConfigs(int maxTasks) {
    // Every task receives the same configuration.
    Map<String, String> taskProps = new HashMap<>(config.originalsStrings());
    List<Map<String, String>> taskConfigs = new ArrayList<>(maxTasks);
    for (int i = 0; i < maxTasks; ++i) {
      taskConfigs.add(taskProps);
    }
    return taskConfigs;
  }

  @Override
  public void stop() {
  }

  @Override
  public ConfigDef config() {
    return AwsLambdaSinkConnectorConfig.conf();
  }
}

--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/AwsLambdaSinkConnectorConfig.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import com.amazonaws.auth.AWSCredentialsProvider;
import com.amazonaws.auth.DefaultAWSCredentialsProviderChain;
import com.amazonaws.regions.RegionUtils;
import com.amazonaws.services.lambda.model.InvocationType;
import com.amazonaws.services.lambda.model.InvokeRequest;
import com.tm.kafka.connect.aws.lambda.converter.JsonPayloadConverter;
import com.tm.kafka.connect.aws.lambda.converter.SinkRecordToPayloadConverter;
import org.apache.kafka.common.Configurable;
import org.apache.kafka.common.config.AbstractConfig;
import org.apache.kafka.common.config.ConfigDef;
import org.apache.kafka.common.config.ConfigDef.Importance;
import org.apache.kafka.common.config.ConfigDef.Type;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.connect.errors.ConnectException;

import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

import static org.apache.kafka.common.config.ConfigDef.NO_DEFAULT_VALUE;


public class AwsLambdaSinkConnectorConfig extends AbstractConfig {

  static final String REGION_CONFIG = "aws.region";
  private static final String REGION_DOC_CONFIG = "The AWS region.";
  private static final String REGION_DISPLAY_CONFIG = "AWS region";

  private static final String CREDENTIALS_PROVIDER_CLASS_CONFIG = "aws.credentials.provider.class";
  private static final Class<? extends AWSCredentialsProvider> CREDENTIALS_PROVIDER_CLASS_DEFAULT =
    DefaultAWSCredentialsProviderChain.class;
  private static final String CREDENTIALS_PROVIDER_DOC_CONFIG =
    "Credentials provider or provider chain to use for authentication to AWS. By default "
      + "the connector uses 'DefaultAWSCredentialsProviderChain'.";
  private static final String CREDENTIALS_PROVIDER_DISPLAY_CONFIG = "AWS Credentials Provider Class";

  /**
   * The properties that begin with this prefix will be used to configure a class, specified by
   * {@code aws.credentials.provider.class} if it implements {@link Configurable}.
   */
  public static final String CREDENTIALS_PROVIDER_CONFIG_PREFIX =
    CREDENTIALS_PROVIDER_CLASS_CONFIG.substring(
      0,
      CREDENTIALS_PROVIDER_CLASS_CONFIG.lastIndexOf(".") + 1
    );

  static final String FUNCTION_NAME_CONFIG = "aws.function.name";
  private static final String FUNCTION_NAME_DOC = "The AWS Lambda function name.";
  private static final String FUNCTION_NAME_DISPLAY = "AWS Lambda function name";

  private static final String RETRY_BACKOFF_CONFIG = "retry.backoff.ms";
  private static final String RETRY_BACKOFF_DOC =
    "The retry backoff in milliseconds. This config is used to notify Kafka Connect to retry "
      + "delivering a message batch or performing recovery in case of transient exceptions.";
  private static final long RETRY_BACKOFF_DEFAULT = 5000L;
  private static final String RETRY_BACKOFF_DISPLAY = "Retry Backoff (ms)";

  private static final String INVOCATION_TYPE_CONFIG = "aws.lambda.invocation.type";
  private static final String INVOCATION_TYPE_DEFAULT = "RequestResponse";
  private static final String INVOCATION_TYPE_DOC_CONFIG = "AWS Lambda function invocation type.";
  private static final String INVOCATION_TYPE_DISPLAY_CONFIG = "Invocation type";

  private static final String PAYLOAD_CONVERTER_CONFIG = "aws.lambda.payload.converter.class";
  private static final Class<? extends SinkRecordToPayloadConverter> PAYLOAD_CONVERTER_DEFAULT =
    JsonPayloadConverter.class;
  private static final String PAYLOAD_CONVERTER_DOC_CONFIG =
    "Class to be used to convert Kafka messages from SinkRecord to AWS Lambda input";
  private static final String PAYLOAD_CONVERTER_DISPLAY_CONFIG = "Payload converter class";

  private final SinkRecordToPayloadConverter sinkRecordToPayloadConverter;
  private final InvokeRequest invokeRequest;

  @SuppressWarnings("unchecked")
  private AwsLambdaSinkConnectorConfig(ConfigDef config, Map<String, String> parsedConfig) {
    super(config, parsedConfig);
    try {
      sinkRecordToPayloadConverter = ((Class<? extends SinkRecordToPayloadConverter>)
        getClass(PAYLOAD_CONVERTER_CONFIG)).getDeclaredConstructor().newInstance();
    } catch (IllegalAccessException | InstantiationException | InvocationTargetException | NoSuchMethodException e) {
      throw new ConnectException("Invalid class for: " + PAYLOAD_CONVERTER_CONFIG, e);
    }
    invokeRequest = new InvokeRequest()
      .withFunctionName(getAwsFunctionName())
      .withInvocationType(getAwsLambdaInvocationType());
  }

  AwsLambdaSinkConnectorConfig(Map<String, String> parsedConfig) {
    this(conf(), parsedConfig);
  }

  public static ConfigDef conf() {
    String group = "AWS";
    int orderInGroup = 0;
    return new ConfigDef()
      .define(REGION_CONFIG,
        Type.STRING,
        NO_DEFAULT_VALUE,
        new RegionValidator(),
        Importance.HIGH,
        REGION_DOC_CONFIG,
        group,
        ++orderInGroup,
        ConfigDef.Width.SHORT,
        REGION_DISPLAY_CONFIG,
        new RegionRecommender())

      .define(CREDENTIALS_PROVIDER_CLASS_CONFIG,
        Type.CLASS,
        CREDENTIALS_PROVIDER_CLASS_DEFAULT,
        new CredentialsProviderValidator(),
        Importance.HIGH,
        CREDENTIALS_PROVIDER_DOC_CONFIG,
        group,
        ++orderInGroup,
        ConfigDef.Width.MEDIUM,
        CREDENTIALS_PROVIDER_DISPLAY_CONFIG)

      .define(FUNCTION_NAME_CONFIG,
        Type.STRING,
        NO_DEFAULT_VALUE,
        Importance.HIGH,
        FUNCTION_NAME_DOC,
        group,
        ++orderInGroup,
        ConfigDef.Width.SHORT,
        FUNCTION_NAME_DISPLAY)

      .define(RETRY_BACKOFF_CONFIG,
        Type.LONG,
        RETRY_BACKOFF_DEFAULT,
        Importance.LOW,
        RETRY_BACKOFF_DOC,
        group,
        ++orderInGroup,
        ConfigDef.Width.NONE,
        RETRY_BACKOFF_DISPLAY)

      .define(INVOCATION_TYPE_CONFIG,
        Type.STRING,
        INVOCATION_TYPE_DEFAULT,
        new InvocationTypeValidator(),
        Importance.LOW,
        INVOCATION_TYPE_DOC_CONFIG,
        group,
        ++orderInGroup,
        ConfigDef.Width.SHORT,
        INVOCATION_TYPE_DISPLAY_CONFIG,
        new InvocationTypeRecommender())

      .define(PAYLOAD_CONVERTER_CONFIG,
        Type.CLASS,
        PAYLOAD_CONVERTER_DEFAULT,
        new PayloadConverterValidator(),
        Importance.LOW,
        PAYLOAD_CONVERTER_DOC_CONFIG,
        group,
        ++orderInGroup,
        ConfigDef.Width.SHORT,
        PAYLOAD_CONVERTER_DISPLAY_CONFIG,
        new PayloadConverterRecommender())
      ;
  }

  public String getAwsRegion() {
    return this.getString(REGION_CONFIG);
  }

  @SuppressWarnings("unchecked")
  public AWSCredentialsProvider getAwsCredentialsProvider() {
    try {
      AWSCredentialsProvider awsCredentialsProvider = ((Class<? extends AWSCredentialsProvider>)
        getClass(CREDENTIALS_PROVIDER_CLASS_CONFIG)).getDeclaredConstructor().newInstance();
      if (awsCredentialsProvider instanceof Configurable) {
        Map<String, Object> configs = originalsWithPrefix(CREDENTIALS_PROVIDER_CONFIG_PREFIX);
        configs.remove(CREDENTIALS_PROVIDER_CLASS_CONFIG.substring(CREDENTIALS_PROVIDER_CONFIG_PREFIX.length()));
        ((Configurable) awsCredentialsProvider).configure(configs);
      }
      return awsCredentialsProvider;
    } catch (IllegalAccessException | InstantiationException | InvocationTargetException | NoSuchMethodException e) {
      throw new ConnectException("Invalid class for: " + CREDENTIALS_PROVIDER_CLASS_CONFIG, e);
    }
  }

  public String getAwsFunctionName() {
    return this.getString(FUNCTION_NAME_CONFIG);
  }

  public Long getRetryBackoff() {
    return this.getLong(RETRY_BACKOFF_CONFIG);
  }

  private InvocationType getAwsLambdaInvocationType() {
    return InvocationType.fromValue(this.getString(INVOCATION_TYPE_CONFIG));
  }

  public SinkRecordToPayloadConverter getPayloadConverter() {
    return sinkRecordToPayloadConverter;
  }

  private static class RegionRecommender implements ConfigDef.Recommender {
    @Override
    public List<Object> validValues(String name, Map<String, Object> connectorConfigs) {
      return new ArrayList<>(RegionUtils.getRegions());
    }

    @Override
    public boolean visible(String name, Map<String, Object> connectorConfigs) {
      return true;
    }
  }

  private static class RegionValidator implements ConfigDef.Validator {
    @Override
    public void ensureValid(String name, Object region) {
      String regionStr = ((String) region).toLowerCase().trim();
      if (RegionUtils.getRegion(regionStr) == null) {
        throw new ConfigException(name, region, "Value must be one of: " + Utils.join(RegionUtils.getRegions(), ", "));
      }
    }

    @Override
    public String toString() {
      return "[" + Utils.join(RegionUtils.getRegions(), ", ") + "]";
    }
  }

  private static class CredentialsProviderValidator implements ConfigDef.Validator {
    @Override
    public void ensureValid(String name, Object provider) {
      if (provider instanceof Class && AWSCredentialsProvider.class.isAssignableFrom((Class<?>) provider)) {
        return;
      }
      throw new ConfigException(name, provider, "Class must extend: " + AWSCredentialsProvider.class);
    }

    @Override
    public String toString() {
      return "Any class implementing: " + AWSCredentialsProvider.class;
    }
  }

  private static class InvocationTypeRecommender implements ConfigDef.Recommender {
    @Override
    public List<Object> validValues(String name, Map<String, Object> connectorConfigs) {
      return Arrays.<Object>asList(InvocationType.values());
    }

    @Override
    public boolean visible(String name, Map<String, Object> connectorConfigs) {
      return true;
    }
  }

  private static class InvocationTypeValidator implements ConfigDef.Validator {
    @Override
    public void ensureValid(String name, Object invocationType) {
      try {
        InvocationType.fromValue(((String) invocationType).trim());
      } catch (Exception e) {
        throw new ConfigException(name, invocationType, "Value must be one of: " +
          Utils.join(InvocationType.values(), ", "));
      }
    }

    @Override
    public String toString() {
      return "[" + Utils.join(InvocationType.values(), ", ") + "]";
    }
  }

  private static class PayloadConverterRecommender implements ConfigDef.Recommender {
    @Override
    public List<Object> validValues(String name, Map<String, Object> connectorConfigs) {
      return Collections.singletonList(JsonPayloadConverter.class);
    }

    @Override
    public boolean visible(String name, Map<String, Object> connectorConfigs) {
      return true;
    }
  }

  private static class PayloadConverterValidator implements ConfigDef.Validator {
    @Override
    public void ensureValid(String name, Object provider) {
      if (provider instanceof Class && SinkRecordToPayloadConverter.class.isAssignableFrom((Class<?>) provider)) {
        return;
      }
      throw new ConfigException(name, provider, "Class must extend: " + SinkRecordToPayloadConverter.class);
    }

    @Override
    public String toString() {
      return "Any class implementing: " + SinkRecordToPayloadConverter.class;
    }
  }

  private static ConfigDef getConfig() {
    Map<String, ConfigDef.ConfigKey> everything = new HashMap<>(conf().configKeys());
    ConfigDef visible = new ConfigDef();
    for (ConfigDef.ConfigKey key : everything.values()) {
      visible.define(key);
    }
    return visible;
  }

  public Function<String, InvokeRequest> getInvokeRequestWithPayload() {
    return invokeRequest::withPayload;
  }

  public static void main(String[] args) {
    System.out.println(VersionUtil.getVersion());
    System.out.println(getConfig().toEnrichedRst());
  }

}
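The `ConfigDef` built by `conf()` can also be exercised outside a running worker, which is a convenient way to check a configuration before deploying it. A small sketch using the public `conf()` entry point (property values taken from the sample properties file):

```
import org.apache.kafka.common.config.ConfigValue;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class ValidateConfigDemo {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("aws.region", "us-west-2");
    props.put("aws.function.name", "kafka-aws-lambda-test");

    // validate() never throws; each ConfigValue carries any error messages.
    List<ConfigValue> results =
        com.tm.kafka.connect.aws.lambda.AwsLambdaSinkConnectorConfig.conf().validate(props);
    for (ConfigValue value : results) {
      if (!value.errorMessages().isEmpty()) {
        System.out.println(value.name() + ": " + value.errorMessages());
      }
    }
  }
}
```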
--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/AwsLambdaSinkTask.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import com.amazonaws.services.lambda.AWSLambda;
import com.amazonaws.services.lambda.AWSLambdaAsyncClientBuilder;
import com.amazonaws.services.lambda.model.InvokeRequest;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Collection;
import java.util.Map;
import java.util.Optional;
import java.util.function.Consumer;
import java.util.stream.Stream;

import static java.nio.charset.StandardCharsets.UTF_8;

public class AwsLambdaSinkTask extends SinkTask {
  private static Logger log = LoggerFactory.getLogger(AwsLambdaSinkTask.class);

  private AwsLambdaSinkConnectorConfig connectorConfig;
  private AWSLambda client;

  @Override
  public void start(Map<String, String> map) {
    connectorConfig = new AwsLambdaSinkConnectorConfig(map);
    context.timeout(connectorConfig.getRetryBackoff());
    if (client == null) {
      setClient(AWSLambdaAsyncClientBuilder.standard()
        .withRegion(connectorConfig.getAwsRegion())
        .withCredentials(connectorConfig.getAwsCredentialsProvider())
        .build());
    }
  }

  void setClient(AWSLambda client) {
    this.client = client;
  }

  @Override
  public void stop() {
    log.debug("Stopping sink task, setting client to null");
    client = null;
  }

  @Override
  public void put(Collection<SinkRecord> collection) {
    loggingWrapper(collection.stream()
      .map(connectorConfig.getPayloadConverter())
      .map(connectorConfig.getInvokeRequestWithPayload()))
      .forEach(client::invoke);

    if (log.isDebugEnabled()) {
      log.debug("Read {} records from Kafka", collection.size());
    }
  }

  private Stream<InvokeRequest> loggingWrapper(final Stream<InvokeRequest> stream) {
    return getLogFunction()
      .map(stream::peek) // if there is a function, stream to logging
      .orElse(stream);   // or else just return the stream as is
  }

  private Optional<Consumer<InvokeRequest>> getLogFunction() {
    if (!log.isDebugEnabled()) {
      return Optional.empty();
    }
    if (!log.isTraceEnabled()) {
      return Optional.of(x -> log.debug("Calling {}", connectorConfig.getAwsFunctionName()));
    }
    // Decode a duplicate of the payload buffer so the read position of the original
    // is left untouched for the subsequent invoke call.
    return Optional.of(x -> log.trace("Calling {} with payload {}",
      connectorConfig.getAwsFunctionName(),
      UTF_8.decode(x.getPayload().duplicate()).toString()));
  }

  @Override
  public String version() {
    return VersionUtil.getVersion();
  }
}
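For reference, the `aws.lambda.invocation.type` setting maps directly onto the invocation type of the SDK call issued in `put()`. A bare SDK invocation with the fire-and-forget `Event` type, outside of Connect, would look like this (region and function name are the sample values from the config):

```
import com.amazonaws.services.lambda.AWSLambda;
import com.amazonaws.services.lambda.AWSLambdaClientBuilder;
import com.amazonaws.services.lambda.model.InvocationType;
import com.amazonaws.services.lambda.model.InvokeRequest;
import com.amazonaws.services.lambda.model.InvokeResult;

public class InvokeDemo {
  public static void main(String[] args) {
    AWSLambda lambda = AWSLambdaClientBuilder.standard().withRegion("us-west-2").build();

    InvokeRequest request = new InvokeRequest()
      .withFunctionName("kafka-aws-lambda-test")
      .withInvocationType(InvocationType.Event) // fire-and-forget; no response payload
      .withPayload("{\"f1\":\"A7\"}");

    InvokeResult result = lambda.invoke(request);
    System.out.println("HTTP status: " + result.getStatusCode()); // 202 for Event
  }
}
```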
--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/ConfigurationAWSCredentialsProvider.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import com.amazonaws.auth.AWSCredentials;
import com.amazonaws.auth.AWSCredentialsProvider;
import org.apache.kafka.common.Configurable;

import java.util.Map;

public class ConfigurationAWSCredentialsProvider implements AWSCredentialsProvider, Configurable {

  private static final String AWS_ACCESS_KEY_ID_CONFIG = "aws.access.key.id";
  private static final String AWS_SECRET_ACCESS_KEY_CONFIG = "aws.secret.access.key";

  private AWSCredentials awsCredentials;

  @Override
  public AWSCredentials getCredentials() {
    return awsCredentials;
  }

  @Override
  public void refresh() {

  }

  @Override
  public void configure(final Map<String, ?> configs) {
    awsCredentials = new AWSCredentials() {
      private final String key = (String) configs.get(AWS_ACCESS_KEY_ID_CONFIG);
      private final String secret = (String) configs.get(AWS_SECRET_ACCESS_KEY_CONFIG);

      @Override
      public String getAWSAccessKeyId() {
        return key;
      }

      @Override
      public String getAWSSecretKey() {
        return secret;
      }
    };
  }
}

--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/VersionUtil.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

/**
 * Created by jeremy on 5/3/16.
 */
class VersionUtil {
  public static String getVersion() {
    try {
      // The implementation version is null when running outside a packaged jar.
      String version = VersionUtil.class.getPackage().getImplementationVersion();
      return version != null ? version : "0.0.0.0";
    } catch (Exception ex) {
      return "0.0.0.0";
    }
  }
}

--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/converter/DefaultPayloadConverter.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda.converter;

import com.google.gson.Gson;
import org.apache.kafka.connect.sink.SinkRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class DefaultPayloadConverter implements SinkRecordToPayloadConverter {
  private Logger log = LoggerFactory.getLogger(DefaultPayloadConverter.class);
  private Gson gson = new Gson();

  public String convert(SinkRecord record) {
    // Serializes the entire SinkRecord, including offset, schema, and timestamp metadata.
    String payload = gson.toJson(record);
    log.trace("P: {}", payload);
    return payload;
  }
}

--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/converter/JsonPayloadConverter.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda.converter;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;
import org.apache.kafka.connect.json.JsonDeserializer;
import org.apache.kafka.connect.sink.SinkRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import static java.util.Collections.emptyMap;

public class JsonPayloadConverter implements SinkRecordToPayloadConverter {
  private Logger log = LoggerFactory.getLogger(JsonPayloadConverter.class);
  private ObjectMapper objectMapper = new ObjectMapper();
  private JsonConverter jsonConverter = new JsonConverter();
  private JsonDeserializer jsonDeserializer = new JsonDeserializer();

  public JsonPayloadConverter() {
    jsonConverter.configure(emptyMap(), false);
    jsonDeserializer.configure(emptyMap(), false);
  }

  public String convert(SinkRecord record) throws JsonProcessingException {
    String topic = record.topic();
    Schema schema = record.valueSchema();
    Object value = record.value();

    // Round-trips the value through Connect's JSON converter to get the
    // standard {"schema": ..., "payload": ...} envelope.
    String payload = objectMapper.writeValueAsString(
      jsonDeserializer.deserialize(topic,
        jsonConverter.fromConnectData(topic, schema, value)));

    if (log.isTraceEnabled()) {
      log.trace("P: {}", payload);
    }

    return payload;
  }
}
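A converter can also be driven directly with a hand-built `SinkRecord`, which is useful for checking the emitted payload without a running worker (topic, key, and field values below are arbitrary):

```
import com.tm.kafka.connect.aws.lambda.converter.JsonPayloadConverter;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;

public class ConverterDemo {
  public static void main(String[] args) throws Exception {
    Schema schema = SchemaBuilder.struct().name("test")
        .field("f1", Schema.STRING_SCHEMA)
        .build();
    Struct value = new Struct(schema).put("f1", "A7");

    SinkRecord record = new SinkRecord(
        "aws-lambda-topic", 0, Schema.STRING_SCHEMA, "K2", schema, value, 42);

    // Prints {"schema":{...},"payload":{"f1":"A7"}}
    System.out.println(new JsonPayloadConverter().convert(record));
  }
}
```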
--------------------------------------------------------------------------------
/src/main/java/com/tm/kafka/connect/aws/lambda/converter/SinkRecordToPayloadConverter.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda.converter;

import org.apache.kafka.connect.errors.RetriableException;
import org.apache.kafka.connect.sink.SinkRecord;

import java.util.function.Function;

public interface SinkRecordToPayloadConverter extends Function<SinkRecord, String> {
  String convert(final SinkRecord record) throws Exception;

  default String apply(final SinkRecord record) {
    try {
      return convert(record);
    } catch (final Exception e) {
      throw new RetriableException("Payload converter " + getClass().getName()
        + " failed to convert '" + record + "'", e);
    }
  }
}
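Custom implementations only need the single `convert` method; whatever they throw is wrapped in a `RetriableException` by the default `apply`. A hypothetical converter that forwards just the record value as plain text:

```
package com.example;

import com.tm.kafka.connect.aws.lambda.converter.SinkRecordToPayloadConverter;
import org.apache.kafka.connect.sink.SinkRecord;

// Hypothetical example: send only the record's value, rendered as a plain string.
public class ValueToStringPayloadConverter implements SinkRecordToPayloadConverter {
  @Override
  public String convert(final SinkRecord record) {
    return String.valueOf(record.value());
  }
}
```

To use it, put the class on the worker's plugin classpath and set `aws.lambda.payload.converter.class=com.example.ValueToStringPayloadConverter`.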
--------------------------------------------------------------------------------
/src/main/resources/logback.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<configuration>

  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n</pattern>
    </encoder>
  </appender>

  <root level="debug">
    <appender-ref ref="STDOUT"/>
  </root>

</configuration>

--------------------------------------------------------------------------------
/src/test/java/com/tm/kafka/connect/aws/lambda/AwsLambdaSinkConnectorConfigTest.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import org.junit.Test;

public class AwsLambdaSinkConnectorConfigTest {
  @Test
  public void doc() {
    System.out.println(AwsLambdaSinkConnectorConfig.conf().toRst());
  }
}

--------------------------------------------------------------------------------
/src/test/java/com/tm/kafka/connect/aws/lambda/AwsLambdaSinkConnectorTest.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import org.junit.Test;

public class AwsLambdaSinkConnectorTest {
  @Test
  public void test() {
    // Congrats on a passing test!
  }
}

--------------------------------------------------------------------------------
/src/test/java/com/tm/kafka/connect/aws/lambda/AwsLambdaSinkTaskTest.java:
--------------------------------------------------------------------------------
package com.tm.kafka.connect.aws.lambda;

import com.amazonaws.services.lambda.AbstractAWSLambda;
import com.amazonaws.services.lambda.model.InvokeRequest;
import com.amazonaws.services.lambda.model.InvokeResult;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.sink.SinkTaskContext;
import org.junit.Test;

import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import static com.tm.kafka.connect.aws.lambda.AwsLambdaSinkConnectorConfig.FUNCTION_NAME_CONFIG;
import static com.tm.kafka.connect.aws.lambda.AwsLambdaSinkConnectorConfig.REGION_CONFIG;
import static org.apache.kafka.connect.data.Schema.STRING_SCHEMA;
import static org.junit.Assert.assertEquals;

public class AwsLambdaSinkTaskTest {

  private static final String TOPIC = "aws-lambda-topic";
  private static final int PARTITION = 12;
  private static final int PARTITION2 = 13;

  private static final TopicPartition TOPIC_PARTITION = new TopicPartition(TOPIC, PARTITION);
  private static final TopicPartition TOPIC_PARTITION2 = new TopicPartition(TOPIC, PARTITION2);
  private static final String FUNCTION_NAME = "kafka-aws-lambda-test";
  private static final String REGION = "us-west-2";

  @Test
  public void test() {
    Map<String, String> props = new HashMap<String, String>() {{
      put(FUNCTION_NAME_CONFIG, FUNCTION_NAME);
      put(REGION_CONFIG, REGION);
    }};

    Set<TopicPartition> assignment = new HashSet<>();
    assignment.add(TOPIC_PARTITION);
    assignment.add(TOPIC_PARTITION2);
    MockSinkTaskContext context = new MockSinkTaskContext(assignment);

    AwsLambdaSinkTask task = new AwsLambdaSinkTask();

    Collection<SinkRecord> records = new ArrayList<>();
    int partition = 1;
    String key = "key1";

    Schema valueSchema = SchemaBuilder.struct()
      .name("com.example.Person")
      .field("name", STRING_SCHEMA)
      .field("age", Schema.INT32_SCHEMA)
      .build();

    String bobbyMcGee = "Bobby McGee";
    int value21 = 21;

    Struct value = new Struct(valueSchema)
      .put("name", bobbyMcGee)
      .put("age", value21);

    long offset = 100;
    long timestamp = 200L;
    SinkRecord sinkRecord = new SinkRecord(
      TOPIC,
      partition,
      STRING_SCHEMA,
      key,
      valueSchema,
      value,
      offset,
      timestamp,
      TimestampType.CREATE_TIME);
    records.add(sinkRecord);

    String payload = "{\"schema\":" +
      "{\"type\":\"struct\"," +
      "\"fields\":[" +
      "{\"type\":\"string\",\"optional\":false,\"field\":\"name\"}," +
      "{\"type\":\"int32\",\"optional\":false,\"field\":\"age\"}" +
      "]," +
      "\"optional\":false," +
      "\"name\":\"com.example.Person\"}," +
      "\"payload\":{\"name\":\"Bobby McGee\",\"age\":21}}";

    task.setClient(new AbstractAWSLambda() {
      @Override
      public InvokeResult invoke(final InvokeRequest request) {
        assertEquals(FUNCTION_NAME, request.getFunctionName());
        assertEquals(payload, new String(request.getPayload().array()));
        return null;
      }
    });

    task.initialize(context);
    task.start(props);
    task.put(records);
  }

  protected static class MockSinkTaskContext implements SinkTaskContext {

    private final Map<TopicPartition, Long> offsets;
    private long timeoutMs;
    private Set<TopicPartition> assignment;

    MockSinkTaskContext(Set<TopicPartition> assignment) {
      this.offsets = new HashMap<>();
      this.timeoutMs = -1L;
      this.assignment = assignment;
    }

    @Override
    public Map<String, String> configs() {
      return null;
    }

    @Override
    public void offset(Map<TopicPartition, Long> offsets) {
      this.offsets.putAll(offsets);
    }

    @Override
    public void offset(TopicPartition tp, long offset) {
      offsets.put(tp, offset);
    }

    public Map<TopicPartition, Long> offsets() {
      return offsets;
    }

    @Override
    public void timeout(long timeoutMs) {
      this.timeoutMs = timeoutMs;
    }

    public long timeout() {
      return timeoutMs;
    }

    @Override
    public Set<TopicPartition> assignment() {
      return assignment;
    }

    public void setAssignment(Set<TopicPartition> nextAssignment) {
      assignment = nextAssignment;
    }

    @Override
    public void pause(TopicPartition... partitions) {
    }

    @Override
    public void resume(TopicPartition... partitions) {
    }

    @Override
    public void requestCommit() {
    }
  }
}
--------------------------------------------------------------------------------
/src/test/resources/logback.xml:
--------------------------------------------------------------------------------
<?xml version="1.0" encoding="UTF-8"?>
<configuration>

  <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
    <encoder>
      <pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger - %msg%n</pattern>
    </encoder>
  </appender>

  <root level="debug">
    <appender-ref ref="STDOUT"/>
  </root>

</configuration>
--------------------------------------------------------------------------------