├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── support_request.md ├── PULL_REQUEST_TEMPLATE.md ├── auto-label.yaml ├── blunderbuss.yml ├── dependabot.yml ├── release-please.yml ├── release-trigger.yml ├── snippet-bot.yml ├── sync-repo-settings.yaml ├── trusted-contribution.yml └── workflows │ ├── approve-readme.yaml │ ├── auto-release.yaml │ ├── ci.yaml │ └── renovate_config_check.yaml ├── .gitignore ├── .kokoro ├── build.bat ├── build.sh ├── coerce_logs.sh ├── common.cfg ├── common.sh ├── continuous │ ├── common.cfg │ └── integration.cfg ├── dependencies.sh ├── nightly │ ├── common.cfg │ └── integration.cfg ├── populate-secrets.sh ├── presubmit │ ├── common.cfg │ └── integration.cfg ├── release │ ├── bump_snapshot.cfg │ ├── common.cfg │ ├── common.sh │ ├── drop.cfg │ ├── drop.sh │ ├── promote.cfg │ ├── promote.sh │ ├── publish_javadoc.cfg │ ├── publish_javadoc.sh │ ├── publish_javadoc11.cfg │ ├── publish_javadoc11.sh │ ├── snapshot.cfg │ ├── snapshot.sh │ ├── stage.cfg │ └── stage.sh ├── requirements.in ├── requirements.txt └── trampoline.sh ├── .repo-metadata.json ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── SECURITY.md ├── config ├── cps-sink-connector.properties ├── cps-source-connector.properties ├── pubsub-lite-sink-connector.properties └── pubsub-lite-source-connector.properties ├── java.header ├── license-checks.xml ├── migration ├── .gcp │ ├── gmk_bootstrap_servers │ ├── gmk_sasl_service_account │ ├── gmk_sasl_service_account_key │ ├── kafka_config_storage_topic │ ├── kafka_connect_group_id │ ├── kafka_offset_storage_topic │ ├── kafka_sink_topic │ ├── kafka_ssl_truststore_location │ ├── kafka_ssl_truststore_password │ ├── kafka_status_storage_topic │ ├── pubsub_lite_gcp_location │ ├── pubsub_lite_gcp_project │ ├── pubsub_lite_gcp_subscription │ └── pubsub_lite_job_name ├── docker │ ├── Dockerfile │ ├── build-image.sh │ ├── configure-kafka-connect.sh │ ├── kafka-connect.properties │ ├── pubsub-group-kafka-connector-1.2.0.jar │ ├── pubsub_lite_job.json │ ├── push-image.sh │ ├── start-kafka-connect.sh │ └── start-pubsub-lite-connector.sh └── k8s.yaml ├── pom.xml ├── renovate.json ├── src ├── main │ ├── java │ │ └── com │ │ │ └── google │ │ │ ├── pubsub │ │ │ └── kafka │ │ │ │ ├── common │ │ │ │ ├── ConnectorCredentialsProvider.java │ │ │ │ ├── ConnectorUtils.java │ │ │ │ └── Version.java │ │ │ │ ├── sink │ │ │ │ ├── CloudPubSubSinkConnector.java │ │ │ │ └── CloudPubSubSinkTask.java │ │ │ │ └── source │ │ │ │ ├── AckBatchingSubscriber.java │ │ │ │ ├── CloudPubSubGRPCSubscriber.java │ │ │ │ ├── CloudPubSubRoundRobinSubscriber.java │ │ │ │ ├── CloudPubSubSourceConnector.java │ │ │ │ ├── CloudPubSubSourceTask.java │ │ │ │ ├── CloudPubSubSubscriber.java │ │ │ │ ├── StreamingPullSubscriber.java │ │ │ │ └── StreamingPullSubscriberFactory.java │ │ │ └── pubsublite │ │ │ └── kafka │ │ │ ├── common │ │ │ └── Version.java │ │ │ ├── sink │ │ │ ├── ConfigDefs.java │ │ │ ├── Constants.java │ │ │ ├── KafkaPartitionRoutingPolicy.java │ │ │ ├── OrderingMode.java │ │ │ ├── PubSubLiteSinkConnector.java │ │ │ ├── PubSubLiteSinkTask.java │ │ │ ├── PublisherFactory.java │ │ │ ├── PublisherFactoryImpl.java │ │ │ └── Schemas.java │ │ │ └── source │ │ │ ├── ConfigDefs.java │ │ │ ├── Poller.java │ │ │ ├── PollerFactory.java │ │ │ ├── PollerFactoryImpl.java │ │ │ ├── PollerImpl.java │ │ │ ├── PubSubLiteSourceConnector.java │ │ │ └── PubSubLiteSourceTask.java │ └── resources │ │ └── log4j.properties └── test │ ├── 
java │ ├── com │ │ └── google │ │ │ ├── pubsub │ │ │ └── kafka │ │ │ │ ├── sink │ │ │ │ ├── CloudPubSubSinkConnectorTest.java │ │ │ │ └── CloudPubSubSinkTaskTest.java │ │ │ │ └── source │ │ │ │ ├── AckBatchingSubscriberTest.java │ │ │ │ ├── CloudPubSubSourceConnectorTest.java │ │ │ │ ├── CloudPubSubSourceTaskTest.java │ │ │ │ └── StreamingPullSubscriberTest.java │ │ │ └── pubsublite │ │ │ └── kafka │ │ │ ├── sink │ │ │ ├── PubSubLiteSinkTaskTest.java │ │ │ └── SchemasTest.java │ │ │ └── source │ │ │ ├── PollerImplTest.java │ │ │ └── PubSubLiteSourceTaskTest.java │ └── it │ │ ├── Base.java │ │ └── StandaloneIT.java │ └── resources │ ├── cps-sink-connector-test.properties │ ├── cps-source-connector-test.properties │ ├── kafka_vm_startup_script.sh │ ├── log4j.properties │ ├── pubsub-lite-sink-connector-test.properties │ └── pubsub-lite-source-connector-test.properties └── versions.txt /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # Code owners file. 2 | # This file controls who is tagged for review for any given pull request. 3 | 4 | # For syntax help see: 5 | # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax 6 | 7 | # The @googleapis/api-pubsub team is the default owner for changes in this repo 8 | * @googleapis/yoshi-java @googleapis/api-pubsub 9 | 10 | # For handwritten libraries, keep codeowner_team in .repo-metadata.json as the owner 11 | **/*.java @googleapis/api-pubsub 12 | 13 | 14 | # The java-samples-reviewers team is the default owner for samples changes 15 | samples/**/*.java @googleapis/java-samples-reviewers 16 | 17 | # Generated snippets should not be owned by samples reviewers 18 | samples/snippets/generated/ @googleapis/yoshi-java 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | 5 | --- 6 | 7 | Thanks for stopping by to let us know something could be better! 8 | 9 | **PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. 10 | 11 | Please run down the following list and make sure you've tried the usual "quick fixes": 12 | 13 | - Search the issues already opened: https://github.com/googleapis/java-pubsub-group-kafka-connector/issues 14 | - Check for answers on StackOverflow: http://stackoverflow.com/questions/tagged/google-cloud-platform 15 | 16 | If you are still having issues, please include as much information as possible: 17 | 18 | #### Environment details 19 | 20 | 1. Specify the API at the beginning of the title. For example, "BigQuery: ...". 21 | General, Core, and Other are also allowed as types. 22 | 2. OS type and version: 23 | 3. Java version: 24 | 4. Library version(s): 25 | 26 | #### Steps to reproduce 27 | 28 | 1. ? 29 | 2. ? 30 | 31 | #### Code example 32 | 33 | ```java 34 | // example 35 | ``` 36 | 37 | #### Stack trace 38 | ``` 39 | Any relevant stacktrace here. 40 | ``` 41 | 42 | #### External references such as API reference guides 43 | 44 | - ? 45 | 46 | #### Any additional information below 47 | 48 | 49 | Following these steps guarantees the quickest resolution possible. 50 | 51 | Thanks!
52 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this library 4 | 5 | --- 6 | 7 | Thanks for stopping by to let us know something could be better! 8 | 9 | **PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. 10 | 11 | **Is your feature request related to a problem? Please describe.** 12 | What the problem is. Example: I'm always frustrated when [...] 13 | 14 | **Describe the solution you'd like** 15 | What you want to happen. 16 | 17 | **Describe alternatives you've considered** 18 | Any alternative solutions or features you've considered. 19 | 20 | **Additional context** 21 | Any other context or screenshots about the feature request. 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/support_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Support request 3 | about: If you have a support contract with Google, please create an issue in the Google Cloud Support console. 4 | 5 | --- 6 | 7 | **PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. 8 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: 2 | - [ ] Make sure to open an issue as a [bug/issue](https://github.com/googleapis/java-pubsub-group-kafka-connector/issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea 3 | - [ ] Ensure the tests and linter pass 4 | - [ ] Code coverage does not decrease (if any source code was changed) 5 | - [ ] Appropriate docs were updated (if necessary) 6 | 7 | Fixes #<issue_number_goes_here> ☕️ 8 | 9 | If you write sample code, please follow the [samples format]( 10 | https://github.com/GoogleCloudPlatform/java-docs-samples/blob/main/SAMPLE_FORMAT.md). 11 | -------------------------------------------------------------------------------- /.github/auto-label.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2021 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License.
14 | requestsize: 15 | enabled: true 16 | -------------------------------------------------------------------------------- /.github/blunderbuss.yml: -------------------------------------------------------------------------------- 1 | # Configuration for the Blunderbuss GitHub app. For more info see 2 | # https://github.com/googleapis/repo-automation-bots/tree/main/packages/blunderbuss 3 | assign_prs_by: 4 | - labels: 5 | - samples 6 | to: 7 | - googleapis/java-samples-reviewers -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "maven" 4 | directory: "/" 5 | schedule: 6 | interval: "daily" 7 | # Disable version updates for Maven dependencies 8 | # we use renovate-bot as well as shared-dependencies BOM to update maven dependencies. 9 | ignore: 10 | - dependency-name: "*" 11 | - package-ecosystem: "pip" 12 | directory: "/" 13 | schedule: 14 | interval: "daily" 15 | # Disable version updates for pip dependencies 16 | # If a security vulnerability comes in, we will be notified about 17 | # it via template in the synthtool repository. 18 | ignore: 19 | - dependency-name: "*" 20 | -------------------------------------------------------------------------------- /.github/release-please.yml: -------------------------------------------------------------------------------- 1 | bumpMinorPreMajor: true 2 | handleGHRelease: true 3 | releaseType: java-yoshi 4 | branches: 5 | - bumpMinorPreMajor: true 6 | handleGHRelease: true 7 | releaseType: java-yoshi 8 | branch: java7 9 | -------------------------------------------------------------------------------- /.github/release-trigger.yml: -------------------------------------------------------------------------------- 1 | enabled: true 2 | multiScmName: java-pubsub-group-kafka-connector 3 | -------------------------------------------------------------------------------- /.github/snippet-bot.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/googleapis/java-pubsub-group-kafka-connector/c5e9ac378ffb335f205b0ea608df7e933d8a7263/.github/snippet-bot.yml -------------------------------------------------------------------------------- /.github/sync-repo-settings.yaml: -------------------------------------------------------------------------------- 1 | rebaseMergeAllowed: false 2 | squashMergeAllowed: true 3 | mergeCommitAllowed: false 4 | branchProtectionRules: 5 | - pattern: main 6 | isAdminEnforced: true 7 | requiredApprovingReviewCount: 1 8 | requiresCodeOwnerReviews: true 9 | requiresStrictStatusChecks: false 10 | requiredStatusCheckContexts: 11 | - dependencies (8) 12 | - dependencies (11) 13 | - lint 14 | - units (8) 15 | - units (11) 16 | - cla/google 17 | - 'Kokoro - Test: Integration' 18 | permissionRules: 19 | - team: yoshi-admins 20 | permission: admin 21 | - team: yoshi-java-admins 22 | permission: admin 23 | - team: yoshi-java 24 | permission: push 25 | - team: api-pubsub 26 | permission: admin 27 | -------------------------------------------------------------------------------- /.github/trusted-contribution.yml: -------------------------------------------------------------------------------- 1 | trustedContributors: 2 | - renovate-bot 3 | - gcf-owl-bot[bot] 4 | -------------------------------------------------------------------------------- /.github/workflows/approve-readme.yaml: 
-------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # GitHub Actions job to automatically approve and label 15 | # README regeneration PRs opened by yoshi-automation. 16 | on: 17 | pull_request: 18 | name: auto-merge-readme 19 | jobs: 20 | approve: 21 | runs-on: ubuntu-latest 22 | if: github.repository_owner == 'googleapis' && github.head_ref == 'autosynth-readme' 23 | steps: 24 | - uses: actions/github-script@v6 25 | with: 26 | github-token: ${{secrets.YOSHI_APPROVER_TOKEN}} 27 | script: | 28 | // only approve PRs from yoshi-automation 29 | if (context.payload.pull_request.user.login !== "yoshi-automation") { 30 | return; 31 | } 32 | 33 | // only approve PRs titled "chore: regenerate README" 34 | if (context.payload.pull_request.title !== "chore: regenerate README") { 35 | return; 36 | } 37 | 38 | // only approve PRs with README.md and synth.metadata changes 39 | const files = new Set( 40 | ( 41 | await github.paginate( 42 | github.rest.pulls.listFiles.endpoint({ 43 | owner: context.repo.owner, 44 | repo: context.repo.repo, 45 | pull_number: context.payload.pull_request.number, 46 | }) 47 | ) 48 | ).map(file => file.filename) 49 | ); 50 | if (files.size != 2 || !files.has("README.md") || !files.has(".github/readme/synth.metadata/synth.metadata")) { 51 | return; 52 | } 53 | 54 | // approve README regeneration PR 55 | await github.rest.pulls.createReview({ 56 | owner: context.repo.owner, 57 | repo: context.repo.repo, 58 | body: 'Rubber stamped PR!', 59 | pull_number: context.payload.pull_request.number, 60 | event: 'APPROVE' 61 | }); 62 | 63 | // attach automerge label 64 | await github.rest.issues.addLabels({ 65 | owner: context.repo.owner, 66 | repo: context.repo.repo, 67 | issue_number: context.payload.pull_request.number, 68 | labels: ['automerge'] 69 | }); 70 | -------------------------------------------------------------------------------- /.github/workflows/auto-release.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # GitHub Actions job to automatically approve and release 15 | # dependency-update-only release PRs opened by release-please.
16 | on: 17 | pull_request: 18 | name: auto-release 19 | jobs: 20 | approve: 21 | runs-on: ubuntu-latest 22 | if: contains(github.head_ref, 'release-please') 23 | steps: 24 | - uses: actions/github-script@v6 25 | with: 26 | github-token: ${{secrets.YOSHI_APPROVER_TOKEN}} 27 | debug: true 28 | script: | 29 | // only approve PRs from release-please[bot] 30 | if (context.payload.pull_request.user.login !== "release-please[bot]") { 31 | return; 32 | } 33 | 34 | // only approve PRs like "chore(main): release " 35 | if ( !context.payload.pull_request.title.startsWith("chore(main): release") ) { 36 | return; 37 | } 38 | 39 | // only approve PRs with pom.xml and versions.txt changes 40 | const filesPromise = github.rest.pulls.listFiles.endpoint({ 41 | owner: context.repo.owner, 42 | repo: context.repo.repo, 43 | pull_number: context.payload.pull_request.number, 44 | }); 45 | const changed_files = await github.paginate(filesPromise) 46 | 47 | if ( changed_files.length < 1 ) { 48 | console.log( "Not proceeding since PR is empty!" ) 49 | return; 50 | } 51 | 52 | if ( !changed_files.some(v => v.filename.includes("pom")) || !changed_files.some(v => v.filename.includes("versions.txt")) ) { 53 | console.log( "PR file changes do not have pom.xml or versions.txt -- something is wrong. PTAL!" ) 54 | return; 55 | } 56 | 57 | // trigger auto-release when 58 | // 1) it is a SNAPSHOT release (auto-generated post regular release) 59 | // 2) there are dependency updates only 60 | // 3) there are no open dependency update PRs in this repo (to avoid multiple releases) 61 | if ( 62 | context.payload.pull_request.body.includes("Fix") || 63 | context.payload.pull_request.body.includes("Build") || 64 | context.payload.pull_request.body.includes("Documentation") || 65 | context.payload.pull_request.body.includes("BREAKING CHANGES") || 66 | context.payload.pull_request.body.includes("Features") 67 | ) { 68 | console.log( "Not auto-releasing since it is not a dependency-update-only release." ); 69 | return; 70 | } 71 | 72 | const promise = github.rest.pulls.list.endpoint({ 73 | owner: context.repo.owner, 74 | repo: context.repo.repo, 75 | state: 'open' 76 | }); 77 | const open_pulls = await github.paginate(promise) 78 | 79 | if ( open_pulls.length > 1 && !context.payload.pull_request.title.includes("SNAPSHOT") ) { 80 | for ( const pull of open_pulls ) { 81 | if ( pull.title.startsWith("deps: update dependency") ) { 82 | console.log( "Not auto-releasing yet since there are dependency update PRs open in this repo." ); 83 | return; 84 | } 85 | } 86 | } 87 | 88 | // approve release PR 89 | await github.rest.pulls.createReview({ 90 | owner: context.repo.owner, 91 | repo: context.repo.repo, 92 | body: 'Rubber stamped release!', 93 | pull_number: context.payload.pull_request.number, 94 | event: 'APPROVE' 95 | }); 96 | 97 | // attach kokoro:force-run and automerge labels 98 | await github.rest.issues.addLabels({ 99 | owner: context.repo.owner, 100 | repo: context.repo.repo, 101 | issue_number: context.payload.pull_request.number, 102 | labels: ['kokoro:force-run', 'automerge'] 103 | }); 104 | -------------------------------------------------------------------------------- /.github/workflows/ci.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # Github action job to test core java library features on 15 | # downstream client libraries before they are released. 16 | on: 17 | push: 18 | branches: 19 | - main 20 | pull_request: 21 | name: ci 22 | jobs: 23 | units: 24 | runs-on: ubuntu-latest 25 | strategy: 26 | fail-fast: false 27 | matrix: 28 | java: [8, 11] 29 | steps: 30 | - uses: actions/checkout@v3 31 | - uses: actions/setup-java@v3 32 | with: 33 | distribution: zulu 34 | java-version: ${{matrix.java}} 35 | - run: java -version 36 | - run: .kokoro/build.sh 37 | env: 38 | JOB_TYPE: test 39 | windows: 40 | runs-on: windows-latest 41 | steps: 42 | - name: Support longpaths 43 | run: git config --system core.longpaths true 44 | - uses: actions/checkout@v3 45 | - uses: actions/setup-java@v3 46 | with: 47 | distribution: zulu 48 | java-version: 8 49 | - run: java -version 50 | - run: .kokoro/build.bat 51 | env: 52 | JOB_TYPE: test 53 | dependencies: 54 | runs-on: ubuntu-latest 55 | strategy: 56 | matrix: 57 | java: [8, 11] 58 | steps: 59 | - uses: actions/checkout@v3 60 | - uses: actions/setup-java@v3 61 | with: 62 | distribution: zulu 63 | java-version: ${{matrix.java}} 64 | - run: java -version 65 | - run: .kokoro/dependencies.sh 66 | lint: 67 | runs-on: ubuntu-latest 68 | steps: 69 | - uses: actions/checkout@v3 70 | - uses: actions/setup-java@v3 71 | with: 72 | distribution: zulu 73 | java-version: 11 74 | - run: java -version 75 | - run: .kokoro/build.sh 76 | env: 77 | JOB_TYPE: lint 78 | -------------------------------------------------------------------------------- /.github/workflows/renovate_config_check.yaml: -------------------------------------------------------------------------------- 1 | name: Renovate Bot Config Validation 2 | 3 | on: 4 | pull_request: 5 | paths: 6 | - 'renovate.json' 7 | 8 | jobs: 9 | renovate_bot_config_validation: 10 | runs-on: ubuntu-22.04 11 | 12 | steps: 13 | - name: Checkout code 14 | uses: actions/checkout@v4 15 | 16 | - name: Set up Node.js 17 | uses: actions/setup-node@v3 18 | with: 19 | node-version: '20' 20 | 21 | - name: Install Renovate and Config Validator 22 | run: | 23 | npm install -g npm@latest 24 | npm install --global renovate 25 | renovate-config-validator 26 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Maven 2 | target/ 3 | 4 | # Eclipse 5 | .classpath 6 | .project 7 | .settings 8 | 9 | # Intellij 10 | *.iml 11 | .idea/ 12 | 13 | # python utilities 14 | *.pyc 15 | __pycache__ 16 | 17 | .flattened-pom.xml 18 | -------------------------------------------------------------------------------- /.kokoro/build.bat: -------------------------------------------------------------------------------- 1 | :: Copyright 2022 Google LLC 2 | :: 3 | :: Licensed under the Apache License, Version 2.0 (the "License"); 4 | :: you may not use this file except in compliance with the License. 
5 | :: You may obtain a copy of the License at 6 | :: 7 | :: http://www.apache.org/licenses/LICENSE-2.0 8 | :: 9 | :: Unless required by applicable law or agreed to in writing, software 10 | :: distributed under the License is distributed on an "AS IS" BASIS, 11 | :: WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | :: See the License for the specific language governing permissions and 13 | :: limitations under the License. 14 | :: Windows entry point for Kokoro builds: delegates to the Bash build 15 | :: script using the bash.exe bundled with Git for Windows. 16 | :: See documentation in type-shell-output.bat 17 | 18 | "C:\Program Files\Git\bin\bash.exe" %~dp0build.sh 19 | -------------------------------------------------------------------------------- /.kokoro/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2022 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | ## Get the directory of the build script 19 | scriptDir=$(realpath $(dirname "${BASH_SOURCE[0]}")) 20 | ## cd to the parent directory, i.e. the root of the git repo 21 | cd ${scriptDir}/.. 22 | 23 | # include common functions 24 | source ${scriptDir}/common.sh 25 | 26 | # Print out Java version 27 | java -version 28 | echo ${JOB_TYPE} 29 | 30 | # attempt to install 3 times with exponential backoff (starting with 10 seconds) 31 | retry_with_backoff 3 10 \ 32 | mvn install -B -V \ 33 | -DskipTests=true \ 34 | -Dclirr.skip=true \ 35 | -Denforcer.skip=true \ 36 | -Dmaven.javadoc.skip=true \ 37 | -Dgcloud.download.skip=true \ 38 | -T 1C 39 | 40 | # if GOOGLE_APPLICATION_CREDENTIALS is specified as a relative path, prepend the Kokoro root directory onto it 41 | if [[ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" && "${GOOGLE_APPLICATION_CREDENTIALS}" != /* ]]; then 42 | export GOOGLE_APPLICATION_CREDENTIALS=$(realpath ${KOKORO_GFILE_DIR}/${GOOGLE_APPLICATION_CREDENTIALS}) 43 | fi 44 | 45 | RETURN_CODE=0 46 | set +e 47 | 48 | case ${JOB_TYPE} in 49 | test) 50 | mvn test -B -Dclirr.skip=true -Denforcer.skip=true 51 | RETURN_CODE=$? 52 | ;; 53 | lint) 54 | mvn com.coveo:fmt-maven-plugin:check 55 | RETURN_CODE=$? 56 | ;; 57 | javadoc) 58 | mvn javadoc:javadoc javadoc:test-javadoc 59 | RETURN_CODE=$? 60 | ;; 61 | integration) 62 | if [ -f "${KOKORO_GFILE_DIR}/secret_manager/java-pubsub-group-kafka-connector-secrets" ] 63 | then 64 | source "${KOKORO_GFILE_DIR}/secret_manager/java-pubsub-group-kafka-connector-secrets" 65 | fi 66 | mvn clean test -Dtest=it.StandaloneIT 67 | RETURN_CODE=$?
68 | ;; 69 | *) 70 | ;; 71 | esac 72 | 73 | if [ "${REPORT_COVERAGE}" == "true" ] 74 | then 75 | bash ${KOKORO_GFILE_DIR}/codecov.sh 76 | fi 77 | 78 | # fix output location of logs 79 | bash .kokoro/coerce_logs.sh 80 | 81 | if [[ "${ENABLE_FLAKYBOT}" == "true" ]] 82 | then 83 | chmod +x ${KOKORO_GFILE_DIR}/linux_amd64/flakybot 84 | ${KOKORO_GFILE_DIR}/linux_amd64/flakybot -repo=googleapis/java-pubsub-group-kafka-connector 85 | fi 86 | 87 | echo "exiting with ${RETURN_CODE}" 88 | exit ${RETURN_CODE} 89 | -------------------------------------------------------------------------------- /.kokoro/coerce_logs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2019 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # This script finds and moves sponge logs so that they can be found by placer 17 | # and are not flagged as flaky by sponge. 18 | 19 | set -eo pipefail 20 | 21 | ## Get the directory of the build script 22 | scriptDir=$(realpath $(dirname "${BASH_SOURCE[0]}")) 23 | ## cd to the parent directory, i.e. the root of the git repo 24 | cd ${scriptDir}/.. 25 | 26 | job=$(basename ${KOKORO_JOB_NAME}) 27 | 28 | echo "coercing sponge logs..." 29 | for xml in `find . -name "*-sponge_log.xml"` 30 | do 31 | class=$(basename ${xml} | cut -d- -f2) 32 | dir=$(dirname ${xml})/${job}/${class} 33 | text=$(dirname ${xml})/${class}-sponge_log.txt 34 | mkdir -p ${dir} 35 | mv ${xml} ${dir}/sponge_log.xml 36 | mv ${text} ${dir}/sponge_log.txt 37 | done 38 | -------------------------------------------------------------------------------- /.kokoro/common.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Download trampoline resources. These will be in ${KOKORO_GFILE_DIR} 4 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" 5 | 6 | # All builds use the trampoline script to run in docker. 7 | build_file: "java-pubsub-group-kafka-connector/.kokoro/trampoline.sh" 8 | 9 | # Tell the trampoline which build file to use. 10 | env_vars: { 11 | key: "TRAMPOLINE_BUILD_FILE" 12 | value: "github/java-pubsub-group-kafka-connector/.kokoro/build.sh" 13 | } 14 | -------------------------------------------------------------------------------- /.kokoro/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | function retry_with_backoff { 17 | attempts_left=$1 18 | sleep_seconds=$2 19 | shift 2 20 | command=$@ 21 | 22 | 23 | # store current flag state 24 | flags=$- 25 | 26 | # allow failures to continue 27 | set +e 28 | ${command} 29 | exit_code=$? 30 | 31 | # restore "e" flag 32 | if [[ ${flags} =~ e ]] 33 | then set -e 34 | else set +e 35 | fi 36 | 37 | if [[ $exit_code == 0 ]] 38 | then 39 | return 0 40 | fi 41 | 42 | # failure 43 | if [[ ${attempts_left} -gt 0 ]] 44 | then 45 | echo "failure (${exit_code}), sleeping ${sleep_seconds}..." 46 | sleep ${sleep_seconds} 47 | new_attempts=$((${attempts_left} - 1)) 48 | new_sleep=$((${sleep_seconds} * 2)) 49 | retry_with_backoff ${new_attempts} ${new_sleep} ${command} 50 | fi 51 | 52 | return $exit_code 53 | } 54 | 55 | ## Helper functions 56 | function now() { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n'; } 57 | function msg() { println "$*" >&2; } 58 | function println() { printf '%s\n' "$(now) $*"; } 59 | 60 | ## Helper comment to trigger updated repo dependency release -------------------------------------------------------------------------------- /.kokoro/continuous/common.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Build logs will be here 4 | action { 5 | define_artifacts { 6 | regex: "**/*sponge_log.xml" 7 | regex: "**/*sponge_log.txt" 8 | } 9 | } 10 | 11 | # Download trampoline resources. 12 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" 13 | 14 | # Use the trampoline script to run in docker. 15 | build_file: "java-pubsub-group-kafka-connector/.kokoro/trampoline.sh" 16 | 17 | env_vars: { 18 | key: "TRAMPOLINE_BUILD_FILE" 19 | value: "github/java-pubsub-group-kafka-connector/.kokoro/build.sh" 20 | } 21 | 22 | env_vars: { 23 | key: "JOB_TYPE" 24 | value: "test" 25 | } 26 | -------------------------------------------------------------------------------- /.kokoro/continuous/integration.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Configure the docker image for kokoro-trampoline. 4 | env_vars: { 5 | key: "TRAMPOLINE_IMAGE" 6 | value: "gcr.io/cloud-devrel-kokoro-resources/java8" 7 | } 8 | 9 | env_vars: { 10 | key: "JOB_TYPE" 11 | value: "integration" 12 | } 13 | 14 | env_vars: { 15 | key: "GOOGLE_CLOUD_PROJECT" 16 | value: "java-docs-samples-testing" 17 | } 18 | 19 | env_vars: { 20 | key: "ENABLE_FLAKYBOT" 21 | value: "true" 22 | } 23 | 24 | env_vars: { 25 | key: "GOOGLE_APPLICATION_CREDENTIALS" 26 | value: "secret_manager/java-docs-samples-service-account" 27 | } 28 | 29 | env_vars: { 30 | key: "SECRET_MANAGER_KEYS" 31 | value: "java-docs-samples-service-account,java-pubsub-group-kafka-connector-secrets" 32 | } 33 | -------------------------------------------------------------------------------- /.kokoro/dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2019 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License.
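A minimal usage sketch for `retry_with_backoff` from `.kokoro/common.sh` above: the first two arguments are the number of attempts and the initial sleep in seconds, and the sleep doubles after each failure (10s, 20s, 40s for `3 10`). The `mvn` flags mirror the install step in `.kokoro/build.sh` and are illustrative rather than prescriptive.

```bash
#!/bin/bash
source .kokoro/common.sh

# Three attempts; sleeps 10s, then 20s, then 40s between failures.
retry_with_backoff 3 10 \
  mvn install -B -V -DskipTests=true
```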
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | shopt -s nullglob 18 | 19 | ## Get the directory of the build script 20 | scriptDir=$(realpath $(dirname "${BASH_SOURCE[0]}")) 21 | ## cd to the parent directory, i.e. the root of the git repo 22 | cd ${scriptDir}/.. 23 | 24 | # include common functions 25 | source ${scriptDir}/common.sh 26 | 27 | # Print out Java 28 | java -version 29 | echo $JOB_TYPE 30 | 31 | function determineMavenOpts() { 32 | local javaVersion=$( 33 | # filter down to the version line, then pull out the version between quotes, 34 | # then trim the version number down to its minimal number (removing any 35 | # update or suffix number). 36 | java -version 2>&1 | grep "version" \ 37 | | sed -E 's/^.*"(.*?)".*$/\1/g' \ 38 | | sed -E 's/^(1\.[0-9]\.0).*$/\1/g' 39 | ) 40 | 41 | if [[ $javaVersion == 17* ]] 42 | then 43 | # MaxPermSize is no longer supported as of jdk 17 44 | echo -n "-Xmx1024m" 45 | else 46 | echo -n "-Xmx1024m -XX:MaxPermSize=128m" 47 | fi 48 | } 49 | 50 | export MAVEN_OPTS=$(determineMavenOpts) 51 | 52 | # this should run maven enforcer 53 | retry_with_backoff 3 10 \ 54 | mvn install -B -V -ntp \ 55 | -DskipTests=true \ 56 | -Dmaven.javadoc.skip=true \ 57 | -Dclirr.skip=true 58 | 59 | mvn -B dependency:analyze -DfailOnWarning=true 60 | -------------------------------------------------------------------------------- /.kokoro/nightly/common.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Build logs will be here 4 | action { 5 | define_artifacts { 6 | regex: "**/*sponge_log.xml" 7 | regex: "**/*sponge_log.txt" 8 | } 9 | } 10 | 11 | # Download trampoline resources. 12 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" 13 | 14 | # Use the trampoline script to run in docker. 15 | build_file: "java-pubsub-group-kafka-connector/.kokoro/trampoline.sh" 16 | 17 | env_vars: { 18 | key: "TRAMPOLINE_BUILD_FILE" 19 | value: "github/java-pubsub-group-kafka-connector/.kokoro/build.sh" 20 | } 21 | 22 | env_vars: { 23 | key: "JOB_TYPE" 24 | value: "test" 25 | } 26 | -------------------------------------------------------------------------------- /.kokoro/nightly/integration.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Configure the docker image for kokoro-trampoline. 
4 | env_vars: { 5 | key: "TRAMPOLINE_IMAGE" 6 | value: "gcr.io/cloud-devrel-kokoro-resources/java8" 7 | } 8 | 9 | env_vars: { 10 | key: "JOB_TYPE" 11 | value: "integration" 12 | } 13 | 14 | env_vars: { 15 | key: "GCLOUD_PROJECT" 16 | value: "java-docs-samples-testing" 17 | } 18 | 19 | env_vars: { 20 | key: "GOOGLE_CLOUD_PROJECT" 21 | value: "java-docs-samples-testing" 22 | } 23 | 24 | env_vars: { 25 | key: "ENABLE_FLAKYBOT" 26 | value: "true" 27 | } 28 | 29 | env_vars: { 30 | key: "GOOGLE_APPLICATION_CREDENTIALS" 31 | value: "secret_manager/java-docs-samples-service-account" 32 | } 33 | 34 | env_vars: { 35 | key: "SECRET_MANAGER_KEYS" 36 | value: "java-docs-samples-service-account,java-pubsub-group-kafka-connector-secrets" 37 | } 38 | -------------------------------------------------------------------------------- /.kokoro/populate-secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2020 Google LLC. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} 19 | function msg { println "$*" >&2 ;} 20 | function println { printf '%s\n' "$(now) $*" ;} 21 | 22 | 23 | # Populates requested secrets set in SECRET_MANAGER_KEYS from service account: 24 | # kokoro-trampoline@cloud-devrel-kokoro-resources.iam.gserviceaccount.com 25 | SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" 26 | msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" 27 | mkdir -p ${SECRET_LOCATION} 28 | for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") 29 | do 30 | msg "Retrieving secret ${key}" 31 | # Run docker inside the `if` condition so that a failure reaches the 32 | # else branch instead of aborting the whole script under `set -e`. 33 | if docker run --entrypoint=gcloud \ 34 | --volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR} \ 35 | gcr.io/google.com/cloudsdktool/cloud-sdk \ 36 | secrets versions access latest \ 37 | --project cloud-devrel-kokoro-resources \ 38 | --secret ${key} > \ 39 | "${SECRET_LOCATION}/${key}"; then 40 | msg "Secret written to ${SECRET_LOCATION}/${key}" 41 | else 42 | msg "Error retrieving secret ${key}" 43 | fi 44 | done 45 | -------------------------------------------------------------------------------- /.kokoro/presubmit/common.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Build logs will be here 4 | action { 5 | define_artifacts { 6 | regex: "**/*sponge_log.xml" 7 | regex: "**/*sponge_log.txt" 8 | } 9 | } 10 | 11 | # Download trampoline resources. 12 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" 13 | 14 | # Use the trampoline script to run in docker.
15 | build_file: "java-pubsub-group-kafka-connector/.kokoro/trampoline.sh" 16 | 17 | env_vars: { 18 | key: "TRAMPOLINE_BUILD_FILE" 19 | value: "github/java-pubsub-group-kafka-connector/.kokoro/build.sh" 20 | } 21 | 22 | env_vars: { 23 | key: "JOB_TYPE" 24 | value: "test" 25 | } 26 | 27 | before_action { 28 | fetch_keystore { 29 | keystore_resource { 30 | keystore_config_id: 73713 31 | keyname: "dpebot_codecov_token" 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /.kokoro/presubmit/integration.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Configure the docker image for kokoro-trampoline. 4 | env_vars: { 5 | key: "TRAMPOLINE_IMAGE" 6 | value: "gcr.io/cloud-devrel-kokoro-resources/java8" 7 | } 8 | 9 | env_vars: { 10 | key: "JOB_TYPE" 11 | value: "integration" 12 | } 13 | 14 | env_vars: { 15 | key: "GOOGLE_CLOUD_PROJECT" 16 | value: "java-docs-samples-testing" 17 | } 18 | 19 | env_vars: { 20 | key: "GOOGLE_APPLICATION_CREDENTIALS" 21 | value: "secret_manager/java-docs-samples-service-account" 22 | } 23 | 24 | env_vars: { 25 | key: "SECRET_MANAGER_KEYS" 26 | value: "java-docs-samples-service-account,java-pubsub-group-kafka-connector-secrets" 27 | } 28 | -------------------------------------------------------------------------------- /.kokoro/release/bump_snapshot.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Build logs will be here 4 | action { 5 | define_artifacts { 6 | regex: "**/*sponge_log.xml" 7 | } 8 | } 9 | 10 | # Download trampoline resources. 11 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" 12 | 13 | # Use the trampoline script to run in docker. 14 | build_file: "java-pubsub-group-kafka-connector/.kokoro/trampoline.sh" 15 | 16 | # Configure the docker image for kokoro-trampoline. 17 | env_vars: { 18 | key: "TRAMPOLINE_IMAGE" 19 | value: "gcr.io/cloud-devrel-kokoro-resources/node:10-user" 20 | } 21 | 22 | env_vars: { 23 | key: "TRAMPOLINE_BUILD_FILE" 24 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/bump_snapshot.sh" 25 | } 26 | 27 | # tokens used by release-please to keep an up-to-date release PR. 28 | before_action { 29 | fetch_keystore { 30 | keystore_resource { 31 | keystore_config_id: 73713 32 | keyname: "github-magic-proxy-key-release-please" 33 | } 34 | } 35 | } 36 | 37 | before_action { 38 | fetch_keystore { 39 | keystore_resource { 40 | keystore_config_id: 73713 41 | keyname: "github-magic-proxy-token-release-please" 42 | } 43 | } 44 | } 45 | 46 | before_action { 47 | fetch_keystore { 48 | keystore_resource { 49 | keystore_config_id: 73713 50 | keyname: "github-magic-proxy-url-release-please" 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /.kokoro/release/common.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # Download trampoline resources. 4 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" 5 | 6 | # Use the trampoline script to run in docker. 7 | build_file: "java-pubsub-group-kafka-connector/.kokoro/trampoline.sh" 8 | 9 | # Configure the docker image for kokoro-trampoline. 
10 | env_vars: { 11 | key: "TRAMPOLINE_IMAGE" 12 | value: "gcr.io/cloud-devrel-kokoro-resources/java8" 13 | } 14 | 15 | before_action { 16 | fetch_keystore { 17 | keystore_resource { 18 | keystore_config_id: 70247 19 | keyname: "maven-gpg-keyring" 20 | } 21 | } 22 | } 23 | 24 | before_action { 25 | fetch_keystore { 26 | keystore_resource { 27 | keystore_config_id: 70247 28 | keyname: "maven-gpg-passphrase" 29 | } 30 | } 31 | } 32 | 33 | before_action { 34 | fetch_keystore { 35 | keystore_resource { 36 | keystore_config_id: 70247 37 | keyname: "maven-gpg-pubkeyring" 38 | } 39 | } 40 | } 41 | 42 | before_action { 43 | fetch_keystore { 44 | keystore_resource { 45 | keystore_config_id: 70247 46 | keyname: "sonatype-credentials" 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /.kokoro/release/common.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | # Get secrets from keystore and set them as environment variables 19 | setup_environment_secrets() { 20 | export GPG_PASSPHRASE=$(cat ${KOKORO_KEYSTORE_DIR}/70247_maven-gpg-passphrase) 21 | export GPG_TTY=$(tty) 22 | export GPG_HOMEDIR=/gpg 23 | mkdir $GPG_HOMEDIR 24 | mv ${KOKORO_KEYSTORE_DIR}/70247_maven-gpg-pubkeyring $GPG_HOMEDIR/pubring.gpg 25 | mv ${KOKORO_KEYSTORE_DIR}/70247_maven-gpg-keyring $GPG_HOMEDIR/secring.gpg 26 | export SONATYPE_USERNAME=$(cat ${KOKORO_KEYSTORE_DIR}/70247_sonatype-credentials | cut -f1 -d'|') 27 | export SONATYPE_PASSWORD=$(cat ${KOKORO_KEYSTORE_DIR}/70247_sonatype-credentials | cut -f2 -d'|') 28 | } 29 | 30 | create_settings_xml_file() { 31 | echo "<settings> 32 | <servers> 33 | <server> 34 | <id>ossrh</id> 35 | <username>${SONATYPE_USERNAME}</username> 36 | <password>${SONATYPE_PASSWORD}</password> 37 | </server> 38 | <server> 39 | <id>sonatype-nexus-staging</id> 40 | <username>${SONATYPE_USERNAME}</username> 41 | <password>${SONATYPE_PASSWORD}</password> 42 | </server> 43 | <server> 44 | <id>sonatype-nexus-snapshots</id> 45 | <username>${SONATYPE_USERNAME}</username> 46 | <password>${SONATYPE_PASSWORD}</password> 47 | </server> 48 | </servers> 49 | </settings>" > $1 50 | } -------------------------------------------------------------------------------- /.kokoro/release/drop.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | env_vars: { 4 | key: "TRAMPOLINE_BUILD_FILE" 5 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/drop.sh" 6 | } 7 | -------------------------------------------------------------------------------- /.kokoro/release/drop.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | # STAGING_REPOSITORY_ID must be set 19 | if [ -z "${STAGING_REPOSITORY_ID}" ]; then 20 | echo "Missing STAGING_REPOSITORY_ID environment variable" 21 | exit 1 22 | fi 23 | 24 | source $(dirname "$0")/common.sh 25 | pushd $(dirname "$0")/../../ 26 | 27 | setup_environment_secrets 28 | create_settings_xml_file "settings.xml" 29 | 30 | mvn nexus-staging:drop -B \ 31 | --settings=settings.xml \ 32 | -DstagingRepositoryId=${STAGING_REPOSITORY_ID} 33 | -------------------------------------------------------------------------------- /.kokoro/release/promote.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | env_vars: { 4 | key: "TRAMPOLINE_BUILD_FILE" 5 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/promote.sh" 6 | } 7 | -------------------------------------------------------------------------------- /.kokoro/release/promote.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
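`drop.sh` above and `promote.sh` below are the two possible ends of the Sonatype staging flow that `stage.sh` begins: promote releases the staged repository, drop discards it. A hedged invocation sketch; the staging repository id below is hypothetical and would come from the staging build's output.

```bash
#!/bin/bash
# Hypothetical id captured from the stage.sh run's nexus-staging output.
export STAGING_REPOSITORY_ID="comgooglecloud-1234"

.kokoro/release/promote.sh   # release the staged artifacts
# ...or abandon them instead:
# .kokoro/release/drop.sh
```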
15 | 16 | set -eo pipefail 17 | 18 | # STAGING_REPOSITORY_ID must be set 19 | if [ -z "${STAGING_REPOSITORY_ID}" ]; then 20 | echo "Missing STAGING_REPOSITORY_ID environment variable" 21 | exit 1 22 | fi 23 | 24 | source $(dirname "$0")/common.sh 25 | 26 | pushd $(dirname "$0")/../../ 27 | 28 | setup_environment_secrets 29 | create_settings_xml_file "settings.xml" 30 | 31 | mvn nexus-staging:release -B \ 32 | -DperformRelease=true \ 33 | --settings=settings.xml \ 34 | -DstagingRepositoryId=${STAGING_REPOSITORY_ID} 35 | -------------------------------------------------------------------------------- /.kokoro/release/publish_javadoc.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/doc-templates/" 4 | 5 | env_vars: { 6 | key: "STAGING_BUCKET" 7 | value: "docs-staging" 8 | } 9 | 10 | env_vars: { 11 | key: "TRAMPOLINE_BUILD_FILE" 12 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/publish_javadoc.sh" 13 | } 14 | 15 | 16 | before_action { 17 | fetch_keystore { 18 | keystore_resource { 19 | keystore_config_id: 73713 20 | keyname: "docuploader_service_account" 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /.kokoro/release/publish_javadoc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2019 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | if [[ -z "${CREDENTIALS}" ]]; then 19 | CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account 20 | fi 21 | 22 | if [[ -z "${STAGING_BUCKET}" ]]; then 23 | echo "Need to set STAGING_BUCKET environment variable" 24 | exit 1 25 | fi 26 | 27 | # work from the git root directory 28 | pushd $(dirname "$0")/../../ 29 | 30 | # install docuploader package 31 | python3 -m pip install --require-hashes -r .kokoro/requirements.txt 32 | 33 | # compile all packages 34 | mvn clean install -B -q -DskipTests=true -Dclirr.skip=true 35 | 36 | export NAME=pubsub-group-kafka-connector 37 | export VERSION=$(grep ${NAME}: versions.txt | cut -d: -f3) 38 | 39 | # build the docs 40 | mvn site -B -q 41 | 42 | pushd target/site/apidocs 43 | 44 | # create metadata 45 | python3 -m docuploader create-metadata \ 46 | --name ${NAME} \ 47 | --version ${VERSION} \ 48 | --language java 49 | 50 | # upload docs 51 | python3 -m docuploader upload . 
\ 52 | --credentials ${CREDENTIALS} \ 53 | --staging-bucket ${STAGING_BUCKET} 54 | -------------------------------------------------------------------------------- /.kokoro/release/publish_javadoc11.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | # cloud-rad production 4 | env_vars: { 5 | key: "STAGING_BUCKET_V2" 6 | value: "docs-staging-v2" 7 | } 8 | 9 | # Configure the docker image for kokoro-trampoline 10 | env_vars: { 11 | key: "TRAMPOLINE_IMAGE" 12 | value: "gcr.io/cloud-devrel-kokoro-resources/java11" 13 | } 14 | 15 | env_vars: { 16 | key: "TRAMPOLINE_BUILD_FILE" 17 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/publish_javadoc11.sh" 18 | } 19 | 20 | before_action { 21 | fetch_keystore { 22 | keystore_resource { 23 | keystore_config_id: 73713 24 | keyname: "docuploader_service_account" 25 | } 26 | } 27 | } 28 | 29 | # Downloads docfx doclet resource. This will be in ${KOKORO_GFILE_DIR}/ 30 | gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/docfx" 31 | -------------------------------------------------------------------------------- /.kokoro/release/publish_javadoc11.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2021 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | if [[ -z "${CREDENTIALS}" ]]; then 19 | CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account 20 | fi 21 | 22 | if [[ -z "${STAGING_BUCKET_V2}" ]]; then 23 | echo "Need to set STAGING_BUCKET_V2 environment variable" 24 | exit 1 25 | fi 26 | 27 | # work from the git root directory 28 | pushd $(dirname "$0")/../../ 29 | 30 | # install docuploader package 31 | python3 -m pip install --require-hashes -r .kokoro/requirements.txt 32 | 33 | # compile all packages 34 | mvn clean install -B -q -DskipTests=true -Dclirr.skip=true 35 | 36 | export NAME=pubsub-group-kafka-connector 37 | export VERSION=$(grep ${NAME}: versions.txt | cut -d: -f3) 38 | 39 | # cloud RAD generation 40 | mvn clean javadoc:aggregate -B -q -P docFX 41 | # include CHANGELOG 42 | cp CHANGELOG.md target/docfx-yml/history.md 43 | 44 | pushd target/docfx-yml 45 | 46 | # create metadata 47 | python3 -m docuploader create-metadata \ 48 | --name ${NAME} \ 49 | --version ${VERSION} \ 50 | --xrefs devsite://java/gax \ 51 | --xrefs devsite://java/google-cloud-core \ 52 | --xrefs devsite://java/api-common \ 53 | --xrefs devsite://java/proto-google-common-protos \ 54 | --xrefs devsite://java/google-api-client \ 55 | --xrefs devsite://java/google-http-client \ 56 | --xrefs devsite://java/protobuf \ 57 | --language java 58 | 59 | # upload yml to production bucket 60 | python3 -m docuploader upload . 
\ 61 | --credentials ${CREDENTIALS} \ 62 | --staging-bucket ${STAGING_BUCKET_V2} \ 63 | --destination-prefix docfx 64 | -------------------------------------------------------------------------------- /.kokoro/release/snapshot.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | env_vars: { 4 | key: "TRAMPOLINE_BUILD_FILE" 5 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/snapshot.sh" 6 | } -------------------------------------------------------------------------------- /.kokoro/release/snapshot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2019 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | source $(dirname "$0")/common.sh 19 | MAVEN_SETTINGS_FILE=$(realpath $(dirname "$0")/../../)/settings.xml 20 | pushd $(dirname "$0")/../../ 21 | 22 | # ensure we're trying to push a snapshot (no-result returns non-zero exit code) 23 | grep SNAPSHOT versions.txt 24 | 25 | setup_environment_secrets 26 | create_settings_xml_file "settings.xml" 27 | 28 | mvn clean deploy -B \ 29 | --settings ${MAVEN_SETTINGS_FILE} \ 30 | -DperformRelease=true \ 31 | -Dgpg.executable=gpg \ 32 | -Dgpg.passphrase=${GPG_PASSPHRASE} \ 33 | -Dgpg.homedir=${GPG_HOMEDIR} 34 | -------------------------------------------------------------------------------- /.kokoro/release/stage.cfg: -------------------------------------------------------------------------------- 1 | # Format: //devtools/kokoro/config/proto/build.proto 2 | 3 | env_vars: { 4 | key: "TRAMPOLINE_BUILD_FILE" 5 | value: "github/java-pubsub-group-kafka-connector/.kokoro/release/stage.sh" 6 | } 7 | 8 | # Need to save the properties file 9 | action { 10 | define_artifacts { 11 | regex: "github/java-pubsub-group-kafka-connector/target/nexus-staging/staging/*.properties" 12 | strip_prefix: "github/java-pubsub-group-kafka-connector" 13 | } 14 | } 15 | 16 | env_vars: { 17 | key: "SECRET_MANAGER_KEYS" 18 | value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" 19 | } 20 | -------------------------------------------------------------------------------- /.kokoro/release/stage.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | set -eo pipefail 17 | 18 | # Start the releasetool reporter 19 | requirementsFile=$(realpath $(dirname "${BASH_SOURCE[0]}")/../requirements.txt) 20 | python3 -m pip install --require-hashes -r $requirementsFile 21 | python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script 22 | 23 | source $(dirname "$0")/common.sh 24 | source $(dirname "$0")/../common.sh 25 | MAVEN_SETTINGS_FILE=$(realpath $(dirname "$0")/../../)/settings.xml 26 | pushd $(dirname "$0")/../../ 27 | 28 | setup_environment_secrets 29 | create_settings_xml_file "settings.xml" 30 | 31 | # attempt to stage 3 times with exponential backoff (starting with 10 seconds) 32 | retry_with_backoff 3 10 \ 33 | mvn clean deploy -B \ 34 | --settings ${MAVEN_SETTINGS_FILE} \ 35 | -DskipTests=true \ 36 | -Dclirr.skip=true \ 37 | -DperformRelease=true \ 38 | -Dgpg.executable=gpg \ 39 | -Dgpg.passphrase=${GPG_PASSPHRASE} \ 40 | -Dgpg.homedir=${GPG_HOMEDIR} 41 | 42 | if [[ -n "${AUTORELEASE_PR}" ]] 43 | then 44 | mvn nexus-staging:release -B \ 45 | -DperformRelease=true \ 46 | --settings=settings.xml 47 | fi 48 | -------------------------------------------------------------------------------- /.kokoro/requirements.in: -------------------------------------------------------------------------------- 1 | gcp-docuploader 2 | gcp-releasetool>=1.10.5 # required for compatibility with cryptography>=39.x 3 | wheel 4 | setuptools 5 | typing-extensions 6 | click<8.1.0 -------------------------------------------------------------------------------- /.kokoro/trampoline.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2018 Google LLC 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | set -eo pipefail 16 | # Always run the cleanup script, regardless of the success of bouncing into 17 | # the container. 18 | function cleanup() { 19 | chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh 20 | ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh 21 | echo "cleanup"; 22 | } 23 | trap cleanup EXIT 24 | 25 | $(dirname $0)/populate-secrets.sh # Secret Manager secrets. 
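# Hand off to the trampoline. As configured in the job's .kokoro/*.cfg files (an
# assumption based on the cfg contents above): trampoline_v1.py, supplied by Kokoro
# in ${KOKORO_GFILE_DIR}, pulls the TRAMPOLINE_IMAGE container and re-runs this
# build inside it, executing the configured TRAMPOLINE_BUILD_FILE.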
26 | python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" 27 | -------------------------------------------------------------------------------- /.repo-metadata.json: -------------------------------------------------------------------------------- 1 | { 2 | "api_shortname": "pubsub-group-kafka-connector", 3 | "name_pretty": "Pub/Sub Group Kafka Connector", 4 | "api_reference": "https://cloud.google.com/pubsub/docs/", 5 | "product_documentation": "https://cloud.google.com/pubsub/docs/", 6 | "client_documentation": "https://cloud.google.com/java/docs/reference/pubsub-group-kafka-connector/latest/history", 7 | "api_description": "The Pub/Sub Group Kafka Connector is developed to work with Kafka Connect to publish messages from Kafka to Google Cloud Pub/Sub or Pub/Sub Lite and vice versa.", 8 | "release_level": "stable", 9 | "language": "java", 10 | "min_java_version": 8, 11 | "repo": "googleapis/java-pubsub-group-kafka-connector", 12 | "repo_short": "java-pubsub-group-kafka-connector", 13 | "distribution_name": "com.google.cloud:pubsub-group-kafka-connector", 14 | "codeowner_team": "@googleapis/api-pubsub", 15 | "api_id": "pubsub.googleapis.com", 16 | "library_type": "AGENT", 17 | "transport": "grpc", 18 | "requires_billing": true 19 | } 20 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | 2 | # Code of Conduct 3 | 4 | ## Our Pledge 5 | 6 | In the interest of fostering an open and welcoming environment, we as 7 | contributors and maintainers pledge to making participation in our project and 8 | our community a harassment-free experience for everyone, regardless of age, body 9 | size, disability, ethnicity, gender identity and expression, level of 10 | experience, education, socio-economic status, nationality, personal appearance, 11 | race, religion, or sexual identity and orientation. 12 | 13 | ## Our Standards 14 | 15 | Examples of behavior that contributes to creating a positive environment 16 | include: 17 | 18 | * Using welcoming and inclusive language 19 | * Being respectful of differing viewpoints and experiences 20 | * Gracefully accepting constructive criticism 21 | * Focusing on what is best for the community 22 | * Showing empathy towards other community members 23 | 24 | Examples of unacceptable behavior by participants include: 25 | 26 | * The use of sexualized language or imagery and unwelcome sexual attention or 27 | advances 28 | * Trolling, insulting/derogatory comments, and personal or political attacks 29 | * Public or private harassment 30 | * Publishing others' private information, such as a physical or electronic 31 | address, without explicit permission 32 | * Other conduct which could reasonably be considered inappropriate in a 33 | professional setting 34 | 35 | ## Our Responsibilities 36 | 37 | Project maintainers are responsible for clarifying the standards of acceptable 38 | behavior and are expected to take appropriate and fair corrective action in 39 | response to any instances of unacceptable behavior. 40 | 41 | Project maintainers have the right and responsibility to remove, edit, or reject 42 | comments, commits, code, wiki edits, issues, and other contributions that are 43 | not aligned to this Code of Conduct, or to ban temporarily or permanently any 44 | contributor for other behaviors that they deem inappropriate, threatening, 45 | offensive, or harmful. 
46 | 47 | ## Scope 48 | 49 | This Code of Conduct applies both within project spaces and in public spaces 50 | when an individual is representing the project or its community. Examples of 51 | representing a project or community include using an official project e-mail 52 | address, posting via an official social media account, or acting as an appointed 53 | representative at an online or offline event. Representation of a project may be 54 | further defined and clarified by project maintainers. 55 | 56 | This Code of Conduct also applies outside the project spaces when the Project 57 | Steward has a reasonable belief that an individual's behavior may have a 58 | negative impact on the project or its community. 59 | 60 | ## Conflict Resolution 61 | 62 | We do not believe that all conflict is bad; healthy debate and disagreement 63 | often yield positive results. However, it is never okay to be disrespectful or 64 | to engage in behavior that violates the project’s code of conduct. 65 | 66 | If you see someone violating the code of conduct, you are encouraged to address 67 | the behavior directly with those involved. Many issues can be resolved quickly 68 | and easily, and this gives people more control over the outcome of their 69 | dispute. If you are unable to resolve the matter for any reason, or if the 70 | behavior is threatening or harassing, report it. We are dedicated to providing 71 | an environment where participants feel welcome and safe. 72 | 73 | Reports should be directed to *googleapis-stewards@google.com*, the 74 | Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to 75 | receive and address reported violations of the code of conduct. They will then 76 | work with a committee consisting of representatives from the Open Source 77 | Programs Office and the Google Open Source Strategy team. If for any reason you 78 | are uncomfortable reaching out to the Project Steward, please email 79 | opensource@google.com. 80 | 81 | We will investigate every complaint, but you may not receive a direct response. 82 | We will use our discretion in determining when and how to follow up on reported 83 | incidents, which may range from not taking action to permanent expulsion from 84 | the project and project-sponsored spaces. We will notify the accused of the 85 | report and provide them an opportunity to discuss it before any action is taken. 86 | The identity of the reporter will be omitted from the details of the report 87 | supplied to the accused. In potentially harmful situations, such as ongoing 88 | harassment or threats to anyone's safety, we may take action without notice. 89 | 90 | ## Attribution 91 | 92 | This Code of Conduct is adapted from the Contributor Covenant, version 1.4, 93 | available at 94 | https://www.contributor-covenant.org/version/1/4/code-of-conduct.html -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # How to Contribute 2 | 3 | We'd love to accept your patches and contributions to this project. There are 4 | just a few small guidelines you need to follow. 5 | 6 | ## Contributor License Agreement 7 | 8 | Contributions to this project must be accompanied by a Contributor License 9 | Agreement. You (or your employer) retain the copyright to your contribution; 10 | this simply gives us permission to use and redistribute your contributions as 11 | part of the project. 
Head over to <https://cla.developers.google.com/> to see 12 | your current agreements on file or to sign a new one. 13 | 14 | You generally only need to submit a CLA once, so if you've already submitted one 15 | (even if it was for a different project), you probably don't need to do it 16 | again. 17 | 18 | ## Code reviews 19 | 20 | All submissions, including submissions by project members, require review. We 21 | use GitHub pull requests for this purpose. Consult 22 | [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more 23 | information on using pull requests. 24 | 25 | ## Community Guidelines 26 | 27 | This project follows 28 | [Google's Open Source Community Guidelines](https://opensource.google.com/conduct/). 29 | 30 | ## Building the project 31 | 32 | To build, package, and run all unit tests, run the command 33 | 34 | ``` 35 | mvn clean verify 36 | ``` 37 | 38 | ### Running Integration tests 39 | 40 | To include integration tests when building the project, you need access to 41 | a GCP Project with a valid service account. 42 | 43 | For instructions on how to generate a service account and corresponding 44 | credentials JSON see: [Creating a Service Account][1]. 45 | 46 | Then run the following to build, package, run all unit tests, and run all 47 | integration tests. 48 | 49 | ```bash 50 | export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service/account.json 51 | mvn -Penable-integration-tests clean verify 52 | ``` 53 | 54 | ## Code Samples 55 | 56 | All code samples must comply with the [java sample formatting guide][3]. 57 | Code samples must be bundled in separate Maven modules. 58 | 59 | The samples must be separate from the primary project for a few reasons: 60 | 1. Primary projects have a minimum Java version of Java 8, whereas samples can have 61 | a minimum Java version of Java 11. Because of this, we need the ability to 62 | selectively exclude samples from a build run. 63 | 2. Many code samples depend on external GCP services and need 64 | credentials to access those services. 65 | 3. Code samples are not released as Maven artifacts and must be excluded from 66 | release builds. 67 | 68 | ### Building 69 | 70 | ```bash 71 | mvn clean verify 72 | ``` 73 | 74 | Some samples require access to GCP services and require a service account: 75 | 76 | ```bash 77 | export GOOGLE_APPLICATION_CREDENTIALS=/path/to/service/account.json 78 | mvn clean verify 79 | ``` 80 | 81 | ### Code Formatting 82 | 83 | Code in this repo is formatted with 84 | [google-java-format](https://github.com/google/google-java-format). 85 | To run formatting on your project, you can run: 86 | ``` 87 | mvn com.coveo:fmt-maven-plugin:format 88 | ``` 89 | 90 | [1]: https://cloud.google.com/docs/authentication/getting-started#creating_a_service_account 91 | [2]: https://maven.apache.org/settings.html#Active_Profiles 92 | [3]: https://github.com/GoogleCloudPlatform/java-docs-samples/blob/main/SAMPLE_FORMAT.md -------------------------------------------------------------------------------- /SECURITY.md: -------------------------------------------------------------------------------- 1 | # Security Policy 2 | 3 | To report a security issue, please use [g.co/vulnz](https://g.co/vulnz). 4 | 5 | The Google Security Team will respond within 5 working days of your report on g.co/vulnz. 6 | 7 | We use g.co/vulnz for intake; coordination and disclosure happen here on GitHub, using a GitHub Security Advisory to privately discuss and fix the issue.
8 | -------------------------------------------------------------------------------- /config/cps-sink-connector.properties: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Unique name for the Pub/Sub sink connector. 16 | name=CPSSinkConnector 17 | # The Java class for the Pub/Sub sink connector. 18 | connector.class=com.google.pubsub.kafka.sink.CloudPubSubSinkConnector 19 | # The maximum number of tasks that should be created for this connector. 20 | tasks.max=10 21 | # Set the key converter for the Pub/Sub sink connector. 22 | key.converter=org.apache.kafka.connect.storage.StringConverter 23 | # Set the value converter for the Pub/Sub sink connector. 24 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter 25 | # A comma-separated list of Kafka topics to use as input for the connector. 26 | # TODO (developer): update to your Kafka topic name(s). 27 | topics=my-kafka-topic 28 | # TODO (developer): update to your GCP project ID, e.g. 29 | # "bar" in topic "/projects/bar/topics/foo". 30 | cps.project=my-project 31 | # TODO (developer): update to your Pub/Sub topic ID, e.g. 32 | # "foo" in topic "/projects/bar/topics/foo", 33 | # where data should be written. 34 | cps.topic=my-cps-topic 35 | 36 | -------------------------------------------------------------------------------- /config/cps-source-connector.properties: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Unique name for the Pub/Sub source connector. 16 | name=CPSSourceConnector 17 | # The Java class for the Pub/Sub source connector. 18 | connector.class=com.google.pubsub.kafka.source.CloudPubSubSourceConnector 19 | # The maximum number of tasks that should be created for this connector. 20 | tasks.max=10 21 | # Set the key converter for the Pub/Sub source connector. 22 | key.converter=org.apache.kafka.connect.storage.StringConverter 23 | # Set the value converter for the Pub/Sub source connector. 24 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter 25 | # TODO (developer): update to your Kafka topic name. 26 | kafka.topic=my-kafka-topic 27 | # TODO (developer): update to your GCP project ID, e.g. 28 | # "bar" in subscription "/projects/bar/subscriptions/baz".
29 | cps.project=my-project 30 | # TODO (developer): update to your Pub/Sub subscription ID, e.g. 31 | # "baz" in subscription "/projects/bar/subscriptions/baz", 32 | # where data should be read. 33 | cps.subscription=my-cps-subscription 34 | -------------------------------------------------------------------------------- /config/pubsub-lite-sink-connector.properties: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Unique name for the Pub/Sub Lite sink connector. 16 | name=PubSubLiteSinkConnector 17 | # The Java class for the Pub/Sub Lite sink connector. 18 | connector.class=com.google.pubsublite.kafka.sink.PubSubLiteSinkConnector 19 | # The maximum number of tasks that should be created for the connector. 20 | tasks.max=10 21 | # Set the key converter for the Pub/Sub Lite sink connector. 22 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter 23 | # Set the value converter for the Pub/Sub Lite sink connector. 24 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter 25 | # A comma-separated list of Kafka topics to use as input for the connector. 26 | # TODO (developer): update to your Kafka topic name. 27 | topics=my-kafka-topic 28 | # TODO (developer): update to your GCP project ID, e.g. 29 | # "bar" in topic "/projects/bar/locations/us-east1-a/topics/foo". 30 | pubsublite.project=my-project 31 | # TODO (developer): update to your Pub/Sub Lite location, e.g. 32 | # "us-east1-a" in topic "/projects/bar/locations/us-east1-a/topics/foo". 33 | pubsublite.location=europe-south7-q 34 | # TODO (developer): update to your Pub/Sub Lite topic ID, e.g. 35 | # "foo" in topic "/projects/bar/locations/us-east1-a/topics/foo", 36 | # where data should be written. 37 | pubsublite.topic=my-psl-topic 38 | -------------------------------------------------------------------------------- /config/pubsub-lite-source-connector.properties: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Unique name for the Pub/Sub Lite source connector. 16 | name=PubSubLiteSourceConnector 17 | # The Java class for the Pub/Sub Lite source connector.
18 | connector.class=com.google.pubsublite.kafka.source.PubSubLiteSourceConnector 19 | # The maximum number of tasks that should be created for this connector. 20 | tasks.max=10 21 | # Set the key converter for the Pub/Sub Lite source connector. 22 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter 23 | # Set the value converter for the Pub/Sub Lite source connector. 24 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter 25 | # TODO (developer): update to your Kafka topic name. 26 | kafka.topic=my-kafka-topic 27 | # TODO (developer): update to your GCP project ID, e.g. 28 | # "bar" in subscription "/projects/bar/locations/us-east1-a/subscriptions/baz". 29 | pubsublite.project=my-project 30 | # TODO (developer): update to your Pub/Sub Lite location, e.g. 31 | # "us-east1-a" in subscription "/projects/bar/locations/us-east1-a/subscriptions/baz". 32 | pubsublite.location=europe-south7-q 33 | # TODO (developer): update to your Pub/Sub Lite subscription ID, e.g. 34 | # "baz" in subscription "/projects/bar/locations/us-east1-a/subscriptions/baz", 35 | # where data should be read. 36 | pubsublite.subscription=my-psl-subscription 37 | 38 | -------------------------------------------------------------------------------- /java.header: -------------------------------------------------------------------------------- 1 | ^/\*$ 2 | ^ \* Copyright \d\d\d\d,? Google (Inc\.|LLC)$ 3 | ^ \*$ 4 | ^ \* Licensed under the Apache License, Version 2\.0 \(the "License"\);$ 5 | ^ \* you may not use this file except in compliance with the License\.$ 6 | ^ \* You may obtain a copy of the License at$ 7 | ^ \*$ 8 | ^ \*[ ]+https?://www.apache.org/licenses/LICENSE-2\.0$ 9 | ^ \*$ 10 | ^ \* Unless required by applicable law or agreed to in writing, software$ 11 | ^ \* distributed under the License is distributed on an "AS IS" BASIS,$ 12 | ^ \* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied\.$ 13 | ^ \* See the License for the specific language governing permissions and$ 14 | ^ \* limitations under the License\.$ 15 | ^ \*/$ 16 | -------------------------------------------------------------------------------- /license-checks.xml: -------------------------------------------------------------------------------- 1 | <?xml version="1.0"?> 2 | <!DOCTYPE module PUBLIC "-//Checkstyle//DTD Checkstyle Configuration 1.3//EN" "https://checkstyle.org/dtds/configuration_1_3.dtd"> 5 | <module name="Checker"> 6 | <module name="RegexpHeader"> 7 | <property name="headerFile" value="java.header"/> 8 | <property name="fileExtensions" value="java"/> 9 | </module> 10 | </module> 11 | -------------------------------------------------------------------------------- /migration/.gcp/gmk_bootstrap_servers: -------------------------------------------------------------------------------- 1 | bootstrap...managedkafka..cloud.goog:9092 -------------------------------------------------------------------------------- /migration/.gcp/gmk_sasl_service_account: -------------------------------------------------------------------------------- 1 | @.iam.gserviceaccount.com -------------------------------------------------------------------------------- /migration/.gcp/gmk_sasl_service_account_key: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_config_storage_topic: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_connect_group_id: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_offset_storage_topic:
-------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_sink_topic: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_ssl_truststore_location: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_ssl_truststore_password: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/kafka_status_storage_topic: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/pubsub_lite_gcp_location: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/pubsub_lite_gcp_project: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/pubsub_lite_gcp_subscription: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /migration/.gcp/pubsub_lite_job_name: -------------------------------------------------------------------------------- 1 | PubSubLiteSourceConnector -------------------------------------------------------------------------------- /migration/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=linux/amd64 eclipse-temurin:21 2 | ARG KAFKA_VERSION="3.4.0" 3 | ARG KAFKA_CONNECT_VERSION="${KAFKA_VERSION}" 4 | ARG KAFKA_SCALA_VERSION="2.13" 5 | ARG PUBSUB_GROUP_KAFKA_CONNECTOR_VERSION="1.2.0" 6 | ARG KAFKA_HOME_ROOT="/opt" 7 | ENV KAFKA_HOME="${KAFKA_HOME_ROOT}/kafka" 8 | ARG KAFKA_CONFIG_DIR="${KAFKA_HOME}/config" 9 | ARG KAFKA_RELEASE="kafka_${KAFKA_SCALA_VERSION}-${KAFKA_VERSION}" 10 | ARG KAFKA_TARBALL="${KAFKA_RELEASE}.tgz" 11 | ARG KAFKA_DOWNLOAD_URL="https://archive.apache.org/dist/kafka/${KAFKA_VERSION}/${KAFKA_TARBALL}" 12 | ENV KAFKA_HEAP_OPTS="-Xms2G -Xmx2G" 13 | ARG KAFKA_PLUGINS_DIR="${KAFKA_HOME}/plugins" 14 | # The pubsub-group-kafka-connector jar needs to be pre-built or downloaded (e.g. with Maven) before this image is built.
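# For example, a sketch assuming Maven is installed and the version matches
# PUBSUB_GROUP_KAFKA_CONNECTOR_VERSION above, fetching the jar into this directory:
#   mvn dependency:copy \
#     -Dartifact=com.google.cloud:pubsub-group-kafka-connector:1.2.0 \
#     -DoutputDirectory=.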
15 | # References: 16 | # 1) https://github.com/googleapis/java-pubsub-group-kafka-connector/releases/ 17 | # 2) https://central.sonatype.com/artifact/com.google.cloud/pubsub-group-kafka-connector 18 | ARG PUBSUB_GROUP_KAFKA_CONNECTOR_JAR="pubsub-group-kafka-connector-${PUBSUB_GROUP_KAFKA_CONNECTOR_VERSION}.jar" 19 | ARG KAFKA_CONNECT_CONFIGURE_SCRIPT="configure-kafka-connect.sh" 20 | ARG BUILD_KAFKA_CONNECT_STARTUP_SCRIPT="start-kafka-connect.sh" 21 | ARG BUILD_PUBSUB_LITE_JOB_STARTUP_SCRIPT="start-pubsub-lite-connector.sh" 22 | ARG BUILD_KAFKA_CONNECT_CONFIG_FILE="kafka-connect.properties" 23 | ARG BUILD_PUBSUB_LITE_JOB_FILE="pubsub_lite_job.json" 24 | ENV JAVA_HOME="/opt/java/openjdk" 25 | ENV PATH="${KAFKA_HOME}/bin:${JAVA_HOME}/bin:${PATH}" 26 | ENV KAFKA_CONNECT_STARTUP_SCRIPT="${KAFKA_HOME}/bin/${BUILD_KAFKA_CONNECT_STARTUP_SCRIPT}" 27 | ENV PUBSUB_LITE_JOB_STARTUP_SCRIPT="${KAFKA_HOME}/bin/${BUILD_PUBSUB_LITE_JOB_STARTUP_SCRIPT}" 28 | ENV KAFKA_CONNECT_CONFIG_FILE="${KAFKA_CONFIG_DIR}/${BUILD_KAFKA_CONNECT_CONFIG_FILE}" 29 | ENV PUBSUB_LITE_JOB_FILE="${KAFKA_CONFIG_DIR}/${BUILD_PUBSUB_LITE_JOB_FILE}" 30 | 31 | RUN apt-get -y -qq update \ 32 | && apt-get -y -qq install iproute2 bind9-dnsutils 33 | 34 | RUN wget -q ${KAFKA_DOWNLOAD_URL} \ 35 | && tar -xzf ${KAFKA_TARBALL} -C ${KAFKA_HOME_ROOT} \ 36 | && ln -s ${KAFKA_HOME_ROOT}/${KAFKA_RELEASE} ${KAFKA_HOME} \ 37 | && rm -f ${KAFKA_TARBALL} 38 | 39 | RUN mkdir -p ${KAFKA_PLUGINS_DIR} 40 | COPY ${PUBSUB_GROUP_KAFKA_CONNECTOR_JAR} \ 41 | ${KAFKA_PLUGINS_DIR}/${PUBSUB_GROUP_KAFKA_CONNECTOR_JAR} 42 | COPY ${BUILD_KAFKA_CONNECT_CONFIG_FILE} ${KAFKA_CONNECT_CONFIG_FILE} 43 | COPY ${BUILD_PUBSUB_LITE_JOB_FILE} ${PUBSUB_LITE_JOB_FILE} 44 | COPY ${KAFKA_CONNECT_CONFIGURE_SCRIPT} . 45 | COPY ${BUILD_KAFKA_CONNECT_STARTUP_SCRIPT} ${KAFKA_CONNECT_STARTUP_SCRIPT} 46 | COPY ${BUILD_PUBSUB_LITE_JOB_STARTUP_SCRIPT} ${PUBSUB_LITE_JOB_STARTUP_SCRIPT} 47 | RUN chmod +x ${KAFKA_CONNECT_CONFIGURE_SCRIPT} 48 | RUN chmod +x ${KAFKA_CONNECT_STARTUP_SCRIPT} 49 | RUN chmod +x ${PUBSUB_LITE_JOB_STARTUP_SCRIPT} 50 | RUN --mount=type=secret,id=gmk_bootstrap_servers \ 51 | --mount=type=secret,id=gmk_sasl_service_account \ 52 | --mount=type=secret,id=gmk_sasl_service_account_key \ 53 | --mount=type=secret,id=kafka_sink_topic \ 54 | --mount=type=secret,id=kafka_connect_group_id \ 55 | --mount=type=secret,id=pubsub_lite_gcp_project \ 56 | --mount=type=secret,id=pubsub_lite_gcp_location \ 57 | --mount=type=secret,id=pubsub_lite_gcp_subscription \ 58 | --mount=type=secret,id=pubsub_lite_job_name \ 59 | --mount=type=secret,id=kafka_config_storage_topic \ 60 | --mount=type=secret,id=kafka_offset_storage_topic \ 61 | --mount=type=secret,id=kafka_status_storage_topic \ 62 | --mount=type=secret,id=kafka_ssl_truststore_location \ 63 | --mount=type=secret,id=kafka_ssl_truststore_password \ 64 | KAFKA_CONNECT_CONFIG_FILE="${KAFKA_CONNECT_CONFIG_FILE}" \ 65 | KAFKA_BOOTSTRAP_SERVERS="$(cat /run/secrets/gmk_bootstrap_servers)" \ 66 | KAFKA_SASL_SERVICE_ACCOUNT="$(cat /run/secrets/gmk_sasl_service_account)"\ 67 | KAFKA_SASL_SERVICE_ACCOUNT_KEY="$(cat /run/secrets/gmk_sasl_service_account_key)" \ 68 | KAFKA_SINK_TOPIC="$(cat /run/secrets/kafka_sink_topic)" \ 69 | KAFKA_CONNECT_GROUP_ID="$(cat /run/secrets/kafka_connect_group_id)" \ 70 | KAFKA_PLUGINS_DIR=${KAFKA_PLUGINS_DIR} \ 71 | PUBSUB_LITE_GCP_PROJECT="$(cat /run/secrets/pubsub_lite_gcp_project)" \ 72 | PUBSUB_LITE_GCP_LOCATION="$(cat /run/secrets/pubsub_lite_gcp_location)" \ 73 | PUBSUB_LITE_GCP_SUBSCRIPTION="$(cat 
/run/secrets/pubsub_lite_gcp_subscription)" \ 74 | PUBSUB_LITE_JOB_NAME="$(cat /run/secrets/pubsub_lite_job_name)" \ 75 | KAFKA_CONFIG_STORAGE_TOPIC="$(cat /run/secrets/kafka_config_storage_topic)" \ 76 | KAFKA_OFFSET_STORAGE_TOPIC="$(cat /run/secrets/kafka_offset_storage_topic)" \ 77 | KAFKA_STATUS_STORAGE_TOPIC="$(cat /run/secrets/kafka_status_storage_topic)" \ 78 | KAFKA_SSL_TRUSTSTORE_LOCATION="$(cat /run/secrets/kafka_ssl_truststore_location)" \ 79 | KAFKA_SSL_TRUSTSTORE_PASSWORD="$(cat /run/secrets/kafka_ssl_truststore_password)" \ 80 | ./${KAFKA_CONNECT_CONFIGURE_SCRIPT} \ 81 | && rm -f ./${KAFKA_CONNECT_CONFIGURE_SCRIPT} 82 | 83 | EXPOSE 8083 84 | CMD ${KAFKA_CONNECT_STARTUP_SCRIPT} 85 | -------------------------------------------------------------------------------- /migration/docker/build-image.sh: -------------------------------------------------------------------------------- 1 | SELF_DIR="$(dirname $(readlink -f $0))" 2 | SECRETS_DIR="$(dirname ${SELF_DIR})/.gcp" 3 | docker build --platform=linux/amd64 --file Dockerfile --tag psl-to-gmk:latest \ 4 | --secret id=gmk_sasl_service_account,src="${SECRETS_DIR}/gmk_sasl_service_account" \ 5 | --secret id=gmk_sasl_service_account_key,src="${SECRETS_DIR}/gmk_sasl_service_account_key" \ 6 | --secret id=gmk_bootstrap_servers,src="${SECRETS_DIR}/gmk_bootstrap_servers" \ 7 | --secret id=kafka_sink_topic,src="${SECRETS_DIR}/kafka_sink_topic" \ 8 | --secret id=kafka_connect_group_id,src="${SECRETS_DIR}/kafka_connect_group_id" \ 9 | --secret id=pubsub_lite_gcp_project,src="${SECRETS_DIR}/pubsub_lite_gcp_project" \ 10 | --secret id=pubsub_lite_gcp_location,src="${SECRETS_DIR}/pubsub_lite_gcp_location" \ 11 | --secret id=pubsub_lite_gcp_subscription,src="${SECRETS_DIR}/pubsub_lite_gcp_subscription" \ 12 | --secret id=pubsub_lite_job_name,src="${SECRETS_DIR}/pubsub_lite_job_name" \ 13 | --secret id=kafka_config_storage_topic,src="${SECRETS_DIR}/kafka_config_storage_topic" \ 14 | --secret id=kafka_offset_storage_topic,src="${SECRETS_DIR}/kafka_offset_storage_topic" \ 15 | --secret id=kafka_status_storage_topic,src="${SECRETS_DIR}/kafka_status_storage_topic" \ 16 | --secret id=kafka_ssl_truststore_location,src="${SECRETS_DIR}/kafka_ssl_truststore_location" \ 17 | --secret id=kafka_ssl_truststore_password,src="${SECRETS_DIR}/kafka_ssl_truststore_password" \ 18 | --no-cache . 19 | -------------------------------------------------------------------------------- /migration/docker/configure-kafka-connect.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # All the variables must be supplied as environment variables for this script 3 | # Update Kafka Connect Sink config 4 | sed -i -e "s#__KAFKA_BOOTSTRAP_SERVERS__#${KAFKA_BOOTSTRAP_SERVERS}#g;" \ 5 | "${KAFKA_CONNECT_CONFIG_FILE}" 6 | # Update Kafka Connect internal topics config 7 | sed -i -e "s#__KAFKA_CONFIG_STORAGE_TOPIC__#${KAFKA_CONFIG_STORAGE_TOPIC}#g; s#__KAFKA_OFFSET_STORAGE_TOPIC__#${KAFKA_OFFSET_STORAGE_TOPIC}#g; s#__KAFKA_STATUS_STORAGE_TOPIC__#${KAFKA_STATUS_STORAGE_TOPIC}#g" \ 8 | "${KAFKA_CONNECT_CONFIG_FILE}" 9 | # Update Kafka Connect group id and Kafka Connect plugins directory. 
Kafka Connect group id needs to be unique and must not conflict with the consumer group ids. 10 | sed -i -e "s#__KAFKA_CONNECT_GROUP_ID__#${KAFKA_CONNECT_GROUP_ID}#g; s#__KAFKA_PLUGINS_DIR__#${KAFKA_PLUGINS_DIR}#g" \ 11 | "${KAFKA_CONNECT_CONFIG_FILE}" 12 | # Update Kafka Connect SASL config 13 | sed -i -e "s#__KAFKA_SASL_SERVICE_ACCOUNT__#${KAFKA_SASL_SERVICE_ACCOUNT}#g; s#__KAFKA_SASL_SERVICE_ACCOUNT_KEY__#${KAFKA_SASL_SERVICE_ACCOUNT_KEY}#g" \ 14 | "${KAFKA_CONNECT_CONFIG_FILE}" 15 | # Update Kafka Connect SSL truststore config 16 | sed -i -e "s#__KAFKA_SSL_TRUSTSTORE_LOCATION__#${KAFKA_SSL_TRUSTSTORE_LOCATION}#g; s#__KAFKA_SSL_TRUSTSTORE_PASSWORD__#${KAFKA_SSL_TRUSTSTORE_PASSWORD}#g" \ 17 | "${KAFKA_CONNECT_CONFIG_FILE}" 18 | 19 | # Update the Pub/Sub Lite job file 20 | sed -i -e "s#__PUBSUB_LITE_JOB_NAME__#${PUBSUB_LITE_JOB_NAME}#g; s#__KAFKA_SINK_TOPIC__#${KAFKA_SINK_TOPIC}#g; s#__PUBSUB_LITE_GCP_PROJECT__#${PUBSUB_LITE_GCP_PROJECT}#g; s#__PUBSUB_LITE_GCP_LOCATION__#${PUBSUB_LITE_GCP_LOCATION}#g; s#__PUBSUB_LITE_GCP_SUBSCRIPTION__#${PUBSUB_LITE_GCP_SUBSCRIPTION}#g;" \ 21 | "${PUBSUB_LITE_JOB_FILE}" 22 | 23 | # Update the Pub/Sub Lite connector startup script 24 | sed -i -e "s#__PUBSUB_LITE_JOB_NAME__#${PUBSUB_LITE_JOB_NAME}#g;" \ 25 | "${PUBSUB_LITE_JOB_STARTUP_SCRIPT}" -------------------------------------------------------------------------------- /migration/docker/kafka-connect.properties: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | # Kafka Connect Distributed Mode Configuration 15 | 16 | # Bootstrap servers for Kafka brokers 17 | bootstrap.servers=__KAFKA_BOOTSTRAP_SERVERS__ 18 | 19 | # Group ID for Kafka Connect worker 20 | group.id=__KAFKA_CONNECT_GROUP_ID__ 21 | 22 | # REST API endpoint for Kafka Connect 23 | rest.port=8083 24 | 25 | # Hostname for REST API endpoint 26 | rest.host.name=__KAFKA_REST_ADVERTISED_HOST_NAME__ 27 | 28 | # Client ID for the worker.
This will appear in server logs for tracking 29 | client.id=__KAFKA_CONNECT_WORKER_CLIENT_ID__ 30 | 31 | # Classpath for plugins (including connectors) 32 | plugin.path=__KAFKA_PLUGINS_DIR__ 33 | 34 | # Offset commit interval in milliseconds 35 | offset.flush.interval.ms=10000 36 | 37 | # Kafka Connect internal topics for config, offset, and status storage 38 | config.storage.topic=__KAFKA_CONFIG_STORAGE_TOPIC__ 39 | offset.storage.topic=__KAFKA_OFFSET_STORAGE_TOPIC__ 40 | status.storage.topic=__KAFKA_STATUS_STORAGE_TOPIC__ 41 | 42 | # Advertised hostname and port for the REST API 43 | rest.advertised.host.name=__KAFKA_REST_ADVERTISED_HOST_NAME__ 44 | rest.advertised.port=8083 45 | 46 | # Number of worker threads for handling HTTP requests 47 | rest.threads.max=50 48 | 49 | # Default partition assignment strategy 50 | partition.assignment.strategy=org.apache.kafka.clients.consumer.CooperativeStickyAssignor 51 | 52 | # Kafka Connect-specific settings 53 | offset.storage.replication.factor=3 54 | config.storage.replication.factor=3 55 | status.storage.replication.factor=3 56 | offset.storage.partitions=25 57 | status.storage.partitions=5 58 | 59 | # SASL auth related configuration 60 | sasl.mechanism=PLAIN 61 | security.protocol=SASL_SSL 62 | sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 63 | username="__KAFKA_SASL_SERVICE_ACCOUNT__" \ 64 | password="__KAFKA_SASL_SERVICE_ACCOUNT_KEY__"; 65 | 66 | producer.sasl.mechanism=PLAIN 67 | producer.security.protocol=SASL_SSL 68 | producer.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 69 | username="__KAFKA_SASL_SERVICE_ACCOUNT__" \ 70 | password="__KAFKA_SASL_SERVICE_ACCOUNT_KEY__"; 71 | 72 | consumer.sasl.mechanism=PLAIN 73 | consumer.security.protocol=SASL_SSL 74 | consumer.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ 75 | username="__KAFKA_SASL_SERVICE_ACCOUNT__" \ 76 | password="__KAFKA_SASL_SERVICE_ACCOUNT_KEY__"; 77 | 78 | # SSL Truststore related configuration 79 | ssl.truststore.location=__KAFKA_SSL_TRUSTSTORE_LOCATION__ 80 | ssl.truststore.password=__KAFKA_SSL_TRUSTSTORE_PASSWORD__ 81 | 82 | # Set the key converter for the Pub/Sub Lite source connector. 83 | key.converter=org.apache.kafka.connect.converters.ByteArrayConverter 84 | # Set the value converter for the Pub/Sub Lite source connector.
85 | value.converter=org.apache.kafka.connect.converters.ByteArrayConverter -------------------------------------------------------------------------------- /migration/docker/pubsub-group-kafka-connector-1.2.0.jar: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/googleapis/java-pubsub-group-kafka-connector/c5e9ac378ffb335f205b0ea608df7e933d8a7263/migration/docker/pubsub-group-kafka-connector-1.2.0.jar -------------------------------------------------------------------------------- /migration/docker/pubsub_lite_job.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "__PUBSUB_LITE_JOB_NAME__", 3 | "config": { 4 | "connector.class": "com.google.pubsublite.kafka.source.PubSubLiteSourceConnector", 5 | "tasks.max": "10", 6 | "kafka.topic": "__KAFKA_SINK_TOPIC__", 7 | "pubsublite.project": "__PUBSUB_LITE_GCP_PROJECT__", 8 | "pubsublite.location": "__PUBSUB_LITE_GCP_LOCATION__", 9 | "pubsublite.subscription": "__PUBSUB_LITE_GCP_SUBSCRIPTION__" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /migration/docker/push-image.sh: -------------------------------------------------------------------------------- 1 | # Copyright 2024 Google LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
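# Note: pushing assumes Docker is already authorized against gcr.io; one common
# setup (an assumption, not required by this script) is:
#   gcloud auth configure-docker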
14 | # Script for tagging and pushing the Kafka Connect image to Container Registry 15 | DOCKER_IMAGE_NAME="psl-to-gmk" 16 | DOCKER_IMAGE_TAG=latest 17 | GCP_PROJECT="" 18 | DOCKER_REPOSITORY=gcr.io/${GCP_PROJECT} 19 | docker tag ${DOCKER_IMAGE_NAME} \ 20 | ${DOCKER_REPOSITORY}/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG} 21 | docker push ${DOCKER_REPOSITORY}/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG} 22 | -------------------------------------------------------------------------------- /migration/docker/start-kafka-connect.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | ${PUBSUB_LITE_JOB_STARTUP_SCRIPT} & 3 | 4 | START_SCRIPT="${KAFKA_HOME}/bin/connect-distributed.sh" 5 | KAFKA_REST_ADVERTISED_HOST_NAME="$(/sbin/ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1)" 6 | KAFKA_CONNECT_WORKER_CLIENT_ID="$(hostname --fqdn)" 7 | sed -i -e "s#__KAFKA_REST_ADVERTISED_HOST_NAME__#${KAFKA_REST_ADVERTISED_HOST_NAME}#g; s#__KAFKA_CONNECT_WORKER_CLIENT_ID__#${KAFKA_CONNECT_WORKER_CLIENT_ID}#g" \ 8 | "${KAFKA_CONNECT_CONFIG_FILE}" 9 | ${START_SCRIPT} ${KAFKA_CONNECT_CONFIG_FILE} -------------------------------------------------------------------------------- /migration/docker/start-pubsub-lite-connector.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Poll Kafka Connect until it is up 4 | while true 5 | do 6 | echo "Pinging Connect Rest Endpoint" 7 | CONNECT_PING=$(curl localhost:8083 | grep "version") 8 | if [[ $CONNECT_PING != "" ]]; then 9 | break 10 | fi 11 | sleep 30 12 | done 13 | # Once Kafka Connect is up, if the Pub/Sub Lite migration job 14 | # does not yet exist, submit the job 15 | CONNECT_JOBS=$(curl localhost:8083/connectors | grep "__PUBSUB_LITE_JOB_NAME__") 16 | if [[ $CONNECT_JOBS == "" ]]; then 17 | echo "No Connect Job found, posting Job" 18 | curl -H "Content-Type: application/json" -H "Accept: application/json" --data "@${PUBSUB_LITE_JOB_FILE}" localhost:8083/connectors 19 | fi 20 | -------------------------------------------------------------------------------- /migration/k8s.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: "apps/v1" 3 | kind: "Deployment" 4 | metadata: 5 | name: "" 6 | namespace: "default" 7 | labels: 8 | app: "" 9 | spec: 10 | replicas: 3 11 | selector: 12 | matchLabels: 13 | app: "" 14 | template: 15 | metadata: 16 | labels: 17 | app: "" 18 | spec: 19 | serviceAccountName: 20 | containers: 21 | - name: "psl-to-gmk-1" 22 | image: "gcr.io//psl-to-gmk:latest" 23 | --- 24 | apiVersion: "autoscaling/v2" 25 | kind: "HorizontalPodAutoscaler" 26 | metadata: 27 | name: "-hpa-iwbr" 28 | namespace: "default" 29 | labels: 30 | app: "" 31 | spec: 32 | scaleTargetRef: 33 | kind: "Deployment" 34 | name: "" 35 | apiVersion: "apps/v1" 36 | minReplicas: 1 37 | maxReplicas: 5 38 | metrics: 39 | - type: "Resource" 40 | resource: 41 | name: "cpu" 42 | target: 43 | type: "Utilization" 44 | averageUtilization: 80 45 | -------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | ":separateMajorReleases", 4 | ":combinePatchMinorReleases", 5 | ":ignoreUnstable", 6 | ":prImmediately", 7 | ":updateNotScheduled", 8 | ":automergeDisabled", 9 | ":ignoreModulesAndTests", 10 | ":maintainLockFilesDisabled", 11 | ":autodetectPinVersions" 12 | ], 13 | "ignorePaths": [ 14 | ".kokoro/requirements.txt", 15
| ".github/workflows/**" 16 | ], 17 | "customManagers": [ 18 | { 19 | "customType": "regex", 20 | "fileMatch": [ 21 | "^.kokoro/presubmit/graalvm-native.*.cfg$" 22 | ], 23 | "matchStrings": ["value: \"gcr.io/cloud-devrel-public-resources/graalvm.*:(?<version>.*?)\""], 24 | "depNameTemplate": "com.google.cloud:sdk-platform-java-config", 25 | "datasourceTemplate": "maven" 26 | } 27 | ], 28 | "packageRules": [ 29 | { 30 | "packagePatterns": [ 31 | "^com.google.guava:" 32 | ], 33 | "versionScheme": "docker" 34 | }, 35 | { 36 | "packagePatterns": [ 37 | "*" 38 | ], 39 | "semanticCommitType": "deps", 40 | "semanticCommitScope": null 41 | }, 42 | { 43 | "packagePatterns": [ 44 | "^org.apache.maven", 45 | "^org.jacoco:", 46 | "^org.codehaus.mojo:", 47 | "^org.sonatype.plugins:", 48 | "^com.coveo:", 49 | "^com.google.cloud:google-cloud-shared-config" 50 | ], 51 | "semanticCommitType": "build", 52 | "semanticCommitScope": "deps" 53 | }, 54 | { 55 | "packagePatterns": [ 56 | "^com.google.cloud:pubsub-group-kafka-connector", 57 | "^com.google.cloud:libraries-bom", 58 | "^com.google.cloud.samples:shared-configuration" 59 | ], 60 | "semanticCommitType": "chore", 61 | "semanticCommitScope": "deps" 62 | }, 63 | { 64 | "packagePatterns": [ 65 | "^junit:junit", 66 | "^com.google.truth:truth", 67 | "^org.mockito:mockito-core", 68 | "^org.objenesis:objenesis", 69 | "^com.google.cloud:google-cloud-conformance-tests", 70 | "^org.graalvm.buildtools:junit-platform-native" 71 | ], 72 | "semanticCommitType": "test", 73 | "semanticCommitScope": "deps" 74 | }, 75 | { 76 | "packagePatterns": [ 77 | "^com.google.cloud:google-cloud-" 78 | ], 79 | "ignoreUnstable": false 80 | }, 81 | { 82 | "packagePatterns": [ 83 | "^com.fasterxml.jackson.core" 84 | ], 85 | "groupName": "jackson dependencies" 86 | } 87 | ], 88 | "semanticCommits": true, 89 | "dependencyDashboard": true 90 | } 91 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/common/ConnectorCredentialsProvider.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2018 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.google.pubsub.kafka.common; 17 | 18 | import com.google.api.gax.core.CredentialsProvider; 19 | import com.google.auth.Credentials; 20 | import com.google.auth.oauth2.GoogleCredentials; 21 | import java.io.ByteArrayInputStream; 22 | import java.io.FileInputStream; 23 | import java.io.IOException; 24 | import java.util.Arrays; 25 | import java.util.List; 26 | import java.util.Map; 27 | 28 | public class ConnectorCredentialsProvider implements CredentialsProvider { 29 | private static final List<String> GCP_SCOPE = 30 | Arrays.asList("https://www.googleapis.com/auth/cloud-platform"); 31 | 32 | CredentialsProvider impl; 33 | 34 | private ConnectorCredentialsProvider(CredentialsProvider impl) { 35 | this.impl = impl; 36 | } 37 | 38 | public static ConnectorCredentialsProvider fromConfig(Map<String, Object> config) { 39 | String credentialsPath = config.get(ConnectorUtils.GCP_CREDENTIALS_FILE_PATH_CONFIG).toString(); 40 | String credentialsJson = config.get(ConnectorUtils.GCP_CREDENTIALS_JSON_CONFIG).toString(); 41 | if (!credentialsPath.isEmpty()) { 42 | if (!credentialsJson.isEmpty()) { 43 | throw new IllegalArgumentException( 44 | "May not set both " 45 | + ConnectorUtils.GCP_CREDENTIALS_FILE_PATH_CONFIG 46 | + " and " 47 | + ConnectorUtils.GCP_CREDENTIALS_JSON_CONFIG); 48 | } 49 | return ConnectorCredentialsProvider.fromFile(credentialsPath); 50 | } else if (!credentialsJson.isEmpty()) { 51 | return ConnectorCredentialsProvider.fromJson(credentialsJson); 52 | } else { 53 | return ConnectorCredentialsProvider.fromDefault(); 54 | } 55 | } 56 | 57 | public static ConnectorCredentialsProvider fromFile(String credentialPath) { 58 | return new ConnectorCredentialsProvider( 59 | () -> 60 | GoogleCredentials.fromStream(new FileInputStream(credentialPath)) 61 | .createScoped(GCP_SCOPE)); 62 | } 63 | 64 | public static ConnectorCredentialsProvider fromJson(String credentialsJson) { 65 | return new ConnectorCredentialsProvider( 66 | () -> 67 | GoogleCredentials.fromStream(new ByteArrayInputStream(credentialsJson.getBytes())) 68 | .createScoped(GCP_SCOPE)); 69 | } 70 | 71 | public static ConnectorCredentialsProvider fromDefault() { 72 | return new ConnectorCredentialsProvider( 73 | () -> GoogleCredentials.getApplicationDefault().createScoped(GCP_SCOPE)); 74 | } 75 | 76 | @Override 77 | public Credentials getCredentials() throws IOException { 78 | return impl.getCredentials(); 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/common/ConnectorUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.google.pubsub.kafka.common; 17 | 18 | import com.google.common.util.concurrent.ThreadFactoryBuilder; 19 | import com.google.protobuf.ByteString; 20 | import java.util.Optional; 21 | import java.util.concurrent.Executors; 22 | import java.util.concurrent.ScheduledExecutorService; 23 | 24 | /** Utility methods and constants that are repeated across one or more classes. */ 25 | public class ConnectorUtils { 26 | public static final String SCHEMA_NAME = ByteString.class.getName(); 27 | public static final String CPS_SUBSCRIPTION_FORMAT = "projects/%s/subscriptions/%s"; 28 | public static final String CPS_PROJECT_CONFIG = "cps.project"; 29 | public static final String CPS_TOPIC_CONFIG = "cps.topic"; 30 | public static final String CPS_ENDPOINT = "cps.endpoint"; 31 | public static final String CPS_DEFAULT_ENDPOINT = "pubsub.googleapis.com:443"; 32 | public static final String CPS_MESSAGE_KEY_ATTRIBUTE = "key"; 33 | public static final String CPS_ORDERING_KEY_ATTRIBUTE = "orderingKey"; 34 | public static final String GCP_CREDENTIALS_FILE_PATH_CONFIG = "gcp.credentials.file.path"; 35 | public static final String GCP_CREDENTIALS_JSON_CONFIG = "gcp.credentials.json"; 36 | public static final String KAFKA_MESSAGE_CPS_BODY_FIELD = "message"; 37 | public static final String KAFKA_TOPIC_ATTRIBUTE = "kafka.topic"; 38 | public static final String KAFKA_PARTITION_ATTRIBUTE = "kafka.partition"; 39 | public static final String KAFKA_OFFSET_ATTRIBUTE = "kafka.offset"; 40 | public static final String KAFKA_TIMESTAMP_ATTRIBUTE = "kafka.timestamp"; 41 | 42 | private static ScheduledExecutorService newDaemonExecutor(String prefix) { 43 | return Executors.newScheduledThreadPool( 44 | Math.max(4, Runtime.getRuntime().availableProcessors() * 5), 45 | new ThreadFactoryBuilder().setDaemon(true).setNameFormat(prefix + "-%d").build()); 46 | } 47 | 48 | // A shared executor for Pub/Sub clients to use. 49 | private static Optional<ScheduledExecutorService> SYSTEM_EXECUTOR = Optional.empty(); 50 | 51 | public static synchronized ScheduledExecutorService getSystemExecutor() { 52 | if (!SYSTEM_EXECUTOR.isPresent()) { 53 | SYSTEM_EXECUTOR = Optional.of(newDaemonExecutor("pubsub-connect-system")); 54 | } 55 | return SYSTEM_EXECUTOR.get(); 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/common/Version.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.google.pubsub.kafka.common; 17 | 18 | public class Version { 19 | private static String version = "unknown"; 20 | 21 | static { 22 | String implementationVersion = Version.class.getPackage().getImplementationVersion(); 23 | if (implementationVersion != null) { 24 | version = implementationVersion; 25 | } 26 | } 27 | 28 | public static String version() { 29 | return version; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/AckBatchingSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsub.kafka.source; 17 | 18 | import com.google.api.core.ApiFuture; 19 | import com.google.api.core.ApiFutureCallback; 20 | import com.google.api.core.ApiFutures; 21 | import com.google.api.core.SettableApiFuture; 22 | import com.google.common.util.concurrent.MoreExecutors; 23 | import com.google.errorprone.annotations.concurrent.GuardedBy; 24 | import com.google.protobuf.Empty; 25 | import com.google.pubsub.v1.ReceivedMessage; 26 | import java.util.ArrayDeque; 27 | import java.util.ArrayList; 28 | import java.util.Collection; 29 | import java.util.Deque; 30 | import java.util.List; 31 | import java.util.concurrent.Future; 32 | 33 | public class AckBatchingSubscriber implements CloudPubSubSubscriber { 34 | interface AlarmFactory { 35 | Future<?> newAlarm(Runnable runnable); 36 | } 37 | 38 | private static class IdsAndFuture { 39 | Collection<String> ids; 40 | SettableApiFuture<Empty> future; 41 | } 42 | 43 | private final CloudPubSubSubscriber underlying; 44 | 45 | @GuardedBy("this") 46 | private final Deque<IdsAndFuture> toSend = new ArrayDeque<>(); 47 | 48 | private final Future<?> alarm; 49 | 50 | public AckBatchingSubscriber(CloudPubSubSubscriber underlying, AlarmFactory alarmFactory) { 51 | this.underlying = underlying; 52 | this.alarm = alarmFactory.newAlarm(this::flush); 53 | } 54 | 55 | @Override 56 | public ApiFuture<List<ReceivedMessage>> pull() { 57 | return underlying.pull(); 58 | } 59 | 60 | @Override 61 | public synchronized ApiFuture<Empty> ackMessages(Collection<String> ackIds) { 62 | IdsAndFuture idsAndFuture = new IdsAndFuture(); 63 | idsAndFuture.ids = ackIds; 64 | idsAndFuture.future = SettableApiFuture.create(); 65 | toSend.add(idsAndFuture); 66 | return idsAndFuture.future; 67 | } 68 | 69 | private void flush() { 70 | List<String> ackIds = new ArrayList<>(); 71 | List<SettableApiFuture<Empty>> futures = new ArrayList<>(); 72 | synchronized (this) { 73 | if (toSend.isEmpty()) { 74 | return; 75 | } 76 | toSend.forEach( 77 | pair -> { 78 | ackIds.addAll(pair.ids); 79 | futures.add(pair.future); 80 | }); 81 | toSend.clear(); 82 | } 83 | ApiFuture<Empty> response = underlying.ackMessages(ackIds); 84 | ApiFutures.addCallback( 85 | response, 86 | new ApiFutureCallback<Empty>() { 87 | @Override 88 | public void onFailure(Throwable t) { 89 | futures.forEach(future -> future.setException(t)); 90
| } 91 | 92 | @Override 93 | public void onSuccess(Empty result) { 94 | futures.forEach(future -> future.set(result)); 95 | } 96 | }, 97 | MoreExecutors.directExecutor()); 98 | } 99 | 100 | @Override 101 | public void close() { 102 | alarm.cancel(false); 103 | try { 104 | alarm.get(); 105 | } catch (Throwable ignored) { 106 | } 107 | flush(); 108 | underlying.close(); 109 | } 110 | } 111 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/CloudPubSubGRPCSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsub.kafka.source; 17 | 18 | import com.google.api.core.ApiFuture; 19 | import com.google.api.core.ApiFutures; 20 | import com.google.api.gax.core.CredentialsProvider; 21 | import com.google.cloud.pubsub.v1.stub.GrpcSubscriberStub; 22 | import com.google.cloud.pubsub.v1.stub.SubscriberStubSettings; 23 | import com.google.common.util.concurrent.MoreExecutors; 24 | import com.google.protobuf.Empty; 25 | import com.google.pubsub.v1.AcknowledgeRequest; 26 | import com.google.pubsub.v1.ProjectSubscriptionName; 27 | import com.google.pubsub.v1.PullRequest; 28 | import com.google.pubsub.v1.PullResponse; 29 | import com.google.pubsub.v1.ReceivedMessage; 30 | import java.io.IOException; 31 | import java.util.Collection; 32 | import java.util.List; 33 | import java.util.concurrent.ThreadLocalRandom; 34 | import org.slf4j.Logger; 35 | import org.slf4j.LoggerFactory; 36 | 37 | /** 38 | * A {@link CloudPubSubSubscriber} that uses gRPC to pull messages 39 | * from Google Cloud Pub/Sub. This class is not 40 | * thread-safe. 
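 *
 * <p>The underlying gRPC stub is recreated roughly every 25 to 35 minutes (see
 * {@code makeSubscriber}) to avoid GOAWAY errors on long-lived connections.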
-------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/CloudPubSubGRPCSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsub.kafka.source; 17 | 18 | import com.google.api.core.ApiFuture; 19 | import com.google.api.core.ApiFutures; 20 | import com.google.api.gax.core.CredentialsProvider; 21 | import com.google.cloud.pubsub.v1.stub.GrpcSubscriberStub; 22 | import com.google.cloud.pubsub.v1.stub.SubscriberStubSettings; 23 | import com.google.common.util.concurrent.MoreExecutors; 24 | import com.google.protobuf.Empty; 25 | import com.google.pubsub.v1.AcknowledgeRequest; 26 | import com.google.pubsub.v1.ProjectSubscriptionName; 27 | import com.google.pubsub.v1.PullRequest; 28 | import com.google.pubsub.v1.PullResponse; 29 | import com.google.pubsub.v1.ReceivedMessage; 30 | import java.io.IOException; 31 | import java.util.Collection; 32 | import java.util.List; 33 | import java.util.concurrent.ThreadLocalRandom; 34 | import org.slf4j.Logger; 35 | import org.slf4j.LoggerFactory; 36 | 37 | /** 38 | * A {@link CloudPubSubSubscriber} that uses gRPC to pull messages 39 | * from Google Cloud Pub/Sub. This class is not 40 | * thread-safe. 41 | */ 42 | public class CloudPubSubGRPCSubscriber implements CloudPubSubSubscriber { 43 | 44 | private static final Logger log = LoggerFactory.getLogger(CloudPubSubGRPCSubscriber.class); 45 | private long nextSubscriberResetTime = 0; 46 | private GrpcSubscriberStub subscriber; 47 | private final CredentialsProvider gcpCredentialsProvider; 48 | private final String endpoint; 49 | private final ProjectSubscriptionName subscriptionName; 50 | private final int cpsMaxBatchSize; 51 | 52 | CloudPubSubGRPCSubscriber( 53 | CredentialsProvider gcpCredentialsProvider, 54 | String endpoint, 55 | ProjectSubscriptionName subscriptionName, 56 | int cpsMaxBatchSize) { 57 | this.gcpCredentialsProvider = gcpCredentialsProvider; 58 | this.endpoint = endpoint; 59 | this.subscriptionName = subscriptionName; 60 | this.cpsMaxBatchSize = cpsMaxBatchSize; 61 | makeSubscriber(); 62 | } 63 | 64 | @Override 65 | public ApiFuture<List<ReceivedMessage>> pull() { 66 | if (System.currentTimeMillis() > nextSubscriberResetTime) { 67 | makeSubscriber(); 68 | } 69 | return ApiFutures.transform( 70 | subscriber 71 | .pullCallable() 72 | .futureCall( 73 | PullRequest.newBuilder() 74 | .setSubscription(subscriptionName.toString()) 75 | .setMaxMessages(cpsMaxBatchSize) 76 | .build()), 77 | PullResponse::getReceivedMessagesList, 78 | MoreExecutors.directExecutor()); 79 | } 80 | 81 | @Override 82 | public ApiFuture<Empty> ackMessages(Collection<String> ackIds) { 83 | if (System.currentTimeMillis() > nextSubscriberResetTime) { 84 | makeSubscriber(); 85 | } 86 | return subscriber 87 | .acknowledgeCallable() 88 | .futureCall( 89 | AcknowledgeRequest.newBuilder() 90 | .setSubscription(subscriptionName.toString()) 91 | .addAllAckIds(ackIds) 92 | .build()); 93 | } 94 | 95 | @Override 96 | public void close() { 97 | subscriber.close(); 98 | } 99 | 100 | private void makeSubscriber() { 101 | try { 102 | if (subscriber != null) { 103 | subscriber.close(); 104 | } 105 | log.info("Creating subscriber."); 106 | SubscriberStubSettings subscriberStubSettings = 107 | SubscriberStubSettings.newBuilder() 108 | .setTransportChannelProvider( 109 | SubscriberStubSettings.defaultGrpcTransportProviderBuilder() 110 | .setMaxInboundMessageSize(20 << 20) // 20MB 111 | .build()) 112 | .setCredentialsProvider(gcpCredentialsProvider) 113 | .setEndpoint(endpoint) 114 | .build(); 115 | subscriber = GrpcSubscriberStub.create(subscriberStubSettings); 116 | // We change the subscriber every 25 - 35 minutes in order to avoid GOAWAY errors. 117 | nextSubscriberResetTime = 118 | System.currentTimeMillis() 119 | + ThreadLocalRandom.current().nextInt(10 * 60 * 1000) 120 | + 25 * 60 * 1000; 121 | } catch (IOException e) { 122 | throw new RuntimeException("Could not create subscriber stub; no subscribing can occur.", e); 123 | } 124 | } 125 | } 126 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/CloudPubSubRoundRobinSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsub.kafka.source; 17 | 18 | import com.google.api.core.ApiFuture; 19 | import com.google.api.gax.core.CredentialsProvider; 20 | import com.google.protobuf.Empty; 21 | import com.google.pubsub.v1.ProjectSubscriptionName; 22 | import com.google.pubsub.v1.ReceivedMessage; 23 | import java.util.ArrayList; 24 | import java.util.Collection; 25 | import java.util.List; 26 | 27 | /** 28 | * A {@link CloudPubSubSubscriber} that distributes a single subscription in round-robin fashion 29 | * over a set of {@link CloudPubSubGRPCSubscriber}s. 30 | */ 31 | public class CloudPubSubRoundRobinSubscriber implements CloudPubSubSubscriber { 32 | 33 | private final List<CloudPubSubSubscriber> subscribers; 34 | private int currentSubscriberIndex = 0; 35 | 36 | public CloudPubSubRoundRobinSubscriber( 37 | int subscriberCount, 38 | CredentialsProvider gcpCredentialsProvider, 39 | String endpoint, 40 | ProjectSubscriptionName subscriptionName, 41 | int cpsMaxBatchSize) { 42 | subscribers = new ArrayList<>(); 43 | for (int i = 0; i < subscriberCount; ++i) { 44 | subscribers.add( 45 | new CloudPubSubGRPCSubscriber( 46 | gcpCredentialsProvider, endpoint, subscriptionName, cpsMaxBatchSize)); 47 | } 48 | } 49 | 50 | @Override 51 | public void close() { 52 | for (CloudPubSubSubscriber subscriber : subscribers) { 53 | subscriber.close(); 54 | } 55 | } 56 | 57 | @Override 58 | public ApiFuture<List<ReceivedMessage>> pull() { 59 | currentSubscriberIndex = (currentSubscriberIndex + 1) % subscribers.size(); 60 | return subscribers.get(currentSubscriberIndex).pull(); 61 | } 62 | 63 | @Override 64 | public ApiFuture<Empty> ackMessages(Collection<String> ackIds) { 65 | currentSubscriberIndex = (currentSubscriberIndex + 1) % subscribers.size(); 66 | return subscribers.get(currentSubscriberIndex).ackMessages(ackIds); 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/CloudPubSubSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsub.kafka.source; 17 | 18 | import com.google.api.core.ApiFuture; 19 | import com.google.protobuf.Empty; 20 | import com.google.pubsub.v1.ReceivedMessage; 21 | import java.util.Collection; 22 | import java.util.List; 23 | 24 | /** 25 | * An interface for clients that want to subscribe to messages from Google Cloud Pub/Sub. 26 | *
27 | */ 28 | public interface CloudPubSubSubscriber extends AutoCloseable { 29 | ApiFuture<List<ReceivedMessage>> pull(); 30 | 31 | ApiFuture<Empty> ackMessages(Collection<String> ackIds); 32 | 33 | void close(); 34 | } 35 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/StreamingPullSubscriber.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.google.pubsub.kafka.source; 18 | 19 | import com.google.api.core.ApiFuture; 20 | import com.google.api.core.ApiFutureCallback; 21 | import com.google.api.core.ApiFutures; 22 | import com.google.api.core.ApiService.Listener; 23 | import com.google.api.core.ApiService.State; 24 | import com.google.api.core.SettableApiFuture; 25 | import com.google.api.gax.rpc.ApiException; 26 | import com.google.api.gax.rpc.StatusCode; 27 | import com.google.cloud.pubsub.v1.AckReplyConsumer; 28 | import com.google.cloud.pubsub.v1.SubscriberInterface; 29 | import com.google.common.collect.ImmutableList; 30 | import com.google.common.util.concurrent.MoreExecutors; 31 | import com.google.errorprone.annotations.concurrent.GuardedBy; 32 | import com.google.protobuf.Empty; 33 | import com.google.pubsub.v1.PubsubMessage; 34 | import com.google.pubsub.v1.ReceivedMessage; 35 | import java.util.ArrayDeque; 36 | import java.util.Collection; 37 | import java.util.Deque; 38 | import java.util.HashMap; 39 | import java.util.List; 40 | import java.util.Map; 41 | import java.util.Optional; 42 | import java.util.concurrent.ExecutionException; 43 | 44 | public class StreamingPullSubscriber implements CloudPubSubSubscriber { 45 | 46 | private final SubscriberInterface underlying; 47 | 48 | @GuardedBy("this") 49 | private Optional<ApiException> error = Optional.empty(); 50 | 51 | @GuardedBy("this") 52 | private final Deque<ReceivedMessage> messages = new ArrayDeque<>(); 53 | 54 | @GuardedBy("this") 55 | private long nextId = 0; 56 | 57 | @GuardedBy("this") 58 | private final Map<String, AckReplyConsumer> ackConsumers = new HashMap<>(); 59 | 60 | @GuardedBy("this") 61 | private Optional<SettableApiFuture<Void>> notification = Optional.empty(); 62 | 63 | public StreamingPullSubscriber(StreamingPullSubscriberFactory factory) throws ApiException { 64 | underlying = factory.newSubscriber(this::addMessage); 65 | underlying.addListener( 66 | new Listener() { 67 | @Override 68 | public void failed(State state, Throwable throwable) { 69 | fail(toApiException(throwable)); 70 | } 71 | }, 72 | MoreExecutors.directExecutor()); 73 | underlying.startAsync().awaitRunning(); 74 | } 75 | 76 | private static ApiException toApiException(Throwable t) { 77 | try { 78 | throw t; 79 | } catch (ApiException e) { 80 | return e; 81 | } catch (ExecutionException e) { 82 | return toApiException(e.getCause()); 83 | } catch (Throwable t2) { 84 | return new ApiException( 85 | t2, 86 | new StatusCode() { 87 | @Override 88 | public Code getCode() {
89 | return Code.INTERNAL; 90 | } 91 | 92 | @Override 93 | public Object getTransportCode() { 94 | return null; 95 | } 96 | }, 97 | false); 98 | } 99 | } 100 | 101 | private synchronized void fail(ApiException e) { 102 | if (!error.isPresent()) { 103 | error = Optional.of(e); 104 | } 105 | if (notification.isPresent()) { 106 | notification.get().setException(e); 107 | notification = Optional.empty(); 108 | } 109 | ackConsumers.values().forEach(AckReplyConsumer::nack); 110 | ackConsumers.clear(); 111 | } 112 | 113 | private synchronized void addMessage(PubsubMessage message, AckReplyConsumer consumer) { 114 | if (error.isPresent()) { 115 | consumer.nack(); 116 | return; 117 | } 118 | String ackId = Long.toString(nextId++); 119 | messages.add(ReceivedMessage.newBuilder().setMessage(message).setAckId(ackId).build()); 120 | ackConsumers.put(ackId, consumer); 121 | if (notification.isPresent()) { 122 | notification.get().set(null); 123 | notification = Optional.empty(); 124 | } 125 | } 126 | 127 | private synchronized ApiFuture<Void> onData() { 128 | if (error.isPresent()) { 129 | return ApiFutures.immediateFailedFuture(error.get()); 130 | } 131 | if (!messages.isEmpty()) { 132 | return ApiFutures.immediateFuture(null); 133 | } 134 | if (!notification.isPresent()) { 135 | notification = Optional.of(SettableApiFuture.create()); 136 | } 137 | return notification.get(); 138 | } 139 | 140 | private synchronized List<ReceivedMessage> takeMessages() { 141 | List<ReceivedMessage> toReturn = ImmutableList.copyOf(messages); 142 | messages.clear(); 143 | return toReturn; 144 | } 145 | 146 | @Override 147 | public void close() { 148 | synchronized (this) { 149 | fail(toApiException(new Throwable("Subscriber client shut down"))); 150 | } 151 | underlying.stopAsync().awaitTerminated(); 152 | } 153 | 154 | @Override 155 | public synchronized ApiFuture<List<ReceivedMessage>> pull() { 156 | SettableApiFuture<List<ReceivedMessage>> toReturn = SettableApiFuture.create(); 157 | ApiFutures.addCallback( 158 | onData(), 159 | new ApiFutureCallback<Void>() { 160 | @Override 161 | public void onFailure(Throwable t) { 162 | toReturn.setException(toApiException(t)); 163 | } 164 | 165 | @Override 166 | public void onSuccess(Void result) { 167 | toReturn.set(takeMessages()); 168 | } 169 | }, 170 | MoreExecutors.directExecutor()); 171 | return toReturn; 172 | } 173 | 174 | @Override 175 | public synchronized ApiFuture<Empty> ackMessages(Collection<String> ackIds) { 176 | ackIds.forEach( 177 | id -> Optional.ofNullable(ackConsumers.remove(id)).ifPresent(AckReplyConsumer::ack)); 178 | return ApiFutures.immediateFuture(null); 179 | } 180 | } 181 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsub/kafka/source/StreamingPullSubscriberFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2016 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | 17 | package com.google.pubsub.kafka.source; 18 | 19 | import com.google.cloud.pubsub.v1.MessageReceiver; 20 | import com.google.cloud.pubsub.v1.SubscriberInterface; 21 | 22 | public interface StreamingPullSubscriberFactory { 23 | SubscriberInterface newSubscriber(MessageReceiver receiver); 24 | } 25 |
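One way this factory can be satisfied, sketched with the Cloud Pub/Sub client library's streaming Subscriber; the project and subscription names are placeholders, not values taken from this repository.

package com.google.pubsub.kafka.source;

import com.google.cloud.pubsub.v1.Subscriber;

final class StreamingPullFactoryExample {
  // Builds a factory whose subscribers stream from a hard-coded (placeholder) subscription.
  static StreamingPullSubscriberFactory forSubscription() {
    return receiver ->
        Subscriber.newBuilder("projects/my-project/subscriptions/my-subscription", receiver)
            .build();
  }
}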
-------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/common/Version.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Google Inc. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.common; 17 | 18 | public class Version { 19 | private static String version = "unknown"; 20 | 21 | static { 22 | String implementationVersion = Version.class.getPackage().getImplementationVersion(); 23 | if (implementationVersion != null) { 24 | version = implementationVersion; 25 | } 26 | } 27 | 28 | public static String version() { 29 | return version; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/ConfigDefs.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | import com.google.pubsub.kafka.common.ConnectorUtils; 19 | import org.apache.kafka.common.config.ConfigDef; 20 | import org.apache.kafka.common.config.ConfigDef.Importance; 21 | 22 | final class ConfigDefs { 23 | 24 | private ConfigDefs() {} 25 | 26 | static final String PROJECT_FLAG = "pubsublite.project"; 27 | static final String LOCATION_FLAG = "pubsublite.location"; 28 | static final String TOPIC_NAME_FLAG = "pubsublite.topic"; 29 | static final String ORDERING_MODE_FLAG = "pubsublite.ordering.mode"; 30 | 31 | static ConfigDef config() { 32 | return new ConfigDef() 33 | .define( 34 | PROJECT_FLAG, 35 | ConfigDef.Type.STRING, 36 | Importance.HIGH, 37 | "The project containing the topic to which to publish.") 38 | .define( 39 | LOCATION_FLAG, 40 | ConfigDef.Type.STRING, 41 | Importance.HIGH, 42 | "The cloud zone (like europe-south7-q) containing the topic to which to publish.") 43 | .define( 44 | TOPIC_NAME_FLAG, 45 | ConfigDef.Type.STRING, 46 | Importance.HIGH, 47 | "The name of the topic to which to publish.") 48 | .define( 49 | ORDERING_MODE_FLAG, 50 | ConfigDef.Type.STRING, 51 | OrderingMode.DEFAULT.name(), 52 | Importance.HIGH, 53 | "The ordering mode to use for publishing to Pub/Sub Lite. If set to `KAFKA`, messages will be republished to the same partition index they were read from on the source topic. Note that this means the Pub/Sub Lite topic *must* have the same number of partitions as the source Kafka topic.") 54 | .define( 55 | ConnectorUtils.GCP_CREDENTIALS_FILE_PATH_CONFIG, 56 | ConfigDef.Type.STRING, 57 | "", 58 | Importance.HIGH, 59 | "The path to the GCP credentials file") 60 | .define( 61 | ConnectorUtils.GCP_CREDENTIALS_JSON_CONFIG, 62 | ConfigDef.Type.STRING, 63 | "", 64 | Importance.HIGH, 65 | "GCP JSON credentials"); 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/Constants.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | public final class Constants { 19 | 20 | private Constants() {} 21 | 22 | public static final String KAFKA_TOPIC_HEADER = "x-goog-pubsublite-source-kafka-topic"; 23 | public static final String KAFKA_PARTITION_HEADER = "x-goog-pubsublite-source-kafka-partition"; 24 | public static final String KAFKA_OFFSET_HEADER = "x-goog-pubsublite-source-kafka-offset"; 25 | public static final String KAFKA_EVENT_TIME_TYPE_HEADER = 26 | "x-goog-pubsublite-source-kafka-event-time-type"; 27 | } 28 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/KafkaPartitionRoutingPolicy.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | import static com.google.cloud.pubsublite.internal.ExtractStatus.toCanonical; 19 | 20 | import com.google.api.gax.rpc.StatusCode.Code; 21 | import com.google.cloud.pubsublite.Partition; 22 | import com.google.cloud.pubsublite.internal.CheckedApiException; 23 | import com.google.cloud.pubsublite.internal.RoutingPolicy; 24 | import com.google.cloud.pubsublite.proto.PubSubMessage; 25 | 26 | /** A routing policy that extracts the original Kafka partition and routes to that partition. */ 27 | class KafkaPartitionRoutingPolicy implements RoutingPolicy { 28 | private final long numPartitions; 29 | 30 | KafkaPartitionRoutingPolicy(long numPartitions) { 31 | this.numPartitions = numPartitions; 32 | } 33 | 34 | @Override 35 | public Partition route(PubSubMessage message) throws CheckedApiException { 36 | Partition partition = getPartition(message); 37 | if (partition.value() >= numPartitions) { 38 | throw new CheckedApiException( 39 | "Kafka topic has more partitions than Pub/Sub Lite topic. OrderingMode.KAFKA cannot be used.", 40 | Code.FAILED_PRECONDITION); 41 | } 42 | return partition; 43 | } 44 | 45 | private Partition getPartition(PubSubMessage message) throws CheckedApiException { 46 | try { 47 | return Partition.of( 48 | Long.parseLong( 49 | message 50 | .getAttributesOrThrow(Constants.KAFKA_PARTITION_HEADER) 51 | .getValues(0) 52 | .toStringUtf8())); 53 | } catch (Throwable t) { 54 | throw toCanonical(t); 55 | } 56 | } 57 | } 58 |
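A worked example of the routing contract above, not part of the connector source; the partition value and partition counts are invented for illustration.

package com.google.pubsublite.kafka.sink;

import com.google.cloud.pubsublite.Partition;
import com.google.cloud.pubsublite.internal.CheckedApiException;
import com.google.cloud.pubsublite.proto.AttributeValues;
import com.google.cloud.pubsublite.proto.PubSubMessage;
import com.google.protobuf.ByteString;

final class RoutingPolicyExample {
  static Partition routeSample() throws CheckedApiException {
    // A record read from Kafka partition 3 carries "3" in the partition attribute.
    PubSubMessage message =
        PubSubMessage.newBuilder()
            .putAttributes(
                Constants.KAFKA_PARTITION_HEADER,
                AttributeValues.newBuilder().addValues(ByteString.copyFromUtf8("3")).build())
            .build();
    // Returns Partition.of(3); a policy built with numPartitions = 3 would instead
    // throw FAILED_PRECONDITION, since partition index 3 would be out of range.
    return new KafkaPartitionRoutingPolicy(4).route(message);
  }
}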
-------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/OrderingMode.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2023 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | public enum OrderingMode { 19 | /* Order based on the standard Pub/Sub Lite logic. */ 20 | DEFAULT, 21 | /* Send messages to the same partition index they were from in Kafka. */ 22 | KAFKA 23 | } 24 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/PubSubLiteSinkConnector.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | import com.google.pubsublite.kafka.common.Version; 19 | import java.util.Collections; 20 | import java.util.List; 21 | import java.util.Map; 22 | import org.apache.kafka.common.config.ConfigDef; 23 | import org.apache.kafka.connect.connector.Task; 24 | import org.apache.kafka.connect.sink.SinkConnector; 25 | 26 | public class PubSubLiteSinkConnector extends SinkConnector { 27 | private Map<String, String> props; 28 | 29 | @Override 30 | public String version() { 31 | return Version.version(); 32 | } 33 | 34 | @Override 35 | public void start(Map<String, String> map) { 36 | props = map; 37 | } 38 | 39 | @Override 40 | public Class<? extends Task> taskClass() { 41 | return PubSubLiteSinkTask.class; 42 | } 43 | 44 | @Override 45 | public List<Map<String, String>> taskConfigs(int i) { 46 | return Collections.nCopies(i, props); 47 | } 48 | 49 | @Override 50 | public void stop() {} 51 | 52 | @Override 53 | public ConfigDef config() { 54 | return ConfigDefs.config(); 55 | } 56 | } 57 |
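A hedged configuration sketch for the sink connector above; only the keys come from ConfigDefs, while the project, location, and topic values are placeholders.

package com.google.pubsublite.kafka.sink;

import com.google.common.collect.ImmutableMap;
import java.util.Map;

final class SinkConfigExample {
  // Properties as they would be passed to PubSubLiteSinkConnector.start(...).
  static Map<String, String> sampleProps() {
    return ImmutableMap.of(
        "pubsublite.project", "my-project",
        "pubsublite.location", "us-central1-a",
        "pubsublite.topic", "my-lite-topic",
        "pubsublite.ordering.mode", "KAFKA");
  }
}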
-------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/PubSubLiteSinkTask.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | import static com.google.pubsublite.kafka.sink.Schemas.encodeToBytes; 19 | 20 | import com.google.api.core.ApiService.State; 21 | import com.google.cloud.pubsublite.MessageMetadata; 22 | import com.google.cloud.pubsublite.internal.Publisher; 23 | import com.google.cloud.pubsublite.proto.AttributeValues; 24 | import com.google.cloud.pubsublite.proto.PubSubMessage; 25 | import com.google.common.annotations.VisibleForTesting; 26 | import com.google.common.collect.ImmutableListMultimap; 27 | import com.google.protobuf.ByteString; 28 | import com.google.protobuf.util.Timestamps; 29 | import java.io.IOException; 30 | import java.util.Collection; 31 | import java.util.Map; 32 | import javax.annotation.Nullable; 33 | import org.apache.kafka.clients.consumer.OffsetAndMetadata; 34 | import org.apache.kafka.common.TopicPartition; 35 | import org.apache.kafka.connect.header.ConnectHeaders; 36 | import org.apache.kafka.connect.header.Header; 37 | import org.apache.kafka.connect.sink.SinkRecord; 38 | import org.apache.kafka.connect.sink.SinkTask; 39 | 40 | public class PubSubLiteSinkTask extends SinkTask { 41 | 42 | private final PublisherFactory factory; 43 | private @Nullable Publisher<MessageMetadata> publisher; 44 | 45 | @VisibleForTesting 46 | PubSubLiteSinkTask(PublisherFactory factory) { 47 | this.factory = factory; 48 | } 49 | 50 | public PubSubLiteSinkTask() { 51 | this(new PublisherFactoryImpl()); 52 | } 53 | 54 | @Override 55 | public String version() { 56 | return new PubSubLiteSinkConnector().version(); 57 | } 58 | 59 | @Override 60 | public void start(Map<String, String> map) { 61 | if (publisher != null) { 62 | throw new IllegalStateException("Called start when publisher already exists."); 63 | } 64 | publisher = factory.newPublisher(map); 65 | publisher.startAsync().awaitRunning(); 66 | } 67 | 68 | @Override 69 | public void put(Collection<SinkRecord> collection) { 70 | if (publisher.state() != State.RUNNING) { 71 | if (publisher.state() == State.FAILED) { 72 | throw new IllegalStateException("Publisher has failed.", publisher.failureCause()); 73 | } else { 74 | throw new IllegalStateException("Publisher not currently running."); 75 | } 76 | } 77 | for (SinkRecord record : collection) { 78 | PubSubMessage.Builder message = PubSubMessage.newBuilder(); 79 | if (record.key() != null) { 80 | message.setKey(encodeToBytes(record.keySchema(), record.key())); 81 | } 82 | if (record.value() != null) { 83 | message.setData(encodeToBytes(record.valueSchema(), record.value())); 84 | } 85 | ImmutableListMultimap.Builder<String, ByteString> attributes = 86 | ImmutableListMultimap.builder(); 87 | getRecordHeaders(record) 88 | .forEach( 89 | header -> 90 | attributes.put( 91 | header.key(), Schemas.encodeToBytes(header.schema(), header.value()))); 92 | 93 | if (record.topic() != null) { 94 | attributes.put(Constants.KAFKA_TOPIC_HEADER, ByteString.copyFromUtf8(record.topic())); 95 | } 96 | if (record.kafkaPartition() != null) { 97 | attributes.put( 98 | Constants.KAFKA_PARTITION_HEADER, 99 | ByteString.copyFromUtf8(record.kafkaPartition().toString())); 100 | attributes.put( 101 | Constants.KAFKA_OFFSET_HEADER, 102 | ByteString.copyFromUtf8(Long.toString(record.kafkaOffset()))); 103 | } 104 | if (record.timestamp() != null) { 105 | attributes.put( 106 | Constants.KAFKA_EVENT_TIME_TYPE_HEADER, 107 | ByteString.copyFromUtf8(record.timestampType().name)); 108 | message.setEventTime(Timestamps.fromMillis(record.timestamp())); 109 | } 110 | attributes 111 | .build() 112 | .asMap() 113 | .forEach(
114 | (key, values) -> 115 | message.putAttributes( 116 | key, AttributeValues.newBuilder().addAllValues(values).build())); 117 | publisher.publish(message.build()); 118 | } 119 | } 120 | 121 | private Iterable<Header> getRecordHeaders(SinkRecord record) { 122 | ConnectHeaders headers = new ConnectHeaders(); 123 | if (record.headers() != null) { 124 | for (Header header : record.headers()) { 125 | headers.add(header); 126 | } 127 | } 128 | return headers; 129 | } 130 | 131 | @Override 132 | public void flush(Map<TopicPartition, OffsetAndMetadata> currentOffsets) { 133 | try { 134 | if (publisher != null) { 135 | publisher.flush(); 136 | } 137 | } catch (IOException e) { 138 | throw new RuntimeException(e); 139 | } 140 | } 141 | 142 | @Override 143 | public void stop() { 144 | if (publisher == null) { 145 | throw new IllegalStateException("Called stop when publisher doesn't exist."); 146 | } 147 | try { 148 | publisher.flush(); 149 | publisher.stopAsync().awaitTerminated(); 150 | } catch (IOException e) { 151 | throw new RuntimeException(e); 152 | } finally { 153 | publisher = null; 154 | } 155 | } 156 | } 157 |
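A short sketch of what put(...) above does with a single record, not part of the connector source; the topic, partition, offset, and payload values are invented for illustration.

package com.google.pubsublite.kafka.sink;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.sink.SinkRecord;

final class SinkTaskExample {
  static SinkRecord sampleRecord() {
    // put(...) would publish this as a PubSubMessage with key "user-1", data {1, 2, 3},
    // and attributes x-goog-pubsublite-source-kafka-topic=events,
    // x-goog-pubsublite-source-kafka-partition=2, x-goog-pubsublite-source-kafka-offset=42.
    return new SinkRecord(
        "events",
        2,
        Schema.STRING_SCHEMA,
        "user-1",
        Schema.BYTES_SCHEMA,
        new byte[] {1, 2, 3},
        42L);
  }
}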
-------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/PublisherFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.google.pubsublite.kafka.sink; 18 | 19 | import com.google.cloud.pubsublite.MessageMetadata; 20 | import com.google.cloud.pubsublite.internal.Publisher; 21 | import java.util.Map; 22 | 23 | interface PublisherFactory { 24 | 25 | Publisher<MessageMetadata> newPublisher(Map<String, String> params); 26 | } 27 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/PublisherFactoryImpl.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.sink; 17 | 18 | import static com.google.cloud.pubsublite.internal.ExtractStatus.toCanonical; 19 | import static com.google.cloud.pubsublite.internal.wire.ServiceClients.addDefaultSettings; 20 | import static com.google.cloud.pubsublite.internal.wire.ServiceClients.getCallContext; 21 | 22 | import com.google.api.gax.rpc.ApiCallContext; 23 | import com.google.api.gax.rpc.ApiException; 24 | import com.google.cloud.pubsublite.AdminClient; 25 | import com.google.cloud.pubsublite.AdminClientSettings; 26 | import com.google.cloud.pubsublite.CloudRegionOrZone; 27 | import com.google.cloud.pubsublite.MessageMetadata; 28 | import com.google.cloud.pubsublite.Partition; 29 | import com.google.cloud.pubsublite.ProjectPath; 30 | import com.google.cloud.pubsublite.TopicName; 31 | import com.google.cloud.pubsublite.TopicPath; 32 | import com.google.cloud.pubsublite.cloudpubsub.PublisherSettings; 33 | import com.google.cloud.pubsublite.internal.Publisher; 34 | import com.google.cloud.pubsublite.internal.wire.PartitionCountWatchingPublisherSettings; 35 | import com.google.cloud.pubsublite.internal.wire.PartitionPublisherFactory; 36 | import com.google.cloud.pubsublite.internal.wire.PubsubContext; 37 | import com.google.cloud.pubsublite.internal.wire.PubsubContext.Framework; 38 | import com.google.cloud.pubsublite.internal.wire.RoutingMetadata; 39 | import com.google.cloud.pubsublite.internal.wire.SinglePartitionPublisherBuilder; 40 | import com.google.cloud.pubsublite.v1.AdminServiceClient; 41 | import com.google.cloud.pubsublite.v1.AdminServiceSettings; 42 | import com.google.cloud.pubsublite.v1.PublisherServiceClient; 43 | import com.google.cloud.pubsublite.v1.PublisherServiceSettings; 44 | import com.google.pubsub.kafka.common.ConnectorCredentialsProvider; 45 | import java.io.IOException; 46 | import java.util.Map; 47 | import java.util.Optional; 48 | 49 | class PublisherFactoryImpl implements PublisherFactory { 50 | 51 | private static final Framework FRAMEWORK = Framework.of("KAFKA_CONNECT"); 52 | 53 | private PartitionPublisherFactory getPartitionPublisherFactory( 54 | TopicPath topic, ConnectorCredentialsProvider credentialsProvider) { 55 | 56 | return new PartitionPublisherFactory() { 57 | private Optional<PublisherServiceClient> publisherServiceClient = Optional.empty(); 58 | 59 | private synchronized PublisherServiceClient getServiceClient() throws ApiException { 60 | if (publisherServiceClient.isPresent()) return publisherServiceClient.get(); 61 | try { 62 | publisherServiceClient = 63 | Optional.of( 64 | PublisherServiceClient.create( 65 | addDefaultSettings( 66 | topic.location().extractRegion(), 67 | PublisherServiceSettings.newBuilder() 68 | .setCredentialsProvider(credentialsProvider)))); 69 | return publisherServiceClient.get(); 70 | } catch (Throwable t) { 71 | throw toCanonical(t).underlying; 72 | } 73 | } 74 | 75 | @Override 76 | public Publisher<MessageMetadata> newPublisher(Partition partition) throws ApiException { 77 | PublisherServiceClient client = getServiceClient(); 78 | SinglePartitionPublisherBuilder.Builder singlePartitionBuilder = 79 | SinglePartitionPublisherBuilder.newBuilder() 80 | .setTopic(topic) 81 | .setPartition(partition) 82 | .setBatchingSettings(PublisherSettings.DEFAULT_BATCHING_SETTINGS) 83 | .setStreamFactory( 84 | responseStream -> { 85 | ApiCallContext context = 86 | getCallContext( 87 | PubsubContext.of(FRAMEWORK), RoutingMetadata.of(topic, partition)); 88 | return client.publishCallable().splitCall(responseStream, context); 89 | });
90 | return singlePartitionBuilder.build(); 91 | } 92 | 93 | @Override 94 | public void close() {} 95 | }; 96 | } 97 | 98 | @Override 99 | public Publisher<MessageMetadata> newPublisher(Map<String, String> params) { 100 | Map<String, Object> config = ConfigDefs.config().parse(params); 101 | ConnectorCredentialsProvider credentialsProvider = 102 | ConnectorCredentialsProvider.fromConfig(config); 103 | CloudRegionOrZone location = 104 | CloudRegionOrZone.parse(config.get(ConfigDefs.LOCATION_FLAG).toString()); 105 | PartitionCountWatchingPublisherSettings.Builder builder = 106 | PartitionCountWatchingPublisherSettings.newBuilder(); 107 | TopicPath topic = 108 | TopicPath.newBuilder() 109 | .setProject( 110 | ProjectPath.parse("projects/" + config.get(ConfigDefs.PROJECT_FLAG)).project()) 111 | .setLocation(location) 112 | .setName(TopicName.of(config.get(ConfigDefs.TOPIC_NAME_FLAG).toString())) 113 | .build(); 114 | builder.setTopic(topic); 115 | builder.setPublisherFactory(getPartitionPublisherFactory(topic, credentialsProvider)); 116 | try { 117 | builder.setAdminClient( 118 | AdminClient.create( 119 | AdminClientSettings.newBuilder() 120 | .setRegion(location.extractRegion()) 121 | .setServiceClient( 122 | AdminServiceClient.create( 123 | addDefaultSettings( 124 | location.extractRegion(), 125 | AdminServiceSettings.newBuilder() 126 | .setCredentialsProvider(credentialsProvider)))) 127 | .build())); 128 | } catch (IOException e) { 129 | throw new IllegalStateException(e); 130 | } 131 | if (OrderingMode.valueOf(config.get(ConfigDefs.ORDERING_MODE_FLAG).toString()) 132 | == OrderingMode.KAFKA) { 133 | builder.setRoutingPolicyFactory(KafkaPartitionRoutingPolicy::new); 134 | } 135 | return builder.build().instantiate(); 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/sink/Schemas.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.google.pubsublite.kafka.sink; 18 | 19 | import com.google.common.annotations.VisibleForTesting; 20 | import com.google.protobuf.ByteString; 21 | import com.google.protobuf.ListValue; 22 | import com.google.protobuf.Struct; 23 | import com.google.protobuf.Value; 24 | import java.nio.ByteBuffer; 25 | import java.util.Base64; 26 | import java.util.List; 27 | import java.util.Map; 28 | import javax.annotation.Nullable; 29 | import org.apache.kafka.connect.data.Field; 30 | import org.apache.kafka.connect.data.Schema; 31 | import org.apache.kafka.connect.errors.DataException; 32 | 33 | /** 34 | * Schema handling for Pub/Sub Lite. 35 | * 36 | * <p>null schemas are treated as Schema.STRING_SCHEMA 37 | * 38 | * <p>Top level BYTES payloads are unmodified. Top level STRING payloads are encoded using 39 | * copyFromUtf8. Top level Integral payloads are converted using 40 | * copyFromUtf8(Long.toString(x.longValue())). Top level Floating point payloads are converted using 41 | * copyFromUtf8(Double.toString(x.doubleValue())). 42 | * 43 | * <p>All other payloads are encoded into a protobuf Value, then converted to a ByteString. Nested 44 | * STRING fields are encoded into a protobuf Value. Nested BYTES fields are encoded to a protobuf 45 | * Value holding the base64 encoded bytes. Nested Numeric fields are encoded as a double into a 46 | * protobuf Value. 47 | * 48 | * <p>Maps with Array, Map, or Struct keys are not supported. BYTES keys in maps are base64 encoded. 49 | * Integral keys are converted using Long.toString(x.longValue()). Floating point keys are converted 50 | * using Double.toString(x.doubleValue()). 51 | */ 52 | final class Schemas { 53 | 54 | private Schemas() {} 55 | 56 | private static Schema.Type safeSchemaType(@Nullable Schema schema) { 57 | if (schema == null) { 58 | return Schema.Type.STRING; 59 | } 60 | return schema.type(); 61 | } 62 | 63 | static ByteString encodeToBytes(@Nullable Schema schema, Object object) { 64 | switch (safeSchemaType(schema)) { 65 | case INT8: 66 | case INT16: 67 | case INT32: 68 | case INT64: 69 | case FLOAT32: 70 | case FLOAT64: 71 | case BOOLEAN: 72 | case STRING: 73 | return ByteString.copyFromUtf8(stringRep(schema, object)); 74 | case BYTES: 75 | return extractBytes(object); 76 | case ARRAY: 77 | case MAP: 78 | case STRUCT: 79 | return encode(schema, object).toByteString(); 80 | } 81 | throw new DataException("Invalid schema type."); 82 | } 83 | 84 | @VisibleForTesting 85 | static Value encode(@Nullable Schema schema, Object object) { 86 | switch (safeSchemaType(schema)) { 87 | case INT8: 88 | case INT16: 89 | case INT32: 90 | case INT64: 91 | case FLOAT32: 92 | case FLOAT64: 93 | return toValue((Number) object); 94 | case BOOLEAN: 95 | return Value.newBuilder().setBoolValue((Boolean) object).build(); 96 | case STRING: 97 | return Value.newBuilder().setStringValue(object.toString()).build(); 98 | case BYTES: 99 | ByteString bytes = extractBytes(object); 100 | return Value.newBuilder() 101 | .setStringValue(Base64.getEncoder().encodeToString(bytes.toByteArray())) 102 | .build(); 103 | case ARRAY: 104 | { 105 | ListValue.Builder listBuilder = ListValue.newBuilder(); 106 | List<Object> objects = (List<Object>) object; 107 | for (Object o : objects) { 108 | listBuilder.addValues(encode(schema.valueSchema(), o)); 109 | } 110 | return Value.newBuilder().setListValue(listBuilder).build(); 111 | } 112 | case MAP: 113 | { 114 | Struct.Builder builder = Struct.newBuilder(); 115 | Map<Object, Object> map = (Map<Object, Object>) object; 116 | for (Object key : map.keySet()) { 117 | builder.putFields( 118 | stringRep(schema.keySchema(), key), encode(schema.valueSchema(), map.get(key))); 119 | } 120 | return Value.newBuilder().setStructValue(builder).build(); 121 | } 122 | case STRUCT: 123 | { 124 | Struct.Builder builder = Struct.newBuilder(); 125 | org.apache.kafka.connect.data.Struct struct = 126 | (org.apache.kafka.connect.data.Struct) object; 127 | for (Field f : schema.fields()) { 128 | builder.putFields(f.name(), encode(f.schema(), struct.get(f))); 129 | } 130 | return Value.newBuilder().setStructValue(builder).build(); 131 | } 132 | } 133 | throw new DataException("Invalid schema type."); 134 | } 135 | 136 | private static String stringRep(@Nullable Schema schema, Object object) { 137 | switch (safeSchemaType(schema)) { 138 | case INT8: 139 | case INT16: 140 | case INT32: 141 | case INT64: 142 | return Long.toString(((Number) object).longValue()); 143 | case FLOAT32: 144 | case FLOAT64: 145 | return Double.toString(((Number) object).doubleValue()); 146 | case BOOLEAN: 147 | case STRING: 148 | return object.toString(); 149 | case BYTES: 150 | return Base64.getEncoder().encodeToString(extractBytes(object).toByteArray()); 151 | case ARRAY: 152 | case MAP: 153 | case STRUCT: 154 | throw new DataException("Cannot convert ARRAY, MAP, or STRUCT to String."); 155 | } 156 | throw new DataException("Invalid schema type."); 157 | } 158 | 159 | private static ByteString extractBytes(Object object) {
160 | if (object instanceof byte[]) { 161 | return ByteString.copyFrom((byte[]) object); 162 | } else if (object instanceof ByteBuffer) { 163 | return ByteString.copyFrom((ByteBuffer) object); 164 | } 165 | throw new DataException("Unexpected value class with BYTES schema type."); 166 | } 167 | 168 | private static Value toValue(Number val) { 169 | return Value.newBuilder().setNumberValue(val.doubleValue()).build(); 170 | } 171 | } 172 |
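A worked example of the encoding rules documented above; the schema, field names, and values are invented for illustration.

package com.google.pubsublite.kafka.sink;

import com.google.protobuf.ByteString;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

final class SchemasExample {
  static ByteString encodeSample() {
    Schema schema =
        SchemaBuilder.struct()
            .field("id", Schema.INT64_SCHEMA)
            .field("name", Schema.STRING_SCHEMA)
            .build();
    Struct struct = new Struct(schema).put("id", 7L).put("name", "widget");
    // A top-level STRUCT is encoded as a protobuf Value and serialized: the INT64
    // field becomes a number value (a double) and the STRING field a string value.
    return Schemas.encodeToBytes(schema, struct);
  }
}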
Set to 20MB by default.") 68 | .define( 69 | ConnectorUtils.GCP_CREDENTIALS_FILE_PATH_CONFIG, 70 | ConfigDef.Type.STRING, 71 | "", 72 | Importance.HIGH, 73 | "The path to the GCP credentials file") 74 | .define( 75 | ConnectorUtils.GCP_CREDENTIALS_JSON_CONFIG, 76 | ConfigDef.Type.STRING, 77 | "", 78 | Importance.HIGH, 79 | "GCP JSON credentials"); 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/Poller.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.source; 17 | 18 | import java.util.List; 19 | import javax.annotation.Nullable; 20 | import org.apache.kafka.connect.source.SourceRecord; 21 | 22 | interface Poller extends AutoCloseable { 23 | 24 | @Nullable 25 | List poll(); 26 | 27 | void close(); 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/PollerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.google.pubsublite.kafka.source; 18 | 19 | import java.util.Map; 20 | 21 | interface PollerFactory { 22 | 23 | Poller newPoller(Map params); 24 | } 25 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/PollerFactoryImpl.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 
-------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/Poller.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.source; 17 | 18 | import java.util.List; 19 | import javax.annotation.Nullable; 20 | import org.apache.kafka.connect.source.SourceRecord; 21 | 22 | interface Poller extends AutoCloseable { 23 | 24 | @Nullable 25 | List<SourceRecord> poll(); 26 | 27 | void close(); 28 | } 29 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/PollerFactory.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | 17 | package com.google.pubsublite.kafka.source; 18 | 19 | import java.util.Map; 20 | 21 | interface PollerFactory { 22 | 23 | Poller newPoller(Map<String, String> params); 24 | } 25 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/PollerFactoryImpl.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.source; 17 | 18 | import com.google.cloud.pubsublite.CloudZone; 19 | import com.google.cloud.pubsublite.ProjectPath; 20 | import com.google.cloud.pubsublite.SubscriptionName; 21 | import com.google.cloud.pubsublite.SubscriptionPath; 22 | import com.google.cloud.pubsublite.cloudpubsub.FlowControlSettings; 23 | import com.google.cloud.pubsublite.kafka.ConsumerSettings; 24 | import java.util.Map; 25 | import org.apache.kafka.clients.consumer.Consumer; 26 | import org.apache.kafka.common.config.ConfigValue; 27 | 28 | class PollerFactoryImpl implements PollerFactory { 29 | 30 | @Override 31 | public Poller newPoller(Map<String, String> params) { 32 | Map<String, ConfigValue> config = ConfigDefs.config().validateAll(params); 33 | SubscriptionPath path = 34 | SubscriptionPath.newBuilder() 35 | .setProject( 36 | ProjectPath.parse("projects/" + config.get(ConfigDefs.PROJECT_FLAG).value()) 37 | .project()) 38 | .setLocation(CloudZone.parse(config.get(ConfigDefs.LOCATION_FLAG).value().toString())) 39 | .setName( 40 | SubscriptionName.of( 41 | config.get(ConfigDefs.SUBSCRIPTION_NAME_FLAG).value().toString())) 42 | .build(); 43 | FlowControlSettings flowControlSettings = 44 | FlowControlSettings.builder() 45 | .setMessagesOutstanding( 46 | (Long) config.get(ConfigDefs.FLOW_CONTROL_PARTITION_MESSAGES_FLAG).value()) 47 | .setBytesOutstanding( 48 | (Long) config.get(ConfigDefs.FLOW_CONTROL_PARTITION_BYTES_FLAG).value()) 49 | .build(); 50 | Consumer<byte[], byte[]> consumer = 51 | ConsumerSettings.newBuilder() 52 | .setAutocommit(true) 53 | .setSubscriptionPath(path) 54 | .setPerPartitionFlowControlSettings(flowControlSettings) 55 | .build() 56 | .instantiate(); 57 | // There is only one topic for Pub/Sub Lite subscriptions, and the consumer only exposes this 58 | // topic. 59 | consumer.subscribe(consumer.listTopics().keySet()); 60 | return new PollerImpl(config.get(ConfigDefs.KAFKA_TOPIC_FLAG).value().toString(), consumer); 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/PollerImpl.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | * See the License for the specific language governing permissions and 14 | * limitations under the License.
15 | */ 16 | 17 | package com.google.pubsublite.kafka.source; 18 | 19 | import com.google.common.annotations.VisibleForTesting; 20 | import com.google.common.collect.ImmutableList; 21 | import com.google.common.collect.ImmutableMap; 22 | import java.time.Duration; 23 | import java.util.List; 24 | import javax.annotation.Nullable; 25 | import org.apache.kafka.clients.consumer.Consumer; 26 | import org.apache.kafka.clients.consumer.ConsumerRecords; 27 | import org.apache.kafka.common.errors.TimeoutException; 28 | import org.apache.kafka.common.errors.WakeupException; 29 | import org.apache.kafka.common.header.Header; 30 | import org.apache.kafka.connect.data.Schema; 31 | import org.apache.kafka.connect.header.ConnectHeaders; 32 | import org.apache.kafka.connect.source.SourceRecord; 33 | 34 | class PollerImpl implements Poller { 35 | 36 | @VisibleForTesting static final Duration POLL_DURATION = Duration.ofSeconds(10); 37 | private final String kafkaTopic; 38 | private final Consumer<byte[], byte[]> consumer; 39 | 40 | PollerImpl(String kafkaTopic, Consumer<byte[], byte[]> consumer) { 41 | this.kafkaTopic = kafkaTopic; 42 | this.consumer = consumer; 43 | } 44 | 45 | @Override 46 | public @Nullable List<SourceRecord> poll() { 47 | try { 48 | ConsumerRecords<byte[], byte[]> records = consumer.poll(POLL_DURATION); 49 | ImmutableList.Builder<SourceRecord> output = ImmutableList.builder(); 50 | records.forEach( 51 | consumerRecord -> { 52 | final ConnectHeaders headers = new ConnectHeaders(); 53 | for (Header header : consumerRecord.headers()) { 54 | headers.addBytes(header.key(), header.value()); 55 | } 56 | boolean keyNullOrEmpty = 57 | consumerRecord.key() == null || consumerRecord.key().length == 0; 58 | output.add( 59 | new SourceRecord( 60 | ImmutableMap.of( 61 | "topic", consumerRecord.topic(), "partition", consumerRecord.partition()), 62 | ImmutableMap.of("offset", consumerRecord.offset()), 63 | kafkaTopic, 64 | // Null partition uses the default kafka partitioner, which has key affinity if 65 | // the key 66 | // is not null. Pub/Sub Lite messages with an empty key are treated as if they have 67 | // no key, 68 | // and are given a null key instead to get this routing behavior. 69 | // https://docs.confluent.io/3.3.0/connect/connect-storage-cloud/kafka-connect-s3/docs/configuration_options.html#partitioner 70 | null, 71 | Schema.OPTIONAL_BYTES_SCHEMA, 72 | keyNullOrEmpty ? null : consumerRecord.key(), 73 | Schema.BYTES_SCHEMA, 74 | consumerRecord.value(), 75 | consumerRecord.timestamp(), 76 | headers)); 77 | }); 78 | return output.build(); 79 | } catch (TimeoutException | WakeupException e) { 80 | return null; 81 | } 82 | } 83 | 84 | @Override 85 | public void close() { 86 | consumer.wakeup(); 87 | consumer.unsubscribe(); 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /src/main/java/com/google/pubsublite/kafka/source/PubSubLiteSourceConnector.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Copyright 2020 Google LLC 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"); 5 | * you may not use this file except in compliance with the License. 6 | * You may obtain a copy of the License at 7 | * 8 | * http://www.apache.org/licenses/LICENSE-2.0 9 | * 10 | * Unless required by applicable law or agreed to in writing, software 11 | * distributed under the License is distributed on an "AS IS" BASIS, 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 | * See the License for the specific language governing permissions and 14 | * limitations under the License. 15 | */ 16 | package com.google.pubsublite.kafka.source; 17 | 18 | import com.google.pubsublite.kafka.common.Version; 19 | import java.util.Collections; 20 | import java.util.List; 21 | import java.util.Map; 22 | import org.apache.kafka.common.config.ConfigDef; 23 | import org.apache.kafka.connect.connector.Task; 24 | import org.apache.kafka.connect.source.SourceConnector; 25 | 26 | public class PubSubLiteSourceConnector extends SourceConnector { 27 | 28 | private Map<String, String> props; 29 | 30 | @Override 31 | public String version() { 32 | return Version.version(); 33 | } 34 | 35 | @Override 36 | public void start(Map<String, String> map) { 37 | props = map; 38 | } 39 | 40 | @Override 41 | public Class<? extends Task> taskClass() { 42 | return PubSubLiteSourceTask.class; 43 | } 44 | 45 | @Override 46 | public List<Map<String, String>> taskConfigs(int i) { 47 | return Collections.nCopies(i, props); 48 | } 49 | 50 | @Override 51 | public void stop() {} 52 | 53 | @Override 54 | public ConfigDef config() { 55 | return ConfigDefs.config(); 56 | } 57 | } 58 |
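A hedged configuration sketch for the source connector above; only the keys come from ConfigDefs, and every value is a placeholder.

package com.google.pubsublite.kafka.source;

import com.google.common.collect.ImmutableMap;
import java.util.Map;

final class SourceConfigExample {
  // Properties as they would be passed to PubSubLiteSourceConnector.start(...).
  static Map<String, String> sampleProps() {
    return ImmutableMap.of(
        "pubsublite.project", "my-project",
        "pubsublite.location", "us-central1-a",
        "pubsublite.subscription", "my-lite-subscription",
        "kafka.topic", "events",
        "pubsublite.partition_flow_control.bytes", "20000000");
  }
}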
--------------------------------------------------------------------------------
/src/main/java/com/google/pubsublite/kafka/source/PubSubLiteSourceTask.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.pubsublite.kafka.source;

import com.google.common.annotations.VisibleForTesting;
import java.util.List;
import java.util.Map;
import javax.annotation.Nullable;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.source.SourceTask;

public class PubSubLiteSourceTask extends SourceTask {

  private final PollerFactory factory;
  private @Nullable Poller poller;

  @VisibleForTesting
  PubSubLiteSourceTask(PollerFactory factory) {
    this.factory = factory;
  }

  public PubSubLiteSourceTask() {
    this(new PollerFactoryImpl());
  }

  @Override
  public String version() {
    return new PubSubLiteSourceConnector().version();
  }

  @Override
  public void start(Map<String, String> props) {
    if (poller != null) {
      throw new IllegalStateException("Called start when poller already exists.");
    }
    poller = factory.newPoller(props);
  }

  @Override
  public @Nullable List<SourceRecord> poll() {
    return poller.poll();
  }

  @Override
  public void stop() {
    if (poller == null) {
      throw new IllegalStateException("Called stop when poller doesn't exist.");
    }
    try {
      poller.close();
    } finally {
      poller = null;
    }
  }
}
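Kafka Connect drives this task through a fixed start/poll/stop sequence. A test-style sketch of that lifecycle with a trivial stub Poller (a sketch only: it assumes it compiles in the same package, since Poller and PollerFactory are package-private, and that PollerFactory is a single-method interface usable as a lambda):

package com.google.pubsublite.kafka.source;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.List;
import javax.annotation.Nullable;
import org.apache.kafka.connect.source.SourceRecord;

// Sketch only: start() builds the poller (and throws if called twice),
// poll() may return null on timeout/wakeup, stop() closes and clears it.
public class TaskLifecycleSketch {
  public static void main(String[] args) {
    Poller stub =
        new Poller() {
          @Override
          public @Nullable List<SourceRecord> poll() {
            return ImmutableList.of();
          }

          @Override
          public void close() {}
        };
    PubSubLiteSourceTask task = new PubSubLiteSourceTask(props -> stub);
    task.start(ImmutableMap.of());
    System.out.println(task.poll()); // []
    task.stop(); // a second stop() would throw IllegalStateException
  }
}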
--------------------------------------------------------------------------------
/src/main/resources/log4j.properties:
--------------------------------------------------------------------------------
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

log4j.rootLogger=DEBUG, CONSOLE
log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
# Silence Kafka client logging.
log4j.logger.org.apache.kafka=OFF
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsub/kafka/sink/CloudPubSubSinkConnectorTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.pubsub.kafka.sink;

import static org.junit.Assert.assertEquals;

import com.google.pubsub.kafka.common.ConnectorUtils;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Before;
import org.junit.Test;

/** Tests for {@link CloudPubSubSinkConnector}. */
public class CloudPubSubSinkConnectorTest {

  private static final int NUM_TASKS = 10;
  private static final String CPS_PROJECT = "hello";
  private static final String CPS_TOPIC = "world";

  private CloudPubSubSinkConnector connector;
  private Map<String, String> props;

  @Before
  public void setup() {
    connector = new CloudPubSubSinkConnector();
    props = new HashMap<>();
    props.put(ConnectorUtils.CPS_PROJECT_CONFIG, CPS_PROJECT);
    props.put(ConnectorUtils.CPS_TOPIC_CONFIG, CPS_TOPIC);
  }

  @Test
  public void testTaskConfigs() {
    connector.start(props);
    List<Map<String, String>> taskConfigs = connector.taskConfigs(NUM_TASKS);
    assertEquals(taskConfigs.size(), NUM_TASKS);
    for (int i = 0; i < taskConfigs.size(); ++i) {
      assertEquals(taskConfigs.get(i), props);
    }
  }

  @Test
  public void testTaskClass() {
    assertEquals(CloudPubSubSinkTask.class, connector.taskClass());
  }
}
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsub/kafka/source/AckBatchingSubscriberTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.pubsub.kafka.source;

import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;

import com.google.api.core.ApiFuture;
import com.google.api.core.SettableApiFuture;
import com.google.api.gax.rpc.StatusCode.Code;
import com.google.cloud.pubsublite.internal.CheckedApiException;
import com.google.cloud.pubsublite.internal.ExtractStatus;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.protobuf.Empty;
import com.google.pubsub.kafka.source.AckBatchingSubscriber.AlarmFactory;
import java.util.Optional;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

@RunWith(JUnit4.class)
public class AckBatchingSubscriberTest {
  private final AlarmFactory alarmFactory = mock(AlarmFactory.class);
  private final CloudPubSubSubscriber underlying = mock(CloudPubSubSubscriber.class);
  private Runnable onAlarm;
  private CloudPubSubSubscriber subscriber;

  @Before
  public void setUp() {
    when(alarmFactory.newAlarm(any()))
        .thenAnswer(
            args -> {
              onAlarm = args.getArgument(0);
              return Futures.immediateVoidFuture();
            });
    subscriber = new AckBatchingSubscriber(underlying, alarmFactory);
    assertThat(onAlarm).isNotNull();
  }

  @Test
  public void pullProxies() {
    subscriber.pull();
    verify(underlying, times(1)).pull();
    verifyNoMoreInteractions(underlying);
  }

  @Test
  public void closeProxies() {
    subscriber.close();
    verify(underlying, times(1)).close();
    verifyNoMoreInteractions(underlying);
  }

  public static void assertFutureThrowsCode(Future<?> f, Code code) {
    ExecutionException exception = assertThrows(ExecutionException.class, f::get);
    assertThrowableMatches(exception.getCause(), code);
  }

  public static void assertThrowableMatches(Throwable t, Code code) {
    Optional<CheckedApiException> statusOr = ExtractStatus.extract(t);
    assertThat(statusOr.get().code()).isEqualTo(code);
  }

  @Test
  public void partialFlushFailure() {
    ApiFuture<Empty> future1 = subscriber.ackMessages(ImmutableList.of("a", "b"));
    ApiFuture<Empty> future2 = subscriber.ackMessages(ImmutableList.of("c"));
    SettableApiFuture<Empty> batchDone = SettableApiFuture.create();
    when(underlying.ackMessages(ImmutableList.of("a", "b", "c"))).thenReturn(batchDone);
    onAlarm.run();
    ApiFuture<Empty> future3 = subscriber.ackMessages(ImmutableList.of("d"));
    assertThat(future1.isDone()).isFalse();
    assertThat(future2.isDone()).isFalse();
    assertThat(future3.isDone()).isFalse();
    batchDone.setException(new CheckedApiException(Code.INTERNAL).underlying);
    assertFutureThrowsCode(future1, Code.INTERNAL);
    assertFutureThrowsCode(future2, Code.INTERNAL);
    assertThat(future3.isDone()).isFalse();
  }

  @Test
  public void flushOnClose() throws Exception {
    ApiFuture<Empty> future1 = subscriber.ackMessages(ImmutableList.of("a", "b"));
    ApiFuture<Empty> future2 = subscriber.ackMessages(ImmutableList.of("c"));
    SettableApiFuture<Empty> batchDone = SettableApiFuture.create();
    when(underlying.ackMessages(ImmutableList.of("a", "b", "c"))).thenReturn(batchDone);
    subscriber.close();
    verify(underlying).ackMessages(any());
    verify(underlying).close();
    assertThat(future1.isDone()).isFalse();
    assertThat(future2.isDone()).isFalse();
    batchDone.set(null);
    future1.get();
    future2.get();
  }
}
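The coalescing pattern these tests exercise — queue ack ids, then flush everything queued so far as one batch when an alarm fires or on close — can be stated independently of the connector classes. A minimal generic sketch (not part of the repository; futures and thread-safety omitted):

import java.util.ArrayList;
import java.util.List;

// Generic sketch of ack coalescing: callers add ack ids at any time, and a
// periodic alarm (or close()) flushes the accumulated batch downstream.
public class AckCoalescingSketch {
  private final List<String> pending = new ArrayList<>();

  void ack(String ackId) {
    pending.add(ackId);
  }

  List<String> flush() { // what the alarm would forward downstream
    List<String> batch = new ArrayList<>(pending);
    pending.clear();
    return batch;
  }

  public static void main(String[] args) {
    AckCoalescingSketch s = new AckCoalescingSketch();
    s.ack("a");
    s.ack("b");
    System.out.println(s.flush()); // [a, b]
    System.out.println(s.flush()); // []
  }
}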
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsub/kafka/source/CloudPubSubSourceConnectorTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.google.pubsub.kafka.source;

import static org.junit.Assert.assertEquals;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import com.google.pubsub.kafka.common.ConnectorCredentialsProvider;
import com.google.pubsub.kafka.common.ConnectorUtils;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.common.config.ConfigException;
import org.apache.kafka.connect.errors.ConnectException;
import org.junit.Before;
import org.junit.Test;

/** Tests for {@link CloudPubSubSourceConnector}. */
public class CloudPubSubSourceConnectorTest {

  private static final int NUM_TASKS = 10;
  private static final String CPS_PROJECT = "hello";
  private static final String CPS_SUBSCRIPTION = "big";
  private static final String KAFKA_TOPIC = "world";

  private CloudPubSubSourceConnector connector;
  private Map<String, String> props;

  @Before
  public void setup() {
    connector = spy(new CloudPubSubSourceConnector());
    props = new HashMap<>();
    props.put(CloudPubSubSourceConnector.CPS_SUBSCRIPTION_CONFIG, CPS_SUBSCRIPTION);
    props.put(ConnectorUtils.CPS_PROJECT_CONFIG, CPS_PROJECT);
    props.put(CloudPubSubSourceConnector.KAFKA_TOPIC_CONFIG, KAFKA_TOPIC);
  }

  @Test(expected = ConnectException.class)
  public void testStartWhenSubscriptionNonexistent() {
    doThrow(new ConnectException(""))
        .when(connector)
        .verifySubscription(anyString(), anyString(), any(ConnectorCredentialsProvider.class));
    connector.start(props);
  }

  @Test(expected = ConfigException.class)
  public void testStartWhenRequiredConfigMissing() {
    connector.start(new HashMap<String, String>());
  }

  @Test
  public void testTaskConfigs() {
    doNothing()
        .when(connector)
        .verifySubscription(anyString(), anyString(), any(ConnectorCredentialsProvider.class));
    connector.start(props);
    List<Map<String, String>> taskConfigs = connector.taskConfigs(NUM_TASKS);
    assertEquals(taskConfigs.size(), NUM_TASKS);
    for (int i = 0; i < taskConfigs.size(); ++i) {
      assertEquals(taskConfigs.get(i), props);
    }
  }

  @Test
  public void testSourceConnectorTaskClass() {
    assertEquals(CloudPubSubSourceTask.class, connector.taskClass());
  }
}
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsub/kafka/source/StreamingPullSubscriberTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.pubsub.kafka.source;

import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;

import com.google.api.core.ApiService;
import com.google.api.gax.rpc.ApiException;
import com.google.api.gax.rpc.StatusCode.Code;
import com.google.cloud.pubsub.v1.AckReplyConsumer;
import com.google.cloud.pubsub.v1.MessageReceiver;
import com.google.cloud.pubsub.v1.SubscriberInterface;
import com.google.cloud.pubsublite.internal.CheckedApiException;
import com.google.common.collect.ImmutableList;
import com.google.protobuf.ByteString;
import com.google.pubsub.v1.PubsubMessage;
import com.google.pubsub.v1.ReceivedMessage;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.stream.Collectors;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.stubbing.Answer;

public class StreamingPullSubscriberTest {
  private final StreamingPullSubscriberFactory underlyingFactory =
      mock(StreamingPullSubscriberFactory.class);
  private final SubscriberInterface underlying = mock(SubscriberInterface.class);
  // Initialized in setUp.
  private StreamingPullSubscriber subscriber;
  private MessageReceiver messageReceiver;
  private ApiService.Listener errorListener;
  private final ExecutorService executorService = Executors.newCachedThreadPool();

  private static List<PubsubMessage> messagesFor(List<ReceivedMessage> received) {
    return received.stream().map(ReceivedMessage::getMessage).collect(Collectors.toList());
  }

  @Before
  public void setUp() throws Exception {
    when(underlying.startAsync()).thenReturn(underlying);
    when(underlyingFactory.newSubscriber(any()))
        .thenAnswer(
            args -> {
              messageReceiver = args.getArgument(0);
              return underlying;
            });
    doAnswer(
            (Answer<Void>)
                args -> {
                  errorListener = args.getArgument(0);
                  return null;
                })
        .when(underlying)
        .addListener(any(), any());

    subscriber = new StreamingPullSubscriber(underlyingFactory);

    InOrder inOrder = inOrder(underlyingFactory, underlying);
    inOrder.verify(underlyingFactory).newSubscriber(any());
    inOrder.verify(underlying).addListener(any(), any());
    inOrder.verify(underlying).startAsync();
    inOrder.verify(underlying).awaitRunning();

    assertThat(messageReceiver).isNotNull();
    assertThat(errorListener).isNotNull();
  }

  @Test
  public void closeStops() {
    when(underlying.stopAsync()).thenReturn(underlying);
    subscriber.close();
    verify(underlying).stopAsync();
    verify(underlying).awaitTerminated();
  }

  @Test
  public void pullAfterErrorThrows() {
    ApiException expected = new CheckedApiException(Code.INTERNAL).underlying;
    errorListener.failed(null, expected);
    ExecutionException e = assertThrows(ExecutionException.class, () -> subscriber.pull().get());
    assertThat(expected).isEqualTo(e.getCause());
  }
  @Test
  public void pullBeforeErrorThrows() throws Exception {
    ApiException expected = new CheckedApiException(Code.INTERNAL).underlying;
    Future<List<ReceivedMessage>> future = subscriber.pull();
    Thread.sleep(1000);
    assertThat(future.isDone()).isFalse();

    errorListener.failed(null, expected);
    ExecutionException e = assertThrows(ExecutionException.class, future::get);
    assertThat(expected).isEqualTo(e.getCause());
  }

  @Test
  public void pullSuccess() throws Exception {
    PubsubMessage message =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    Future<List<ReceivedMessage>> future = executorService.submit(() -> subscriber.pull().get());
    messageReceiver.receiveMessage(message, mock(AckReplyConsumer.class));
    assertThat(messagesFor(future.get())).isEqualTo(ImmutableList.of(message));
  }

  @Test
  public void pullMultiple() throws Exception {
    PubsubMessage message1 =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    PubsubMessage message2 =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    messageReceiver.receiveMessage(message1, mock(AckReplyConsumer.class));
    messageReceiver.receiveMessage(message2, mock(AckReplyConsumer.class));
    assertThat(messagesFor(subscriber.pull().get()))
        .isEqualTo(ImmutableList.of(message1, message2));
  }

  @Test
  public void pullMessageWhenError() {
    ApiException expected = new CheckedApiException(Code.INTERNAL).underlying;
    errorListener.failed(null, expected);
    ExecutionException e = assertThrows(ExecutionException.class, () -> subscriber.pull().get());
    assertThat(e.getCause()).isEqualTo(expected);
  }

  @Test
  public void messageAfterErrorNacked() {
    ApiException expected = new CheckedApiException(Code.INTERNAL).underlying;
    errorListener.failed(null, expected);
    ExecutionException e = assertThrows(ExecutionException.class, () -> subscriber.pull().get());
    assertThat(e.getCause()).isEqualTo(expected);
    PubsubMessage message1 =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    AckReplyConsumer consumer = mock(AckReplyConsumer.class);
    messageReceiver.receiveMessage(message1, consumer);
    verify(consumer, times(1)).nack();
  }

  @Test
  public void messagesNackedOnError() {
    PubsubMessage message1 =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    AckReplyConsumer consumer = mock(AckReplyConsumer.class);
    messageReceiver.receiveMessage(message1, consumer);
    ApiException expected = new CheckedApiException(Code.INTERNAL).underlying;
    errorListener.failed(null, expected);
    ExecutionException e = assertThrows(ExecutionException.class, () -> subscriber.pull().get());
    assertThat(e.getCause()).isEqualTo(expected);
    verify(consumer, times(1)).nack();
  }

  @Test
  public void pullMessagePrioritizeErrorOverExistingMessage() {
    ApiException expected = new CheckedApiException(Code.INTERNAL).underlying;
    errorListener.failed(null, expected);
    PubsubMessage message =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    messageReceiver.receiveMessage(message, mock(AckReplyConsumer.class));

    ExecutionException e = assertThrows(ExecutionException.class, () -> subscriber.pull().get());
    assertThat(e.getCause()).isEqualTo(expected);
  }
  @Test
  public void pullThenAck() throws Exception {
    PubsubMessage message =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    Future<List<ReceivedMessage>> future = executorService.submit(() -> subscriber.pull().get());
    AckReplyConsumer ackReplyConsumer = mock(AckReplyConsumer.class);
    messageReceiver.receiveMessage(message, ackReplyConsumer);
    List<ReceivedMessage> batch = future.get();
    assertThat(batch.size()).isEqualTo(1);
    ReceivedMessage received = batch.get(0);
    assertThat(received.getMessage()).isEqualTo(message);
    verify(ackReplyConsumer, times(0)).ack();
    // Invalid ack id ignored.
    subscriber.ackMessages(ImmutableList.of("not a real ack id", received.getAckId())).get();
    verify(ackReplyConsumer, times(1)).ack();
  }

  @Test
  public void multiAck() throws Exception {
    PubsubMessage message1 =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("abc")).build();
    PubsubMessage message2 =
        PubsubMessage.newBuilder().setData(ByteString.copyFromUtf8("def")).build();
    AckReplyConsumer ackReplyConsumer1 = mock(AckReplyConsumer.class);
    AckReplyConsumer ackReplyConsumer2 = mock(AckReplyConsumer.class);
    messageReceiver.receiveMessage(message1, ackReplyConsumer1);
    messageReceiver.receiveMessage(message2, ackReplyConsumer2);
    List<ReceivedMessage> batch = subscriber.pull().get();
    assertThat(batch.size()).isEqualTo(2);
    ReceivedMessage received1 = batch.get(0);
    ReceivedMessage received2 = batch.get(1);
    assertThat(received1.getMessage()).isEqualTo(message1);
    assertThat(received2.getMessage()).isEqualTo(message2);
    verify(ackReplyConsumer1, times(0)).ack();
    verify(ackReplyConsumer2, times(0)).ack();
    subscriber.ackMessages(ImmutableList.of(received2.getAckId(), received1.getAckId())).get();
    verify(ackReplyConsumer1, times(1)).ack();
    verify(ackReplyConsumer2, times(1)).ack();
    // Duplicate ack ignored.
    subscriber.ackMessages(ImmutableList.of(received2.getAckId(), received1.getAckId())).get();
    verify(ackReplyConsumer1, times(1)).ack();
    verify(ackReplyConsumer2, times(1)).ack();
  }
}
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsublite/kafka/sink/SchemasTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.pubsublite.kafka.sink;

import static com.google.common.truth.Truth.assertThat;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.protobuf.ByteString;
import com.google.protobuf.ListValue;
import com.google.protobuf.Struct;
import com.google.protobuf.Value;
import java.util.Base64;
import java.util.List;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;

@RunWith(JUnit4.class)
public class SchemasTest {

  private static List<Schema> primitiveSchemas() {
    return ImmutableList.of(
        Schema.INT8_SCHEMA,
        Schema.INT16_SCHEMA,
        Schema.INT32_SCHEMA,
        Schema.INT64_SCHEMA,
        Schema.FLOAT32_SCHEMA,
        Schema.FLOAT64_SCHEMA,
        Schema.BOOLEAN_SCHEMA,
        Schema.STRING_SCHEMA,
        Schema.BYTES_SCHEMA);
  }

  private static Object example(Schema.Type t) {
    ImmutableMap.Builder<Schema.Type, Object> values = ImmutableMap.builder();
    values.put(Schema.Type.INT8, (byte) 3);
    values.put(Schema.Type.INT16, (short) 4);
    values.put(Schema.Type.INT32, (int) 5);
    values.put(Schema.Type.INT64, (long) 6L);
    values.put(Schema.Type.FLOAT32, (float) 2.5);
    values.put(Schema.Type.FLOAT64, (double) 3.5);
    values.put(Schema.Type.BOOLEAN, (Boolean) true);
    values.put(Schema.Type.STRING, "abc");
    values.put(Schema.Type.BYTES, ByteString.copyFromUtf8("def").toByteArray());
    return values.build().get(t);
  }

  private static Value exampleValue(Schema.Type t) {
    Value.Builder single = Value.newBuilder();
    switch (t) {
      case INT8:
      case INT16:
      case INT32:
      case INT64:
      case FLOAT32:
      case FLOAT64:
        single.setNumberValue(((Number) example(t)).doubleValue());
        break;
      case BOOLEAN:
        single.setBoolValue((Boolean) example(t));
        break;
      case STRING:
        single.setStringValue(example(t).toString());
        break;
      case BYTES:
        single.setStringValue(Base64.getEncoder().encodeToString((byte[]) example(t)));
        break;
      default:
        throw new RuntimeException("");
    }
    return single.build();
  }

  @Test
  public void testConvertPrimitives() {
    assertThat(Schemas.encodeToBytes(Schema.INT8_SCHEMA, (byte) 3))
        .isEqualTo(ByteString.copyFromUtf8("3"));
    assertThat(Schemas.encodeToBytes(Schema.INT16_SCHEMA, (short) 4))
        .isEqualTo(ByteString.copyFromUtf8("4"));
    assertThat(Schemas.encodeToBytes(Schema.INT32_SCHEMA, 5))
        .isEqualTo(ByteString.copyFromUtf8("5"));
    assertThat(Schemas.encodeToBytes(Schema.INT64_SCHEMA, 6L))
        .isEqualTo(ByteString.copyFromUtf8("6"));
    assertThat(Schemas.encodeToBytes(Schema.FLOAT32_SCHEMA, (float) 2.5))
        .isEqualTo(ByteString.copyFromUtf8(Double.toString(2.5)));
    assertThat(Schemas.encodeToBytes(Schema.FLOAT64_SCHEMA, (float) 3.5))
        .isEqualTo(ByteString.copyFromUtf8(Double.toString(3.5)));
    assertThat(Schemas.encodeToBytes(Schema.BOOLEAN_SCHEMA, true))
        .isEqualTo(ByteString.copyFromUtf8("true"));
    assertThat(Schemas.encodeToBytes(null, "abc")).isEqualTo(ByteString.copyFromUtf8("abc"));
    assertThat(Schemas.encodeToBytes(Schema.STRING_SCHEMA, "def"))
        .isEqualTo(ByteString.copyFromUtf8("def"));
    assertThat(
            Schemas.encodeToBytes(
                Schema.BYTES_SCHEMA, ByteString.copyFromUtf8("ghi").asReadOnlyByteBuffer()))
        .isEqualTo(ByteString.copyFromUtf8("ghi"));
    assertThat(
            Schemas.encodeToBytes(
                Schema.BYTES_SCHEMA, ByteString.copyFromUtf8("jkl").toByteArray()))
        .isEqualTo(ByteString.copyFromUtf8("jkl"));
  }
  @Test
  public void testConvertArray() {
    for (Schema schema : primitiveSchemas()) {
      Value expected =
          Value.newBuilder()
              .setListValue(
                  ListValue.newBuilder()
                      .addValues(exampleValue(schema.type()))
                      .addValues(exampleValue(schema.type())))
              .build();
      List<Object> objects = ImmutableList.of(example(schema.type()), example(schema.type()));
      assertThat(Schemas.encodeToBytes(SchemaBuilder.array(schema).build(), objects))
          .isEqualTo(expected.toByteString());
    }
  }

  @Test
  public void testConvertStruct() {
    SchemaBuilder schemaBuilder =
        SchemaBuilder.struct()
            .field("byte", Schema.INT8_SCHEMA)
            .field("short", Schema.INT16_SCHEMA)
            .field("int", Schema.INT32_SCHEMA)
            .field("long", Schema.INT64_SCHEMA)
            .field("float", Schema.FLOAT32_SCHEMA)
            .field("double", Schema.FLOAT64_SCHEMA)
            .field("bool", Schema.BOOLEAN_SCHEMA)
            .field("bytes", Schema.BYTES_SCHEMA)
            .field("string", Schema.STRING_SCHEMA);
    org.apache.kafka.connect.data.Struct kafkaStruct =
        new org.apache.kafka.connect.data.Struct(schemaBuilder.build());
    Struct.Builder struct = Struct.newBuilder();
    kafkaStruct.put("byte", (byte) 3);
    struct.putFields("byte", Value.newBuilder().setNumberValue(3).build());
    kafkaStruct.put("short", (short) 4);
    struct.putFields("short", Value.newBuilder().setNumberValue(4).build());
    kafkaStruct.put("int", (int) 5);
    struct.putFields("int", Value.newBuilder().setNumberValue(5).build());
    kafkaStruct.put("long", (long) 6L);
    struct.putFields("long", Value.newBuilder().setNumberValue(6).build());
    kafkaStruct.put("float", (float) 2.5);
    struct.putFields("float", Value.newBuilder().setNumberValue(2.5).build());
    kafkaStruct.put("double", (double) 3.5);
    struct.putFields("double", Value.newBuilder().setNumberValue(3.5).build());
    kafkaStruct.put("bool", true);
    struct.putFields("bool", Value.newBuilder().setBoolValue(true).build());
    kafkaStruct.put("bytes", ByteString.copyFromUtf8("abc").toByteArray());
    struct.putFields(
        "bytes",
        Value.newBuilder()
            .setStringValue(
                Base64.getEncoder().encodeToString(ByteString.copyFromUtf8("abc").toByteArray()))
            .build());
    kafkaStruct.put("string", "def");
    struct.putFields("string", Value.newBuilder().setStringValue("def").build());
    Value value = Value.newBuilder().setStructValue(struct).build();

    assertThat(Schemas.encodeToBytes(kafkaStruct.schema(), kafkaStruct))
        .isEqualTo(value.toByteString());
  }
  @Test
  public void testConvertMap() {
    for (Schema keySchema : primitiveSchemas()) {
      for (Schema valueSchema : primitiveSchemas()) {
        Schema mapSchema = SchemaBuilder.map(keySchema, valueSchema).build();
        String key;
        switch (keySchema.type()) {
          case INT8:
          case INT16:
          case INT32:
          case INT64:
            key = Long.toString(((Number) example(keySchema.type())).longValue());
            break;
          case FLOAT32:
          case FLOAT64:
            key = Double.toString(((Number) example(keySchema.type())).doubleValue());
            break;
          case BOOLEAN:
            key = Boolean.toString((Boolean) example(keySchema.type()));
            break;
          case STRING:
            key = example(keySchema.type()).toString();
            break;
          case BYTES:
            key = Base64.getEncoder().encodeToString((byte[]) example(keySchema.type()));
            break;
          default:
            throw new RuntimeException("");
        }
        Map<Object, Object> map =
            ImmutableMap.of(example(keySchema.type()), example(valueSchema.type()));
        Value expected =
            Value.newBuilder()
                .setStructValue(
                    Struct.newBuilder().putFields(key, exampleValue(valueSchema.type())))
                .build();
        assertThat(Schemas.encodeToBytes(mapSchema, map)).isEqualTo(expected.toByteString());
      }
    }
  }

  @Test
  public void testConvertComplexSchema() {
    Schema schema =
        SchemaBuilder.struct()
            .field(
                "field",
                SchemaBuilder.array(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.BOOLEAN_SCHEMA)))
            .build();
    org.apache.kafka.connect.data.Struct kafkaStruct =
        new org.apache.kafka.connect.data.Struct(schema);
    Map<String, Boolean> map = ImmutableMap.of("one", true, "two", false);
    List<Map<String, Boolean>> array = ImmutableList.of(map, map);
    kafkaStruct.put("field", array);
    Value expectedMap =
        Value.newBuilder()
            .setStructValue(
                Struct.newBuilder()
                    .putFields("one", Value.newBuilder().setBoolValue(true).build())
                    .putFields("two", Value.newBuilder().setBoolValue(false).build()))
            .build();
    Value expectedArray =
        Value.newBuilder()
            .setListValue(ListValue.newBuilder().addValues(expectedMap).addValues(expectedMap))
            .build();
    Value expected =
        Value.newBuilder()
            .setStructValue(Struct.newBuilder().putFields("field", expectedArray))
            .build();
    assertThat(Schemas.encodeToBytes(schema, kafkaStruct)).isEqualTo(expected.toByteString());
  }
}
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsublite/kafka/source/PollerImplTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.pubsublite.kafka.source;

import static com.google.cloud.pubsublite.internal.testing.UnitTestExamples.example;
import static com.google.common.truth.Truth.assertThat;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.when;
import static org.mockito.MockitoAnnotations.initMocks;

import com.google.cloud.pubsublite.Offset;
import com.google.cloud.pubsublite.Partition;
import com.google.cloud.pubsublite.TopicPath;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.ListMultimap;
import com.google.protobuf.ByteString;
import java.util.List;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;
import org.apache.kafka.common.errors.WakeupException;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.record.TimestampType;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.source.SourceRecord;
import org.junit.Before;
import org.junit.Test;
import org.mockito.InOrder;
import org.mockito.Mock;

public class PollerImplTest {

  private static final String KAFKA_TOPIC = "kafka-topic";

  @Mock Consumer<byte[], byte[]> underlying;
  @Mock Headers mockHeaders;
  Poller poller;

  @Before
  public void setUp() {
    initMocks(this);
    poller = new PollerImpl(KAFKA_TOPIC, underlying);
  }

  @Test
  public void pollTimeout() {
    when(underlying.poll(PollerImpl.POLL_DURATION)).thenThrow(new TimeoutException(""));
    assertThat(poller.poll()).isNull();
  }

  @Test
  public void pollWakeup() {
    when(underlying.poll(PollerImpl.POLL_DURATION)).thenThrow(new WakeupException());
    assertThat(poller.poll()).isNull();
  }

  private static Header toHeader(String key, ByteString value) {
    return new Header() {
      @Override
      public String key() {
        return key;
      }

      @Override
      public byte[] value() {
        return value.toByteArray();
      }
    };
  }

  @Test
  public void pollTranslates() {
    ByteString key = ByteString.copyFromUtf8("key");
    ByteString value = ByteString.copyFromUtf8("value");
    ImmutableListMultimap<String, ByteString> sourceHeadersMap =
        ImmutableListMultimap.of(
            "one", ByteString.copyFromUtf8("a"),
            "two", ByteString.copyFromUtf8("b"),
            "two", ByteString.copyFromUtf8("c"));
    when(mockHeaders.iterator())
        .thenReturn(
            Iterators.transform(
                sourceHeadersMap.entries().iterator(),
                entry -> toHeader(entry.getKey(), entry.getValue())));
    ConsumerRecord<byte[], byte[]> record =
        new ConsumerRecord<>(
            example(TopicPath.class).toString(),
            (int) example(Partition.class).value(),
            example(Offset.class).value(),
            1000L,
            TimestampType.NO_TIMESTAMP_TYPE,
            0L,
            key.size(),
            value.size(),
            key.toByteArray(),
            value.toByteArray(),
            mockHeaders);
    when(underlying.poll(PollerImpl.POLL_DURATION))
        .thenReturn(
            new ConsumerRecords<>(
                ImmutableMap.of(
                    new TopicPartition(
                        example(TopicPath.class).toString(),
                        (int) example(Partition.class).value()),
                    ImmutableList.of(record))));

    List<SourceRecord> results = poller.poll();
    assertThat(results).isNotNull();
    assertThat(results.size()).isEqualTo(1);
    SourceRecord result = results.get(0);
    assertThat(result.sourcePartition())
        .isEqualTo(
            ImmutableMap.of(
                "topic",
                example(TopicPath.class).toString(),
                "partition",
                (int) example(Partition.class).value()));
    assertThat(result.sourceOffset())
        .isEqualTo(ImmutableMap.of("offset", example(Offset.class).value()));
    assertThat(result.timestamp()).isEqualTo(1000L);
    assertThat(result.keySchema().isOptional()).isTrue();
    assertThat(result.keySchema().type()).isEqualTo(Schema.Type.BYTES);
    assertThat(result.key()).isEqualTo(key.toByteArray());
    assertThat(result.valueSchema().isOptional()).isFalse();
    assertThat(result.valueSchema().type()).isEqualTo(Schema.Type.BYTES);
    assertThat(result.value()).isEqualTo(value.toByteArray());
    ImmutableListMultimap.Builder<String, byte[]> resultHeadersBuilder =
        ImmutableListMultimap.builder();
    result
        .headers()
        .forEach(
            header -> {
              resultHeadersBuilder.put(header.key(), (byte[]) header.value());
              assertThat(header.schema().isOptional()).isFalse();
              assertThat(header.schema().type()).isEqualTo(Schema.Type.BYTES);
            });
    ListMultimap<String, byte[]> resultHeaders = resultHeadersBuilder.build();
    assertThat(resultHeaders.get("one").size()).isEqualTo(1);
    assertThat(resultHeaders.get("two").size()).isEqualTo(2);
    assertThat(resultHeaders.get("one").get(0))
        .isEqualTo(sourceHeadersMap.get("one").get(0).toByteArray());
    assertThat(resultHeaders.get("two").get(0))
        .isEqualTo(sourceHeadersMap.get("two").get(0).toByteArray());
    assertThat(resultHeaders.get("two").get(1))
        .isEqualTo(sourceHeadersMap.get("two").get(1).toByteArray());
  }

  @Test
  public void pollTreatsEmptyKeyAsNull() {
    ByteString key = ByteString.copyFromUtf8("");
    ByteString value = ByteString.copyFromUtf8("value");
    ConsumerRecord<byte[], byte[]> record =
        new ConsumerRecord<>(
            example(TopicPath.class).toString(),
            (int) example(Partition.class).value(),
            example(Offset.class).value(),
            1000L,
            TimestampType.NO_TIMESTAMP_TYPE,
            0L,
            key.size(),
            value.size(),
            key.toByteArray(),
            value.toByteArray());
    when(underlying.poll(PollerImpl.POLL_DURATION))
        .thenReturn(
            new ConsumerRecords<>(
                ImmutableMap.of(
                    new TopicPartition(
                        example(TopicPath.class).toString(),
                        (int) example(Partition.class).value()),
                    ImmutableList.of(record))));

    List<SourceRecord> results = poller.poll();
    assertThat(results).isNotNull();
    assertThat(results.size()).isEqualTo(1);
    SourceRecord result = results.get(0);
    assertThat(result.key()).isNull();
  }

  @Test
  public void closeCallsWakeup() {
    poller.close();
    InOrder order = inOrder(underlying);
    order.verify(underlying).wakeup();
    order.verify(underlying).unsubscribe();
  }
}
--------------------------------------------------------------------------------
/src/test/java/com/google/pubsublite/kafka/source/PubSubLiteSourceTaskTest.java:
--------------------------------------------------------------------------------
/*
 * Copyright 2020 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.pubsublite.kafka.source;

import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.mockito.MockitoAnnotations.initMocks;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.mockito.Mock;

@RunWith(JUnit4.class)
public class PubSubLiteSourceTaskTest {

  @Mock PollerFactory factory;
  @Mock Poller poller;
  PubSubLiteSourceTask task;

  @Before
  public void setUp() {
    initMocks(this);
    when(factory.newPoller(any())).thenReturn(poller);
    task = new PubSubLiteSourceTask(factory);
    task.start(ImmutableMap.of());
    verify(factory).newPoller(ImmutableMap.of());
    assertThrows(IllegalStateException.class, () -> task.start(ImmutableMap.of()));
  }

  @Test
  public void poll() {
    when(poller.poll()).thenReturn(ImmutableList.of());
    assertThat(task.poll()).isEmpty();
  }

  @Test
  public void stop() {
    task.stop();
    verify(poller).close();
    assertThrows(IllegalStateException.class, () -> task.stop());
  }
}
--------------------------------------------------------------------------------
/src/test/resources/cps-sink-connector-test.properties:
--------------------------------------------------------------------------------
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=CPSSinkConnector
connector.class=com.google.pubsub.kafka.sink.CloudPubSubSinkConnector
tasks.max=2
topics=cps-sink-test-kafka-topic
cps.topic=cps-sink-topic-<RUN_ID>
cps.project=<PROJECT_NAME>
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
--------------------------------------------------------------------------------
/src/test/resources/cps-source-connector-test.properties:
--------------------------------------------------------------------------------
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=CPSSourceConnector
connector.class=com.google.pubsub.kafka.source.CloudPubSubSourceConnector
tasks.max=2
cps.project=<PROJECT_NAME>
cps.subscription=cps-source-subscription-<RUN_ID>
kafka.topic=cps-source-test-kafka-topic
key.converter=org.apache.kafka.connect.storage.StringConverter
value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
--------------------------------------------------------------------------------
/src/test/resources/kafka_vm_startup_script.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -x
sudo apt-get update
sudo apt-get install -yq wget openjdk-11-jdk maven

# Download connector JARs and properties files
GCS_BUCKET=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/gcs_bucket -H "Metadata-Flavor: Google")
CPS_CONNECTOR_JAR=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/cps_connector_jar_name -H "Metadata-Flavor: Google")
CPS_SINK_CONNECTOR_PROPERTIES=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/cps_sink_connector_properties_name -H "Metadata-Flavor: Google")
CPS_SOURCE_CONNECTOR_PROPERTIES=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/cps_source_connector_properties_name -H "Metadata-Flavor: Google")
PSL_SINK_CONNECTOR_PROPERTIES=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/psl_sink_connector_properties_name -H "Metadata-Flavor: Google")
PSL_SOURCE_CONNECTOR_PROPERTIES=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/psl_source_connector_properties_name -H "Metadata-Flavor: Google")
GCS_DIR='gcs_resources'

mkdir $GCS_DIR
gsutil cp "gs://$GCS_BUCKET/$CPS_CONNECTOR_JAR" $GCS_DIR/
gsutil cp "gs://$GCS_BUCKET/$CPS_SINK_CONNECTOR_PROPERTIES" $GCS_DIR/
gsutil cp "gs://$GCS_BUCKET/$CPS_SOURCE_CONNECTOR_PROPERTIES" $GCS_DIR/
gsutil cp "gs://$GCS_BUCKET/$PSL_SINK_CONNECTOR_PROPERTIES" $GCS_DIR/
gsutil cp "gs://$GCS_BUCKET/$PSL_SOURCE_CONNECTOR_PROPERTIES" $GCS_DIR/
echo "Files in $GCS_DIR: "
ls -l $GCS_DIR/

# Prepare properties files for this run
RUN_ID=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/run_id -H "Metadata-Flavor: Google")
PROJECT_NAME=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/project_id -H "Metadata-Flavor: Google")
PSL_ZONE=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/psl_zone -H "Metadata-Flavor: Google")

sed -i "s/<RUN_ID>/$RUN_ID/g" $GCS_DIR/*.properties
sed -i "s/<PROJECT_NAME>/$PROJECT_NAME/g" $GCS_DIR/*.properties
sed -i "s/<PSL_ZONE>/$PSL_ZONE/g" $GCS_DIR/*.properties

# Install and run Kafka brokers
KAFKA_VERSION=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/kafka_version -H "Metadata-Flavor: Google")
SCALA_VERSION=$(curl http://metadata.google.internal/computeMetadata/v1/instance/attributes/scala_version -H "Metadata-Flavor: Google")
KAFKA_URL="https://archive.apache.org/dist/kafka/$KAFKA_VERSION/kafka_$SCALA_VERSION-$KAFKA_VERSION.tgz"
KAFKA_DIR="kafka_$SCALA_VERSION-$KAFKA_VERSION"
EXTERNAL_IP=$(curl http://metadata/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip -H "Metadata-Flavor: Google")

wget $KAFKA_URL
tar -xzf "$KAFKA_DIR.tgz"
sed -i "s@#advertised.listeners@advertised.listeners@g" $KAFKA_DIR/config/server.properties
sed -i "s@your.host.name@$EXTERNAL_IP@g" $KAFKA_DIR/config/server.properties
$KAFKA_DIR/bin/zookeeper-server-start.sh $KAFKA_DIR/config/zookeeper.properties &
$KAFKA_DIR/bin/kafka-server-start.sh $KAFKA_DIR/config/server.properties &
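# The topic-creation commands below assume the broker on localhost:9092 is
# already accepting connections by the time they run. A sketch of an optional
# readiness wait (an assumption, not part of the original flow) would be:
#   until $KAFKA_DIR/bin/kafka-topics.sh --list --bootstrap-server localhost:9092; do sleep 5; done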
# Run connectors
sed -i "s@#plugin.path=@plugin.path=$(pwd)\/$GCS_DIR@g" $KAFKA_DIR/config/connect-standalone.properties
## Create kafka topics for connectors
$KAFKA_DIR/bin/kafka-topics.sh --create --topic 'cps-sink-test-kafka-topic' --bootstrap-server localhost:9092
$KAFKA_DIR/bin/kafka-topics.sh --create --topic 'cps-source-test-kafka-topic' --bootstrap-server localhost:9092
$KAFKA_DIR/bin/kafka-topics.sh --create --topic 'psl-sink-test-topic' --bootstrap-server localhost:9092
$KAFKA_DIR/bin/kafka-topics.sh --create --topic 'psl-source-test-topic' --bootstrap-server localhost:9092
## Start connectors
$KAFKA_DIR/bin/connect-standalone.sh $KAFKA_DIR/config/connect-standalone.properties \
  $GCS_DIR/$CPS_SINK_CONNECTOR_PROPERTIES \
  $GCS_DIR/$CPS_SOURCE_CONNECTOR_PROPERTIES \
  $GCS_DIR/$PSL_SINK_CONNECTOR_PROPERTIES \
  $GCS_DIR/$PSL_SOURCE_CONNECTOR_PROPERTIES &

set +x
--------------------------------------------------------------------------------
/src/test/resources/log4j.properties:
--------------------------------------------------------------------------------
# Root logger option
log4j.rootLogger=INFO, stdout

# Direct log messages to stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Target=System.out
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d{yyyy-MM-dd HH:mm:ss} %-5p %c{1} - %m%n
--------------------------------------------------------------------------------
/src/test/resources/pubsub-lite-sink-connector-test.properties:
--------------------------------------------------------------------------------
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=PubSubLiteSinkConnector
connector.class=com.google.pubsublite.kafka.sink.PubSubLiteSinkConnector
tasks.max=2
topics=psl-sink-test-topic
pubsublite.project=<PROJECT_NAME>
pubsublite.location=<PSL_ZONE>
pubsublite.topic=psl-sink-topic-<RUN_ID>
key.converter=org.apache.kafka.connect.converters.ByteArrayConverter
value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
--------------------------------------------------------------------------------
/src/test/resources/pubsub-lite-source-connector-test.properties:
--------------------------------------------------------------------------------
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name=PubSubLiteSourceConnector
connector.class=com.google.pubsublite.kafka.source.PubSubLiteSourceConnector
tasks.max=2
pubsublite.project=<PROJECT_NAME>
pubsublite.location=<PSL_ZONE>
pubsublite.subscription=psl-source-subscription-<RUN_ID>
kafka.topic=psl-source-test-topic
key.converter=org.apache.kafka.connect.converters.ByteArrayConverter
value.converter=org.apache.kafka.connect.converters.ByteArrayConverter
--------------------------------------------------------------------------------
/versions.txt:
--------------------------------------------------------------------------------
# Format:
# module:released-version:current-version

pubsub-group-kafka-connector:1.3.2:1.3.2
--------------------------------------------------------------------------------