├── .asf.yaml ├── .github ├── PULL_REQUEST_TEMPLATE.md ├── boring-cyborg.yml └── workflows │ ├── common.yml │ ├── nightly.yml │ └── push_pr.yml ├── .gitignore ├── .gitmodules ├── LICENSE ├── NOTICE ├── README.md ├── docs ├── content.zh │ └── docs │ │ └── connectors │ │ └── datastream │ │ └── prometheus.md ├── content │ └── docs │ │ └── connectors │ │ └── datastream │ │ └── prometheus.md └── data │ └── prometheus.yml ├── flink-connector-prometheus-request-signer-amp ├── README.md ├── archunit-violations │ ├── 6e53c00e-cca1-4df3-a67b-55a742f82f49 │ ├── b84f2d4b-67b7-44ff-9dc5-9a946a67c5e9 │ └── stored.rules ├── pom.xml └── src │ ├── main │ └── java │ │ └── org │ │ └── apache │ │ └── flink │ │ └── connector │ │ └── prometheus │ │ └── sink │ │ └── aws │ │ ├── AWS4SignerForAMP.java │ │ └── AmazonManagedPrometheusWriteRequestSigner.java │ └── test │ ├── java │ ├── architecture │ │ └── TestCodeArchitectureTest.java │ └── org │ │ └── apache │ │ └── flink │ │ └── connector │ │ └── prometheus │ │ └── sink │ │ └── aws │ │ ├── AWS4SignerForAMPTest.java │ │ ├── AmazonManagedPrometheusWriteRequestSignerSerializationTest.java │ │ ├── AmazonManagedPrometheusWriteRequestSignerTest.java │ │ ├── DummAwsSessionCredentialProvider.java │ │ ├── DummyAwsBasicCredentialsProvider.java │ │ └── RequestHeaderTestUtil.java │ └── resources │ └── archunit.properties ├── flink-connector-prometheus ├── README.md ├── archunit-violations │ ├── 75250488-2d5e-433d-8fc4-51ff28cb11b3 │ ├── d962399a-ad51-48ff-ab30-23969cef546c │ └── stored.rules ├── pom.xml └── src │ ├── main │ └── java │ │ └── org │ │ └── apache │ │ └── flink │ │ └── connector │ │ └── prometheus │ │ └── sink │ │ ├── HttpResponseCallback.java │ │ ├── PrometheusRemoteWriteHttpRequestBuilder.java │ │ ├── PrometheusRequestSigner.java │ │ ├── PrometheusSink.java │ │ ├── PrometheusSinkBuilder.java │ │ ├── PrometheusSinkConfiguration.java │ │ ├── PrometheusSinkException.java │ │ ├── PrometheusSinkWriter.java │ │ ├── 
PrometheusStateSerializer.java │ │ ├── PrometheusTimeSeries.java │ │ ├── PrometheusTimeSeriesConverter.java │ │ ├── PrometheusTimeSeriesLabelsAndMetricNameKeySelector.java │ │ ├── RequestEntrySizeUtils.java │ │ ├── errorhandling │ │ └── PrometheusSinkWriteException.java │ │ ├── http │ │ ├── PrometheusAsyncHttpClientBuilder.java │ │ ├── RemoteWriteResponseClassifier.java │ │ ├── RemoteWriteResponseType.java │ │ ├── RemoteWriteRetryStrategy.java │ │ └── RethrowingIOSessionListener.java │ │ ├── metrics │ │ ├── SinkMetrics.java │ │ └── SinkMetricsCallback.java │ │ ├── prometheus │ │ ├── Remote.java │ │ └── Types.java │ │ └── protobuf │ │ └── GoGoProtos.java │ └── test │ ├── java │ └── org │ │ └── apache │ │ ├── architecture │ │ └── TestCodeArchitectureTest.java │ │ └── flink │ │ └── connector │ │ └── prometheus │ │ └── sink │ │ ├── CapturingPrometheusRequestSigner.java │ │ ├── DummyPrometheusRequestSigner.java │ │ ├── HttpResponseCallbackTest.java │ │ ├── HttpResponseCallbackTestUtils.java │ │ ├── HttpResponseHandlingBehaviorIT.java │ │ ├── HttpTestUtils.java │ │ ├── InspectableMetricGroup.java │ │ ├── InspectableMetricGroupAssertions.java │ │ ├── PrometheusRemoteWriteHttpRequestBuilderTest.java │ │ ├── PrometheusSinkBuilderTest.java │ │ ├── PrometheusSinkSerializationTest.java │ │ ├── PrometheusSinkWriterTest.java │ │ ├── PrometheusStateSerializerTest.java │ │ ├── PrometheusTimeSeriesConverterTest.java │ │ ├── PrometheusTimeSeriesLabelsAndMetricNameKeySelectorTest.java │ │ ├── PrometheusTimeSeriesTest.java │ │ ├── RequestEntrySizeUtilsTest.java │ │ ├── SinkWriterErrorHandlingBehaviorConfigurationTest.java │ │ ├── VerifyableResponseCallback.java │ │ ├── examples │ │ └── DataStreamExample.java │ │ ├── http │ │ ├── AsyncHttpClientRetryIT.java │ │ ├── HttpClientTestUtils.java │ │ ├── RemoteWriteResponseClassifierTest.java │ │ ├── RemoteWriteRetryStrategyTest.java │ │ └── RethrowingIOSessionListenerTest.java │ │ └── metrics │ │ ├── SinkMetricsCallbackTest.java │ │ └── 
VerifybleSinkMetricsCallback.java │ └── resources │ ├── archunit.properties │ └── log4j2.properties ├── pom.xml └── tools └── maven ├── checkstyle.xml └── suppressions.xml /.asf.yaml: -------------------------------------------------------------------------------- 1 | github: 2 | enabled_merge_buttons: 3 | squash: true 4 | merge: false 5 | rebase: true 6 | labels: 7 | - flink 8 | - prometheus 9 | autolink_jira: FLINK 10 | collaborators: 11 | - flinkbot 12 | notifications: 13 | commits: commits@flink.apache.org 14 | issues: issues@flink.apache.org 15 | pullrequests: issues@flink.apache.org 16 | jobs: builds@flink.apache.org 17 | jira_options: link label 18 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 11 | 12 | ## Purpose of the change 13 | 14 | *For example: Implements the Datastream API for the Prometheus Sink.* 15 | 16 | ## Verifying this change 17 | 18 | Please make sure both new and modified tests in this PR follows the conventions defined in our code quality guide: https://flink.apache.org/contributing/code-style-and-quality-common.html#testing 19 | 20 | *(Please pick either of the following options)* 21 | 22 | This change is a trivial rework / code cleanup without any test coverage. 23 | 24 | *(or)* 25 | 26 | This change is already covered by existing tests, such as *(please describe tests)*. 27 | 28 | *(or)* 29 | 30 | This change added tests and can be verified as follows: 31 | 32 | *(example:)* 33 | - *Added integration tests for end-to-end deployment* 34 | - *Added unit tests* 35 | - *Manually verified by running the Kinesis connector on a local Flink cluster.* 36 | 37 | ## Significant changes 38 | *(Please check any boxes [x] if the answer is "yes". 
You can first publish the PR and check them afterwards, for convenience.)* 39 | - [ ] Dependencies have been added or upgraded 40 | - [ ] Public API has been changed (Public API is any class annotated with `@Public(Evolving)`) 41 | - [ ] Serializers have been changed 42 | - [ ] New feature has been introduced 43 | - If yes, how is this documented? (not applicable / docs / JavaDocs / not documented) 44 | -------------------------------------------------------------------------------- /.github/boring-cyborg.yml: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | ################################################################################ 18 | 19 | labelPRBasedOnFilePath: 20 | component=BuildSystem: 21 | - .github/**/* 22 | - tools/maven/* 23 | 24 | component=Documentation: 25 | - docs/**/* 26 | 27 | component=Connectors/Prometheus: 28 | - flink-connector-prometheus/**/* 29 | - flink-connector-prometheus-request-signer-amp/**/* 30 | 31 | ###### IssueLink Adder ################################################################################################# 32 | # Insert Issue (Jira/Github etc) link in PR description based on the Issue ID in PR title. 33 | insertIssueLinkInPrDescription: 34 | # specify the placeholder for the issue link that should be present in the description 35 | descriptionIssuePlaceholderRegexp: "^Issue link: (.*)$" 36 | matchers: 37 | # you can have several matches - for different types of issues 38 | # only the first matching entry is replaced 39 | jiraIssueMatch: 40 | # specify the regexp of issue id that you can find in the title of the PR 41 | # the match groups can be used to build the issue id (${1}, ${2}, etc.). 42 | titleIssueIdRegexp: \[(FLINK-[0-9]+)\] 43 | # the issue link to be added. ${1}, ${2} ... are replaced with the match groups from the 44 | # title match (remember to use quotes) 45 | descriptionIssueLink: "[${1}](https://issues.apache.org/jira/browse/${1}/)" 46 | docOnlyIssueMatch: 47 | titleIssueIdRegexp: \[hotfix\] 48 | descriptionIssueLink: "`Hotfix, no JIRA issue`" 49 | 50 | ###### Title Validator ################################################################################################# 51 | # Verifies if commit/PR titles match the regexp specified 52 | verifyTitles: 53 | # Regular expression that should be matched by titles of commits or PR 54 | titleRegexp: ^\[FLINK-[0-9]+\].*$|^\[FLINK-XXXXX\].*$|^\[hotfix].*$ 55 | # If set to true, it will always check the PR title (as opposed to the individual commits). 
56 | alwaysUsePrTitle: false 57 | # If set to true, it will only check the commit in case there is a single commit. 58 | # In case of multiple commits it will check PR title. 59 | # This reflects the standard behaviour of Github that for `Squash & Merge` GitHub 60 | # uses the PR title rather than commit messages for the squashed commit ¯\_(ツ)_/¯ 61 | # For single-commit PRs it takes the squashed commit message from the commit as expected. 62 | # 63 | # If set to false it will check all commit messages. This is useful when you do not squash commits at merge. 64 | validateEitherPrOrSingleCommitTitle: true 65 | # The title the GitHub status should appear from. 66 | statusTitle: "Title Validator" 67 | # A custom message to be displayed when the title passes validation. 68 | successMessage: "Validation successful!" 69 | # A custom message to be displayed when the title fails validation. 70 | # Allows insertion of ${type} (commit/PR), ${title} (the title validated) and ${regex} (the titleRegexp above). 71 | failureMessage: "Wrong ${type} title: ${title}" 72 | 73 | # Various Flags to control behaviour of the "Labeler" 74 | labelerFlags: 75 | # If this flag is changed to 'false', labels would only be added when the PR is first created 76 | # and not when existing PR is updated. 77 | # The default is 'true' which means the labels would be added when PR is updated even if they 78 | # were removed by the user 79 | labelOnPRUpdates: true 80 | 81 | # Comment to be posted to welcome users when they open their first PR 82 | firstPRWelcomeComment: > 83 | Thanks for opening this pull request! Please check out our contributing guidelines. (https://flink.apache.org/contributing/how-to-contribute.html) 84 | # Comment to be posted to congratulate user on their first merged PR 85 | firstPRMergeComment: > 86 | Awesome work, congrats on your first merged pull request! 
-------------------------------------------------------------------------------- /.github/workflows/common.yml: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | on: 20 | workflow_call: 21 | inputs: 22 | flink_url: 23 | description: "Url to Flink binary." 24 | required: true 25 | type: string 26 | flink_version: 27 | description: "Flink version to test against." 28 | required: true 29 | type: string 30 | jdk_version: 31 | description: "Jdk version to test against." 32 | required: false 33 | default: 8, 11 34 | type: string 35 | cache_flink_binary: 36 | description: "Whether to cache the Flink binary. Should be false for SNAPSHOT URLs, true otherwise." 37 | required: true 38 | type: boolean 39 | timeout_global: 40 | description: "The timeout in minutes for the entire workflow." 41 | required: false 42 | type: number 43 | default: 60 44 | timeout_test: 45 | description: "The timeout in minutes for the compile and test step." 
46 | required: false 47 | type: number 48 | default: 50 49 | 50 | jobs: 51 | compile_and_test: 52 | runs-on: ubuntu-latest 53 | strategy: 54 | matrix: 55 | jdk: ${{ fromJSON(format('[{0}]', inputs.jdk_version)) }} 56 | timeout-minutes: ${{ inputs.timeout_global }} 57 | env: 58 | MVN_COMMON_OPTIONS: -U -B --no-transfer-progress -Dflink.version=${{ inputs.flink_version }} 59 | MVN_CONNECTION_OPTIONS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.httpconnectionManager.ttlSeconds=120 60 | FLINK_CACHE_DIR: "/tmp/cache/flink" 61 | MVN_BUILD_OUTPUT_FILE: "/tmp/mvn_build_output.out" 62 | MVN_VALIDATION_DIR: "/tmp/flink-validation-deployment" 63 | steps: 64 | - run: echo "Running CI pipeline for JDK version ${{ matrix.jdk }}" 65 | 66 | - name: Check out repository code 67 | uses: actions/checkout@v3 68 | 69 | - name: Set JDK 70 | uses: actions/setup-java@v3 71 | with: 72 | java-version: ${{ matrix.jdk }} 73 | distribution: 'temurin' 74 | cache: 'maven' 75 | 76 | - name: Set Maven 3.8.5 77 | uses: stCarolas/setup-maven@v4.5 78 | with: 79 | maven-version: 3.8.5 80 | 81 | - name: Create cache dirs 82 | run: mkdir -p ${{ env.FLINK_CACHE_DIR }} 83 | 84 | - name: Cache Flink binary 85 | if: ${{ inputs.cache_flink_binary }} 86 | uses: actions/cache@v3 87 | id: cache-flink 88 | with: 89 | path: ${{ env.FLINK_CACHE_DIR }} 90 | key: ${{ inputs.flink_url }} 91 | 92 | - name: Download Flink binary 93 | working-directory: ${{ env.FLINK_CACHE_DIR }} 94 | if: steps.cache-flink.outputs.cache-hit != 'true' 95 | run: wget -q -c ${{ inputs.flink_url }} -O - | tar -xz 96 | 97 | - name: Compile and test flink-connector-prometheus 98 | timeout-minutes: ${{ inputs.timeout_test }} 99 | run: | 100 | set -o pipefail 101 | 102 | mvn clean install -Dflink.convergence.phase=install -Pcheck-convergence -U -B ${{ env.MVN_CONNECTION_OPTIONS }} \ 103 | -DaltDeploymentRepository=validation_repository::default::file:${{ env.MVN_VALIDATION_DIR }} \ 104 | -Dflink.version=${{ 
inputs.flink_version }} | tee ${{ env.MVN_BUILD_OUTPUT_FILE }} 105 | 106 | - name: Check licensing 107 | run: | 108 | mvn ${MVN_COMMON_OPTIONS} exec:java@check-license -N \ 109 | -Dexec.args="${{ env.MVN_BUILD_OUTPUT_FILE }} $(pwd) $(pwd)" \ 110 | ${{ env.MVN_CONNECTION_OPTIONS }} \ 111 | -Dlog4j.configurationFile=file://$(pwd)/tools/ci/log4j.properties 112 | -------------------------------------------------------------------------------- /.github/workflows/nightly.yml: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | ################################################################################ 18 | 19 | name: "flink-connector-prometheus: nightly build" 20 | on: 21 | schedule: 22 | - cron: "0 0 * * *" 23 | jobs: 24 | compile_and_test: 25 | if: github.repository_owner == 'apache' 26 | strategy: 27 | matrix: 28 | flink: [1.19-SNAPSHOT, 1.20-SNAPSHOT] 29 | java: [ '8, 11, 17'] 30 | uses: ./.github/workflows/common.yml 31 | with: 32 | flink_version: ${{ matrix.flink }} 33 | jdk_version: ${{ matrix.java }} 34 | flink_url: https://s3.amazonaws.com/flink-nightly/flink-${{ matrix.flink }}-bin-scala_2.12.tgz 35 | cache_flink_binary: false 36 | secrets: inherit 37 | 38 | -------------------------------------------------------------------------------- /.github/workflows/push_pr.yml: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 
17 | ################################################################################ 18 | 19 | name: "flink-connector-prometheus: build on pull request" 20 | on: [push, pull_request, workflow_dispatch] 21 | concurrency: 22 | group: ${{ github.workflow }}-${{ github.ref }} 23 | cancel-in-progress: true 24 | jobs: 25 | compile_and_test: 26 | uses: ./.github/workflows/common.yml 27 | strategy: 28 | matrix: 29 | flink: [1.19.1, 1.20.0] 30 | java: [ '8, 11, 17'] 31 | with: 32 | flink_version: ${{ matrix.flink }} 33 | jdk_version: ${{ matrix.java }} 34 | flink_url: https://archive.apache.org/dist/flink/flink-${{ matrix.flink }}/flink-${{ matrix.flink }}-bin-scala_2.12.tgz 35 | cache_flink_binary: true 36 | secrets: inherit 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .eslintcache 2 | .cache 3 | scalastyle-output.xml 4 | .classpath 5 | .idea/ 6 | !.idea/vcs.xml 7 | .run/ 8 | .metadata 9 | .settings 10 | .project 11 | .version.properties 12 | filter.properties 13 | logs.zip 14 | .mvn/wrapper/*.jar 15 | target/ 16 | out/ 17 | tmp 18 | *.class 19 | *.iml 20 | *.swp 21 | *.jar 22 | *.zip 23 | *.log 24 | *.pyc 25 | .DS_Store 26 | build-target 27 | atlassian-ide-plugin.xml 28 | /docs/api 29 | /docs/.bundle 30 | /docs/.rubydeps 31 | /docs/ruby2/.bundle 32 | /docs/ruby2/.rubydeps 33 | /docs/.jekyll-metadata 34 | *.ipr 35 | *.iws 36 | tools/flink 37 | tools/flink-* 38 | tools/releasing/release 39 | tools/japicmp-output 40 | dependency-reduced-pom.xml 41 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "tools/releasing/shared"] 2 | path = tools/releasing/shared 3 | url = https://github.com/apache/flink-connector-shared-utils 4 | branch = release_utils 5 | 
-------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Apache Flink Prometheus connector 2 | Copyright 2024 The Apache Software Foundation 3 | 4 | This product includes software developed at 5 | The Apache Software Foundation (http://www.apache.org/). 6 | 7 | Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby 8 | granted, provided that this permission notice appear in all copies. 9 | 10 | THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING 11 | ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, 12 | DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, 13 | WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE 14 | USE OR PERFORMANCE OF THIS SOFTWARE. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Apache Flink Prometheus Connector 2 | 3 | This repository contains the official Apache Flink Prometheus connector. 4 | 5 | * [More details](flink-connector-prometheus/README.md) about the connector and its usage. 6 | * [Example application](flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/examples/DataStreamExample.java) 7 | demonstrating the usage of the connector. 8 | 9 | ## Apache Flink 10 | 11 | Apache Flink is an open source stream processing framework with powerful stream- and batch-processing capabilities. 
12 | 13 | Learn more about Flink at [https://flink.apache.org/](https://flink.apache.org/) 14 | 15 | ## Modules 16 | 17 | This repository contains the following modules 18 | 19 | * [Prometheus Connector](./flink-connector-prometheus): Flink Prometheus Connector implementation; supports optional 20 | request signer 21 | * [Sample application](./flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/examples/DataStreamExample.java): 22 | This class contains a complete application demonstrating set up and usage of the sink connector. 23 | * [Amazon Managed Prometheus Request Signer](./flink-connector-prometheus-request-signer-amp): Implementation of request 24 | signer for Amazon Managed Prometheus (AMP) 25 | 26 | ## Building the Apache Flink Prometheus Connector from Source 27 | 28 | Prerequisites: 29 | 30 | * Unix-like environment (we use Linux, Mac OS X) 31 | * Git 32 | * Maven (we recommend version 3.8.6) 33 | * Java 11 34 | 35 | ``` 36 | git clone https://github.com/apache/flink-connector-prometheus.git 37 | cd flink-connector-prometheus 38 | mvn clean package -DskipTests 39 | ``` 40 | 41 | The resulting jars can be found in the `target` directory of the respective module. 42 | 43 | ## Developing Flink 44 | 45 | The Flink committers use IntelliJ IDEA to develop the Flink codebase. 46 | We recommend IntelliJ IDEA for developing projects that involve Scala code. 47 | 48 | Minimal requirements for an IDE are: 49 | 50 | * Support for Java and Scala (also mixed projects) 51 | * Support for Maven with Java and Scala 52 | 53 | ### IntelliJ IDEA 54 | 55 | The IntelliJ IDE supports Maven out of the box and offers a plugin for Scala development. 
56 | 57 | * IntelliJ download: [https://www.jetbrains.com/idea/](https://www.jetbrains.com/idea/) 58 | * IntelliJ Scala Plugin: [https://plugins.jetbrains.com/plugin/?id=1347](https://plugins.jetbrains.com/plugin/?id=1347) 59 | 60 | Check out 61 | our [Setting up IntelliJ](https://nightlies.apache.org/flink/flink-docs-master/flinkDev/ide_setup.html#intellij-idea) 62 | guide for details. 63 | 64 | ## Support 65 | 66 | Don’t hesitate to ask! 67 | 68 | Contact the developers and community on the [mailing lists](https://flink.apache.org/community.html#mailing-lists) if 69 | you need any help. 70 | 71 | [Open an issue](https://issues.apache.org/jira/browse/FLINK) if you found a bug in Flink. 72 | 73 | ## Documentation 74 | 75 | The documentation of Apache Flink is located on the website: [https://flink.apache.org](https://flink.apache.org) 76 | or in the `docs/` directory of the source code. 77 | 78 | ## Fork and Contribute 79 | 80 | This is an active open-source project. We are always open to people who want to use the system or contribute to it. 81 | Contact us if you are looking for implementation tasks that fit your skills. 82 | This article 83 | describes [how to contribute to Apache Flink](https://flink.apache.org/contributing/how-to-contribute.html). 84 | 85 | ## About 86 | 87 | Apache Flink is an open source project of The Apache Software Foundation (ASF). 88 | The Apache Flink project originated from the [Stratosphere](http://stratosphere.eu) research project. 89 | -------------------------------------------------------------------------------- /docs/data/prometheus.yml: -------------------------------------------------------------------------------- 1 | ################################################################################ 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. 
The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | ################################################################################ 18 | 19 | version: 1.1-SNAPSHOT 20 | variants: 21 | - maven: flink-connector-prometheus 22 | - maven: flink-connector-prometheus-request-signer-amp 23 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/README.md: -------------------------------------------------------------------------------- 1 | ## Request Signer for Amazon Managed Prometheus (AMP) 2 | 3 | Request signer implementation for Amazon Managed Prometheus (AMP). 4 | 5 | The signer retrieves AWS credential using the default credential provider chain, that searches for credentials 6 | in the following order: ENV_VARS, SYS_PROPS, WEB_IDENTITY_TOKEN, PROFILE and EC2/ECS credentials provider. 7 | 8 | The Flink application requires `RemoteWrite` permissions to the AMP workspace (e.g. `AmazonPromethusRemoteWriteAccess` 9 | policy). 10 | 11 | ### Sample usage 12 | 13 | To enable request signing for Amazon Managed Prometheus, and instance of `AmazonManagedPrometheusWriteRequestSigner` 14 | must be provided when building the `PrometheusSink` instance. The only required parameters are the AWS region and the 15 | AMP remote-write URL. 
16 | 17 | ```java 18 | 19 | // AWS region of the AMP workspace 20 | String prometheusRegion = "us-east-1"; 21 | 22 | // Remote-Write URL of the AMP workspace 23 | String prometheusRemoteWriteUrl = "https://aps-workspaces.us-east-1.amazonaws.com/workspaces/ws-091245678-9abc-def0-1234-56789abcdef0/api/v1/remote_write"; 24 | 25 | // Build the sink to AMP using the request signer 26 | AsyncSinkBase sink = PrometheusSink.builder() 27 | .setPrometheusRemoteWriteUrl(prometheusRemoteWriteUrl) 28 | .setRequestSigner(new AmazonManagedPrometheusWriteRequestSigner(prometheusRemoteWriteUrl, prometheusRegion)) 29 | .build(); 30 | ``` 31 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/archunit-violations/6e53c00e-cca1-4df3-a67b-55a742f82f49: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/flink-connector-prometheus/959abc051bf79309dd89c523c9881c23b7e75fae/flink-connector-prometheus-request-signer-amp/archunit-violations/6e53c00e-cca1-4df3-a67b-55a742f82f49 -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/archunit-violations/b84f2d4b-67b7-44ff-9dc5-9a946a67c5e9: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/flink-connector-prometheus/959abc051bf79309dd89c523c9881c23b7e75fae/flink-connector-prometheus-request-signer-amp/archunit-violations/b84f2d4b-67b7-44ff-9dc5-9a946a67c5e9 -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/archunit-violations/stored.rules: -------------------------------------------------------------------------------- 1 | # 2 | #Mon Oct 28 11:45:11 GMT 2024 3 | ITCASE\ tests\ should\ use\ a\ MiniCluster\ resource\ or\ extension=6e53c00e-cca1-4df3-a67b-55a742f82f49 4 | Tests\ 
inheriting\ from\ AbstractTestBase\ should\ have\ name\ ending\ with\ ITCase=b84f2d4b-67b7-44ff-9dc5-9a946a67c5e9 5 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 20 | 23 | 4.0.0 24 | 25 | org.apache.flink 26 | flink-connector-prometheus-parent 27 | 1.1-SNAPSHOT 28 | 29 | 30 | Flink : Connectors : Prometheus : Amazon Managed Prometheus Request Signer 31 | flink-connector-prometheus-request-signer-amp 32 | 33 | 34 | UTF-8 35 | 36 | 37 | 38 | 39 | 40 | software.amazon.awssdk 41 | bom 42 | ${aws.sdkv2.version} 43 | pom 44 | import 45 | 46 | 47 | 48 | 49 | 50 | 51 | org.apache.flink 52 | flink-connector-prometheus 53 | ${project.version} 54 | provided 55 | 56 | 57 | 58 | software.amazon.awssdk 59 | auth 60 | 61 | 62 | 63 | org.apache.flink 64 | flink-core 65 | ${flink.version} 66 | provided 67 | 68 | 69 | 70 | 71 | org.junit.jupiter 72 | junit-jupiter 73 | test 74 | 75 | 76 | 77 | 78 | org.apache.flink 79 | flink-architecture-tests-test 80 | ${flink.version} 81 | test 82 | 83 | 84 | 85 | 86 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/src/main/java/org/apache/flink/connector/prometheus/sink/aws/AmazonManagedPrometheusWriteRequestSigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.aws; 19 | 20 | import org.apache.flink.annotation.PublicEvolving; 21 | import org.apache.flink.annotation.VisibleForTesting; 22 | import org.apache.flink.connector.prometheus.sink.PrometheusRequestSigner; 23 | import org.apache.flink.util.Preconditions; 24 | 25 | import org.apache.commons.lang3.StringUtils; 26 | import software.amazon.awssdk.auth.credentials.AwsCredentials; 27 | import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; 28 | import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; 29 | import software.amazon.awssdk.auth.credentials.DefaultCredentialsProvider; 30 | 31 | import java.net.MalformedURLException; 32 | import java.net.URL; 33 | import java.util.Map; 34 | 35 | /** 36 | * Sign a Remote-Write request to Amazon Managed Service for Prometheus (AMP). 37 | * 38 | *

On every request, AWS credentials are retrieved using an AwsCredentialsProvider, and used to 39 | * sign the request, with AWS Signature Version 4a. 40 | */ 41 | @PublicEvolving 42 | public class AmazonManagedPrometheusWriteRequestSigner implements PrometheusRequestSigner { 43 | // Header names 44 | private static final String X_AMZ_CONTENT_SHA_256 = "x-amz-content-sha256"; 45 | private static final String AUTHORIZATION = "Authorization"; 46 | 47 | private final URL remoteWriteUrl; 48 | private final String awsRegion; 49 | 50 | // The credential provider cannot be created in the constructor or passed as parameter, because 51 | // it is not serializable. Flink would fail serializing the sink instance when initializing the 52 | // job. 53 | private transient AwsCredentialsProvider credentialsProvider; 54 | 55 | /** 56 | * Creates a signer instance using the default AWS credentials provider chain. 57 | * 58 | * @param remoteWriteUrl URL of the remote-write endpoint 59 | * @param awsRegion Region of the AMP workspace 60 | */ 61 | public AmazonManagedPrometheusWriteRequestSigner(String remoteWriteUrl, String awsRegion) { 62 | Preconditions.checkArgument( 63 | StringUtils.isNotBlank(awsRegion), "awsRegion cannot be null or empty"); 64 | Preconditions.checkArgument( 65 | StringUtils.isNotBlank(remoteWriteUrl), "remoteWriteUrl cannot be null or empty"); 66 | 67 | this.awsRegion = awsRegion; 68 | try { 69 | this.remoteWriteUrl = new URL(remoteWriteUrl); 70 | } catch (MalformedURLException e) { 71 | throw new IllegalArgumentException( 72 | "Invalid AMP remote-write URL: " + remoteWriteUrl, e); 73 | } 74 | } 75 | 76 | /** 77 | * Setting the credential provider explicitly is exposed, at package level only, for testing 78 | * signature generaiton with different types of credentials. In the actual application, the 79 | * credential provider must be initialized lazily, because AwsCredentialsProvider 80 | * implementations are not serializable. 
81 | * 82 | * @param credentialsProvider an instance of AwsCredentialsProvider 83 | */ 84 | @VisibleForTesting 85 | void setCredentialsProvider(AwsCredentialsProvider credentialsProvider) { 86 | this.credentialsProvider = credentialsProvider; 87 | } 88 | 89 | /** 90 | * Initialize the credentials provider lazily. 91 | * 92 | * @return an instance of DefaultCredentialsProvider. 93 | */ 94 | private AwsCredentialsProvider getCredentialsProvider() { 95 | if (credentialsProvider == null) { 96 | credentialsProvider = DefaultCredentialsProvider.create(); 97 | } 98 | return credentialsProvider; 99 | } 100 | 101 | /** 102 | * Add the additional Http request headers required by Amazon Managed Prometheus: 103 | * 'x-amz-content-sha256', 'Host', 'X-Amz-Date', 'x-amz-security-token' and 'Authorization`. 104 | * 105 | * @param requestHeaders original Http request headers. It must be mutable. For efficiency, any 106 | * new header is added to the map, instead of making a copy. 107 | * @param requestBody request body, already compressed 108 | */ 109 | @Override 110 | public void addSignatureHeaders(Map requestHeaders, byte[] requestBody) { 111 | byte[] contentHash = AWS4SignerForAMP.hash(requestBody); 112 | String contentHashString = AWS4SignerForAMP.toHex(contentHash); 113 | 114 | // x-amz-content-sha256 must be included before generating the Authorization header 115 | requestHeaders.put(X_AMZ_CONTENT_SHA_256, contentHashString); 116 | 117 | // Get the credentials from the default credential provider chain 118 | AwsCredentials awsCreds = getCredentialsProvider().resolveCredentials(); 119 | 120 | // If the credentials are from a session, also get the session token 121 | String sessionToken = 122 | (awsCreds instanceof AwsSessionCredentials) 123 | ? 
((AwsSessionCredentials) awsCreds).sessionToken() 124 | : null; 125 | 126 | AWS4SignerForAMP signer = new AWS4SignerForAMP(remoteWriteUrl, awsRegion); 127 | 128 | // computeSignature also adds 'Host', 'X-Amz-Date' and 'x-amz-security-token' to the 129 | // requestHeaders Map 130 | String authorization = 131 | signer.computeSignature( 132 | requestHeaders, 133 | null, // no query parameters 134 | contentHashString, 135 | awsCreds.accessKeyId(), 136 | awsCreds.secretAccessKey(), 137 | sessionToken); 138 | requestHeaders.put(AUTHORIZATION, authorization); 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/src/test/java/architecture/TestCodeArchitectureTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
package architecture;

import org.apache.flink.architecture.TestCodeArchitectureTestBase;
import org.apache.flink.architecture.common.ImportOptions;

import com.tngtech.archunit.core.importer.ImportOption;
import com.tngtech.archunit.junit.AnalyzeClasses;
import com.tngtech.archunit.junit.ArchTest;
import com.tngtech.archunit.junit.ArchTests;

/**
 * Architecture tests for test code.
 *
 * <p>Runs the common Flink test-code architecture rules against the test classes of this module
 * only (production and shaded/Scala classes are excluded by the import options).
 */
@AnalyzeClasses(
        packages = "org.apache.flink.connector.prometheus",
        importOptions = {
            ImportOption.OnlyIncludeTests.class,
            ImportOptions.ExcludeScalaImportOption.class,
            ImportOptions.ExcludeShadedImportOption.class
        })
public class TestCodeArchitectureTest {
    @ArchTest
    public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class);
}
14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.aws; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import java.net.MalformedURLException; 23 | import java.net.URL; 24 | import java.util.HashMap; 25 | import java.util.Map; 26 | 27 | import static org.apache.flink.connector.prometheus.sink.aws.RequestHeaderTestUtil.assertContainsHeader; 28 | import static org.apache.flink.connector.prometheus.sink.aws.RequestHeaderTestUtil.assertDoesNotContainHeader; 29 | import static org.junit.jupiter.api.Assertions.assertTrue; 30 | 31 | public class AWS4SignerForAMPTest { 32 | 33 | private final AWS4SignerForAMP signer = 34 | new AWS4SignerForAMP(new URL("http://example.com/endpoint"), "us-east-1"); 35 | 36 | public AWS4SignerForAMPTest() throws MalformedURLException {} 37 | 38 | @Test 39 | public void shouldAddExpectedHeaders_noSessionToken() throws Exception { 40 | 41 | Map headers = new HashMap<>(); 42 | 43 | signer.computeSignature(headers, null, "body-hash", "access-key", "secret-key", null); 44 | 45 | assertContainsHeader("x-amz-date", headers); 46 | assertContainsHeader("Host", headers); 47 | assertDoesNotContainHeader("x-amz-security-token", headers); 48 | } 49 | 50 | @Test 51 | public void shouldAddExpectedHeaders_sessionToken() throws Exception { 52 | Map headers = new HashMap<>(); 53 | 54 | signer.computeSignature( 55 | headers, null, "body-hash", "access-key", "secret-key", "session-token"); 56 | 57 | assertContainsHeader("x-amz-date", headers); 58 | assertContainsHeader("Host", headers); 59 | assertContainsHeader("x-amz-security-token", headers); 60 | } 61 | 62 | @Test 63 | public void signatureShouldMatchPattern_SessionToken() throws Exception { 64 | String signature = 65 | signer.computeSignature( 66 | new HashMap<>(), 67 | null, 68 | "body-hash", 69 | "ACC355K3Y", 70 | "secret-key", 71 | "session-token"); 72 | 73 | String 
expectedSignaturePatternWirthSecurityToken = 74 | "AWS4-HMAC-SHA256 Credential=([A-Za-z0-9]+)/([0-9]{8})/([a-z0-9-]+)/aps/aws4_request, SignedHeaders=host;x-amz-date;x-amz-security-token, Signature=([a-f0-9]{64})"; 75 | 76 | assertTrue(signature.matches(expectedSignaturePatternWirthSecurityToken)); 77 | } 78 | 79 | @Test 80 | public void signatureShouldMatchPattern_noSessionToken() { 81 | String signature = 82 | signer.computeSignature( 83 | new HashMap<>(), null, "body-hash", "ACC355K3Y", "secret-key", null); 84 | 85 | String expectedSignaturePatternWithoutSecurityToken = 86 | "AWS4-HMAC-SHA256 Credential=([A-Za-z0-9]+)/([0-9]{8})/([a-z0-9-]+)/aps/aws4_request, SignedHeaders=host;x-amz-date, Signature=([a-f0-9]{64})"; 87 | 88 | assertTrue(signature.matches(expectedSignaturePatternWithoutSecurityToken)); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/src/test/java/org/apache/flink/connector/prometheus/sink/aws/AmazonManagedPrometheusWriteRequestSignerSerializationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
package org.apache.flink.connector.prometheus.sink.aws;

import org.junit.jupiter.api.Test;

import static org.apache.flink.util.InstantiationUtil.isSerializable;
import static org.junit.jupiter.api.Assertions.assertTrue;

/**
 * Verifies that the request signer is Java-serializable, as required for it to be shipped as part
 * of the sink when Flink serializes the job graph.
 */
public class AmazonManagedPrometheusWriteRequestSignerSerializationTest {

    @Test
    void shouldBeActuallySerializable() {
        AmazonManagedPrometheusWriteRequestSigner signer =
                new AmazonManagedPrometheusWriteRequestSigner(
                        "http://example.com/endpoint", "us-east-1");
        assertTrue(isSerializable(signer), "The request signer should be serializable");
    }
}
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.aws; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import java.nio.charset.StandardCharsets; 23 | import java.util.HashMap; 24 | import java.util.Map; 25 | 26 | import static org.apache.flink.connector.prometheus.sink.aws.RequestHeaderTestUtil.assertContainsHeader; 27 | import static org.apache.flink.connector.prometheus.sink.aws.RequestHeaderTestUtil.assertDoesNotContainHeader; 28 | import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; 29 | import static org.junit.jupiter.api.Assertions.assertThrows; 30 | 31 | class AmazonManagedPrometheusWriteRequestSignerTest { 32 | 33 | @Test 34 | public void constructorShouldFailIfRemoteWriteUrlIsBlankString() { 35 | assertThrows( 36 | IllegalArgumentException.class, 37 | () -> { 38 | new AmazonManagedPrometheusWriteRequestSigner(" ", "us-east-1"); 39 | }); 40 | } 41 | 42 | @Test 43 | public void constructorShouldFailIfRemoteWriteUrlIsNull() { 44 | assertThrows( 45 | IllegalArgumentException.class, 46 | () -> { 47 | new AmazonManagedPrometheusWriteRequestSigner(null, "us-east-1"); 48 | }); 49 | } 50 | 51 | @Test 52 | public void constructorShouldSucceedIfRemoteWriteUrlIsValidURL() { 53 | assertDoesNotThrow( 54 | () -> { 55 | new AmazonManagedPrometheusWriteRequestSigner( 56 | "https://example.com", "us-east-1"); 57 | }); 58 | } 59 | 60 | @Test 61 | public void constructorShouldFaiIfRemoteWriteUrlIsInvalidURL() { 62 | assertThrows( 63 | IllegalArgumentException.class, 64 | () -> { 65 | new AmazonManagedPrometheusWriteRequestSigner("invalid-url", "us-east-1"); 66 | }); 67 | } 68 | 69 | @Test 70 | public void constructorShouldFaiIfRegionIsBlankString() { 71 | assertThrows( 72 | IllegalArgumentException.class, 73 | () -> { 74 | new AmazonManagedPrometheusWriteRequestSigner("https://example.com", " "); 75 | }); 76 | } 77 | 78 | @Test 79 | public void constructorShouldFaiIfRegionIsNull() { 80 | assertThrows( 81 | 
IllegalArgumentException.class, 82 | () -> { 83 | new AmazonManagedPrometheusWriteRequestSigner("https://example.com", null); 84 | }); 85 | } 86 | 87 | @Test 88 | public void shouldAddExpectedHeaders_BasicCredentials() { 89 | AmazonManagedPrometheusWriteRequestSigner signer = 90 | new AmazonManagedPrometheusWriteRequestSigner( 91 | "https://example.com/endpoint", "us-east-1"); 92 | signer.setCredentialsProvider( 93 | new DummyAwsBasicCredentialsProvider("access-key-id", "secret-access-key")); 94 | 95 | Map requestHeaders = new HashMap<>(); 96 | byte[] requestBody = "request-payload".getBytes(StandardCharsets.UTF_8); 97 | 98 | signer.addSignatureHeaders(requestHeaders, requestBody); 99 | 100 | assertContainsHeader("Authorization", requestHeaders); 101 | assertContainsHeader("x-amz-content-sha256", requestHeaders); 102 | assertContainsHeader("x-amz-date", requestHeaders); 103 | assertContainsHeader("Host", requestHeaders); 104 | // The security token is only expected with session credentials 105 | assertDoesNotContainHeader("x-amz-security-token", requestHeaders); 106 | } 107 | 108 | @Test 109 | public void shouldAddExpectedHeaders_SessionCredentials() { 110 | AmazonManagedPrometheusWriteRequestSigner signer = 111 | new AmazonManagedPrometheusWriteRequestSigner( 112 | "https://example.com/endpoint", "us-east-1"); 113 | signer.setCredentialsProvider( 114 | new DummAwsSessionCredentialProvider( 115 | "access-key-id", "secret-access-key", "session-key")); 116 | 117 | Map requestHeaders = new HashMap<>(); 118 | byte[] requestBody = "request-payload".getBytes(StandardCharsets.UTF_8); 119 | 120 | signer.addSignatureHeaders(requestHeaders, requestBody); 121 | 122 | assertContainsHeader("Authorization", requestHeaders); 123 | assertContainsHeader("x-amz-content-sha256", requestHeaders); 124 | assertContainsHeader("x-amz-date", requestHeaders); 125 | assertContainsHeader("Host", requestHeaders); 126 | // With Session credentials should have the securitu token 127 | 
assertContainsHeader("x-amz-security-token", requestHeaders); 128 | } 129 | } 130 | -------------------------------------------------------------------------------- /flink-connector-prometheus-request-signer-amp/src/test/java/org/apache/flink/connector/prometheus/sink/aws/DummAwsSessionCredentialProvider.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.aws; 19 | 20 | import software.amazon.awssdk.auth.credentials.AwsCredentials; 21 | import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider; 22 | import software.amazon.awssdk.auth.credentials.AwsSessionCredentials; 23 | 24 | /** Dummy AwsCredentialsProvider returning AwsSessionCredentials. 
package org.apache.flink.connector.prometheus.sink.aws;

import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;
import software.amazon.awssdk.auth.credentials.AwsSessionCredentials;

/**
 * Dummy AwsCredentialsProvider returning AwsSessionCredentials built from fixed strings.
 *
 * <p>NOTE(review): the class name has a typo ("Dumm" instead of "Dummy"); it is kept because other
 * test classes reference it by name.
 */
public class DummAwsSessionCredentialProvider implements AwsCredentialsProvider {

    // Fixed credential values returned on every resolveCredentials() call
    private final String accessKeyId;
    private final String secretAccessKey;
    private final String sessionToken;

    public DummAwsSessionCredentialProvider(
            String accessKeyId, String secretAccessKey, String sessionToken) {
        this.accessKeyId = accessKeyId;
        this.secretAccessKey = secretAccessKey;
        this.sessionToken = sessionToken;
    }

    @Override
    public AwsCredentials resolveCredentials() {
        return AwsSessionCredentials.builder()
                .accessKeyId(accessKeyId)
                .secretAccessKey(secretAccessKey)
                .sessionToken(sessionToken)
                .build();
    }
}
package org.apache.flink.connector.prometheus.sink.aws;

import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentials;
import software.amazon.awssdk.auth.credentials.AwsCredentialsProvider;

/** Dummy AwsCredentialsProvider returning AwsBasicCredentials built from fixed strings. */
public class DummyAwsBasicCredentialsProvider implements AwsCredentialsProvider {

    // Fixed credential values returned on every resolveCredentials() call
    private final String accessKeyId;
    private final String secretAccessKey;

    public DummyAwsBasicCredentialsProvider(String accessKeyId, String secretAccessKey) {
        this.accessKeyId = accessKeyId;
        this.secretAccessKey = secretAccessKey;
    }

    @Override
    public AwsCredentials resolveCredentials() {
        return AwsBasicCredentials.builder()
                .accessKeyId(accessKeyId)
                .secretAccessKey(secretAccessKey)
                .build();
    }
}
package org.apache.flink.connector.prometheus.sink.aws;

import java.util.Map;

import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertTrue;

/** Assertion helpers for checking the presence or absence of HTTP request headers in tests. */
public class RequestHeaderTestUtil {
    // Generic parameters restored as <String, String> — TODO confirm against original source;
    // the dump stripped angle-bracketed content.
    static void assertContainsHeader(
            String expectedHeaderName, Map<String, String> requestHeader) {
        assertTrue(
                requestHeader.containsKey(expectedHeaderName),
                "Missing header: " + expectedHeaderName);
    }

    static void assertDoesNotContainHeader(
            String expectedHeaderName, Map<String, String> requestHeader) {
        assertFalse(
                requestHeader.containsKey(expectedHeaderName),
                "Unexpected header: " + expectedHeaderName);
    }
}
16 | # 17 | 18 | # By default we allow removing existing violations, but fail when new violations are added. 19 | freeze.store.default.allowStoreUpdate=true 20 | 21 | # Enable this if a new (frozen) rule has been added in order to create the initial store and record the existing violations. 22 | #freeze.store.default.allowStoreCreation=true 23 | 24 | # Enable this to add allow new violations to be recorded. 25 | # NOTE: Adding new violations should be avoided when possible. If the rule was correct to flag a new 26 | # violation, please try to avoid creating the violation. If the violation was created due to a 27 | # shortcoming of the rule, file a JIRA issue so the rule can be improved. 28 | #freeze.refreeze=true 29 | 30 | freeze.store.default.path=archunit-violations -------------------------------------------------------------------------------- /flink-connector-prometheus/archunit-violations/75250488-2d5e-433d-8fc4-51ff28cb11b3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/flink-connector-prometheus/959abc051bf79309dd89c523c9881c23b7e75fae/flink-connector-prometheus/archunit-violations/75250488-2d5e-433d-8fc4-51ff28cb11b3 -------------------------------------------------------------------------------- /flink-connector-prometheus/archunit-violations/d962399a-ad51-48ff-ab30-23969cef546c: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/apache/flink-connector-prometheus/959abc051bf79309dd89c523c9881c23b7e75fae/flink-connector-prometheus/archunit-violations/d962399a-ad51-48ff-ab30-23969cef546c -------------------------------------------------------------------------------- /flink-connector-prometheus/archunit-violations/stored.rules: -------------------------------------------------------------------------------- 1 | # 2 | #Mon Oct 28 11:44:17 GMT 2024 3 | ITCASE\ tests\ should\ use\ a\ MiniCluster\ resource\ or\ 
extension=d962399a-ad51-48ff-ab30-23969cef546c 4 | Tests\ inheriting\ from\ AbstractTestBase\ should\ have\ name\ ending\ with\ ITCase=75250488-2d5e-433d-8fc4-51ff28cb11b3 5 | -------------------------------------------------------------------------------- /flink-connector-prometheus/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 20 | 23 | 4.0.0 24 | 25 | org.apache.flink 26 | flink-connector-prometheus-parent 27 | 1.1-SNAPSHOT 28 | 29 | 30 | Flink : Connectors : Prometheus 31 | org.apache.flink 32 | flink-connector-prometheus 33 | jar 34 | 35 | 36 | 37 | UTF-8 38 | 39 | 40 | 41 | 42 | org.apache.flink 43 | flink-streaming-java 44 | ${flink.version} 45 | provided 46 | 47 | 48 | org.apache.flink 49 | flink-connector-base 50 | ${flink.version} 51 | provided 52 | 53 | 54 | 55 | 56 | com.google.protobuf 57 | protobuf-java 58 | ${protobuf.version} 59 | 60 | 61 | com.google.protobuf 62 | protobuf-java-util 63 | ${protobuf.version} 64 | 65 | 66 | org.xerial.snappy 67 | snappy-java 68 | 69 | 70 | 71 | 72 | org.apache.httpcomponents.client5 73 | httpclient5 74 | 75 | 76 | org.apache.httpcomponents.core5 77 | httpcore5 78 | 79 | 80 | 81 | 82 | org.apache.flink 83 | flink-test-utils-junit 84 | ${flink.version} 85 | test 86 | 87 | 88 | com.github.tomakehurst 89 | wiremock-jre8 90 | 2.35.1 91 | test 92 | 93 | 94 | org.awaitility 95 | awaitility 96 | 4.2.0 97 | test 98 | 99 | 100 | org.apache.flink 101 | flink-connector-base 102 | ${flink.version} 103 | test-jar 104 | test 105 | 106 | 107 | org.apache.flink 108 | flink-connector-test-utils 109 | ${flink.version} 110 | test 111 | 112 | 113 | 114 | 115 | org.apache.flink 116 | flink-architecture-tests-test 117 | test 118 | 119 | 120 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/PrometheusRemoteWriteHttpRequestBuilder.java: 
-------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | import org.apache.flink.util.Preconditions; 22 | 23 | import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; 24 | import org.apache.hc.client5.http.async.methods.SimpleRequestBuilder; 25 | import org.apache.hc.core5.http.ContentType; 26 | import org.apache.hc.core5.http.HttpHeaders; 27 | 28 | import java.util.HashMap; 29 | import java.util.Map; 30 | 31 | /** Builds the POST request to the Remote-Write endpoint for a given binary payload. 
*/ 32 | @Internal 33 | public class PrometheusRemoteWriteHttpRequestBuilder { 34 | 35 | private static final ContentType CONTENT_TYPE = ContentType.create("application/x-protobuf"); 36 | 37 | private static final String CONTENT_ENCODING = "snappy"; 38 | private static final String REMOTE_WRITE_VERSION_HEADER = "X-Prometheus-Remote-Write-Version"; 39 | private static final String REMOTE_WRITE_VERSION = "0.1.0"; 40 | 41 | public static final String DEFAULT_USER_AGENT = "Flink-Prometheus"; 42 | 43 | private final String prometheusRemoteWriteUrl; 44 | private final PrometheusRequestSigner requestSigner; 45 | 46 | private final Map fixedHeaders; 47 | 48 | public PrometheusRemoteWriteHttpRequestBuilder( 49 | String prometheusRemoteWriteUrl, 50 | PrometheusRequestSigner requestSigner, 51 | String httpUserAgent) { 52 | Preconditions.checkNotNull(httpUserAgent, "User-Agent not specified"); 53 | 54 | this.prometheusRemoteWriteUrl = prometheusRemoteWriteUrl; 55 | this.requestSigner = requestSigner; 56 | this.fixedHeaders = new HashMap<>(); 57 | fixedHeaders.put(HttpHeaders.CONTENT_ENCODING, CONTENT_ENCODING); 58 | fixedHeaders.put(REMOTE_WRITE_VERSION_HEADER, REMOTE_WRITE_VERSION); 59 | fixedHeaders.put(HttpHeaders.USER_AGENT, httpUserAgent); 60 | } 61 | 62 | public SimpleHttpRequest buildHttpRequest(byte[] httpRequestBody) { 63 | Map headers = new HashMap<>(fixedHeaders); 64 | if (requestSigner != null) { 65 | requestSigner.addSignatureHeaders(headers, httpRequestBody); 66 | } 67 | 68 | SimpleRequestBuilder builder = 69 | SimpleRequestBuilder.post() 70 | .setUri(prometheusRemoteWriteUrl) 71 | .setBody(httpRequestBody, CONTENT_TYPE); 72 | 73 | for (Map.Entry header : headers.entrySet()) { 74 | builder.addHeader(header.getKey(), header.getValue()); 75 | } 76 | 77 | return builder.build(); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- 
package org.apache.flink.connector.prometheus.sink;

import org.apache.flink.annotation.PublicEvolving;

import java.io.Serializable;
import java.util.Map;

/**
 * Interface for a request signer, specific of the Prometheus implementation.
 *
 * <p>A request signer implementation can generate additional HTTP request headers, based on the
 * existing headers and the request body.
 *
 * <p>The signature method is called on every write request. For performance reasons, the Map of
 * HTTP headers is mutated, instead of making a copy.
 *
 * <p>Implementations must be Serializable: the signer is shipped as part of the sink when Flink
 * serializes the job graph.
 */
@PublicEvolving
public interface PrometheusRequestSigner extends Serializable {

    /**
     * Add to the existing http request headers any additional header required by the specific
     * Prometheus implementation for signing.
     *
     * @param requestHeaders original Http request headers. For efficiency, the implementation is
     *     expected to modify the Map in place. The Map is expected to be mutable.
     * @param requestBody request body, already compressed.
     */
    void addSignatureHeaders(Map<String, String> requestHeaders, byte[] requestBody);
}
17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink; 20 | 21 | import org.apache.flink.annotation.PublicEvolving; 22 | 23 | /** 24 | * A {@link RuntimeException} wrapper indicating a generic unhandled exception was thrown by the 25 | * Prometheus sink. 26 | */ 27 | @PublicEvolving 28 | public class PrometheusSinkException extends RuntimeException { 29 | public PrometheusSinkException(String message) { 30 | super(message); 31 | } 32 | 33 | public PrometheusSinkException(String message, Throwable cause) { 34 | super(message, cause); 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/PrometheusStateSerializer.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | import org.apache.flink.connector.base.sink.writer.AsyncSinkWriterStateSerializer; 22 | import org.apache.flink.connector.base.sink.writer.BufferedRequestState; 23 | import org.apache.flink.connector.base.sink.writer.RequestEntryWrapper; 24 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 25 | 26 | import java.io.ByteArrayInputStream; 27 | import java.io.ByteArrayOutputStream; 28 | import java.io.DataInputStream; 29 | import java.io.DataOutputStream; 30 | import java.io.IOException; 31 | import java.util.ArrayList; 32 | import java.util.Collection; 33 | import java.util.List; 34 | 35 | /** 36 | * Serializes/deserializes the sink request-entry, the protobuf-generated {@link Types.TimeSeries}, 37 | * using protobuf. 38 | */ 39 | @Internal 40 | public class PrometheusStateSerializer extends AsyncSinkWriterStateSerializer { 41 | private static final int VERSION = 1; 42 | 43 | // Copied from AsyncSinkWriterStateSerializer.DATA_IDENTIFIER 44 | private static final long DATA_IDENTIFIER = -1; 45 | 46 | @Override 47 | protected void serializeRequestToStream(Types.TimeSeries request, DataOutputStream out) 48 | throws IOException { 49 | byte[] serializedRequest = request.toByteArray(); 50 | out.write(serializedRequest); 51 | } 52 | 53 | @Override 54 | protected Types.TimeSeries deserializeRequestFromStream(long requestSize, DataInputStream in) 55 | throws IOException { 56 | // The size written into the serialized stat is the size of the protobuf-serialized 57 | // time-series 58 | byte[] requestData = new byte[(int) requestSize]; 59 | in.read(requestData); 60 | return Types.TimeSeries.parseFrom(requestData); 61 | } 62 | 63 | @Override 64 | public int getVersion() { 65 | return VERSION; 66 | } 67 | 68 | /** 69 | * Overrides the original implementation that assumes the serialized size is the value returned 70 | * by {@link 
PrometheusSinkWriter#getSizeInBytes(Types.TimeSeries)} 71 | * 72 | *

Most of the code is copied from the original implementation of 73 | * AsyncSinkWriterStateSerializer.serialize(). 74 | * 75 | *

The state is serialized in form of 76 | * [DATA_IDENTIFIER,NUM_OF_ELEMENTS,SIZE1,REQUEST1,SIZE2,REQUEST2....], where REQUEST{n} is the 77 | * Protobuf-serialized representation of a {@link Types.TimeSeries TimeSeries}. In this 78 | * implementation SIZE{n} is the size of the Protobuf serialization, in bytes, that does not 79 | * match the "size" of a {@link RequestEntryWrapper}. 80 | * 81 | * @param bufferedRequestState The buffered request state to be serialized 82 | * @return serialized buffered request state 83 | * @throws IOException 84 | */ 85 | @Override 86 | public byte[] serialize(BufferedRequestState bufferedRequestState) 87 | throws IOException { 88 | Collection> bufferState = 89 | bufferedRequestState.getBufferedRequestEntries(); 90 | 91 | try (final ByteArrayOutputStream baos = new ByteArrayOutputStream(); 92 | final DataOutputStream out = new DataOutputStream(baos)) { 93 | 94 | out.writeLong(DATA_IDENTIFIER); // DATA_IDENTIFIER 95 | out.writeInt(bufferState.size()); // NUM_OF_ELEMENTS 96 | 97 | for (RequestEntryWrapper wrapper : bufferState) { 98 | // In the serialized state we write the size of the serialized representation, 99 | // rather than the size 100 | // held in RequestEntryWrapper, that is the output of 101 | // AsyncSinkWriter.getSizeInBytes() 102 | long requestEntrySize = 103 | RequestEntrySizeUtils.requestSerializedSize(wrapper.getRequestEntry()); 104 | out.writeLong(requestEntrySize); // SIZE{n} 105 | serializeRequestToStream(wrapper.getRequestEntry(), out); // REQUEST{n} 106 | } 107 | 108 | return baos.toByteArray(); 109 | } 110 | } 111 | 112 | /** 113 | * Overrides the original implementation that assumes the serialized size is the value returned 114 | * by {@link PrometheusSinkWriter#getSizeInBytes(Types.TimeSeries)} 115 | * 116 | *

See {@link PrometheusStateSerializer#serialize(BufferedRequestState)} for more details. 117 | * 118 | * @param version The version in which the data was serialized 119 | * @param serialized The serialized data 120 | * @return a buffered request state wrapping the deserialized time-series. 121 | * @throws IOException 122 | */ 123 | @Override 124 | public BufferedRequestState deserialize(int version, byte[] serialized) 125 | throws IOException { 126 | try (final ByteArrayInputStream bais = new ByteArrayInputStream(serialized); 127 | final DataInputStream in = new DataInputStream(bais)) { 128 | 129 | validateIdentifier(in); // DATA_IDENTIFIER 130 | int numberOfElements = in.readInt(); // NUM_OF_ELEMENTS 131 | 132 | List> serializedState = new ArrayList<>(); 133 | for (int i = 0; i < numberOfElements; i++) { 134 | // This is the size of a request-entry in the serialized state 135 | long requestSerializedSize = in.readLong(); // SIZE (the serialized size) 136 | Types.TimeSeries requestEntry = 137 | deserializeRequestFromStream(requestSerializedSize, in); 138 | 139 | // The "size" of RequestEntryWrapper must be the size returned by 140 | // PrometheusSinkWriter.getSizeInBytes(), 141 | // used for batching 142 | long requestEntrySize = RequestEntrySizeUtils.requestSizeForBatching(requestEntry); 143 | serializedState.add(new RequestEntryWrapper<>(requestEntry, requestEntrySize)); 144 | } 145 | 146 | return new BufferedRequestState<>(serializedState); 147 | } 148 | } 149 | 150 | // Copy of the private implementation of AsyncSinkWriterStateSerializer.validateIdentifier() 151 | private void validateIdentifier(DataInputStream in) throws IOException { 152 | if (in.readLong() != DATA_IDENTIFIER) { 153 | throw new IllegalStateException("Corrupted data to deserialize"); 154 | } 155 | } 156 | } 157 | -------------------------------------------------------------------------------- 
/flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/PrometheusTimeSeries.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.annotation.PublicEvolving; 21 | 22 | import org.apache.commons.lang3.builder.EqualsBuilder; 23 | import org.apache.commons.lang3.builder.HashCodeBuilder; 24 | 25 | import java.io.Serializable; 26 | import java.util.ArrayList; 27 | import java.util.Arrays; 28 | import java.util.List; 29 | import java.util.Objects; 30 | 31 | /** 32 | * Pojo used as sink input, containing a single TimeSeries: a list of Labels and a list of Samples. 33 | * 34 | *

metricName is mapped in Prometheus to the value of the mandatory label named '__name__' 35 | * labels. The other labels, as key/value, are appended after the '__name__' label. 36 | */ 37 | @PublicEvolving 38 | public class PrometheusTimeSeries implements Serializable { 39 | /** A single Label. */ 40 | public static class Label implements Serializable { 41 | private final String name; 42 | private final String value; 43 | 44 | public Label(String name, String value) { 45 | this.name = name; 46 | this.value = value; 47 | } 48 | 49 | public String getName() { 50 | return name; 51 | } 52 | 53 | public String getValue() { 54 | return value; 55 | } 56 | 57 | @Override 58 | public boolean equals(Object o) { 59 | if (this == o) { 60 | return true; 61 | } 62 | if (o == null || getClass() != o.getClass()) { 63 | return false; 64 | } 65 | Label label = (Label) o; 66 | return new EqualsBuilder() 67 | .append(name, label.name) 68 | .append(value, label.value) 69 | .isEquals(); 70 | } 71 | 72 | @Override 73 | public int hashCode() { 74 | return new HashCodeBuilder(17, 37).append(name).append(value).toHashCode(); 75 | } 76 | } 77 | 78 | /** A single Sample. 
*/ 79 | public static class Sample implements Serializable { 80 | private final double value; 81 | private final long timestamp; 82 | 83 | public Sample(double value, long timestamp) { 84 | this.value = value; 85 | this.timestamp = timestamp; 86 | } 87 | 88 | public double getValue() { 89 | return value; 90 | } 91 | 92 | public long getTimestamp() { 93 | return timestamp; 94 | } 95 | 96 | @Override 97 | public boolean equals(Object o) { 98 | if (this == o) { 99 | return true; 100 | } 101 | if (o == null || getClass() != o.getClass()) { 102 | return false; 103 | } 104 | Sample sample = (Sample) o; 105 | return Double.compare(value, sample.value) == 0 && timestamp == sample.timestamp; 106 | } 107 | 108 | @Override 109 | public int hashCode() { 110 | return Objects.hash(value, timestamp); 111 | } 112 | } 113 | 114 | private final Label[] labels; 115 | private final Sample[] samples; 116 | private final String metricName; 117 | 118 | public PrometheusTimeSeries(String metricName, Label[] labels, Sample[] samples) { 119 | this.metricName = metricName; 120 | this.labels = labels; 121 | this.samples = samples; 122 | } 123 | 124 | public Label[] getLabels() { 125 | return labels; 126 | } 127 | 128 | public Sample[] getSamples() { 129 | return samples; 130 | } 131 | 132 | public String getMetricName() { 133 | return metricName; 134 | } 135 | 136 | public static Builder builder() { 137 | return new Builder(); 138 | } 139 | 140 | public static Builder builderFrom(PrometheusTimeSeries other) { 141 | return new Builder( 142 | Arrays.asList(other.labels), Arrays.asList(other.samples), other.metricName); 143 | } 144 | 145 | /** Builder for sink input pojo instance. */ 146 | public static final class Builder { 147 | private List

For using the sink with parallelism > 1, the input of the sink must be a keyedStream using 30 | * this KeySelector to extract the key. This guarantees TimeSeries with the same set of labels are 31 | * written to Prometheus in the same order they are sent to the sink. 32 | * 33 | *

The partition key is the hash of all labels AND the metricName. The metricName is added as 34 | * additional label by the sink, before writing to Prometheus. 35 | */ 36 | @PublicEvolving 37 | public class PrometheusTimeSeriesLabelsAndMetricNameKeySelector 38 | implements KeySelector { 39 | @Override 40 | public Integer getKey(PrometheusTimeSeries timeSeries) throws Exception { 41 | return new HashCodeBuilder(17, 37) 42 | .append(timeSeries.getLabels()) 43 | .append(timeSeries.getMetricName()) 44 | .build(); 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/RequestEntrySizeUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 22 | 23 | import java.util.Collection; 24 | 25 | /** Collection of methods to calculate the sink RequestEntry "size". 
*/ 26 | @Internal 27 | public class RequestEntrySizeUtils { 28 | 29 | /** 30 | * Size of a request entry (a {@link Types.TimeSeries time-series}) for the purpose of batching. 31 | * Count the number of {@link Types.Sample samples} 32 | * 33 | * @param requestEntry a time-series 34 | * @return number of Samples in the TimeSeries 35 | */ 36 | public static long requestSizeForBatching(Types.TimeSeries requestEntry) { 37 | return requestEntry.getSamplesCount(); 38 | } 39 | 40 | /** 41 | * Serialized size of a request entry {@link Types.TimeSeries TimeSeries}: the number of bytes 42 | * of the protobuf- serialized representation of the TimeSeries. 43 | * 44 | * @param requestEntry a time-series 45 | * @return number of bytes 46 | */ 47 | public static long requestSerializedSize(Types.TimeSeries requestEntry) { 48 | return requestEntry.getSerializedSize(); 49 | } 50 | 51 | /** 52 | * Count the number of {@link Types.Sample samples} in a collection of {@link Types.TimeSeries 53 | * time-series} (a batch). 54 | * 55 | * @param requestEntries collection of time-series 56 | * @return number of samples 57 | */ 58 | public static long countSamples(Collection requestEntries) { 59 | long count = 0; 60 | for (Types.TimeSeries requestEntry : requestEntries) { 61 | count += requestSizeForBatching(requestEntry); 62 | } 63 | return count; 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/errorhandling/PrometheusSinkWriteException.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.errorhandling; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | 22 | /** Exception during writing to Prometheus Remote-Write endpoint. */ 23 | @Internal 24 | public class PrometheusSinkWriteException extends RuntimeException { 25 | 26 | public PrometheusSinkWriteException(String reason) { 27 | super("Reason: " + reason); 28 | } 29 | 30 | public PrometheusSinkWriteException(String reason, Exception cause) { 31 | super("Reason: " + reason, cause); 32 | } 33 | 34 | public PrometheusSinkWriteException( 35 | String reason, 36 | int httpStatusCode, 37 | String httpReasonPhrase, 38 | int timeSeriesCount, 39 | long sampleCount, 40 | String httpResponseBody) { 41 | super( 42 | String.format( 43 | "Reason: %s. 
Http response: %d,%s (%s) .The offending write-request contains %d time-series and %d samples", 44 | reason, 45 | httpStatusCode, 46 | httpReasonPhrase, 47 | httpResponseBody, 48 | timeSeriesCount, 49 | sampleCount)); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/http/PrometheusAsyncHttpClientBuilder.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.annotation.PublicEvolving; 21 | import org.apache.flink.connector.prometheus.sink.PrometheusSinkConfiguration; 22 | import org.apache.flink.connector.prometheus.sink.metrics.SinkMetricsCallback; 23 | import org.apache.flink.util.Preconditions; 24 | 25 | import org.apache.hc.client5.http.config.TlsConfig; 26 | import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; 27 | import org.apache.hc.client5.http.impl.async.HttpAsyncClients; 28 | import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManager; 29 | import org.apache.hc.client5.http.impl.nio.PoolingAsyncClientConnectionManagerBuilder; 30 | import org.apache.hc.core5.http2.HttpVersionPolicy; 31 | import org.apache.hc.core5.reactor.IOReactorConfig; 32 | import org.apache.hc.core5.util.Timeout; 33 | 34 | import java.io.Serializable; 35 | import java.util.Optional; 36 | 37 | /** 38 | * Builder for async http client that will retry, based on the {@link RemoteWriteRetryStrategy} 39 | * specified. 
40 | */ 41 | @PublicEvolving 42 | public class PrometheusAsyncHttpClientBuilder implements Serializable { 43 | public static final int DEFAULT_SOCKET_TIMEOUT_MS = 5000; 44 | 45 | private final PrometheusSinkConfiguration.RetryConfiguration retryConfiguration; 46 | private Integer socketTimeoutMs; 47 | 48 | public PrometheusAsyncHttpClientBuilder( 49 | PrometheusSinkConfiguration.RetryConfiguration retryConfiguration) { 50 | this.retryConfiguration = retryConfiguration; 51 | } 52 | 53 | public PrometheusAsyncHttpClientBuilder setSocketTimeout(int socketTimeoutMs) { 54 | this.socketTimeoutMs = socketTimeoutMs; 55 | return this; 56 | } 57 | 58 | public CloseableHttpAsyncClient buildAndStartClient(SinkMetricsCallback metricsCallback) { 59 | int actualSocketTimeoutMs = 60 | Optional.ofNullable(socketTimeoutMs).orElse(DEFAULT_SOCKET_TIMEOUT_MS); 61 | 62 | Preconditions.checkNotNull(retryConfiguration, "Missing retry configuration"); 63 | Preconditions.checkArgument( 64 | retryConfiguration.getInitialRetryDelayMS() >= 0, 65 | "Initial retry delay must be >= 0"); 66 | Preconditions.checkArgument( 67 | retryConfiguration.getMaxRetryDelayMS() 68 | >= retryConfiguration.getInitialRetryDelayMS(), 69 | "Max retry delay must be >= initial retry delay"); 70 | Preconditions.checkArgument( 71 | retryConfiguration.getMaxRetryCount() >= 0, "Max retry count must be >= 0"); 72 | Preconditions.checkArgument(actualSocketTimeoutMs >= 0, "Socket timeout must be >= 0"); 73 | 74 | final IOReactorConfig ioReactorConfig = 75 | IOReactorConfig.custom() 76 | // Remote-Writes must be single-threaded to prevent out-of-order writes 77 | .setIoThreadCount(1) 78 | .setSoTimeout(Timeout.ofMilliseconds(actualSocketTimeoutMs)) 79 | .build(); 80 | 81 | TlsConfig tlsConfig = 82 | TlsConfig.custom().setVersionPolicy(HttpVersionPolicy.FORCE_HTTP_1).build(); 83 | PoolingAsyncClientConnectionManager connectionManager = 84 | PoolingAsyncClientConnectionManagerBuilder.create() 85 | 
.setDefaultTlsConfig(tlsConfig) 86 | .build(); 87 | CloseableHttpAsyncClient client = 88 | HttpAsyncClients.custom() 89 | .setConnectionManager(connectionManager) 90 | .setIOReactorConfig(ioReactorConfig) 91 | .setIOSessionListener(new RethrowingIOSessionListener()) 92 | .setRetryStrategy( 93 | new RemoteWriteRetryStrategy(retryConfiguration, metricsCallback)) 94 | .build(); 95 | 96 | client.start(); 97 | return client; 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/http/RemoteWriteResponseClassifier.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | 22 | import org.apache.hc.core5.http.HttpResponse; 23 | 24 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.FATAL_ERROR; 25 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.NON_RETRYABLE_ERROR; 26 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.RETRYABLE_ERROR; 27 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.SUCCESS; 28 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.UNHANDLED; 29 | 30 | /** Classify http responses based on the status code. */ 31 | @Internal 32 | public class RemoteWriteResponseClassifier { 33 | 34 | public static RemoteWriteResponseType classify(HttpResponse response) { 35 | int statusCode = response.getCode(); 36 | if (statusCode >= 200 && statusCode < 300) { 37 | // 2xx: success 38 | return SUCCESS; 39 | } else if (statusCode == 429) { 40 | // 429, Too Many Requests: throttling 41 | return RETRYABLE_ERROR; 42 | } else if (statusCode == 403 || statusCode == 404) { 43 | // 403, Forbidden: authentication error 44 | // 404, Not Found: wrong endpoint URL path 45 | return FATAL_ERROR; 46 | } else if (statusCode >= 400 && statusCode < 500) { 47 | // 4xx (except 403, 404, 429): wrong request/bad data 48 | return NON_RETRYABLE_ERROR; 49 | } else if (statusCode >= 500) { 50 | // 5xx: internal errors, recoverable 51 | return RETRYABLE_ERROR; 52 | } else { 53 | // Other status code are unhandled 54 | return UNHANDLED; 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/http/RemoteWriteResponseType.java: -------------------------------------------------------------------------------- 1 | /* 
2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | 22 | /** 23 | * Type of response, from Remote-Write endpoint, classified by {@link 24 | * RemoteWriteResponseClassifier}. 25 | */ 26 | @Internal 27 | public enum RemoteWriteResponseType { 28 | /** The Write-Request was successfully accepted. */ 29 | SUCCESS, 30 | /** Write-Request temporarily rejected. The request can be retried. */ 31 | RETRYABLE_ERROR, 32 | /** 33 | * Write-Request permanently rejected. It cannot be retried. The error condition is recoverable, 34 | * after discarding the offending request. 35 | */ 36 | NON_RETRYABLE_ERROR, 37 | /** Unrecoverable error condition. */ 38 | FATAL_ERROR, 39 | /** Unhandled status code. 
*/ 40 | UNHANDLED; 41 | } 42 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/http/RemoteWriteRetryStrategy.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | import org.apache.flink.connector.prometheus.sink.PrometheusSinkConfiguration; 22 | import org.apache.flink.connector.prometheus.sink.metrics.SinkMetricsCallback; 23 | 24 | import org.apache.hc.client5.http.HttpRequestRetryStrategy; 25 | import org.apache.hc.core5.http.HttpRequest; 26 | import org.apache.hc.core5.http.HttpResponse; 27 | import org.apache.hc.core5.http.protocol.HttpContext; 28 | import org.apache.hc.core5.util.TimeValue; 29 | import org.slf4j.Logger; 30 | import org.slf4j.LoggerFactory; 31 | 32 | import javax.net.ssl.SSLException; 33 | 34 | import java.io.IOException; 35 | import java.io.InterruptedIOException; 36 | import java.net.ConnectException; 37 | import java.net.NoRouteToHostException; 38 | import java.net.UnknownHostException; 39 | import java.util.ArrayList; 40 | import java.util.Collections; 41 | import java.util.List; 42 | 43 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseClassifier.classify; 44 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.RETRYABLE_ERROR; 45 | 46 | /** 47 | * Retry strategy for the http client. 48 | * 49 | *

Based on the http status code returned or the exception thrown, this strategy either retries 50 | * with an exponential backoff strategy or immediately fail. 51 | * 52 | *

Response status codes are classified as retryable or non-retryable using {@link 53 | * RemoteWriteResponseClassifier}. 54 | * 55 | *

All {@link IOException} are considered retryable, except for {@link InterruptedIOException}, 56 | * {@link UnknownHostException}, {@link ConnectException}, {@link NoRouteToHostException}, and 57 | * {@link SSLException}. 58 | */ 59 | @Internal 60 | public class RemoteWriteRetryStrategy implements HttpRequestRetryStrategy { 61 | private static final Logger LOG = LoggerFactory.getLogger(RemoteWriteRetryStrategy.class); 62 | 63 | /** List of exceptions considered non-recoverable (non-retryable). */ 64 | private static final List> NON_RECOVERABLE_EXCEPTIONS = 65 | Collections.unmodifiableList( 66 | new ArrayList>() { 67 | { 68 | add(InterruptedIOException.class); 69 | add(UnknownHostException.class); 70 | add(ConnectException.class); 71 | add(NoRouteToHostException.class); 72 | add(SSLException.class); 73 | } 74 | }); 75 | 76 | private final long initialRetryDelayMs; 77 | private final long maxRetryDelayMs; 78 | private final int maxRetryCount; 79 | 80 | private final SinkMetricsCallback metricsCallback; 81 | 82 | public RemoteWriteRetryStrategy( 83 | PrometheusSinkConfiguration.RetryConfiguration retryConfiguration, 84 | SinkMetricsCallback metricsCallback) { 85 | this.initialRetryDelayMs = retryConfiguration.getInitialRetryDelayMS(); 86 | this.maxRetryDelayMs = retryConfiguration.getMaxRetryDelayMS(); 87 | this.maxRetryCount = retryConfiguration.getMaxRetryCount(); 88 | this.metricsCallback = metricsCallback; 89 | } 90 | 91 | @Override 92 | public boolean retryRequest( 93 | HttpRequest httpRequest, IOException e, int execCount, HttpContext httpContext) { 94 | // Retry on any IOException except those considered non-recoverable 95 | boolean retry = 96 | (execCount <= maxRetryCount) 97 | && !(NON_RECOVERABLE_EXCEPTIONS.contains(e.getClass())); 98 | LOG.debug( 99 | "{} retry on {}, at execution {}", 100 | (retry) ? 
"DO" : "DO NOT", 101 | e.getClass(), 102 | execCount); 103 | countRetry(retry); 104 | return retry; 105 | } 106 | 107 | @Override 108 | public boolean retryRequest(HttpResponse httpResponse, int execCount, HttpContext httpContext) { 109 | boolean retry = (execCount <= maxRetryCount) && (classify(httpResponse) == RETRYABLE_ERROR); 110 | LOG.debug( 111 | "{} retry on response {} {}, at execution {}", 112 | (retry) ? "DO" : "DO NOT", 113 | httpResponse.getCode(), 114 | httpResponse.getReasonPhrase(), 115 | execCount); 116 | countRetry(retry); 117 | return retry; 118 | } 119 | 120 | @Override 121 | public TimeValue getRetryInterval( 122 | HttpResponse httpResponse, int execCount, HttpContext httpContext) { 123 | long calculatedDelay = initialRetryDelayMs << (execCount - 1); 124 | return TimeValue.ofMilliseconds(Math.min(calculatedDelay, maxRetryDelayMs)); 125 | } 126 | 127 | private void countRetry(boolean retry) { 128 | if (retry) { 129 | metricsCallback.onWriteRequestRetry(); 130 | } 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/http/RethrowingIOSessionListener.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.annotation.Internal; 21 | import org.apache.flink.connector.prometheus.sink.errorhandling.PrometheusSinkWriteException; 22 | 23 | import org.apache.hc.core5.reactor.IOSession; 24 | import org.apache.hc.core5.reactor.IOSessionListener; 25 | 26 | /** 27 | * Selectively rethrow PrometheusSinkWriteException, causing the httpclient to fail. Otherwise, the 28 | * exception would be swallowed by the IOReactor. 29 | */ 30 | @Internal 31 | public class RethrowingIOSessionListener implements IOSessionListener { 32 | @Override 33 | public void exception(IOSession ioSession, Exception e) { 34 | if (e instanceof PrometheusSinkWriteException) { 35 | // Rethrow the exception 36 | throw (PrometheusSinkWriteException) e; 37 | } 38 | } 39 | 40 | @Override 41 | public void connected(IOSession ioSession) { 42 | // Nothing to do 43 | } 44 | 45 | @Override 46 | public void startTls(IOSession ioSession) { 47 | // Nothing to do 48 | } 49 | 50 | @Override 51 | public void inputReady(IOSession ioSession) { 52 | // Nothing to do 53 | } 54 | 55 | @Override 56 | public void outputReady(IOSession ioSession) { 57 | // Nothing to do 58 | } 59 | 60 | @Override 61 | public void timeout(IOSession ioSession) { 62 | // Nothing to do 63 | } 64 | 65 | @Override 66 | public void disconnected(IOSession ioSession) { 67 | // Nothing to do 68 | } 69 | } 70 | -------------------------------------------------------------------------------- 
/flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/metrics/SinkMetrics.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink.metrics; 20 | 21 | import org.apache.flink.annotation.Internal; 22 | import org.apache.flink.metrics.Counter; 23 | import org.apache.flink.metrics.MetricGroup; 24 | 25 | /** Wraps all metrics in a single class. */ 26 | @Internal 27 | public class SinkMetrics { 28 | private final Counter[] counters; 29 | 30 | private SinkMetrics(Counter[] counters) { 31 | this.counters = counters; 32 | } 33 | 34 | public void inc(SinkCounter counter, long value) { 35 | counters[counter.ordinal()].inc(value); 36 | } 37 | 38 | public void inc(SinkCounter counter) { 39 | counters[counter.ordinal()].inc(); 40 | } 41 | 42 | /** Register all custom sink metrics and return an of this wrapper class. 
*/ 43 | public static SinkMetrics registerSinkMetrics(MetricGroup metrics) { 44 | // Register all counters 45 | Counter[] counters = new Counter[SinkCounter.values().length]; 46 | for (SinkCounter metric : SinkCounter.values()) { 47 | counters[metric.ordinal()] = metrics.counter(metric.getMetricName()); 48 | } 49 | return new SinkMetrics(counters); 50 | } 51 | 52 | /** Enum defining all sink counters. */ 53 | public enum SinkCounter { 54 | 55 | /** Total number of Samples that were dropped because of causing non-retryable errors. */ 56 | NUM_SAMPLES_NON_RETRYABLE_DROPPED("numSamplesNonRetryableDropped"), 57 | 58 | /** Number of Samples dropped after reaching retry limit on retryable errors. */ 59 | NUM_SAMPLES_RETRY_LIMIT_DROPPED("numSamplesRetryLimitDropped"), 60 | 61 | /** 62 | * Total number of Samples dropped due to any reasons: retryable errors reaching retry 63 | * limit, non-retryable errors, unexpected IO errors. 64 | */ 65 | NUM_SAMPLES_DROPPED("numSamplesDropped"), 66 | 67 | /** Number of Samples successfully written to Prometheus. */ 68 | NUM_SAMPLES_OUT("numSamplesOut"), 69 | 70 | /** Number of WriteRequests successfully sent to Prometheus. */ 71 | NUM_WRITE_REQUESTS_OUT("numWriteRequestsOut"), 72 | 73 | /** Number of permanently failed WriteRequests. */ 74 | NUM_WRITE_REQUESTS_PERMANENTLY_FAILED("numWriteRequestsPermanentlyFailed"), 75 | 76 | /** Number of WriteRequests retries. 
*/ 77 | NUM_WRITE_REQUESTS_RETRIES("numWriteRequestsRetries"); 78 | 79 | private final String metricName; 80 | 81 | public String getMetricName() { 82 | return metricName; 83 | } 84 | 85 | SinkCounter(String metricName) { 86 | this.metricName = metricName; 87 | } 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/main/java/org/apache/flink/connector/prometheus/sink/metrics/SinkMetricsCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink.metrics; 20 | 21 | import org.apache.flink.annotation.Internal; 22 | 23 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_DROPPED; 24 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_NON_RETRYABLE_DROPPED; 25 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_OUT; 26 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_RETRY_LIMIT_DROPPED; 27 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_WRITE_REQUESTS_OUT; 28 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_WRITE_REQUESTS_PERMANENTLY_FAILED; 29 | 30 | /** Callback updating {@link SinkMetrics} on specific request outcomes. */ 31 | @Internal 32 | public class SinkMetricsCallback { 33 | 34 | private final SinkMetrics metrics; 35 | 36 | /** 37 | * Instantiate a callback passing the collection of metrics to mutate. 
38 | * 39 | * @param metrics collection of metrics that will be mutated 40 | */ 41 | public SinkMetricsCallback(SinkMetrics metrics) { 42 | this.metrics = metrics; 43 | } 44 | 45 | private void onFailedWriteRequest(long sampleCount) { 46 | metrics.inc(NUM_SAMPLES_DROPPED, sampleCount); 47 | metrics.inc(NUM_WRITE_REQUESTS_PERMANENTLY_FAILED); 48 | } 49 | 50 | public void onSuccessfulWriteRequest(long sampleCount) { 51 | metrics.inc(NUM_SAMPLES_OUT, sampleCount); 52 | metrics.inc(NUM_WRITE_REQUESTS_OUT); 53 | } 54 | 55 | public void onFailedWriteRequestForNonRetryableError(long sampleCount) { 56 | metrics.inc(NUM_SAMPLES_NON_RETRYABLE_DROPPED, sampleCount); 57 | onFailedWriteRequest(sampleCount); 58 | } 59 | 60 | public void onFailedWriteRequestForRetryLimitExceeded(long sampleCount) { 61 | metrics.inc(NUM_SAMPLES_RETRY_LIMIT_DROPPED, sampleCount); 62 | onFailedWriteRequest(sampleCount); 63 | } 64 | 65 | public void onFailedWriteRequestForHttpClientIoFail(long sampleCount) { 66 | onFailedWriteRequest(sampleCount); 67 | } 68 | 69 | public void onWriteRequestRetry() { 70 | metrics.inc(SinkMetrics.SinkCounter.NUM_WRITE_REQUESTS_RETRIES); 71 | } 72 | } 73 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/architecture/TestCodeArchitectureTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.architecture; 19 | 20 | import org.apache.flink.architecture.TestCodeArchitectureTestBase; 21 | import org.apache.flink.architecture.common.ImportOptions; 22 | 23 | import com.tngtech.archunit.core.importer.ImportOption; 24 | import com.tngtech.archunit.junit.AnalyzeClasses; 25 | import com.tngtech.archunit.junit.ArchTest; 26 | import com.tngtech.archunit.junit.ArchTests; 27 | 28 | /** Architecture tests for test code. */ 29 | @AnalyzeClasses( 30 | packages = "org.apache.flink.connector.prometheus", 31 | importOptions = { 32 | ImportOption.OnlyIncludeTests.class, 33 | ImportOptions.ExcludeScalaImportOption.class, 34 | ImportOptions.ExcludeShadedImportOption.class 35 | }) 36 | public class TestCodeArchitectureTest { 37 | @ArchTest 38 | public static final ArchTests COMMON_TESTS = ArchTests.in(TestCodeArchitectureTestBase.class); 39 | } 40 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/CapturingPrometheusRequestSigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import java.util.ArrayList; 21 | import java.util.List; 22 | import java.util.Map; 23 | 24 | /** 25 | * Simple implementation of {@link 26 | * org.apache.flink.connector.prometheus.sink.PrometheusRequestSigner} that allows captures the 27 | * headers to be signed and pass through everything unmodified. 28 | */ 29 | public class CapturingPrometheusRequestSigner implements PrometheusRequestSigner { 30 | 31 | private final List> requestHeadersList = new ArrayList<>(); 32 | private final List requestBodyList = new ArrayList<>(); 33 | 34 | @Override 35 | public void addSignatureHeaders(Map requestHeaders, byte[] requestBody) { 36 | requestHeadersList.add(requestHeaders); 37 | requestBodyList.add(requestBody); 38 | } 39 | 40 | public int getInvocationCount() { 41 | return requestHeadersList.size(); 42 | } 43 | 44 | public Map getRequestHeadersAtInvocationCount(int invocationCount) { 45 | if (invocationCount <= requestHeadersList.size()) { 46 | return requestHeadersList.get(invocationCount - 1); 47 | } else { 48 | return null; 49 | } 50 | } 51 | 52 | public byte[] getRequestBodyAtInvocationCount(int invocationCount) { 53 | if (invocationCount <= requestBodyList.size()) { 54 | return requestBodyList.get(invocationCount - 1); 55 | } else { 56 | return null; 57 | } 58 | } 59 | } 60 | 
-------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/DummyPrometheusRequestSigner.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink; 20 | 21 | import java.util.Map; 22 | 23 | /** Dummy implementation of {@link PrometheusRequestSigner} that does nothing. */ 24 | public class DummyPrometheusRequestSigner implements PrometheusRequestSigner { 25 | @Override 26 | public void addSignatureHeaders(Map requestHeaders, byte[] requestBody) { 27 | /* Do Nothing */ 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/HttpResponseCallbackTestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. 
See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 21 | 22 | import org.junit.jupiter.api.Assertions; 23 | 24 | import java.util.List; 25 | import java.util.function.Consumer; 26 | 27 | import static org.junit.jupiter.api.Assertions.assertEquals; 28 | import static org.junit.jupiter.api.Assertions.assertNull; 29 | import static org.junit.jupiter.api.Assertions.assertTrue; 30 | 31 | /** 32 | * Utilities for test involving the {@link 33 | * org.apache.flink.connector.prometheus.sink.HttpResponseCallback}. 
34 | */ 35 | public class HttpResponseCallbackTestUtils { 36 | public static Consumer> getRequestResult( 37 | List requeuedResults) { 38 | return requeuedResults::addAll; 39 | } 40 | 41 | public static void assertNoReQueuedResult(List emittedResults) { 42 | assertTrue( 43 | emittedResults.isEmpty(), 44 | emittedResults.size() + " results were re-queued, but none was expected"); 45 | } 46 | 47 | public static void assertCallbackCompletedOnceWithNoException( 48 | VerifyableResponseCallback callback) { 49 | int actualCompletionCount = callback.getCompletedResponsesCount(); 50 | assertEquals( 51 | 1, 52 | actualCompletionCount, 53 | "The callback was completed " 54 | + actualCompletionCount 55 | + " times, but once was expected"); 56 | 57 | Exception exceptionThrown = callback.getThrownExceptionAtInvocationCount(1); 58 | assertNull(exceptionThrown, "An exception was thrown on completed, but none was expected"); 59 | } 60 | 61 | public static void assertCallbackCompletedOnceWithException( 62 | Class expectedExceptionClass, 63 | VerifyableResponseCallback callback) { 64 | Exception thrownException = callback.getThrownExceptionAtInvocationCount(1); 65 | Assertions.assertNotNull( 66 | thrownException, "Exception on complete was expected, but none was thrown"); 67 | assertTrue( 68 | thrownException.getClass().isAssignableFrom(expectedExceptionClass), 69 | "Unexpected exception type thrown on completed"); 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/HttpTestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; 21 | import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; 22 | import org.apache.hc.client5.http.async.methods.SimpleRequestBuilder; 23 | import org.apache.hc.core5.http.ContentType; 24 | import org.apache.hc.core5.net.URIBuilder; 25 | 26 | import java.net.URISyntaxException; 27 | 28 | /** Utilities for http request handling tests. */ 29 | public class HttpTestUtils { 30 | public static String buildRequestUrl(WireMockRuntimeInfo wmRuntimeInfo) 31 | throws URISyntaxException { 32 | return new URIBuilder(wmRuntimeInfo.getHttpBaseUrl()) 33 | .setPath("/remote_write") 34 | .setPort(wmRuntimeInfo.getHttpPort()) 35 | .build() 36 | .toString(); 37 | } 38 | 39 | public static SimpleHttpRequest buildPostRequest(String requestUrl) { 40 | return SimpleRequestBuilder.post() 41 | .setUri(requestUrl) 42 | .setBody("N/A", ContentType.DEFAULT_BINARY) 43 | .build(); 44 | } 45 | 46 | /** 47 | * Create a {@link PrometheusSinkConfiguration.RetryConfiguration} with the fastest possible 48 | * retry and the specified max retry count. 
49 | */ 50 | public static PrometheusSinkConfiguration.RetryConfiguration fastRetryConfiguration( 51 | int maxRetryCount) { 52 | return PrometheusSinkConfiguration.RetryConfiguration.builder() 53 | .setInitialRetryDelayMS(1) 54 | .setMaxRetryDelayMS(1) 55 | .setMaxRetryCount(maxRetryCount) 56 | .build(); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/InspectableMetricGroup.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.metrics.CharacterFilter; 21 | import org.apache.flink.metrics.Counter; 22 | import org.apache.flink.metrics.Gauge; 23 | import org.apache.flink.metrics.Histogram; 24 | import org.apache.flink.metrics.Meter; 25 | import org.apache.flink.metrics.MetricGroup; 26 | import org.apache.flink.metrics.SimpleCounter; 27 | 28 | import java.util.Collections; 29 | import java.util.HashMap; 30 | import java.util.Map; 31 | 32 | /** 33 | * Test implementation of MetricGroup that allows inspecting the value of a metrics. The current 34 | * implementation only supports Counters. 35 | */ 36 | public class InspectableMetricGroup implements MetricGroup { 37 | 38 | private final Map counters = new HashMap<>(); 39 | 40 | @Override 41 | public Counter counter(String name) { 42 | Counter counter = new SimpleCounter(); 43 | counters.put(name, counter); 44 | return counter; 45 | } 46 | 47 | @Override 48 | public C counter(String name, C counter) { 49 | counters.put(name, counter); 50 | return counter; 51 | } 52 | 53 | public long getCounterCount(String name) { 54 | if (counters.containsKey(name)) { 55 | return counters.get(name).getCount(); 56 | } else { 57 | return 0L; 58 | } 59 | } 60 | 61 | @Override 62 | public > G gauge(String name, G gauge) { 63 | throw new UnsupportedOperationException("Gauges are not supported by this metric group"); 64 | } 65 | 66 | @Override 67 | public H histogram(String name, H histogram) { 68 | throw new UnsupportedOperationException("Histograms not supported by this metric group"); 69 | } 70 | 71 | @Override 72 | public M meter(String name, M meter) { 73 | throw new UnsupportedOperationException("Meters not supported by this metric group"); 74 | } 75 | 76 | @Override 77 | public MetricGroup addGroup(String s) { 78 | return new InspectableMetricGroup(); 79 | } 80 | 81 | @Override 82 | public MetricGroup addGroup(String s, String s1) { 83 | return new 
InspectableMetricGroup(); 84 | } 85 | 86 | @Override 87 | public String[] getScopeComponents() { 88 | return new String[0]; 89 | } 90 | 91 | @Override 92 | public Map getAllVariables() { 93 | return Collections.emptyMap(); 94 | } 95 | 96 | @Override 97 | public String getMetricIdentifier(String metricName) { 98 | return metricName; 99 | } 100 | 101 | @Override 102 | public String getMetricIdentifier(String metricName, CharacterFilter filter) { 103 | return metricName; 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/InspectableMetricGroupAssertions.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics; 21 | 22 | import org.junit.jupiter.api.Assertions; 23 | 24 | /** Assertions to verify whether metrics of a {@link InspectableMetricGroup} has been modified. 
*/ 25 | public class InspectableMetricGroupAssertions { 26 | public static void assertCounterWasIncremented( 27 | InspectableMetricGroup metricGroup, SinkMetrics.SinkCounter sinkCounter) { 28 | String counterName = sinkCounter.getMetricName(); 29 | Assertions.assertTrue( 30 | metricGroup.getCounterCount(counterName) > 0, 31 | "The counter " + counterName + " has not been incremented"); 32 | } 33 | 34 | public static void assertCounterCount( 35 | long expectedCounterCount, 36 | InspectableMetricGroup metricGroup, 37 | SinkMetrics.SinkCounter sinkCounter) { 38 | String counterName = sinkCounter.getMetricName(); 39 | long actualCounterCount = metricGroup.getCounterCount(counterName); 40 | Assertions.assertEquals( 41 | expectedCounterCount, 42 | actualCounterCount, 43 | "The counter " 44 | + counterName 45 | + " was expected to be " 46 | + expectedCounterCount 47 | + " but was " 48 | + actualCounterCount); 49 | } 50 | 51 | public static void assertCounterWasNotIncremented( 52 | InspectableMetricGroup metricGroup, SinkMetrics.SinkCounter sinkCounter) { 53 | String counterName = sinkCounter.getMetricName(); 54 | Assertions.assertTrue( 55 | metricGroup.getCounterCount(counterName) == 0, 56 | "The counter " + counterName + " has been incremented"); 57 | } 58 | 59 | public static void assertCountersWereNotIncremented( 60 | InspectableMetricGroup metricGroup, SinkMetrics.SinkCounter... counters) { 61 | for (SinkMetrics.SinkCounter counter : counters) { 62 | assertCounterWasNotIncremented(metricGroup, counter); 63 | } 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusRemoteWriteHttpRequestBuilderTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. 
See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; 21 | import org.apache.hc.core5.http.HttpHeaders; 22 | import org.junit.jupiter.api.Test; 23 | 24 | import java.util.Map; 25 | 26 | import static org.junit.jupiter.api.Assertions.assertEquals; 27 | 28 | class PrometheusRemoteWriteHttpRequestBuilderTest { 29 | 30 | private static final String ENDPOINT = "/anything"; 31 | private static final byte[] REQUEST_BODY = {(byte) 0x01}; 32 | private static final String USER_AGENT = "MY-USER-AGENT"; 33 | 34 | @Test 35 | void shouldAddContentEncodingHeader() { 36 | PrometheusRemoteWriteHttpRequestBuilder sut = 37 | new PrometheusRemoteWriteHttpRequestBuilder(ENDPOINT, null, USER_AGENT); 38 | SimpleHttpRequest request = sut.buildHttpRequest(REQUEST_BODY); 39 | assertEquals("snappy", request.getHeaders(HttpHeaders.CONTENT_ENCODING)[0].getValue()); 40 | } 41 | 42 | @Test 43 | void shouldAddPrometheusRemoteWriteVersionHeader() { 44 | PrometheusRemoteWriteHttpRequestBuilder sut = 45 | new PrometheusRemoteWriteHttpRequestBuilder(ENDPOINT, null, USER_AGENT); 46 | SimpleHttpRequest request = sut.buildHttpRequest(REQUEST_BODY); 47 | assertEquals( 48 | "0.1.0", 
request.getHeaders("X-Prometheus-Remote-Write-Version")[0].getValue()); 49 | } 50 | 51 | @Test 52 | void shouldAddUserAgent() { 53 | PrometheusRemoteWriteHttpRequestBuilder sut = 54 | new PrometheusRemoteWriteHttpRequestBuilder(ENDPOINT, null, USER_AGENT); 55 | SimpleHttpRequest request = sut.buildHttpRequest(REQUEST_BODY); 56 | assertEquals(1, request.getHeaders(HttpHeaders.USER_AGENT).length); 57 | assertEquals(USER_AGENT, request.getHeaders(HttpHeaders.USER_AGENT)[0].getValue()); 58 | } 59 | 60 | @Test 61 | void shouldInvokeRequestSignerPassingAMutableMap() { 62 | CapturingPrometheusRequestSigner signer = new CapturingPrometheusRequestSigner(); 63 | 64 | PrometheusRemoteWriteHttpRequestBuilder sut = 65 | new PrometheusRemoteWriteHttpRequestBuilder(ENDPOINT, signer, USER_AGENT); 66 | 67 | sut.buildHttpRequest(REQUEST_BODY); 68 | 69 | // Verify the signer was invoked once 70 | assertEquals(1, signer.getInvocationCount()); 71 | 72 | // Verify the header Map of headers passed to the signer was actually mutable 73 | Map capturedRequestHeaders = signer.getRequestHeadersAtInvocationCount(1); 74 | capturedRequestHeaders.put("foo", "bar"); 75 | assertEquals("bar", capturedRequestHeaders.get("foo")); 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusSinkBuilderTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import static org.junit.jupiter.api.Assertions.assertNotNull; 23 | import static org.junit.jupiter.api.Assertions.assertThrows; 24 | 25 | class PrometheusSinkBuilderTest { 26 | private static final String ENDPOINT = "http://example.com:123/something"; 27 | 28 | @Test 29 | void shouldBuildSinkOnlyProvidingPrometheusRemoteWriteUrl() { 30 | PrometheusSink sink = 31 | (PrometheusSink) 32 | new PrometheusSinkBuilder().setPrometheusRemoteWriteUrl(ENDPOINT).build(); 33 | assertNotNull(sink); 34 | } 35 | 36 | @Test 37 | void shouldBuildSinkProvidingAllFields() { 38 | PrometheusSink sink = 39 | (PrometheusSink) 40 | new PrometheusSinkBuilder() 41 | .setPrometheusRemoteWriteUrl(ENDPOINT) 42 | .setMaxBatchSizeInSamples(500) 43 | .setMaxRecordSizeInSamples(500) 44 | .setMaxTimeInBufferMS(5000) 45 | .setRetryConfiguration( 46 | PrometheusSinkConfiguration.RetryConfiguration 47 | .DEFAULT_RETRY_CONFIGURATION) 48 | .setSocketTimeoutMs(1000) 49 | .setRequestSigner(new DummyPrometheusRequestSigner()) 50 | .setHttpUserAgent("test") 51 | .setErrorHandlingBehaviorConfiguration( 52 | PrometheusSinkConfiguration 53 | .SinkWriterErrorHandlingBehaviorConfiguration 54 | .builder() 55 | .onMaxRetryExceeded( 56 | PrometheusSinkConfiguration.OnErrorBehavior 57 | .FAIL) 58 | .onPrometheusNonRetryableError( 59 | PrometheusSinkConfiguration.OnErrorBehavior 60 | .DISCARD_AND_CONTINUE) 61 | .build()) 62 | .setMetricGroupName("test") 63 | 
.build(); 64 | assertNotNull(sink); 65 | } 66 | 67 | @Test 68 | void shouldFailIfPrometheusRemoteWriteUrlIsMissing() { 69 | assertThrows(IllegalArgumentException.class, () -> new PrometheusSinkBuilder().build()); 70 | } 71 | 72 | @Test 73 | void shouldFailIfPrometheusRemoteWriteUrlIsInvalid() { 74 | assertThrows( 75 | IllegalArgumentException.class, 76 | () -> new PrometheusSinkBuilder().setPrometheusRemoteWriteUrl("invalid").build()); 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusSinkSerializationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import static org.apache.flink.util.InstantiationUtil.isSerializable; 23 | import static org.junit.jupiter.api.Assertions.assertTrue; 24 | 25 | public class PrometheusSinkSerializationTest { 26 | 27 | @Test 28 | void shouldBeActuallySerializable() { 29 | PrometheusSink sink = 30 | (PrometheusSink) 31 | PrometheusSink.builder() 32 | .setPrometheusRemoteWriteUrl("http://example.com/endpoint") 33 | .build(); 34 | 35 | assertTrue(isSerializable(sink), "The sink object should be serializable"); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusSinkWriterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink; 20 | 21 | import org.apache.flink.connector.base.sink.writer.ElementConverter; 22 | import org.apache.flink.connector.base.sink.writer.TestSinkInitContext; 23 | import org.apache.flink.connector.prometheus.sink.http.PrometheusAsyncHttpClientBuilder; 24 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 25 | 26 | import org.apache.hc.core5.http.HttpHeaders; 27 | import org.junit.jupiter.api.Test; 28 | 29 | import static org.assertj.core.api.AssertionsForClassTypes.assertThat; 30 | 31 | class PrometheusSinkWriterTest { 32 | 33 | private static final int MAX_IN_FLIGHT_REQUESTS = 1; 34 | private static final int MAX_BUFFERED_REQUESTS = 512; 35 | private static final int MAX_BATCH_SIZE_IN_SAMPLES = 97; 36 | private static final int MAX_RECORD_SIZE_IN_SAMPLES = 31; 37 | private static final long MAX_TIME_IN_BUFFER_MS = 513; 38 | private static final String PROMETHEUS_REMOTE_WRITE_URL = "https://foo.bar/baz"; 39 | 40 | private static final ElementConverter 41 | ELEMENT_CONVERTER = new PrometheusTimeSeriesConverter(); 42 | private static final PrometheusAsyncHttpClientBuilder CLIENT_BUILDER = 43 | new PrometheusAsyncHttpClientBuilder( 44 | PrometheusSinkConfiguration.RetryConfiguration.builder().build()); 45 | private static final PrometheusRequestSigner REQUEST_SIGNER = 46 | new DummyPrometheusRequestSigner(); 47 | private static final String HTTP_USER_AGENT = "Test-User-Agent"; 48 | private static final PrometheusSinkConfiguration.SinkWriterErrorHandlingBehaviorConfiguration 49 | ERROR_HANDLING_BEHAVIOR_CONFIGURATION = 50 | PrometheusSinkConfiguration.SinkWriterErrorHandlingBehaviorConfiguration 51 | .DEFAULT_BEHAVIORS; 52 | private static final String METRIC_GROUP_NAME = "test-group"; 53 | 54 | @Test 55 | void testInizializeAsyncSinkBaseParameters() throws Exception { 56 | TestSinkInitContext sinkInitContext = new TestSinkInitContext(); 57 | 58 | PrometheusSink sink = 59 | new 
PrometheusSink( 60 | ELEMENT_CONVERTER, 61 | MAX_IN_FLIGHT_REQUESTS, 62 | MAX_BUFFERED_REQUESTS, 63 | MAX_BATCH_SIZE_IN_SAMPLES, 64 | MAX_RECORD_SIZE_IN_SAMPLES, 65 | MAX_TIME_IN_BUFFER_MS, 66 | PROMETHEUS_REMOTE_WRITE_URL, 67 | CLIENT_BUILDER, 68 | REQUEST_SIGNER, 69 | HTTP_USER_AGENT, 70 | ERROR_HANDLING_BEHAVIOR_CONFIGURATION, 71 | METRIC_GROUP_NAME); 72 | 73 | PrometheusSinkWriter sinkWriter = (PrometheusSinkWriter) sink.createWriter(sinkInitContext); 74 | 75 | assertThat(sinkWriter).extracting("maxBatchSize").isEqualTo(MAX_BATCH_SIZE_IN_SAMPLES); 76 | assertThat(sinkWriter) 77 | .extracting("maxBatchSizeInBytes") 78 | .isEqualTo((long) MAX_BATCH_SIZE_IN_SAMPLES); 79 | 80 | assertThat(sinkWriter).extracting("maxBufferedRequests").isEqualTo(MAX_BUFFERED_REQUESTS); 81 | assertThat(sinkWriter) 82 | .extracting("maxRecordSizeInBytes") 83 | .isEqualTo((long) MAX_RECORD_SIZE_IN_SAMPLES); 84 | assertThat(sinkWriter).extracting("maxTimeInBufferMS").isEqualTo(MAX_TIME_IN_BUFFER_MS); 85 | 86 | assertThat(sinkWriter) 87 | .extracting("requestBuilder") 88 | .extracting("prometheusRemoteWriteUrl") 89 | .isEqualTo(PROMETHEUS_REMOTE_WRITE_URL); 90 | assertThat(sinkWriter) 91 | .extracting("requestBuilder") 92 | .extracting("requestSigner") 93 | .isEqualTo(REQUEST_SIGNER); 94 | assertThat(sinkWriter) 95 | .extracting("requestBuilder") 96 | .extracting("fixedHeaders") 97 | .extracting(HttpHeaders.USER_AGENT) 98 | .isEqualTo(HTTP_USER_AGENT); 99 | 100 | assertThat(sinkWriter) 101 | .extracting("errorHandlingBehaviorConfig") 102 | .isEqualTo(ERROR_HANDLING_BEHAVIOR_CONFIGURATION); 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusStateSerializerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor 
license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.connector.base.sink.writer.BufferedRequestState; 21 | import org.apache.flink.connector.base.sink.writer.ElementConverter; 22 | import org.apache.flink.connector.base.sink.writer.RequestEntryWrapper; 23 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 24 | 25 | import org.assertj.core.api.Assertions; 26 | import org.junit.jupiter.api.Test; 27 | 28 | import java.io.IOException; 29 | import java.util.List; 30 | import java.util.stream.Collectors; 31 | import java.util.stream.IntStream; 32 | 33 | import static org.junit.jupiter.api.Assertions.assertEquals; 34 | 35 | class PrometheusStateSerializerTest { 36 | 37 | private static final ElementConverter 38 | ELEMENT_CONVERTER = new PrometheusTimeSeriesConverter(); 39 | 40 | private static PrometheusTimeSeries getTestTimeSeries(int i) { 41 | return PrometheusTimeSeries.builder() 42 | .withMetricName("metric-name") 43 | .addLabel("dimensionA", "value-" + i) 44 | .addSample(i + 42.0, i + 1L) 45 | .addSample(i + 3.14, i + 2L) 46 | .build(); 47 | } 48 | 49 | /** 50 | * This method uses the same implementation as PrometheusSinkWriter.getSizeInBytes() to extract 51 | * the 
requestEntry "size" (i.e. the number of Samples). This is the "size" used in 52 | * RequestEntryWrapper. 53 | * 54 | *

See 55 | * https://github.com/apache/flink/blob/69e812688b43be9a0c4f79e6af81bc2d1d8a873e/flink-connectors/flink-connector-base/src/main/java/org/apache/flink/connector/base/sink/writer/AsyncSinkWriterStateSerializer.java#L60 56 | */ 57 | private static int getRequestSize(Types.TimeSeries requestEntry) { 58 | return requestEntry.getSamplesCount(); 59 | } 60 | 61 | private static BufferedRequestState getTestState() { 62 | return new BufferedRequestState<>( 63 | IntStream.range(0, 10) 64 | .mapToObj(PrometheusStateSerializerTest::getTestTimeSeries) 65 | .map((element) -> ELEMENT_CONVERTER.apply(element, null)) 66 | .map( 67 | (requestEntry) -> 68 | new RequestEntryWrapper<>( 69 | requestEntry, getRequestSize(requestEntry))) 70 | .collect(Collectors.toList())); 71 | } 72 | 73 | private void assertThatBufferStatesAreEqual( 74 | BufferedRequestState actualBuffer, 75 | BufferedRequestState expectedBuffer) { 76 | Assertions.assertThat(actualBuffer.getStateSize()).isEqualTo(expectedBuffer.getStateSize()); 77 | int actualLength = actualBuffer.getBufferedRequestEntries().size(); 78 | Assertions.assertThat(actualLength) 79 | .isEqualTo(expectedBuffer.getBufferedRequestEntries().size()); 80 | List> actualRequestEntries = 81 | actualBuffer.getBufferedRequestEntries(); 82 | List> expectedRequestEntries = 83 | expectedBuffer.getBufferedRequestEntries(); 84 | 85 | for (int i = 0; i < actualLength; i++) { 86 | // Protobuf-generated objects like Types.TimeSeries implements equals() with all nested 87 | // objects 88 | Assertions.assertThat(actualRequestEntries.get(i).getRequestEntry()) 89 | .isEqualTo(expectedRequestEntries.get(i).getRequestEntry()); 90 | Assertions.assertThat(actualRequestEntries.get(i).getSize()) 91 | .isEqualTo(expectedRequestEntries.get(i).getSize()); 92 | } 93 | } 94 | 95 | @Test 96 | void testSerializeDeserialize() throws IOException { 97 | BufferedRequestState expectedState = getTestState(); 98 | PrometheusStateSerializer serializer = new 
PrometheusStateSerializer(); 99 | 100 | byte[] serializedExpectedState = serializer.serialize(expectedState); 101 | BufferedRequestState actualState = 102 | serializer.deserialize(1, serializedExpectedState); 103 | assertThatBufferStatesAreEqual(actualState, expectedState); 104 | } 105 | 106 | @Test 107 | public void testVersion() { 108 | PrometheusStateSerializer serializer = new PrometheusStateSerializer(); 109 | assertEquals(1, serializer.getVersion()); 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusTimeSeriesConverterTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.flink.api.connector.sink2.SinkWriter; 21 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 22 | 23 | import org.junit.jupiter.api.Test; 24 | 25 | import static org.junit.jupiter.api.Assertions.assertEquals; 26 | 27 | class PrometheusTimeSeriesConverterTest { 28 | 29 | private PrometheusTimeSeriesConverter sut = new PrometheusTimeSeriesConverter(); 30 | 31 | private SinkWriter.Context dummyContext = 32 | new SinkWriter.Context() { 33 | @Override 34 | public long currentWatermark() { 35 | return 0L; 36 | } 37 | 38 | @Override 39 | public Long timestamp() { 40 | return null; 41 | } 42 | }; 43 | 44 | @Test 45 | public void testMetricNameLabel() { 46 | PrometheusTimeSeries input = 47 | PrometheusTimeSeries.builder() 48 | .withMetricName("metric-1") 49 | .addSample(42.0, 1L) 50 | .build(); 51 | 52 | Types.TimeSeries requestEntry = sut.apply(input, dummyContext); 53 | 54 | assertEquals(1, requestEntry.getLabelsList().size()); 55 | 56 | Types.Label firstLabel = requestEntry.getLabelsList().get(0); 57 | assertEquals("__name__", firstLabel.getName()); 58 | assertEquals("metric-1", firstLabel.getValue()); 59 | } 60 | 61 | @Test 62 | public void testAdditionalLabels() { 63 | PrometheusTimeSeries input = 64 | PrometheusTimeSeries.builder() 65 | .withMetricName("metric-1") 66 | .addLabel("dimensionA", "value-A") 67 | .addLabel("dimensionB", "value-B") 68 | .addSample(42, 1L) 69 | .build(); 70 | 71 | Types.TimeSeries requestEntry = sut.apply(input, dummyContext); 72 | 73 | assertEquals(3, requestEntry.getLabelsList().size()); 74 | 75 | Types.Label secondLabel = requestEntry.getLabelsList().get(1); 76 | assertEquals("dimensionA", secondLabel.getName()); 77 | assertEquals("value-A", secondLabel.getValue()); 78 | 79 | Types.Label thirdLabel = requestEntry.getLabelsList().get(2); 80 | assertEquals("dimensionB", thirdLabel.getName()); 81 | assertEquals("value-B", 
thirdLabel.getValue()); 82 | } 83 | 84 | @Test 85 | public void testSamples() { 86 | PrometheusTimeSeries input = 87 | PrometheusTimeSeries.builder() 88 | .withMetricName("metric-1") 89 | .addSample(42.0, 1L) 90 | .addSample(3.14, 2L) 91 | .build(); 92 | 93 | Types.TimeSeries requestEntry = sut.apply(input, dummyContext); 94 | 95 | assertEquals(2, requestEntry.getSamplesList().size()); 96 | 97 | Types.Sample firstSample = requestEntry.getSamplesList().get(0); 98 | assertEquals(42.0d, firstSample.getValue()); 99 | assertEquals(1L, firstSample.getTimestamp()); 100 | 101 | Types.Sample secondSample = requestEntry.getSamplesList().get(1); 102 | assertEquals(3.14d, secondSample.getValue()); 103 | assertEquals(2L, secondSample.getTimestamp()); 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusTimeSeriesLabelsAndMetricNameKeySelectorTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import static org.junit.jupiter.api.Assertions.assertEquals; 23 | import static org.junit.jupiter.api.Assertions.assertNotEquals; 24 | 25 | class PrometheusTimeSeriesLabelsAndMetricNameKeySelectorTest { 26 | 27 | private PrometheusTimeSeriesLabelsAndMetricNameKeySelector selector = 28 | new PrometheusTimeSeriesLabelsAndMetricNameKeySelector(); 29 | 30 | @Test 31 | void timeSeriesWithSameLabelsAndMetricNameShouldHaveSameKey() throws Exception { 32 | PrometheusTimeSeries ts1 = 33 | PrometheusTimeSeries.builder() 34 | .withMetricName("metric1") 35 | .addLabel("label1", "value1") 36 | .addLabel("label2", "value2") 37 | .addSample(42, 1L) 38 | .addSample(3.14, 2L) 39 | .build(); 40 | 41 | PrometheusTimeSeries ts2 = 42 | PrometheusTimeSeries.builder() 43 | .withMetricName("metric1") 44 | .addLabel("label1", "value1") 45 | .addLabel("label2", "value2") 46 | .addSample(57, 1L) 47 | .addSample(123, 2L) 48 | .build(); 49 | 50 | assertEquals(selector.getKey(ts1), selector.getKey(ts2)); 51 | } 52 | 53 | @Test 54 | void timeSeriesWithDifferentLabelValuesAndSameMetricNameShouldHaveDifferentKey() 55 | throws Exception { 56 | PrometheusTimeSeries ts1 = 57 | PrometheusTimeSeries.builder() 58 | .withMetricName("metric1") 59 | .addLabel("label1", "valueX") 60 | .addLabel("label2", "valueY") 61 | .addSample(42, 1L) 62 | .addSample(3.14, 2L) 63 | .build(); 64 | 65 | PrometheusTimeSeries ts2 = 66 | PrometheusTimeSeries.builder() 67 | .withMetricName("metric1") 68 | .addLabel("label1", "value1") 69 | .addLabel("label2", "value2") 70 | .addSample(42, 1L) 71 | .addSample(3.14, 2L) 72 | .build(); 73 | 74 | assertNotEquals(selector.getKey(ts1), selector.getKey(ts2)); 75 | } 76 | 77 | @Test 78 | void timeSeriesWithSameLabelsAndDifferentMetricNameShouldHaveDifferentKey() throws Exception { 79 | PrometheusTimeSeries ts1 = 80 | PrometheusTimeSeries.builder() 81 | 
.withMetricName("metric1") 82 | .addLabel("label1", "value1") 83 | .addLabel("label2", "value2") 84 | .addSample(42, 1L) 85 | .addSample(3.14, 2L) 86 | .build(); 87 | 88 | PrometheusTimeSeries ts2 = 89 | PrometheusTimeSeries.builder() 90 | .withMetricName("metric2") 91 | .addLabel("label1", "value1") 92 | .addLabel("label2", "value2") 93 | .addSample(42, 1L) 94 | .addSample(3.14, 2L) 95 | .build(); 96 | 97 | assertNotEquals(selector.getKey(ts1), selector.getKey(ts2)); 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/PrometheusTimeSeriesTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import static org.junit.jupiter.api.Assertions.assertEquals; 23 | import static org.junit.jupiter.api.Assertions.assertFalse; 24 | import static org.junit.jupiter.api.Assertions.assertNotEquals; 25 | import static org.junit.jupiter.api.Assertions.assertTrue; 26 | 27 | class PrometheusTimeSeriesTest { 28 | 29 | private static PrometheusTimeSeries aTimeSeries() { 30 | return PrometheusTimeSeries.builder() 31 | .withMetricName("MyMetricName") 32 | .addLabel("Label1", "Value1") 33 | .addLabel("Label2", "Value2") 34 | .addSample(0.1, 1L) 35 | .addSample(0.2, 2L) 36 | .build(); 37 | } 38 | 39 | private static PrometheusTimeSeries aDifferentTimeSeries() { 40 | return PrometheusTimeSeries.builder() 41 | .withMetricName("AnotherMetricName") 42 | .addLabel("Label3", "Value3") 43 | .addLabel("Label4", "Value4") 44 | .addSample(0.2, 3L) 45 | .addSample(0.7, 4L) 46 | .build(); 47 | } 48 | 49 | @Test 50 | public void testEqualsReflexivity() { 51 | PrometheusTimeSeries ts1 = aTimeSeries(); 52 | 53 | assertTrue(ts1.equals(ts1)); 54 | } 55 | 56 | @Test 57 | public void testEqualsSymmetry() { 58 | PrometheusTimeSeries ts1 = aTimeSeries(); 59 | PrometheusTimeSeries ts2 = aTimeSeries(); 60 | 61 | assertTrue(ts1.equals(ts2) && ts2.equals(ts1)); 62 | } 63 | 64 | @Test 65 | public void testEqualsTransitivity() { 66 | PrometheusTimeSeries ts1 = aTimeSeries(); 67 | PrometheusTimeSeries ts2 = aTimeSeries(); 68 | PrometheusTimeSeries ts3 = aTimeSeries(); 69 | 70 | assertTrue(ts1.equals(ts2) && ts2.equals(ts3) && ts1.equals(ts3)); 71 | } 72 | 73 | @Test 74 | public void testEqualsConsistency() { 75 | PrometheusTimeSeries ts1 = aTimeSeries(); 76 | PrometheusTimeSeries ts2 = aTimeSeries(); 77 | 78 | assertTrue(ts1.equals(ts2) == ts1.equals(ts2)); 79 | } 80 | 81 | @Test 82 | public void testEqualsNullComparison() { 83 | PrometheusTimeSeries ts1 = aTimeSeries(); 84 | 85 | 
assertFalse(ts1.equals(null)); 86 | } 87 | 88 | @Test 89 | public void testHashCodeConsistency() { 90 | PrometheusTimeSeries ts1 = aTimeSeries(); 91 | int hashCode = ts1.hashCode(); 92 | 93 | assertEquals(hashCode, ts1.hashCode()); 94 | } 95 | 96 | @Test 97 | public void testEqualObjectsHaveEqualHashCodes() { 98 | PrometheusTimeSeries ts1 = aTimeSeries(); 99 | PrometheusTimeSeries ts2 = aTimeSeries(); 100 | 101 | assertTrue(ts1.equals(ts2)); 102 | assertEquals(ts1.hashCode(), ts2.hashCode()); 103 | } 104 | 105 | @Test 106 | public void testUnequalObjectsHaveDifferentHashCodes() { 107 | PrometheusTimeSeries ts1 = aTimeSeries(); 108 | PrometheusTimeSeries ts2 = aDifferentTimeSeries(); 109 | 110 | assertFalse(ts1.equals(ts2)); 111 | assertNotEquals(ts1.hashCode(), ts2.hashCode()); 112 | } 113 | 114 | @Test 115 | public void testBuilder() { 116 | 117 | PrometheusTimeSeries ts = 118 | PrometheusTimeSeries.builder() 119 | .withMetricName("MyMetricName") 120 | .addLabel("Label1", "Value1") 121 | .addLabel("Label2", "Value2") 122 | .addSample(0.1, 1L) 123 | .addSample(0.2, 2L) 124 | .addSample(0.3, 3L) 125 | .build(); 126 | 127 | assertEquals("MyMetricName", ts.getMetricName()); 128 | assertEquals(2, ts.getLabels().length); 129 | assertLabelMatches("Label1", "Value1", ts.getLabels()[0]); 130 | assertLabelMatches("Label2", "Value2", ts.getLabels()[1]); 131 | assertEquals(3, ts.getSamples().length); 132 | assertSampleMatches(0.1, 1L, ts.getSamples()[0]); 133 | assertSampleMatches(0.2, 2L, ts.getSamples()[1]); 134 | assertSampleMatches(0.3, 3L, ts.getSamples()[2]); 135 | } 136 | 137 | private static void assertLabelMatches( 138 | String expectedLabelName, 139 | String expectedLabelValue, 140 | PrometheusTimeSeries.Label actual) { 141 | assertEquals(expectedLabelName, actual.getName()); 142 | assertEquals(expectedLabelValue, actual.getValue()); 143 | } 144 | 145 | private static void assertSampleMatches( 146 | double expectedValue, long expectedTimestamp, 
PrometheusTimeSeries.Sample actual) { 147 | assertEquals(expectedValue, actual.getValue()); 148 | assertEquals(expectedTimestamp, actual.getTimestamp()); 149 | } 150 | } 151 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/RequestEntrySizeUtilsTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 
17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink; 20 | 21 | import org.apache.flink.connector.prometheus.sink.prometheus.Types; 22 | 23 | import org.junit.jupiter.api.Test; 24 | 25 | import java.util.ArrayList; 26 | import java.util.Collections; 27 | import java.util.List; 28 | 29 | import static org.junit.jupiter.api.Assertions.assertEquals; 30 | 31 | class RequestEntrySizeUtilsTest { 32 | 33 | private static Types.Sample.Builder aSample(double value, long value1) { 34 | return Types.Sample.newBuilder().setValue(value).setTimestamp(value1); 35 | } 36 | 37 | private static Types.Label aLabel(String labelName, String labelValue) { 38 | return Types.Label.newBuilder().setName(labelName).setValue(labelValue).build(); 39 | } 40 | 41 | private static Types.TimeSeries aTimeSeriesWith2Samples() { 42 | return Types.TimeSeries.newBuilder() 43 | .addSamples(aSample(0.1, 1L)) 44 | .addSamples(aSample(0.2, 2L)) 45 | .addLabels(aLabel("L1", "V1")) 46 | .build(); 47 | } 48 | 49 | @Test 50 | void countSamples() { 51 | List entries = 52 | new ArrayList() { 53 | { 54 | add(aTimeSeriesWith2Samples()); 55 | add(aTimeSeriesWith2Samples()); 56 | add(aTimeSeriesWith2Samples()); 57 | } 58 | }; 59 | 60 | long count = RequestEntrySizeUtils.countSamples(entries); 61 | assertEquals(6, count); 62 | } 63 | 64 | @Test 65 | void countSamplesOfEmptyList() { 66 | List entries = Collections.emptyList(); 67 | 68 | long count = RequestEntrySizeUtils.countSamples(entries); 69 | assertEquals(0, count); 70 | } 71 | 72 | @Test 73 | void requestSizeForBatching() { 74 | Types.TimeSeries ts = aTimeSeriesWith2Samples(); 75 | 76 | long sampleCount = RequestEntrySizeUtils.requestSizeForBatching(ts); 77 | assertEquals(2, sampleCount); 78 | } 79 | 80 | @Test 81 | void requestSerializedSize() { 82 | Types.TimeSeries ts = aTimeSeriesWith2Samples(); 83 | long serializedSize = RequestEntrySizeUtils.requestSerializedSize(ts); 84 | int protobufSerializedSize = ts.getSerializedSize(); 85 | 86 | 
assertEquals(protobufSerializedSize, (int) serializedSize); 87 | } 88 | } 89 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/SinkWriterErrorHandlingBehaviorConfigurationTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.junit.jupiter.api.Test; 21 | 22 | import static org.junit.jupiter.api.Assertions.assertEquals; 23 | import static org.junit.jupiter.api.Assertions.assertThrows; 24 | 25 | class SinkWriterErrorHandlingBehaviorConfigurationTest { 26 | 27 | private static final PrometheusSinkConfiguration.SinkWriterErrorHandlingBehaviorConfiguration 28 | DEFAULT_CONFIG = 29 | PrometheusSinkConfiguration.SinkWriterErrorHandlingBehaviorConfiguration 30 | .builder() 31 | .build(); 32 | 33 | @Test 34 | public void shouldDefaultToFailOnMaxRetryExceeded() { 35 | assertEquals( 36 | PrometheusSinkConfiguration.OnErrorBehavior.FAIL, 37 | DEFAULT_CONFIG.getOnMaxRetryExceeded()); 38 | } 39 | 40 | @Test 41 | public void shouldDefaultToDiscardAndContinueOnPrometheusNonRetryableError() { 42 | assertEquals( 43 | PrometheusSinkConfiguration.OnErrorBehavior.DISCARD_AND_CONTINUE, 44 | DEFAULT_CONFIG.getOnPrometheusNonRetryableError()); 45 | } 46 | 47 | @Test 48 | public void shouldPreventSettingContinueOnPrometheusNonRetryableErrorToFail() { 49 | assertThrows( 50 | IllegalArgumentException.class, 51 | () -> 52 | PrometheusSinkConfiguration.SinkWriterErrorHandlingBehaviorConfiguration 53 | .builder() 54 | .onPrometheusNonRetryableError( 55 | PrometheusSinkConfiguration.OnErrorBehavior.FAIL) 56 | .build()); 57 | } 58 | } 59 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/VerifyableResponseCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 
5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink; 19 | 20 | import org.apache.hc.client5.http.async.methods.SimpleHttpResponse; 21 | import org.apache.hc.core5.concurrent.CallbackContribution; 22 | 23 | import java.util.ArrayList; 24 | import java.util.HashMap; 25 | import java.util.List; 26 | import java.util.Map; 27 | 28 | /** 29 | * Wrapper of {@link HttpResponseCallback} that captures the completion of the Future, and any 30 | * exception thrown. 31 | * 32 | *

Note that any exception thrown by completed() is captured and not rethrown. 33 | */ 34 | public class VerifyableResponseCallback extends CallbackContribution { 35 | 36 | private final HttpResponseCallback responseCallback; 37 | private final List completedResponses = new ArrayList<>(); 38 | private final Map thrownExceptions = new HashMap<>(); 39 | 40 | VerifyableResponseCallback(HttpResponseCallback responseCallback) { 41 | super(responseCallback); 42 | this.responseCallback = responseCallback; 43 | } 44 | 45 | @Override 46 | public void completed(SimpleHttpResponse response) { 47 | int thisInvocationCount = completedResponses.size() + 1; 48 | 49 | // Capture the completed response 50 | completedResponses.add(response); 51 | // Forward to the wrapped callback, capturing any exception 52 | try { 53 | responseCallback.completed(response); 54 | } catch (Exception ex) { 55 | thrownExceptions.put(thisInvocationCount, ex); 56 | } 57 | } 58 | 59 | public int getCompletedResponsesCount() { 60 | return completedResponses.size(); 61 | } 62 | 63 | public Exception getThrownExceptionAtInvocationCount(int invocationCount) { 64 | if (invocationCount <= getCompletedResponsesCount()) { 65 | return thrownExceptions.get(invocationCount); 66 | } else { 67 | return null; 68 | } 69 | } 70 | 71 | public SimpleHttpResponse getCompletedResponseAtInvocationCount(int invocationCount) { 72 | if (invocationCount <= getCompletedResponsesCount()) { 73 | return completedResponses.get(invocationCount - 1); 74 | } else { 75 | return null; 76 | } 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/http/AsyncHttpClientRetryIT.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. 
See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.connector.prometheus.sink.HttpTestUtils; 21 | import org.apache.flink.connector.prometheus.sink.metrics.VerifybleSinkMetricsCallback; 22 | 23 | import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; 24 | import com.github.tomakehurst.wiremock.junit5.WireMockTest; 25 | import org.apache.hc.client5.http.async.methods.SimpleHttpRequest; 26 | import org.apache.hc.client5.http.impl.async.CloseableHttpAsyncClient; 27 | import org.apache.hc.core5.http.HttpStatus; 28 | import org.junit.jupiter.api.Test; 29 | 30 | import java.io.IOException; 31 | import java.net.URISyntaxException; 32 | 33 | import static com.github.tomakehurst.wiremock.client.WireMock.exactly; 34 | import static com.github.tomakehurst.wiremock.client.WireMock.notFound; 35 | import static com.github.tomakehurst.wiremock.client.WireMock.ok; 36 | import static com.github.tomakehurst.wiremock.client.WireMock.post; 37 | import static com.github.tomakehurst.wiremock.client.WireMock.postRequestedFor; 38 | import static com.github.tomakehurst.wiremock.client.WireMock.serverError; 39 | import static com.github.tomakehurst.wiremock.client.WireMock.status; 40 | import static 
com.github.tomakehurst.wiremock.client.WireMock.stubFor; 41 | import static com.github.tomakehurst.wiremock.client.WireMock.urlEqualTo; 42 | import static com.github.tomakehurst.wiremock.client.WireMock.verify; 43 | import static org.apache.flink.connector.prometheus.sink.http.HttpClientTestUtils.statusCodeAsserter; 44 | import static org.awaitility.Awaitility.await; 45 | 46 | /** 47 | * Test the stack of RemoteWriteRetryStrategy, RetryConfiguration and RemoteWriteResponseClassifier, 48 | * and PrometheusAsyncHttpClientBuilder. 49 | */ 50 | @WireMockTest 51 | public class AsyncHttpClientRetryIT { 52 | 53 | @Test 54 | public void shouldRetryOn500UpToRetryLimitThenSuccessfullyReturn( 55 | WireMockRuntimeInfo wmRuntimeInfo) throws URISyntaxException, IOException { 56 | stubFor(post("/remote_write").willReturn(serverError())); 57 | 58 | int retryLimit = 10; 59 | int expectedRequestCount = retryLimit + 1; 60 | PrometheusAsyncHttpClientBuilder clientBuilder = 61 | new PrometheusAsyncHttpClientBuilder( 62 | HttpTestUtils.fastRetryConfiguration(retryLimit)); 63 | try (CloseableHttpAsyncClient client = 64 | clientBuilder.buildAndStartClient(new VerifybleSinkMetricsCallback())) { 65 | SimpleHttpRequest request = 66 | HttpTestUtils.buildPostRequest(HttpTestUtils.buildRequestUrl(wmRuntimeInfo)); 67 | client.execute(request, statusCodeAsserter(HttpStatus.SC_SERVER_ERROR)); 68 | 69 | await().untilAsserted( 70 | () -> 71 | verify( 72 | exactly(expectedRequestCount), 73 | postRequestedFor(urlEqualTo("/remote_write")))); 74 | } 75 | } 76 | 77 | @Test 78 | public void shouldRetryOn429UpToRetryLimitThenSuccessfullyReturn( 79 | WireMockRuntimeInfo wmRuntimeInfo) throws URISyntaxException, IOException { 80 | stubFor(post("/remote_write").willReturn(status(HttpStatus.SC_TOO_MANY_REQUESTS))); 81 | 82 | int retryLimit = 10; 83 | int expectedRequestCount = retryLimit + 1; 84 | PrometheusAsyncHttpClientBuilder clientBuilder = 85 | new PrometheusAsyncHttpClientBuilder( 86 | 
HttpTestUtils.fastRetryConfiguration(retryLimit)); 87 | try (CloseableHttpAsyncClient client = 88 | clientBuilder.buildAndStartClient(new VerifybleSinkMetricsCallback())) { 89 | SimpleHttpRequest request = 90 | HttpTestUtils.buildPostRequest(HttpTestUtils.buildRequestUrl(wmRuntimeInfo)); 91 | client.execute(request, statusCodeAsserter(HttpStatus.SC_TOO_MANY_REQUESTS)); 92 | 93 | await().untilAsserted( 94 | () -> 95 | verify( 96 | exactly(expectedRequestCount), 97 | postRequestedFor(urlEqualTo("/remote_write")))); 98 | } 99 | } 100 | 101 | @Test 102 | public void shouldNotRetryOn404ThenSuccessfullyReturn(WireMockRuntimeInfo wmRuntimeInfo) 103 | throws URISyntaxException, IOException { 104 | stubFor(post("/remote_write").willReturn(notFound())); 105 | 106 | PrometheusAsyncHttpClientBuilder clientBuilder = 107 | new PrometheusAsyncHttpClientBuilder(HttpTestUtils.fastRetryConfiguration(2)); 108 | 109 | try (CloseableHttpAsyncClient client = 110 | clientBuilder.buildAndStartClient(new VerifybleSinkMetricsCallback())) { 111 | SimpleHttpRequest request = 112 | HttpTestUtils.buildPostRequest(HttpTestUtils.buildRequestUrl(wmRuntimeInfo)); 113 | client.execute(request, statusCodeAsserter(HttpStatus.SC_NOT_FOUND)); 114 | 115 | await().untilAsserted( 116 | () -> 117 | verify( 118 | exactly(1), 119 | postRequestedFor(urlEqualTo("/remote_write")))); 120 | } 121 | } 122 | 123 | @Test 124 | void shouldNotRetryOn200OkThenSuccessfullyReturn(WireMockRuntimeInfo wmRuntimeInfo) 125 | throws URISyntaxException, IOException { 126 | stubFor(post("/remote_write").willReturn(ok())); 127 | 128 | PrometheusAsyncHttpClientBuilder clientBuilder = 129 | new PrometheusAsyncHttpClientBuilder(HttpTestUtils.fastRetryConfiguration(2)); 130 | 131 | try (CloseableHttpAsyncClient client = 132 | clientBuilder.buildAndStartClient(new VerifybleSinkMetricsCallback())) { 133 | SimpleHttpRequest request = 134 | HttpTestUtils.buildPostRequest(HttpTestUtils.buildRequestUrl(wmRuntimeInfo)); 135 | 
client.execute(request, statusCodeAsserter(HttpStatus.SC_OK)); 136 | 137 | await().untilAsserted( 138 | () -> 139 | verify( 140 | exactly(1), 141 | postRequestedFor(urlEqualTo("/remote_write")))); 142 | } 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/http/HttpClientTestUtils.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.hc.client5.http.async.methods.SimpleHttpResponse; 21 | import org.apache.hc.core5.concurrent.FutureCallback; 22 | import org.apache.hc.core5.http.HttpRequest; 23 | import org.apache.hc.core5.http.HttpResponse; 24 | import org.apache.hc.core5.http.Method; 25 | import org.apache.hc.core5.http.message.BasicHttpRequest; 26 | import org.apache.hc.core5.http.message.BasicHttpResponse; 27 | import org.apache.hc.core5.http.protocol.BasicHttpContext; 28 | import org.apache.hc.core5.http.protocol.HttpContext; 29 | import org.junit.jupiter.api.Assertions; 30 | import org.slf4j.Logger; 31 | 32 | import static org.junit.jupiter.api.Assertions.assertEquals; 33 | 34 | /** Test utilities for testing features involving the Apache Http Client. */ 35 | public class HttpClientTestUtils { 36 | 37 | public static FutureCallback statusCodeAsserter(int expectedStatusCode) { 38 | 39 | return new FutureCallback() { 40 | @Override 41 | public void completed(SimpleHttpResponse response) { 42 | assertEquals( 43 | expectedStatusCode, 44 | response.getCode(), 45 | "Request should return status code " + expectedStatusCode); 46 | } 47 | 48 | @Override 49 | public void failed(Exception ex) { 50 | Assertions.fail("Request should not throw exception"); 51 | } 52 | 53 | @Override 54 | public void cancelled() { 55 | Assertions.fail("Request should not be cancelled"); 56 | } 57 | }; 58 | } 59 | 60 | public static FutureCallback loggingCallback(Logger logger) { 61 | return new FutureCallback() { 62 | @Override 63 | public void completed(SimpleHttpResponse simpleHttpResponse) { 64 | logger.info( 65 | "Request Success: {},{}", 66 | simpleHttpResponse.getCode(), 67 | simpleHttpResponse.getReasonPhrase()); 68 | } 69 | 70 | @Override 71 | public void failed(Exception e) { 72 | logger.info("Request Failure", e); 73 | } 74 | 75 | @Override 76 | public void cancelled() { 77 | logger.info("Request Cancelled"); 
78 | } 79 | }; 80 | } 81 | 82 | public static HttpContext httpContext() { 83 | return new BasicHttpContext(); 84 | } 85 | 86 | public static HttpRequest postHttpRequest() { 87 | return new BasicHttpRequest(Method.POST, "/"); 88 | } 89 | 90 | public static HttpResponse httpResponse(int statusCode) { 91 | return new BasicHttpResponse(statusCode); 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/http/RemoteWriteResponseClassifierTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.hc.core5.http.HttpResponse; 21 | import org.junit.jupiter.api.Test; 22 | 23 | import static org.apache.flink.connector.prometheus.sink.http.HttpClientTestUtils.httpResponse; 24 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.FATAL_ERROR; 25 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.NON_RETRYABLE_ERROR; 26 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.RETRYABLE_ERROR; 27 | import static org.apache.flink.connector.prometheus.sink.http.RemoteWriteResponseType.UNHANDLED; 28 | import static org.junit.jupiter.api.Assertions.assertEquals; 29 | 30 | public class RemoteWriteResponseClassifierTest { 31 | @Test 32 | void shouldClassify100AsUnhandled() { 33 | HttpResponse response = httpResponse(100); 34 | assertEquals(UNHANDLED, RemoteWriteResponseClassifier.classify(response)); 35 | } 36 | 37 | @Test 38 | void shouldClassify200AsSuccess() { 39 | HttpResponse response = httpResponse(200); 40 | 41 | assertEquals( 42 | RemoteWriteResponseType.SUCCESS, RemoteWriteResponseClassifier.classify(response)); 43 | } 44 | 45 | @Test 46 | void shouldClassify400AsNonRetryableError() { 47 | HttpResponse response = httpResponse(400); 48 | 49 | assertEquals(NON_RETRYABLE_ERROR, RemoteWriteResponseClassifier.classify(response)); 50 | } 51 | 52 | @Test 53 | void shouldClassify403AsFatal() { 54 | HttpResponse response = httpResponse(403); 55 | 56 | assertEquals(FATAL_ERROR, RemoteWriteResponseClassifier.classify(response)); 57 | } 58 | 59 | @Test 60 | void shouldClassify404AsFatal() { 61 | HttpResponse response = httpResponse(404); 62 | 63 | assertEquals(FATAL_ERROR, RemoteWriteResponseClassifier.classify(response)); 64 | } 65 | 66 | @Test 67 | void shouldClassify429AsRetryableError() { 68 | HttpResponse response = httpResponse(429); 69 | 70 | 
assertEquals(RETRYABLE_ERROR, RemoteWriteResponseClassifier.classify(response)); 71 | } 72 | 73 | @Test 74 | void shouldClassify500AsRetryableError() { 75 | HttpResponse response = httpResponse(500); 76 | 77 | assertEquals(RETRYABLE_ERROR, RemoteWriteResponseClassifier.classify(response)); 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/http/RemoteWriteRetryStrategyTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one or more 3 | * contributor license agreements. See the NOTICE file distributed with 4 | * this work for additional information regarding copyright ownership. 5 | * The ASF licenses this file to You under the Apache License, Version 2.0 6 | * (the "License"); you may not use this file except in compliance with 7 | * the License. You may obtain a copy of the License at 8 | * 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.connector.prometheus.sink.PrometheusSinkConfiguration; 21 | import org.apache.flink.connector.prometheus.sink.metrics.VerifybleSinkMetricsCallback; 22 | 23 | import org.apache.hc.core5.http.HttpRequest; 24 | import org.apache.hc.core5.http.HttpResponse; 25 | import org.apache.hc.core5.http.HttpStatus; 26 | import org.apache.hc.core5.http.protocol.HttpContext; 27 | import org.apache.hc.core5.util.TimeValue; 28 | import org.junit.jupiter.api.Test; 29 | 30 | import javax.net.ssl.SSLException; 31 | 32 | import java.io.IOException; 33 | import java.io.InterruptedIOException; 34 | import java.net.ConnectException; 35 | import java.net.NoRouteToHostException; 36 | import java.net.UnknownHostException; 37 | 38 | import static org.apache.flink.connector.prometheus.sink.http.HttpClientTestUtils.httpContext; 39 | import static org.apache.flink.connector.prometheus.sink.http.HttpClientTestUtils.httpResponse; 40 | import static org.apache.flink.connector.prometheus.sink.http.HttpClientTestUtils.postHttpRequest; 41 | import static org.junit.jupiter.api.Assertions.assertEquals; 42 | import static org.junit.jupiter.api.Assertions.assertFalse; 43 | import static org.junit.jupiter.api.Assertions.assertTrue; 44 | 45 | class RemoteWriteRetryStrategyTest { 46 | 47 | private static final int INITIAL_RETRY_DELAY_MS = 30; 48 | private static final int MAX_RETRY_DELAY_MS = 5000; 49 | private static final PrometheusSinkConfiguration.RetryConfiguration RETRY_CONFIGURATION = 50 | PrometheusSinkConfiguration.RetryConfiguration.builder() 51 | .setInitialRetryDelayMS(INITIAL_RETRY_DELAY_MS) 52 | .setMaxRetryDelayMS(MAX_RETRY_DELAY_MS) 53 | .setMaxRetryCount(Integer.MAX_VALUE) 54 | .build(); 55 | 56 | @Test 57 | public void shouldRetryOnRetryableErrorResponse() { 58 | HttpResponse httpResponse = httpResponse(HttpStatus.SC_INTERNAL_SERVER_ERROR); 59 | HttpContext httpContext = 
httpContext(); 60 | VerifybleSinkMetricsCallback metrics = new VerifybleSinkMetricsCallback(); 61 | 62 | RemoteWriteRetryStrategy strategy = 63 | new RemoteWriteRetryStrategy(RETRY_CONFIGURATION, metrics); 64 | assertTrue(strategy.retryRequest(httpResponse, 1, httpContext)); 65 | } 66 | 67 | @Test 68 | public void shouldNotRetryOnNonRetryableErrorResponse() { 69 | HttpResponse httpResponse = httpResponse(HttpStatus.SC_FORBIDDEN); 70 | HttpContext httpContext = httpContext(); 71 | VerifybleSinkMetricsCallback metricsCallback = new VerifybleSinkMetricsCallback(); 72 | 73 | RemoteWriteRetryStrategy strategy = 74 | new RemoteWriteRetryStrategy(RETRY_CONFIGURATION, metricsCallback); 75 | assertFalse(strategy.retryRequest(httpResponse, 1, httpContext)); 76 | } 77 | 78 | @Test 79 | public void shouldRetryIOException() { 80 | HttpRequest httpRequest = postHttpRequest(); 81 | HttpContext httpContext = httpContext(); 82 | VerifybleSinkMetricsCallback metricsCallback = new VerifybleSinkMetricsCallback(); 83 | 84 | RemoteWriteRetryStrategy strategy = 85 | new RemoteWriteRetryStrategy(RETRY_CONFIGURATION, metricsCallback); 86 | 87 | assertTrue(strategy.retryRequest(httpRequest, new IOException("dummy"), 1, httpContext)); 88 | } 89 | 90 | @Test 91 | public void shouldNotRetryNonRetryableIOExceptions() { 92 | HttpRequest httpRequest = postHttpRequest(); 93 | HttpContext httpContext = httpContext(); 94 | VerifybleSinkMetricsCallback metricsCallback = new VerifybleSinkMetricsCallback(); 95 | 96 | RemoteWriteRetryStrategy strategy = 97 | new RemoteWriteRetryStrategy(RETRY_CONFIGURATION, metricsCallback); 98 | 99 | assertFalse( 100 | strategy.retryRequest( 101 | httpRequest, new InterruptedIOException("dummy"), 1, httpContext)); 102 | assertFalse( 103 | strategy.retryRequest( 104 | httpRequest, new UnknownHostException("dummy"), 1, httpContext)); 105 | assertFalse( 106 | strategy.retryRequest(httpRequest, new ConnectException("dummy"), 1, httpContext)); 107 | assertFalse( 108 | 
strategy.retryRequest( 109 | httpRequest, new NoRouteToHostException("dummy"), 1, httpContext)); 110 | assertFalse(strategy.retryRequest(httpRequest, new SSLException("dummy"), 1, httpContext)); 111 | } 112 | 113 | @Test 114 | public void retryDelayShouldDecreaseExponentiallyWithExecCount() { 115 | HttpResponse httpResponse = httpResponse(HttpStatus.SC_INTERNAL_SERVER_ERROR); 116 | HttpContext httpContext = httpContext(); 117 | VerifybleSinkMetricsCallback metricsCallback = new VerifybleSinkMetricsCallback(); 118 | 119 | RemoteWriteRetryStrategy strategy = 120 | new RemoteWriteRetryStrategy(RETRY_CONFIGURATION, metricsCallback); 121 | 122 | assertEquals( 123 | TimeValue.ofMilliseconds(INITIAL_RETRY_DELAY_MS), 124 | strategy.getRetryInterval(httpResponse, 1, httpContext)); 125 | assertEquals( 126 | TimeValue.ofMilliseconds(INITIAL_RETRY_DELAY_MS * 2), 127 | strategy.getRetryInterval(httpResponse, 2, httpContext)); 128 | assertEquals( 129 | TimeValue.ofMilliseconds(INITIAL_RETRY_DELAY_MS * 2 * 2), 130 | strategy.getRetryInterval(httpResponse, 3, httpContext)); 131 | assertEquals( 132 | TimeValue.ofMilliseconds(INITIAL_RETRY_DELAY_MS * 2 * 2 * 2), 133 | strategy.getRetryInterval(httpResponse, 4, httpContext)); 134 | } 135 | 136 | @Test 137 | public void retryDelayShouldNotExceedMaximumDelay() { 138 | PrometheusSinkConfiguration.RetryConfiguration retryConfiguration = 139 | PrometheusSinkConfiguration.RetryConfiguration.builder() 140 | .setInitialRetryDelayMS(30) 141 | .setMaxRetryDelayMS(5000) 142 | .setMaxRetryCount(Integer.MAX_VALUE) 143 | .build(); 144 | HttpResponse httpResponse = httpResponse(HttpStatus.SC_INTERNAL_SERVER_ERROR); 145 | HttpContext httpContext = httpContext(); 146 | VerifybleSinkMetricsCallback metricsCallback = new VerifybleSinkMetricsCallback(); 147 | 148 | RemoteWriteRetryStrategy strategy = 149 | new RemoteWriteRetryStrategy(retryConfiguration, metricsCallback); 150 | 151 | assertEquals( 152 | TimeValue.ofMilliseconds(5000), 153 | 
strategy.getRetryInterval(httpResponse, 10_000, httpContext)); 154 | } 155 | } 156 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/http/RethrowingIOSessionListenerTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * http://www.apache.org/licenses/LICENSE-2.0 10 | * 11 | * Unless required by applicable law or agreed to in writing, software 12 | * distributed under the License is distributed on an "AS IS" BASIS, 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | * See the License for the specific language governing permissions and 15 | * limitations under the License. 
16 | */ 17 | 18 | package org.apache.flink.connector.prometheus.sink.http; 19 | 20 | import org.apache.flink.connector.prometheus.sink.errorhandling.PrometheusSinkWriteException; 21 | 22 | import org.junit.jupiter.api.Test; 23 | 24 | import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; 25 | import static org.junit.jupiter.api.Assertions.assertEquals; 26 | import static org.junit.jupiter.api.Assertions.assertThrows; 27 | 28 | class RethrowingIOSessionListenerTest { 29 | @Test 30 | void exceptionHandlerShouldRethrowPrometheusSinkWriteException() { 31 | RethrowingIOSessionListener listener = new RethrowingIOSessionListener(); 32 | PrometheusSinkWriteException exception = new PrometheusSinkWriteException("Test exception"); 33 | 34 | Exception thrown = assertThrows(Exception.class, () -> listener.exception(null, exception)); 35 | assertEquals(exception, thrown); 36 | } 37 | 38 | @Test 39 | void exceptionHandlerShouldNotRethrowOtherExceptions() { 40 | RethrowingIOSessionListener listener = new RethrowingIOSessionListener(); 41 | Exception otherException = new Exception("Other exception"); 42 | 43 | assertDoesNotThrow(() -> listener.exception(null, otherException)); 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/metrics/SinkMetricsCallbackTest.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. 
You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink.metrics; 20 | 21 | import org.apache.flink.connector.prometheus.sink.InspectableMetricGroup; 22 | 23 | import org.junit.jupiter.api.BeforeEach; 24 | import org.junit.jupiter.api.Test; 25 | 26 | import static org.apache.flink.connector.prometheus.sink.InspectableMetricGroupAssertions.assertCounterCount; 27 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_DROPPED; 28 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_NON_RETRYABLE_DROPPED; 29 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_OUT; 30 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_SAMPLES_RETRY_LIMIT_DROPPED; 31 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_WRITE_REQUESTS_OUT; 32 | import static org.apache.flink.connector.prometheus.sink.metrics.SinkMetrics.SinkCounter.NUM_WRITE_REQUESTS_PERMANENTLY_FAILED; 33 | 34 | class SinkMetricsCallbackTest { 35 | private InspectableMetricGroup metricGroup; 36 | private SinkMetricsCallback metricsCallback; 37 | 38 | private static final long SAMPLE_COUNT = 42; 39 | 40 | @BeforeEach 41 | void setUp() { 42 | metricGroup = new InspectableMetricGroup(); 43 | metricsCallback = new SinkMetricsCallback(SinkMetrics.registerSinkMetrics(metricGroup)); 44 | } 45 | 46 | @Test 47 | void onSuccessfulWriteRequest() { 48 | 
metricsCallback.onSuccessfulWriteRequest(SAMPLE_COUNT); 49 | 50 | assertCounterCount(SAMPLE_COUNT, metricGroup, NUM_SAMPLES_OUT); 51 | assertCounterCount(1, metricGroup, NUM_WRITE_REQUESTS_OUT); 52 | 53 | assertCounterCount(0, metricGroup, NUM_SAMPLES_DROPPED); 54 | assertCounterCount(0, metricGroup, NUM_WRITE_REQUESTS_PERMANENTLY_FAILED); 55 | } 56 | 57 | @Test 58 | void onFailedWriteRequestForNonRetryableError() { 59 | metricsCallback.onFailedWriteRequestForNonRetryableError(SAMPLE_COUNT); 60 | 61 | assertCounterCount(SAMPLE_COUNT, metricGroup, NUM_SAMPLES_NON_RETRYABLE_DROPPED); 62 | assertCounterCount(SAMPLE_COUNT, metricGroup, NUM_SAMPLES_DROPPED); 63 | assertCounterCount(1, metricGroup, NUM_WRITE_REQUESTS_PERMANENTLY_FAILED); 64 | 65 | assertCounterCount(0, metricGroup, NUM_SAMPLES_OUT); 66 | assertCounterCount(0, metricGroup, NUM_WRITE_REQUESTS_OUT); 67 | } 68 | 69 | @Test 70 | void onFailedWriteRequestForRetryLimitExceeded() { 71 | metricsCallback.onFailedWriteRequestForRetryLimitExceeded(SAMPLE_COUNT); 72 | 73 | assertCounterCount(SAMPLE_COUNT, metricGroup, NUM_SAMPLES_RETRY_LIMIT_DROPPED); 74 | assertCounterCount(SAMPLE_COUNT, metricGroup, NUM_SAMPLES_DROPPED); 75 | assertCounterCount(1, metricGroup, NUM_WRITE_REQUESTS_PERMANENTLY_FAILED); 76 | 77 | assertCounterCount(0, metricGroup, NUM_SAMPLES_OUT); 78 | assertCounterCount(0, metricGroup, NUM_WRITE_REQUESTS_OUT); 79 | } 80 | 81 | @Test 82 | void onFailedWriteRequestForHttpClientIoFail() { 83 | metricsCallback.onFailedWriteRequestForHttpClientIoFail(SAMPLE_COUNT); 84 | assertCounterCount(SAMPLE_COUNT, metricGroup, NUM_SAMPLES_DROPPED); 85 | assertCounterCount(1, metricGroup, NUM_WRITE_REQUESTS_PERMANENTLY_FAILED); 86 | 87 | assertCounterCount(0, metricGroup, NUM_SAMPLES_OUT); 88 | assertCounterCount(0, metricGroup, NUM_WRITE_REQUESTS_OUT); 89 | } 90 | } 91 | -------------------------------------------------------------------------------- 
/flink-connector-prometheus/src/test/java/org/apache/flink/connector/prometheus/sink/metrics/VerifybleSinkMetricsCallback.java: -------------------------------------------------------------------------------- 1 | /* 2 | * Licensed to the Apache Software Foundation (ASF) under one 3 | * or more contributor license agreements. See the NOTICE file 4 | * distributed with this work for additional information 5 | * regarding copyright ownership. The ASF licenses this file 6 | * to you under the Apache License, Version 2.0 (the 7 | * "License"); you may not use this file except in compliance 8 | * with the License. You may obtain a copy of the License at 9 | * 10 | * http://www.apache.org/licenses/LICENSE-2.0 11 | * 12 | * Unless required by applicable law or agreed to in writing, software 13 | * distributed under the License is distributed on an "AS IS" BASIS, 14 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | * See the License for the specific language governing permissions and 16 | * limitations under the License. 17 | */ 18 | 19 | package org.apache.flink.connector.prometheus.sink.metrics; 20 | 21 | import org.apache.flink.metrics.groups.UnregisteredMetricsGroup; 22 | 23 | /** 24 | * Dummy implementation of {@link 25 | * org.apache.flink.connector.prometheus.sink.metrics.SinkMetricsCallback} wrapping dummy metrics, 26 | * that allows verifying invoked callbacks. 
27 | */ 28 | public class VerifybleSinkMetricsCallback extends SinkMetricsCallback { 29 | private int successfulWriteRequestsCount = 0; 30 | private int failedWriteRequestForNonRetryableErrorCount = 0; 31 | private int failedWriteRequestForRetryLimitExceededCount = 0; 32 | private int failedWriteRequestForHttpClientIoFailCount = 0; 33 | private int writeRequestsRetryCount = 0; 34 | 35 | public VerifybleSinkMetricsCallback() { 36 | super( 37 | SinkMetrics.registerSinkMetrics( 38 | UnregisteredMetricsGroup.createSinkWriterMetricGroup())); 39 | } 40 | 41 | @Override 42 | public void onSuccessfulWriteRequest(long sampleCount) { 43 | successfulWriteRequestsCount++; 44 | } 45 | 46 | @Override 47 | public void onFailedWriteRequestForNonRetryableError(long sampleCount) { 48 | failedWriteRequestForNonRetryableErrorCount++; 49 | } 50 | 51 | @Override 52 | public void onFailedWriteRequestForRetryLimitExceeded(long sampleCount) { 53 | failedWriteRequestForRetryLimitExceededCount++; 54 | } 55 | 56 | @Override 57 | public void onFailedWriteRequestForHttpClientIoFail(long sampleCount) { 58 | failedWriteRequestForHttpClientIoFailCount++; 59 | } 60 | 61 | @Override 62 | public void onWriteRequestRetry() { 63 | writeRequestsRetryCount++; 64 | } 65 | 66 | public boolean verifyOnlySuccessfulWriteRequestsWasCalledOnce() { 67 | return successfulWriteRequestsCount == 1 68 | && failedWriteRequestForNonRetryableErrorCount == 0 69 | && failedWriteRequestForRetryLimitExceededCount == 0 70 | && failedWriteRequestForHttpClientIoFailCount == 0 71 | && writeRequestsRetryCount == 0; 72 | } 73 | 74 | public boolean verifyOnlyFailedWriteRequestsForNonRetryableErrorWasCalledOnce() { 75 | return successfulWriteRequestsCount == 0 76 | && failedWriteRequestForNonRetryableErrorCount == 1 77 | && failedWriteRequestForRetryLimitExceededCount == 0 78 | && failedWriteRequestForHttpClientIoFailCount == 0 79 | && writeRequestsRetryCount == 0; 80 | } 81 | 82 | public boolean 
verifyOnlyFailedWriteRequestsForRetryLimitExceededWasCalledOnce() { 83 | return successfulWriteRequestsCount == 0 84 | && failedWriteRequestForNonRetryableErrorCount == 0 85 | && failedWriteRequestForRetryLimitExceededCount == 1 86 | && failedWriteRequestForHttpClientIoFailCount == 0 87 | && writeRequestsRetryCount == 0; 88 | } 89 | 90 | public boolean verifyOnlyFailedWriteRequestsForHttpClientIoFailWasCalledOnce() { 91 | return successfulWriteRequestsCount == 0 92 | && failedWriteRequestForNonRetryableErrorCount == 0 93 | && failedWriteRequestForRetryLimitExceededCount == 0 94 | && failedWriteRequestForHttpClientIoFailCount == 1 95 | && writeRequestsRetryCount == 0; 96 | } 97 | 98 | public boolean verifyOnlyWriteRequestsRetryWasCalled(int times) { 99 | return successfulWriteRequestsCount == 0 100 | && failedWriteRequestForNonRetryableErrorCount == 0 101 | && failedWriteRequestForRetryLimitExceededCount == 0 102 | && failedWriteRequestForHttpClientIoFailCount == 0 103 | && writeRequestsRetryCount == times; 104 | } 105 | } 106 | -------------------------------------------------------------------------------- /flink-connector-prometheus/src/test/resources/archunit.properties: -------------------------------------------------------------------------------- 1 | # 2 | # Licensed to the Apache Software Foundation (ASF) under one 3 | # or more contributor license agreements. See the NOTICE file 4 | # distributed with this work for additional information 5 | # regarding copyright ownership. The ASF licenses this file 6 | # to you under the Apache License, Version 2.0 (the 7 | # "License"); you may not use this file except in compliance 8 | # with the License. 
You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# By default we allow removing existing violations, but fail when new violations are added.
freeze.store.default.allowStoreUpdate=true

# Enable this if a new (frozen) rule has been added in order to create the initial store and record the existing violations.
#freeze.store.default.allowStoreCreation=true

# Enable this to allow new violations to be recorded.
# NOTE: Adding new violations should be avoided when possible. If the rule was correct to flag a new
# violation, please try to avoid creating the violation. If the violation was created due to a
# shortcoming of the rule, file a JIRA issue so the rule can be improved.
#freeze.refreeze=true

freeze.store.default.path=archunit-violations
--------------------------------------------------------------------------------
/flink-connector-prometheus/src/test/resources/log4j2.properties:
--------------------------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.
You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | # 17 | 18 | appender.console.name = ConsoleAppender 19 | appender.console.type = CONSOLE 20 | appender.console.layout.type = PatternLayout 21 | appender.console.layout.pattern = %d{HH:mm:ss,SSS} %-5p %-60c %x - %m%n 22 | 23 | ## Uncomment the following for testing 24 | #logger.tracing.name = org.apache.flink.connector.prometheus.sink 25 | #logger.tracing.level = DEBUG 26 | #logger.tracing.additivity = false 27 | #logger.tracing.appenderRef.console.ref = ConsoleAppender 28 | 29 | rootLogger.level = INFO 30 | rootLogger.appenderRef.console.ref = ConsoleAppender 31 | -------------------------------------------------------------------------------- /tools/maven/suppressions.xml: -------------------------------------------------------------------------------- 1 | 2 | 18 | 19 | 22 | 23 | 24 | 25 | 28 | 31 | --------------------------------------------------------------------------------