├── .github └── workflows │ ├── publish.yml │ └── python-package.yml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── LICENSE ├── README.md ├── example ├── basic_example.py └── install_and_run.sh ├── requirements-dev.txt ├── setup.cfg ├── setup.py ├── src └── dynatrace │ └── opentelemetry │ └── metrics │ └── export │ ├── __init__.py │ ├── _exporter.py │ ├── _factory.py │ └── _histogram_utils.py ├── test ├── test_exporter.py └── test_histogram_utils.py └── tox.ini /.github/workflows/publish.yml: -------------------------------------------------------------------------------- 1 | name: Upload Python Package to PyPI 2 | 3 | on: 4 | release: 5 | types: [ published ] 6 | 7 | jobs: 8 | publish: 9 | name: "Publish Python package to PyPI" 10 | runs-on: ubuntu-latest 11 | 12 | steps: 13 | - uses: actions/checkout@v3 14 | - uses: actions/setup-python@v4 15 | with: 16 | python-version: 3.8 17 | - name: Install dependencies 18 | run: | 19 | python -m pip install --upgrade pip 20 | pip install setuptools wheel twine 21 | - name: Build and publish 22 | env: 23 | TWINE_USERNAME: __token__ 24 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }} 25 | run: | 26 | python setup.py sdist bdist_wheel 27 | twine upload dist/* 28 | -------------------------------------------------------------------------------- /.github/workflows/python-package.yml: -------------------------------------------------------------------------------- 1 | name: Python build 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | workflow_dispatch: 9 | 10 | jobs: 11 | lint: 12 | runs-on: ubuntu-latest 13 | 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: Set up Python 17 | uses: actions/setup-python@v4 18 | with: 19 | python-version: 3.8 20 | - name: Install Dependencies 21 | run: | 22 | pip install -U pip 23 | pip install -U wheel setuptools tox 24 | - run: tox -e lint 25 | 26 | build-and-test: 27 | strategy: 28 | fail-fast: false 29 | matrix: 30 | os: 31 | - 'ubuntu-latest' 32 | - 
'windows-latest' 33 | - 'macos-latest' 34 | python-version: 35 | - '3.7' 36 | - '3.8' 37 | - '3.9' 38 | - '3.10' 39 | - 'pypy3.7' 40 | - 'pypy3.8' 41 | - 'pypy3.9' 42 | 43 | runs-on: ${{ matrix.os }} 44 | 45 | steps: 46 | - uses: actions/checkout@v3 47 | - name: Set up Python ${{ matrix.python-version }} 48 | uses: actions/setup-python@v4 49 | with: 50 | python-version: ${{ matrix.python-version }} 51 | - name: Install Dependencies 52 | run: | 53 | pip install -U pip 54 | pip install -U wheel setuptools tox 55 | - name: Test 56 | run: tox -e py 57 | 58 | 59 | all-passed: 60 | needs: 61 | - lint 62 | - build-and-test 63 | runs-on: ubuntu-latest 64 | steps: 65 | - name: All checks passed 66 | run: 'true' 67 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | *.sw[op] 3 | 4 | # C extensions 5 | *.so 6 | 7 | # Packages 8 | *.egg 9 | *.egg-info 10 | dist 11 | build 12 | eggs 13 | parts 14 | bin 15 | var 16 | sdist 17 | develop-eggs 18 | .installed.cfg 19 | lib 20 | lib64 21 | __pycache__ 22 | .pytest_cache 23 | venv*/ 24 | .venv*/ 25 | 26 | # Installer logs 27 | pip-log.txt 28 | 29 | # Unit test / coverage reports 30 | coverage.xml 31 | .coverage 32 | .nox 33 | .tox 34 | .cache 35 | htmlcov 36 | 37 | # Translations 38 | *.mo 39 | 40 | # Mac 41 | .DS_Store 42 | 43 | # Mr Developer 44 | .mr.developer.cfg 45 | .project 46 | .pydevproject 47 | 48 | # JetBrains 49 | .idea 50 | 51 | # VSCode 52 | .vscode 53 | 54 | # Sphinx 55 | _build/ 56 | 57 | # mypy 58 | .mypy_cache/ 59 | target 60 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor covenant code of conduct 2 | 3 | ## Our pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and 
maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, sex characteristics, gender identity and expression, 9 | level of experience, education, socio-economic status, nationality, personal 10 | appearance, race, religion, or sexual identity and orientation. 11 | 12 | ## Our standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 
45 | 46 | ## Scope 47 | 48 | This code of conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project email 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at opensource@dynatrace.com. All 59 | complaints will be reviewed and investigated and will result in a response that 60 | is deemed necessary and appropriate to the circumstances. The project team is 61 | obligated to maintain confidentiality with regard to the reporter of an incident. 62 | Further details of specific enforcement policies may be posted separately. 63 | 64 | Project maintainers who do not follow or enforce this code of conduct in good 65 | faith may face temporary or permanent repercussions as determined by other 66 | members of the project's leadership. 67 | 68 | ## Attribution 69 | 70 | This code of conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 72 | 73 | [homepage]: https://www.contributor-covenant.org 74 | 75 | For answers to common questions about this code of conduct, see 76 | https://www.contributor-covenant.org/faq -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dynatrace 2 | 3 | [Dynatrace](https://www.dynatrace.com/integrations/opentelemetry) supports native 4 | OpenTelemetry protocol (OTLP) ingest for traces, metrics and logs. 5 | All signals can be sent directly to Dynatrace via **OTLP protobuf over HTTP** 6 | using the built-in OTLP/HTTP Exporter available in the OpenTelemetry Python SDK. 7 | More information on configuring your Python applications to use the OTLP exporter can be found in the 8 | [Dynatrace documentation](https://www.dynatrace.com/support/help/shortlink/otel-wt-python). 9 | 10 | ## Dynatrace OpenTelemetry Metrics Exporter for Python 11 | ![Static Badge](https://img.shields.io/badge/status-deprecated-orange) 12 | 13 | > **Warning** 14 | > Dynatrace supports native OpenTelemetry protocol (OTLP) ingest for traces, metrics and logs. 15 | > Therefore, the proprietary Dynatrace OpenTelemetry metrics exporter is deprecated in favor of exporting via OTLP/HTTP. 
16 | > 17 | > The exporter is still available but after the end of 2023, no support, updates, or compatibility with newer OTel versions will be provided. 18 | > 19 | > Please refer to the [migration guide](https://www.dynatrace.com/support/help/shortlink/migrating-dynatrace-metrics-exporter-otlp-exporter#migrate-applications) for instructions on how to migrate to the OTLP HTTP exporter, as well as reasoning and benefits for this transition. 20 | > 21 | > For an example on how to configure the OTLP exporter in a Python application, check out the [Python integration walk-through](https://www.dynatrace.com/support/help/shortlink/otel-wt-python) page in the Dynatrace documentation. 22 | 23 | This exporter allows exporting metrics created using the [OpenTelemetry SDK for Python](https://github.com/open-telemetry/opentelemetry-python) 24 | directly to [Dynatrace](https://www.dynatrace.com). 25 | 26 | **It was built against OpenTelemetry SDK version `1.12.0` and should work with any `1.12+` version.** 27 | 28 | More information on exporting OpenTelemetry metrics to Dynatrace can be found in the 29 | [Dynatrace documentation](https://www.dynatrace.com/support/help/shortlink/opentelemetry-metrics). 30 | 31 | ### Getting started 32 | 33 | #### Installation 34 | 35 | To install the [latest version from PyPI](https://pypi.org/project/opentelemetry-exporter-dynatrace-metrics/) run: 36 | 37 | ```shell 38 | pip install opentelemetry-exporter-dynatrace-metrics 39 | ``` 40 | 41 | #### Usage 42 | 43 | ```python 44 | from opentelemetry import metrics 45 | from opentelemetry.sdk.metrics import MeterProvider 46 | from dynatrace.opentelemetry.metrics.export import ( 47 | configure_dynatrace_metrics_export 48 | ) 49 | 50 | 51 | # setup metrics export pipeline 52 | metrics.set_meter_provider(MeterProvider( 53 | # configure Exporter/MetricReader combination with a 5000ms export 54 | # interval, endpoint url and API token. 
55 | metric_readers=[ 56 | configure_dynatrace_metrics_export( 57 | export_interval_millis=5000, 58 | endpoint_url=endpoint_url, 59 | api_token=api_token) 60 | ])) 61 | 62 | # get a meter 63 | meter = metrics.get_meter(__name__) 64 | 65 | # create a counter instrument and provide the first data point 66 | counter = meter.create_counter( 67 | name="my_counter", 68 | description="Description of MyCounter", 69 | unit="1" 70 | ) 71 | 72 | counter.add(25, {"dimension-1": "value-1"}) 73 | ``` 74 | 75 | #### Example 76 | 77 | To run the [example](example/basic_example.py), clone this repository and change to the `opentelemetry-metric-python` folder, then run: 78 | 79 | ```shell 80 | pip install . # install the Dynatrace exporter 81 | pip install psutil # this package is used by the example to read CPU/Memory usage 82 | export LOGLEVEL=DEBUG # (optional) Set the log level to debug to see more output (default is INFO) 83 | python example/basic_example.py 84 | ``` 85 | 86 | A more complete setup routine can be found [here](example/install_and_run.sh), including installing inside a virtual environment and getting required packages. 87 | If you just want to see it in action, it should be sufficient to run [`example/install_and_run.sh`](example/install_and_run.sh) from the root folder. 88 | This script will install Python, set up a virtual environment, pull in all the required packages and run the [example](example/basic_example.py). 89 | 90 | The example also offers a simple CLI. Run `python example/basic_example.py -h` to get more information. 91 | 92 | #### Configuration 93 | 94 | The exporter allows for configuring the following settings by passing them to the constructor: 95 | 96 | ##### Dynatrace API Endpoint 97 | 98 | The endpoint to which the metrics are sent is specified using the `endpoint_url` parameter. 
99 | 100 | Given an environment ID `myenv123` on Dynatrace SaaS, the [metrics ingest endpoint](https://www.dynatrace.com/support/help/dynatrace-api/environment-api/metric-v2/post-ingest-metrics/) would be `https://myenv123.live.dynatrace.com/api/v2/metrics/ingest`. 101 | 102 | If a OneAgent is installed on the host, it can provide a local endpoint for providing metrics directly without the need for an API token. 103 | This feature is currently in an Early Adopter phase and has to be enabled as described in the [OneAgent metric API documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/local-api/). 104 | Using the local API endpoint, the host ID and host name context are automatically added to each metric as dimensions. 105 | The default metric API endpoint exposed by the OneAgent is `http://localhost:14499/metrics/ingest`. 106 | If no endpoint is set and a OneAgent is running on the host, metrics will be exported to it automatically using the OneAgent with no endpoint or API token configuration required. 107 | 108 | ##### Dynatrace API Token 109 | 110 | The Dynatrace API token to be used by the exporter is specified using the `api_token` parameter and could, for example, be read from an environment variable. 111 | 112 | Creating an API token for your Dynatrace environment is described in the [Dynatrace API documentation](https://www.dynatrace.com/support/help/dynatrace-api/basics/dynatrace-api-authentication/). 113 | The permission required for sending metrics is `Ingest metrics` (`metrics.ingest`) and it is recommended to limit scope to only this permission. 114 | 115 | ##### Metric Key Prefix 116 | 117 | The `prefix` parameter specifies an optional prefix, which is prepended to each metric key, separated by a dot (`.`). 
118 | 119 | ##### Default Dimensions 120 | 121 | The `default_dimensions` parameter can be used to optionally specify a list of key/value pairs, which will be added as additional dimensions to all data points. 122 | Dimension keys are unique, and labels on instruments will overwrite the default dimensions if key collisions appear. 123 | 124 | ##### Export Dynatrace Metadata 125 | 126 | If running on a host with a running OneAgent, setting the `export_dynatrace_metadata` option to `True` will export metadata collected by the OneAgent to the Dynatrace endpoint. 127 | If no Dynatrace API endpoint is set, the default exporter endpoint will be the OneAgent endpoint, and this option will be set automatically. 128 | Therefore, if no endpoint is specified, a OneAgent is assumed to be running and used as the export endpoint for all metric lines, including metadata. 129 | More information on the underlying Dynatrace metadata feature that is used by the exporter can be found in the 130 | [Dynatrace documentation](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/ingestion-methods/enrich-metrics/). 131 | 132 | ###### Dimensions precedence 133 | 134 | When specifying default dimensions, attributes and Dynatrace metadata enrichment, the precedence of dimensions with the same key is as follows: 135 | Default dimensions are overwritten by attributes passed to instruments, which in turn are overwritten by the Dynatrace metadata dimensions (even though the likeliness of a collision here is very low, since the Dynatrace metadata only contains [Dynatrace reserved dimensions](https://www.dynatrace.com/support/help/how-to-use-dynatrace/metrics/metric-ingestion/metric-ingestion-protocol/#syntax) starting with `dt.*`). 136 | 137 | ### Development 138 | 139 | #### Requirements 140 | 141 | Just [`tox`](https://pypi.org/project/tox/). Make sure to `pip install` the `requirements-dev.txt` to get the relevant packages. 
142 | 143 | #### Running tests and lint 144 | 145 | * Test all supported python versions: `tox` 146 | * Test all supported python versions in parallel: `tox -p` 147 | * A particular python version: `tox -e 38` 148 | * Current python version: `tox -e py` 149 | * Lint: `tox -e lint` 150 | 151 | ### Limitations 152 | 153 | #### Typed attributes support 154 | 155 | The OpenTelemetry Metrics API for Python supports the concept 156 | of [Attributes]( https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes). 157 | These attributes consist of key-value pairs, where the keys are strings and the 158 | values are either primitive types or arrays of uniform primitive types. 159 | 160 | At the moment, this exporter **only supports attributes with string key and 161 | value type**. 162 | This means that if attributes of any other type are used, they will be 163 | **ignored** and **only** the string-valued attributes are going to be sent to 164 | Dynatrace. 165 | 166 | #### Histogram 167 | 168 | OpenTelemetry Histograms are exported to Dynatrace as statistical summaries 169 | consisting of a minimum and maximum value, the total sum of all values, and the 170 | count of the values summarized. If the min and max values are not directly 171 | available on the metric data point, estimations based on the boundaries of the 172 | first and last buckets containing values are used. 173 | -------------------------------------------------------------------------------- /example/basic_example.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import argparse 16 | import logging 17 | import os 18 | import random 19 | import time 20 | from os.path import splitext, basename 21 | 22 | import opentelemetry.metrics as metrics 23 | import psutil 24 | from opentelemetry.metrics import Observation, CallbackOptions 25 | from opentelemetry.sdk.metrics import MeterProvider 26 | 27 | from dynatrace.opentelemetry.metrics.export import ( 28 | configure_dynatrace_metrics_export 29 | ) 30 | 31 | cpu_gauge = None 32 | ram_gauge = None 33 | 34 | 35 | # Callback to gather cpu usage 36 | def get_cpu_usage_callback(_: CallbackOptions): 37 | for (number, percent) in enumerate(psutil.cpu_percent(percpu=True)): 38 | attributes = {"cpu_number": str(number)} 39 | yield Observation(percent, attributes) 40 | 41 | 42 | # Callback to gather RAM memory usage 43 | def get_ram_usage_callback(_: CallbackOptions): 44 | ram_percent = psutil.virtual_memory().percent 45 | yield Observation(ram_percent) 46 | 47 | 48 | def parse_arguments(): 49 | parser = argparse.ArgumentParser( 50 | description="Example exporting metrics using the Dynatrace metrics " 51 | "exporter.", 52 | epilog="The script can be run without any arguments. In that case, the" 53 | " local OneAgent is used as an endpoint, if it is installed.") 54 | parser.add_argument("-e", "--endpoint", default=None, type=str, 55 | dest="endpoint", 56 | help="The endpoint url used to export metrics to. " 57 | "This can be either a Dynatrace metrics " 58 | "ingestion endpoint, or a local OneAgent " 59 | "endpoint. 
If no value is set, the default " 60 | "local OneAgent endpoint is used.") 61 | 62 | parser.add_argument("-t", "--token", default=None, type=str, dest="token", 63 | help="API Token generated in the Dynatrace UI. Needs " 64 | "to have metrics ingestion enabled in order to " 65 | "work correctly. Can be omitted when exporting " 66 | "to the local OneAgent.") 67 | 68 | parser.add_argument("-nm", "--no-metadata", dest="metadata_enrichment", 69 | action="store_false", 70 | help="Turn off Dynatrace Metadata enrichment. If no " 71 | "OneAgent is running on " 72 | "the host, this is ignored. Otherwise, Dynatrace " 73 | "metadata will be added to each of the exported " 74 | "metric lines.") 75 | 76 | parser.add_argument("-i", "--interval", default=10., type=float, 77 | dest="interval", 78 | help="Set the export interval in seconds for the " 79 | "Dynatrace metrics exporter. This specifies how " 80 | "often data is exported to Dynatrace. We suggest " 81 | "using export intervals of 10 to 60 seconds. The " 82 | "default interval is 10 seconds.") 83 | 84 | parser.set_defaults(metadata_enrichment=True) 85 | return parser.parse_args() 86 | 87 | 88 | if __name__ == '__main__': 89 | args = parse_arguments() 90 | 91 | script_name = splitext(basename(__file__))[0] 92 | 93 | # try to read the log level from the environment variable "LOGLEVEL" and 94 | # setting it to "INFO" if not found. 
95 | # Valid levels are: DEBUG, INFO, WARN/WARNING, ERROR, CRITICAL/FATAL 96 | loglevel = os.environ.get("LOGLEVEL", "INFO").upper() 97 | logging.basicConfig(level=loglevel) 98 | logger = logging.getLogger(script_name) 99 | 100 | if not args.endpoint: 101 | logger.info( 102 | "No Dynatrace endpoint specified, exporting to default local " 103 | "OneAgent endpoint.") 104 | 105 | # set up OpenTelemetry for export: 106 | logger.debug("setting up global OpenTelemetry configuration.") 107 | # This call sets up the MeterProvider with a 108 | # PeriodicExportingMetricReader that exports every 5000ms and the 109 | # Dynatrace exporter exporting to args.endpoint with args.token 110 | metrics.set_meter_provider(MeterProvider( 111 | metric_readers=[configure_dynatrace_metrics_export( 112 | export_interval_millis=5000, 113 | endpoint_url=args.endpoint, 114 | api_token=args.token, 115 | prefix="otel.python", 116 | export_dynatrace_metadata=args.metadata_enrichment, 117 | default_dimensions={"default1": "defval1"}) 118 | ])) 119 | 120 | meter = metrics.get_meter(script_name) 121 | 122 | logger.info("creating instruments to record metrics data") 123 | requests_counter = meter.create_counter( 124 | name="requests", 125 | description="number of requests", 126 | unit="1" 127 | ) 128 | 129 | requests_size = meter.create_histogram( 130 | name="request_size_bytes", 131 | description="size of requests", 132 | unit="byte" 133 | ) 134 | 135 | cpu_gauge = meter.create_observable_gauge( 136 | callbacks=[get_cpu_usage_callback], 137 | name="cpu_percent", 138 | description="per-cpu usage", 139 | unit="1" 140 | ) 141 | 142 | ram_gauge = meter.create_observable_gauge( 143 | callbacks=[get_ram_usage_callback], 144 | name="ram_percent", 145 | description="RAM memory usage", 146 | unit="1", 147 | ) 148 | 149 | # Attributes are used to identify key-values that are associated with a 150 | # specific metric that you want to record. 
These are useful for 151 | # pre-aggregation and can be used to store custom dimensions pertaining 152 | # to a metric 153 | staging_attributes = {"environment": "staging"} 154 | testing_attributes = {"environment": "testing"} 155 | 156 | logger.info("starting instrumented application...") 157 | try: 158 | while True: 159 | # Update the metric instruments using the direct calling convention 160 | requests_counter.add(random.randint(0, 25), staging_attributes) 161 | requests_size.record(random.randint(0, 300), staging_attributes) 162 | 163 | requests_counter.add(random.randint(0, 35), testing_attributes) 164 | requests_size.record(random.randint(0, 100), testing_attributes) 165 | time.sleep(5) 166 | 167 | except KeyboardInterrupt: 168 | logger.info("shutting down...") 169 | -------------------------------------------------------------------------------- /example/install_and_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check if running as root. Helpful in environments where you are root but sudo is not installed. 4 | if [ $EUID -eq 0 ]; then 5 | apt-get update 6 | apt-get install -y python3 python3-pip python3-venv 7 | else 8 | sudo apt-get update 9 | sudo apt-get install -y python3 python3-pip python3-venv 10 | fi 11 | 12 | # change into the opentelemetry-metric-python folder if you haven't already 13 | python3 -m venv .venv `# create a new virtual environment in the current folder` 14 | 15 | source .venv/bin/activate 16 | pip3 install --upgrade setuptools `# make sure setuptools and wheel are on the latest version` 17 | pip3 install psutil `# for observing cpu and ram` 18 | pip3 install . 
`# install the library itself` 19 | # Valid log levels are: DEBUG, INFO, WARN/WARNING, ERROR, CRITICAL/FATAL 20 | export LOGLEVEL=DEBUG `# set the log level` 21 | python3 example/basic_example.py `# run the example in a venv` 22 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | tox 2 | pytest~=6.2.5 3 | parameterized~=0.8.1 -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | [metadata] 16 | name = opentelemetry-exporter-dynatrace-metrics 17 | version = attr: src.dynatrace.opentelemetry.metrics.export.VERSION 18 | description = Dynatrace Metric Exporter for OpenTelemetry 19 | long_description = file: README.md 20 | long_description_content_type = text/markdown; charset=UTF-8; variant=GFM 21 | author = Dynatrace 22 | author_email = opensource@dynatrace.com 23 | url = https://github.com/dynatrace-oss/opentelemetry-metric-python/ 24 | project_urls = 25 | Dynatrace = https://www.dynatrace.com/ 26 | Source = https://github.com/dynatrace-oss/opentelemetry-metric-python/ 27 | Download = https://pypi.org/project/opentelemetry-exporter-dynatrace-metrics/#files 28 | license = Apache-2.0 29 | platforms = any 30 | classifiers = 31 | Development Status :: 7 - Inactive 32 | Intended Audience :: Developers 33 | License :: OSI Approved :: Apache Software License 34 | Operating System :: OS Independent 35 | Programming Language :: Python 36 | Programming Language :: Python :: 3 37 | Programming Language :: Python :: 3.7 38 | Programming Language :: Python :: 3.8 39 | Programming Language :: Python :: 3.9 40 | Programming Language :: Python :: 3.10 41 | Programming Language :: Python :: Implementation :: CPython 42 | Programming Language :: Python :: Implementation :: PyPy 43 | Topic :: System :: Monitoring 44 | 45 | [options] 46 | python_requires = >=3.7 47 | package_dir= 48 | =src 49 | packages=find_namespace: 50 | install_requires= 51 | opentelemetry-api~=1.12 52 | opentelemetry-sdk~=1.12 53 | requests~=2.25 54 | dynatrace-metric-utils~=0.2.1 55 | 56 | [tool:pytest] 57 | testpaths = test 58 | 59 | [options.packages.find] 60 | where = src 61 | 62 | [options.entry_points] 63 | opentelemetry_exporter = 64 | dynatrace = dynatrace.opentelemetry.metrics.export:_DynatraceMetricsExporter 65 | -------------------------------------------------------------------------------- /setup.py:
-------------------------------------------------------------------------------- 1 | # Copyright 2020 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | import setuptools 9 | 10 | setuptools.setup() 11 | -------------------------------------------------------------------------------- /src/dynatrace/opentelemetry/metrics/export/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | from typing import Mapping, Optional 16 | 17 | from dynatrace.opentelemetry.metrics.export._exporter import ( 18 | _DynatraceMetricsExporter, 19 | ) 20 | from opentelemetry.sdk.metrics.export import ( 21 | PeriodicExportingMetricReader, 22 | MetricReader, 23 | ) 24 | 25 | VERSION = "1.0.3" 26 | 27 | 28 | def configure_dynatrace_metrics_export( 29 | endpoint_url: Optional[str] = None, 30 | api_token: Optional[str] = None, 31 | prefix: Optional[str] = None, 32 | default_dimensions: Optional[Mapping[str, str]] = None, 33 | export_dynatrace_metadata: Optional[bool] = False, 34 | export_interval_millis: Optional[float] = None 35 | ) -> MetricReader: 36 | """ 37 | Configures and creates a PeriodicExportingMetricReader and 38 | DynatraceMetricsExporter combination. 39 | 40 | Parameters 41 | ---------- 42 | endpoint_url: str, Optional 43 | The endpoint to send metrics to. Given an environment ID `myenv123` on 44 | Dynatrace SaaS, the endpoint_url would be 45 | `https://myenv123.live.dynatrace.com/api/v2/metrics/ingest`. 46 | (default: local OneAgent Endpoint) 47 | api_token: str, Optional 48 | The API token for your Dynatrace environment with at least the scope 49 | `metrics.ingest`. 50 | (default: no API token). 51 | prefix: str, Optional 52 | Will be prepended to each metric key, separated by a dot 53 | (`.`). 54 | (default: no prefix) 55 | default_dimensions: Mapping[str, str], Optional 56 | Static dimensions to add to every metric. Dimension keys need 57 | to be unique, attributes on instruments will overwrite the default 58 | dimensions if key collisions appear. 59 | (default: empty) 60 | export_dynatrace_metadata: bool, Optional 61 | If running on a host with a running OneAgent, 62 | setting the `export_dynatrace_metadata` option to `True` will export 63 | metadata collected by the OneAgent to the Dynatrace endpoint. This 64 | option will default to `True` when `endpoint_url` is not set. 
65 | (default: `False`) 66 | export_interval_millis: float, Optional 67 | Time to wait between exports in milliseconds. 68 | (default: `60000`) 69 | Returns 70 | ------- 71 | PeriodicExportingMetricReader, configured with a Dynatrace metrics exporter 72 | according to this method's parameters. 73 | """ 74 | return PeriodicExportingMetricReader( 75 | export_interval_millis=export_interval_millis, 76 | exporter=_DynatraceMetricsExporter( 77 | endpoint_url=endpoint_url, 78 | api_token=api_token, 79 | prefix=prefix, 80 | default_dimensions=default_dimensions, 81 | export_dynatrace_metadata=export_dynatrace_metadata, 82 | ) 83 | ) 84 | -------------------------------------------------------------------------------- /src/dynatrace/opentelemetry/metrics/export/_exporter.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import logging 16 | from typing import Mapping, Optional 17 | 18 | import requests 19 | from dynatrace.metric.utils import ( 20 | DynatraceMetricsSerializer, 21 | MetricError, 22 | DynatraceMetricsApiConstants, 23 | ) 24 | from dynatrace.opentelemetry.metrics.export._factory import ( 25 | _OTelDynatraceMetricsFactory, 26 | ) 27 | from opentelemetry.sdk.metrics.export import ( 28 | MetricExporter, 29 | MetricExportResult, 30 | MetricsData, 31 | ) 32 | 33 | import opentelemetry.sdk.metrics as metrics 34 | from opentelemetry.sdk.metrics.export import ( 35 | AggregationTemporality, 36 | ) 37 | 38 | _DYNATRACE_TEMPORALITY_PREFERENCE = { 39 | metrics.Counter: AggregationTemporality.DELTA, 40 | metrics.UpDownCounter: AggregationTemporality.CUMULATIVE, 41 | metrics.Histogram: AggregationTemporality.DELTA, 42 | metrics.ObservableCounter: AggregationTemporality.DELTA, 43 | metrics.ObservableUpDownCounter: AggregationTemporality.CUMULATIVE, 44 | metrics.ObservableGauge: AggregationTemporality.CUMULATIVE, 45 | } 46 | 47 | 48 | class _DynatraceMetricsExporter(MetricExporter): 49 | """ 50 | A class which implements the OpenTelemetry MetricsExporter interface 51 | 52 | Methods 53 | ------- 54 | export(metric_records: MetricsData) 55 | """ 56 | 57 | def __init__( 58 | self, 59 | endpoint_url: Optional[str] = None, 60 | api_token: Optional[str] = None, 61 | prefix: Optional[str] = None, 62 | default_dimensions: Optional[Mapping[str, str]] = None, 63 | export_dynatrace_metadata: Optional[bool] = False, 64 | ): 65 | super().__init__( 66 | preferred_temporality=_DYNATRACE_TEMPORALITY_PREFERENCE, 67 | preferred_aggregation=None 68 | ) 69 | self.__logger = logging.getLogger(__name__) 70 | 71 | if endpoint_url: 72 | self._endpoint_url = endpoint_url 73 | else: 74 | self.__logger.info("No Dynatrace endpoint specified, exporting " 75 | "to default local OneAgent ingest endpoint.") 76 | self._endpoint_url = "http://localhost:14499/metrics/ingest" 77 | 78 | self._metric_factory = 
_OTelDynatraceMetricsFactory() 79 | self._serializer = DynatraceMetricsSerializer( 80 | self.__logger.getChild(DynatraceMetricsSerializer.__name__), 81 | prefix, 82 | default_dimensions, 83 | export_dynatrace_metadata, 84 | "opentelemetry") 85 | 86 | self._session = requests.Session() 87 | self._headers = { 88 | "Accept": "*/*; q=0", 89 | "Content-Type": "text/plain; charset=utf-8", 90 | "User-Agent": "opentelemetry-metric-python", 91 | } 92 | if api_token: 93 | if not endpoint_url: 94 | self.__logger.warning("Just API token but no endpoint passed. " 95 | "Skipping token authentication for local" 96 | " OneAgent endpoint") 97 | else: 98 | self._headers["Authorization"] = "Api-Token " + api_token 99 | 100 | def export(self, 101 | metrics_data: MetricsData, 102 | **kwargs) -> MetricExportResult: 103 | """ 104 | Export Metrics to Dynatrace 105 | 106 | Parameters 107 | ---------- 108 | metrics_data : MetricsData, required 109 | The Metrics to be exported 110 | 111 | Returns 112 | ------- 113 | MetricExportResult 114 | Indicates SUCCESS (all metrics exported successfully) 115 | or FAILURE (otherwise) 116 | """ 117 | if len(metrics_data.resource_metrics) == 0: 118 | return MetricExportResult.SUCCESS 119 | 120 | string_buffer = [] 121 | for resource_metric in metrics_data.resource_metrics: 122 | for scope_metric in resource_metric.scope_metrics: 123 | for metric in scope_metric.metrics: 124 | for data_point in metric.data.data_points: 125 | dt_metric = self._metric_factory.create_metric( 126 | metric, 127 | data_point) 128 | if dt_metric is None: 129 | continue 130 | try: 131 | string_buffer.append( 132 | self._serializer.serialize(dt_metric)) 133 | except MetricError as ex: 134 | self.__logger.warning( 135 | "Failed to serialize metric. 
Skipping: %s", ex) 136 | try: 137 | self._send_lines(string_buffer) 138 | except Exception as ex: 139 | self.__logger.warning( 140 | "Failed to export metrics: %s", ex) 141 | return MetricExportResult.FAILURE 142 | return MetricExportResult.SUCCESS 143 | 144 | def force_flush(self, timeout_millis: float = 10_000) -> bool: 145 | # nothing to do. 146 | pass 147 | 148 | def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: 149 | # nothing to do. 150 | pass 151 | 152 | def _send_lines(self, metric_lines): 153 | # split all metrics into batches of 154 | # DynatraceMetricApiConstants.PayloadLinesLimit lines 155 | chunk_size = DynatraceMetricsApiConstants.payload_lines_limit() 156 | 157 | for index in range(0, len(metric_lines), chunk_size): 158 | metric_lines_chunk = metric_lines[index:index + chunk_size] 159 | serialized_records = "\n".join(metric_lines_chunk) 160 | self.__logger.debug( 161 | "sending lines:\n" + serialized_records) 162 | with self._session.post(self._endpoint_url, 163 | data=serialized_records, 164 | headers=self._headers) as resp: 165 | resp.raise_for_status() 166 | self.__logger.debug( 167 | "got response: {}".format( 168 | resp.content.decode("utf-8"))) 169 | -------------------------------------------------------------------------------- /src/dynatrace/opentelemetry/metrics/export/_factory.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import logging 16 | from typing import Optional, Mapping 17 | 18 | from dynatrace.metric.utils import ( 19 | DynatraceMetricsFactory, 20 | MetricError 21 | ) 22 | from opentelemetry.sdk.metrics.export import ( 23 | Sum, 24 | AggregationTemporality, 25 | Gauge, 26 | Histogram, 27 | DataPointT, 28 | NumberDataPoint, 29 | Metric, 30 | HistogramDataPoint 31 | ) 32 | 33 | from dynatrace.opentelemetry.metrics.export._histogram_utils import ( 34 | _get_histogram_min, 35 | _get_histogram_max 36 | ) 37 | 38 | 39 | class _OTelDynatraceMetricsFactory: 40 | """ 41 | A factory that converts OpenTelemetry metric data points into Dynatrace metrics 42 | 43 | Methods 44 | ------- 45 | create_metric(metric: Metric, point: DataPointT) 46 | """ 47 | 48 | def __init__( 49 | self, 50 | ): 51 | self.__logger = logging.getLogger(__name__) 52 | self._metric_factory = DynatraceMetricsFactory() 53 | 54 | def create_metric(self, metric: Metric, point: DataPointT): 55 | try: 56 | if isinstance(metric.data, Sum): 57 | return self._sum_to_dynatrace_metric(metric, point) 58 | if isinstance(metric.data, Histogram): 59 | return self._histogram_to_dynatrace_metric(metric, point) 60 | if isinstance(metric.data, Gauge): 61 | # gauge does not support or require temporality.
62 | return self._to_dynatrace_gauge(metric, point) 63 | 64 | self.__logger.warning("Failed to create a Dynatrace metric, " 65 | "unsupported metric point type: %s", 66 | type(metric.data).__name__) 67 | 68 | except MetricError as ex: 69 | self.__logger.warning("Failed to create the Dynatrace metric: %s", 70 | ex) 71 | return None 72 | 73 | def _sum_to_dynatrace_metric(self, metric: Metric, point: NumberDataPoint): 74 | if metric.data.is_monotonic: 75 | if metric.data.aggregation_temporality != \ 76 | AggregationTemporality.DELTA: 77 | self._log_temporality_mismatch( 78 | "monotonic Sum", 79 | metric, 80 | supported_temporality=AggregationTemporality.DELTA) 81 | return None 82 | return self._to_dynatrace_counter(metric, point) 83 | else: 84 | if metric.data.aggregation_temporality != \ 85 | AggregationTemporality.CUMULATIVE: 86 | self._log_temporality_mismatch( 87 | "non-monotonic Sum", 88 | metric, 89 | supported_temporality=AggregationTemporality.CUMULATIVE) 90 | return None 91 | return self._to_dynatrace_gauge(metric, point) 92 | 93 | def _to_dynatrace_counter(self, metric: Metric, 94 | point: NumberDataPoint): 95 | if isinstance(point.value, float): 96 | return self._metric_factory.create_float_counter_delta( 97 | metric.name, 98 | point.value, 99 | self._filter_dimensions(point.attributes), 100 | int(point.time_unix_nano / 1000000)) 101 | if isinstance(point.value, int): 102 | return self._metric_factory.create_int_counter_delta( 103 | metric.name, 104 | point.value, 105 | self._filter_dimensions(point.attributes), 106 | int(point.time_unix_nano / 1000000)) 107 | 108 | def _to_dynatrace_gauge(self, metric: Metric, 109 | point: NumberDataPoint): 110 | if isinstance(point.value, float): 111 | return self._metric_factory.create_float_gauge( 112 | metric.name, 113 | point.value, 114 | self._filter_dimensions(point.attributes), 115 | int(point.time_unix_nano / 1000000)) 116 | if isinstance(point.value, int): 117 | return self._metric_factory.create_int_gauge( 118 | 
metric.name, 119 | point.value, 120 | self._filter_dimensions(point.attributes), 121 | int(point.time_unix_nano / 1000000)) 122 | 123 | def _histogram_to_dynatrace_metric(self, metric: Metric, 124 | point: HistogramDataPoint): 125 | # only allow AggregationTemporality.DELTA 126 | if metric.data.aggregation_temporality != AggregationTemporality.DELTA: 127 | self._log_temporality_mismatch( 128 | "Histogram", 129 | metric, 130 | supported_temporality=AggregationTemporality.DELTA) 131 | return None 132 | 133 | return self._metric_factory.create_float_summary( 134 | metric.name, 135 | _get_histogram_min(point), 136 | _get_histogram_max(point), 137 | point.sum, 138 | sum(point.bucket_counts), 139 | self._filter_dimensions(point.attributes), 140 | int(point.time_unix_nano / 1000000)) 141 | 142 | def _log_temporality_mismatch( 143 | self, 144 | metric_type: 145 | str, metric: Metric, 146 | supported_temporality: AggregationTemporality): 147 | self.__logger.warning("Failed to create Dynatrace metric: " 148 | "exporter received %s '%s' with " 149 | "AggregationTemporality.%s, but only " 150 | "AggregationTemporality.%s is currently " 151 | "supported.", 152 | metric_type, 153 | metric.name, 154 | metric.data.aggregation_temporality.name, 155 | supported_temporality.name) 156 | 157 | def _filter_dimensions( 158 | self, 159 | attributes: Optional[Mapping]) -> Optional[Mapping[str, str]]: 160 | 161 | if not attributes: 162 | return attributes 163 | 164 | return dict( 165 | filter(lambda attr: 166 | self._is_valid_dimension_key_type(attr[0]) 167 | and self._is_valid_dimension_value_type(attr[1]), 168 | attributes.items())) 169 | 170 | def _is_valid_dimension_value_type(self, value) -> bool: 171 | if isinstance(value, str): 172 | return True 173 | 174 | self.__logger.warning( 175 | "Skipping unsupported dimension with value type '%s'", 176 | type(value).__name__) 177 | return False 178 | 179 | def _is_valid_dimension_key_type(self, key) -> bool: 180 | if isinstance(key, str): 181 
| return True 182 | 183 | self.__logger.warning( 184 | "Skipping unsupported dimension key value type '%s'", 185 | type(key).__name__) 186 | return False 187 | -------------------------------------------------------------------------------- /src/dynatrace/opentelemetry/metrics/export/_histogram_utils.py: -------------------------------------------------------------------------------- 1 | # Copyright 2022 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | import math 16 | 17 | from opentelemetry.sdk.metrics.export import ( 18 | HistogramDataPoint 19 | ) 20 | 21 | 22 | def _get_histogram_max(histogram: HistogramDataPoint): 23 | if histogram.max is not None and math.isfinite(histogram.max): 24 | return histogram.max 25 | 26 | if len(histogram.bucket_counts) == 1: 27 | # In this case, only one bucket exists: (-Inf, Inf). If there were 28 | # any boundaries, there would be more counts. 29 | if histogram.bucket_counts[0] > 0: 30 | # in case the single bucket contains something, use the mean as 31 | # max. 32 | return histogram.sum / histogram.count 33 | # otherwise the histogram has no data. Use the sum as the min and 34 | # max, respectively. 
35 | return histogram.sum 36 | 37 | # loop over bucket_counts in reverse 38 | last_element_index = len(histogram.bucket_counts) - 1 39 | for index in range(last_element_index, -1, -1): 40 | if histogram.bucket_counts[index] > 0: 41 | if index == last_element_index: 42 | # use the last bound in the bounds array. This can only be the 43 | # case if there is a count > 0 in the last bucket (lastBound, 44 | # Inf). In some cases, the mean of the histogram is larger than 45 | # this bound, thus use the maximum of the estimated bound and 46 | # the mean. 47 | return max(histogram.explicit_bounds[index - 1], 48 | histogram.sum / histogram.count) 49 | # In any other bucket (lowerBound, upperBound], use the upperBound. 50 | return histogram.explicit_bounds[index] 51 | 52 | # there are no counts > 0, so calculating a mean would result in a 53 | # division by 0. By returning the sum, we can let the backend decide what 54 | # to do with the value (with a count of 0) 55 | return histogram.sum 56 | 57 | 58 | def _get_histogram_min(histogram: HistogramDataPoint): 59 | if histogram.min is not None and math.isfinite(histogram.min): 60 | return histogram.min 61 | 62 | if len(histogram.bucket_counts) == 1: 63 | # In this case, only one bucket exists: (-Inf, Inf). If there were 64 | # any boundaries, there would be more counts. 65 | if histogram.bucket_counts[0] > 0: 66 | # in case the single bucket contains something, use the mean as 67 | # min. 68 | return histogram.sum / histogram.count 69 | # otherwise the histogram has no data. Use the sum as the min and 70 | # max, respectively. 71 | return histogram.sum 72 | 73 | # iterate all buckets to find the first bucket with count > 0 74 | for index in range(0, len(histogram.bucket_counts)): 75 | # the current bucket contains something. 76 | if histogram.bucket_counts[index] > 0: 77 | if index == 0: 78 | # In the first bucket, (-Inf, firstBound], use firstBound 79 | # (this is the lowest specified bound overall). 
This is not 80 | # quite correct but the best approximation we can get at 81 | # this point. However, this might lead to a min bigger than 82 | # the mean, thus choose the minimum of the following: 83 | # - The lowest boundary 84 | # - The histogram's average (histogram sum / sum of counts) 85 | return min(histogram.explicit_bounds[index], 86 | histogram.sum / histogram.count) 87 | # In all other buckets (lowerBound, upperBound] use the 88 | # lowerBound to estimate min. 89 | return histogram.explicit_bounds[index - 1] 90 | 91 | # there are no counts > 0, so calculating a mean would result in a 92 | # division by 0. By returning the sum, we can let the backend decide what 93 | # to do with the value (with a count of 0) 94 | return histogram.sum 95 | -------------------------------------------------------------------------------- /test/test_exporter.py: -------------------------------------------------------------------------------- 1 | # Copyright 2020 Dynatrace LLC 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | import math 16 | import re 17 | import unittest 18 | from typing import Sequence, Union 19 | from unittest import mock 20 | from unittest.mock import patch 21 | 22 | import requests 23 | from dynatrace.opentelemetry.metrics.export import ( 24 | _DynatraceMetricsExporter, 25 | configure_dynatrace_metrics_export, 26 | ) 27 | from opentelemetry.sdk.metrics import MeterProvider 28 | from opentelemetry.sdk.metrics.export import ( 29 | Metric, 30 | MetricExportResult, 31 | PeriodicExportingMetricReader, 32 | Gauge, 33 | Sum, 34 | AggregationTemporality, 35 | Histogram, MetricsData, 36 | NumberDataPoint, 37 | DataT, 38 | ResourceMetrics, 39 | ScopeMetrics, 40 | HistogramDataPoint, 41 | ) 42 | from opentelemetry.sdk.metrics.view import View 43 | from opentelemetry.sdk.resources import Resource 44 | from opentelemetry.sdk.util.instrumentation import InstrumentationScope 45 | from parameterized import parameterized 46 | 47 | 48 | class AnyStringMatching(str): 49 | def __eq__(self, other): 50 | return re.match(str(self), other) 51 | 52 | 53 | class TestExporter(unittest.TestCase): 54 | 55 | def setUp(self) -> None: 56 | self._instrument_name = "my.instr" 57 | self._attributes = { 58 | "l1": "v1", 59 | "l2": "v2" 60 | } 61 | self._headers = { 62 | "Accept": "*/*; q=0", 63 | "Content-Type": "text/plain; charset=utf-8", 64 | "User-Agent": "opentelemetry-metric-python", 65 | } 66 | # 01/01/2021 00:00:00 67 | self._test_timestamp_nanos = 1609455600000000000 68 | self._test_timestamp_millis = int(self._test_timestamp_nanos / 1000000) 69 | 70 | self._ingest_endpoint = "http://localhost:14499/metrics/ingest" 71 | 72 | @patch.object(requests.Session, 'post') 73 | def test_empty_records(self, mock_post): 74 | mock_post.return_value = self._get_session_response() 75 | 76 | exporter = _DynatraceMetricsExporter() 77 | metrics_data = MetricsData(resource_metrics=[]) 78 | result = exporter.export(metrics_data) 79 | self.assertEqual(MetricExportResult.SUCCESS, result) 80 | 81 | 
mock_post.assert_not_called() 82 | 83 | @patch.object(requests.Session, 'post') 84 | def test_all_optional(self, mock_post): 85 | mock_post.return_value = self._get_session_response() 86 | 87 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 88 | 89 | exporter = _DynatraceMetricsExporter() 90 | result = exporter.export(metrics_data) 91 | 92 | self.assertEqual(MetricExportResult.SUCCESS, result) 93 | mock_post.assert_called_once_with( 94 | self._ingest_endpoint, 95 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 " 96 | + str(self._test_timestamp_millis), 97 | headers=self._headers) 98 | 99 | @patch.object(requests.Session, 'post') 100 | def test_with_endpoint(self, mock_post): 101 | mock_post.return_value = self._get_session_response() 102 | 103 | endpoint = "https://abc1234.dynatrace.com/metrics/ingest" 104 | 105 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 106 | 107 | exporter = _DynatraceMetricsExporter(endpoint_url=endpoint) 108 | result = exporter.export(metrics_data) 109 | 110 | self.assertEqual(MetricExportResult.SUCCESS, result) 111 | mock_post.assert_called_once_with( 112 | endpoint, 113 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 " 114 | + str(self._test_timestamp_millis), 115 | headers=self._headers) 116 | 117 | @patch.object(requests.Session, 'post') 118 | def test_with_endpoint_and_token(self, mock_post): 119 | mock_post.return_value = self._get_session_response() 120 | 121 | endpoint = "https://abc1234.dynatrace.com/metrics/ingest" 122 | token = "my.secret.token" 123 | # add the token to the expected headers 124 | self._headers["Authorization"] = "Api-Token {}".format(token) 125 | 126 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 127 | 128 | exporter = _DynatraceMetricsExporter(endpoint_url=endpoint, 129 | api_token=token) 130 | result = exporter.export(metrics_data) 131 | 132 | self.assertEqual(MetricExportResult.SUCCESS, 
result) 133 | mock_post.assert_called_once_with( 134 | endpoint, 135 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 " 136 | + str(self._test_timestamp_millis), 137 | headers=self._headers) 138 | 139 | @patch.object(requests.Session, 'post') 140 | def test_with_only_token(self, mock_post): 141 | mock_post.return_value = self._get_session_response() 142 | 143 | # token is not added in the expected headers 144 | token = "my.secret.token" 145 | 146 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 147 | 148 | exporter = _DynatraceMetricsExporter(api_token=token) 149 | result = exporter.export(metrics_data) 150 | 151 | self.assertEqual(MetricExportResult.SUCCESS, result) 152 | mock_post.assert_called_once_with( 153 | self._ingest_endpoint, 154 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 " 155 | + str(self._test_timestamp_millis), 156 | headers=self._headers) 157 | 158 | @patch.object(requests.Session, 'post') 159 | def test_with_prefix(self, mock_post): 160 | mock_post.return_value = self._get_session_response() 161 | 162 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 163 | 164 | prefix = "test_prefix" 165 | exporter = _DynatraceMetricsExporter(prefix=prefix) 166 | result = exporter.export(metrics_data) 167 | 168 | self.assertEqual(MetricExportResult.SUCCESS, result) 169 | mock_post.assert_called_once_with( 170 | self._ingest_endpoint, 171 | data="{0}.my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 {1}" 172 | .format(prefix, self._test_timestamp_millis), 173 | headers=self._headers) 174 | 175 | @patch.object(requests.Session, 'post') 176 | def test_with_default_dimensions(self, mock_post): 177 | mock_post.return_value = self._get_session_response() 178 | 179 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 180 | 181 | dimensions = {"attribute1": "tv1", "attribute2": "tv2"} 182 | exporter = 
_DynatraceMetricsExporter(default_dimensions=dimensions) 183 | result = exporter.export(metrics_data) 184 | 185 | self.assertEqual(MetricExportResult.SUCCESS, result) 186 | mock_post.assert_called_once_with( 187 | self._ingest_endpoint, 188 | data="my.instr,attribute1=tv1,attribute2=tv2,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 " 189 | + str(self._test_timestamp_millis), 190 | headers=self._headers) 191 | 192 | @patch.object(requests.Session, 'post') 193 | @patch('dynatrace.metric.utils._dynatrace_metadata_enricher' 194 | '.DynatraceMetadataEnricher._get_metadata_file_content') 195 | def test_dynatrace_metadata_enrichment_with_default_attributes( 196 | self, mock_enricher, mock_post): 197 | mock_post.return_value = self._get_session_response() 198 | 199 | # attributes coming from the Dynatrace metadata enricher 200 | mock_enricher.return_value = [ 201 | "dt_mattribute1=value1", 202 | "dt_mattribute2=value2" 203 | ] 204 | 205 | default_attributes = {"attribute1": "tv1", "attribute2": "tv2"} 206 | 207 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 208 | 209 | exporter = _DynatraceMetricsExporter( 210 | default_dimensions=default_attributes, 211 | export_dynatrace_metadata=True) 212 | result = exporter.export(metrics_data) 213 | 214 | self.assertEqual(MetricExportResult.SUCCESS, result) 215 | mock_post.assert_called_once_with( 216 | self._ingest_endpoint, 217 | data="my.instr,attribute1=tv1,attribute2=tv2,l1=v1,l2=v2," 218 | "dt_mattribute1=value1,dt_mattribute2=value2," 219 | "dt.metrics.source=opentelemetry count,delta=10 " 220 | + str(self._test_timestamp_millis), 221 | headers=self._headers) 222 | 223 | @parameterized.expand([ 224 | ("",), 225 | (".",) 226 | ]) 227 | def test_invalid_metricname_skipped(self, instrument_name): 228 | with patch.object(requests.Session, 'post') as mock_post: 229 | mock_post.return_value = self._get_session_response() 230 | 231 | metrics = [] 232 | for n in range(4): 233 | data = 
self._create_sum(n) 234 | if n == 3: 235 | # create metrics with invalid name. 236 | metric = self._metric_from_data(data, 237 | instrument_name=instrument_name) 238 | metrics.append(metric) 239 | else: 240 | metrics.append(self._metric_from_data(data)) 241 | 242 | expected = "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=0 {0}\n" \ 243 | "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=1 {0}\n" \ 244 | "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=2 {0}" \ 245 | .format(self._test_timestamp_millis) 246 | 247 | exporter = _DynatraceMetricsExporter() 248 | result = exporter.export( 249 | self._metrics_data_from_metrics(metrics)) 250 | 251 | self.assertEqual(MetricExportResult.SUCCESS, result) 252 | 253 | mock_post.assert_any_call( 254 | self._ingest_endpoint, 255 | data=expected, 256 | headers=self._headers) 257 | 258 | @patch.object(requests.Session, 'post') 259 | @patch('dynatrace.metric.utils.dynatrace_metrics_api_constants' 260 | '.DynatraceMetricsApiConstants.payload_lines_limit') 261 | def test_batching(self, mock_const, mock_post): 262 | mock_post.return_value = self._get_session_response() 263 | mock_const.return_value = 1 264 | 265 | metrics = [self._create_sum(n) for n in range(2)] 266 | 267 | first_expected = "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=0 {0}" \ 268 | .format(self._test_timestamp_millis) 269 | second_expected = "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=1 {0}" \ 270 | .format(self._test_timestamp_millis) 271 | 272 | exporter = _DynatraceMetricsExporter() 273 | result = exporter.export( 274 | self._metrics_data_from_data(metrics)) 275 | 276 | # with payload_lines_limit mocked to 1, the two metrics are split into two POST requests, both of which succeed 277 | self.assertEqual(MetricExportResult.SUCCESS, result) 278 | 279 | mock_post.assert_any_call( 280 | self._ingest_endpoint, 281 | data=first_expected, 282 | headers=self._headers) 283 | 284 | mock_post.assert_any_call( 285 |
self._ingest_endpoint, 286 | data=second_expected, 287 | headers=self._headers) 288 | 289 | @patch.object(requests.Session, 'post') 290 | @patch('dynatrace.metric.utils.dynatrace_metrics_api_constants' 291 | '.DynatraceMetricsApiConstants.payload_lines_limit') 292 | def test_entire_batch_fail(self, mock_const, mock_post): 293 | mock_post.side_effect = [self._get_session_response(), 294 | self._get_session_response(error=True)] 295 | mock_const.return_value = 2 296 | 297 | metrics = [self._create_sum(n) for n in range(4)] 298 | 299 | first_expected = "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=0 {0}\n" \ 300 | "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=1 {0}" \ 301 | .format(self._test_timestamp_millis) 302 | second_expected = "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=2 {0}\n" \ 303 | "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=3 {0}" \ 304 | .format(self._test_timestamp_millis) 305 | 306 | exporter = _DynatraceMetricsExporter() 307 | result = exporter.export( 308 | self._metrics_data_from_data(metrics)) 309 | 310 | # should have failed the whole batch as the second POST request failed 311 | self.assertEqual(MetricExportResult.FAILURE, result) 312 | 313 | mock_post.assert_any_call( 314 | self._ingest_endpoint, 315 | data=first_expected, 316 | headers=self._headers) 317 | 318 | mock_post.assert_any_call( 319 | self._ingest_endpoint, 320 | data=second_expected, 321 | headers=self._headers) 322 | 323 | @patch.object(requests.Session, 'post') 324 | def test_monotonic_delta_sum_exported_as_counter(self, mock_post): 325 | metrics_data = self._metrics_data_from_data([self._create_sum(10)]) 326 | 327 | exporter = _DynatraceMetricsExporter() 328 | 329 | result = exporter.export(metrics_data) 330 | 331 | self.assertEqual(MetricExportResult.SUCCESS, result) 332 | mock_post.assert_called_once_with( 333 | self._ingest_endpoint, 334 | 
data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 {0}" 335 | .format(self._test_timestamp_millis), 336 | headers=self._headers) 337 | 338 | @patch.object(requests.Session, 'post') 339 | def test_non_monotonic_delta_sum_is_dropped(self, mock_post): 340 | metrics_data = self._metrics_data_from_data([ 341 | self._create_sum( 342 | 10, 343 | monotonic=False, 344 | aggregation_temporality=AggregationTemporality.DELTA)]) 345 | 346 | exporter = _DynatraceMetricsExporter() 347 | 348 | result = exporter.export(metrics_data) 349 | 350 | self.assertEqual(MetricExportResult.SUCCESS, result) 351 | mock_post.assert_not_called() 352 | 353 | @patch.object(requests.Session, 'post') 354 | def test_monotonic_cumulative_sum_is_dropped(self, mock_post): 355 | metrics_data = self._metrics_data_from_data([self._create_sum(10, 356 | monotonic=True, 357 | aggregation_temporality=AggregationTemporality.CUMULATIVE)]) 358 | 359 | exporter = _DynatraceMetricsExporter() 360 | 361 | result = exporter.export(metrics_data) 362 | 363 | self.assertEqual(MetricExportResult.SUCCESS, result) 364 | mock_post.assert_not_called() 365 | 366 | @patch.object(requests.Session, 'post') 367 | def test_non_monotonic_cumulative_sum_exported_as_gauge(self, mock_post): 368 | metrics_data = self._metrics_data_from_data([self._create_sum(10, 369 | monotonic=False, 370 | aggregation_temporality=AggregationTemporality.CUMULATIVE)]) 371 | 372 | exporter = _DynatraceMetricsExporter() 373 | 374 | result = exporter.export(metrics_data) 375 | 376 | self.assertEqual(MetricExportResult.SUCCESS, result) 377 | mock_post.assert_called_once_with( 378 | self._ingest_endpoint, 379 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry gauge,10 {0}" 380 | .format(str(int(self._test_timestamp_nanos / 1000000))), 381 | headers=self._headers) 382 | 383 | @patch.object(requests.Session, 'post') 384 | def test_gauge_reported_as_gauge(self, mock_post): 385 | data = self._create_gauge(value=10) 386 | 387 | 
exporter = _DynatraceMetricsExporter() 388 | result = exporter.export( 389 | self._metrics_data_from_data([data])) 390 | 391 | self.assertEqual(MetricExportResult.SUCCESS, result) 392 | mock_post.assert_called_once_with( 393 | self._ingest_endpoint, 394 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry gauge,10 {0}" 395 | .format(str(int(self._test_timestamp_nanos / 1000000))), 396 | headers=self._headers) 397 | 398 | @patch.object(requests.Session, 'post') 399 | def test_histogram_exported_as_gauge(self, mock_post): 400 | data = self._create_histogram( 401 | bucket_counts=[1, 2, 4, 5], 402 | explicit_bounds=[0, 5, 10], 403 | histogram_sum=87, 404 | histogram_min=-3, 405 | histogram_max=12 406 | ) 407 | 408 | metrics_data = self._metrics_data_from_data([data]) 409 | 410 | exporter = _DynatraceMetricsExporter() 411 | result = exporter.export(metrics_data) 412 | 413 | self.assertEqual(MetricExportResult.SUCCESS, result) 414 | mock_post.assert_called_once_with( 415 | self._ingest_endpoint, 416 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry gauge,min=-3,max=12,sum=87,count=12 {0}" 417 | .format(str(int(self._test_timestamp_nanos / 1000000))), 418 | headers=self._headers) 419 | 420 | @patch.object(requests.Session, 'post') 421 | def test_cumulative_histogram_dropped(self, mock_post): 422 | data = self._create_histogram( 423 | bucket_counts=[1, 2, 4, 5], 424 | explicit_bounds=[0, 5, 10], 425 | aggregation_temporality=AggregationTemporality.CUMULATIVE 426 | ) 427 | 428 | metrics_data = self._metrics_data_from_data([data]) 429 | 430 | exporter = _DynatraceMetricsExporter() 431 | result = exporter.export(metrics_data) 432 | 433 | self.assertEqual(MetricExportResult.SUCCESS, result) 434 | mock_post.assert_not_called() 435 | 436 | @patch.object(requests.Session, 'post') 437 | def test_histogram_without_min_max_exported_as_estimated_gauge(self, 438 | mock_post): 439 | 440 | data = self._create_histogram(bucket_counts=[1, 2, 4, 5], 441 | 
explicit_bounds=[0, 5, 10], 442 | histogram_sum=87, 443 | histogram_min=math.inf, 444 | histogram_max=-math.inf 445 | ) 446 | metrics_data = self._metrics_data_from_data([data]) 447 | 448 | exporter = _DynatraceMetricsExporter() 449 | result = exporter.export(metrics_data) 450 | 451 | self.assertEqual(MetricExportResult.SUCCESS, result) 452 | mock_post.assert_called_once_with( 453 | self._ingest_endpoint, 454 | data="my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry gauge,min=0,max=10,sum=87,count=12 {0}" 455 | .format(str(int(self._test_timestamp_nanos / 1000000))), 456 | headers=self._headers) 457 | 458 | @patch.object(requests.Session, 'post') 459 | def test_multiple_records(self, mock_post): 460 | mock_post.return_value = self._get_session_response() 461 | 462 | data = [ 463 | self._create_sum(10), 464 | self._create_gauge(value=20), 465 | self._create_histogram(bucket_counts=[1, 2, 4, 5], 466 | explicit_bounds=[0, 5, 10], 467 | histogram_min=-3, 468 | histogram_max=12, 469 | histogram_sum=87 470 | ) 471 | ] 472 | exporter = _DynatraceMetricsExporter() 473 | result = exporter.export(self._metrics_data_from_data(data)) 474 | 475 | expected = "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry count,delta=10 {0}\n" \ 476 | "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry gauge,20 {0}\n" \ 477 | "my.instr,l1=v1,l2=v2,dt.metrics.source=opentelemetry gauge,min=-3,max=12,sum=87,count=12 {0}" \ 478 | .format(int(self._test_timestamp_millis)) 479 | 480 | self.assertEqual(MetricExportResult.SUCCESS, result) 481 | mock_post.assert_called_once_with( 482 | self._ingest_endpoint, 483 | data=expected, 484 | headers=self._headers) 485 | 486 | @patch.object(requests.Session, 'post') 487 | def test_view(self, mock_post): 488 | mock_post.return_value = self._get_session_response() 489 | 490 | exporter = _DynatraceMetricsExporter() 491 | 492 | metric_reader = PeriodicExportingMetricReader( 493 | export_interval_millis=3600000, 494 | # 1h so that the test can finish 
before the collection event fires. 495 | exporter=exporter) 496 | 497 | meter_provider = MeterProvider(metric_readers=[metric_reader], 498 | views=[View(name="my.renamed.instr", 499 | instrument_name=self._instrument_name)]) 500 | 501 | meter = meter_provider.get_meter(name="my.meter", version="1.0.0") 502 | counter = meter.create_counter(self._instrument_name) 503 | counter.add(10, attributes={"l1": "v1", "l2": "v2"}) 504 | 505 | metric_reader.collect() 506 | 507 | mock_post.assert_called_once_with( 508 | self._ingest_endpoint, 509 | data=AnyStringMatching( 510 | r"my\.renamed\.instr,(l2=v2,l1=v1|l1=v1,l2=v2)," 511 | r"dt\.metrics\.source=opentelemetry count,delta=10 [0-9]*"), 512 | headers=self._headers) 513 | 514 | counter.add(10, attributes={"l1": "v1", "l2": "v2"}) 515 | 516 | # shut down cleanly to avoid failed exports later. 517 | meter_provider.shutdown() 518 | 519 | def test_configuration_default(self): 520 | with patch.object(PeriodicExportingMetricReader, 521 | "__init__") as mock_reader: 522 | with patch.object(_DynatraceMetricsExporter, 523 | "__init__") as mock_exporter: 524 | mock_reader.return_value = None 525 | mock_exporter.return_value = None 526 | self.assertIsInstance(configure_dynatrace_metrics_export(), 527 | PeriodicExportingMetricReader) 528 | mock_exporter.assert_called_once_with( 529 | endpoint_url=None, 530 | api_token=None, 531 | prefix=None, 532 | default_dimensions=None, 533 | export_dynatrace_metadata=False, 534 | ) 535 | mock_reader.assert_called_once_with( 536 | export_interval_millis=None, 537 | exporter=mock.ANY, 538 | ) 539 | _, kwargs = mock_reader.call_args 540 | self.assertIsInstance(kwargs.get("exporter"), 541 | _DynatraceMetricsExporter) 542 | 543 | def test_configuration_custom(self): 544 | with patch.object(PeriodicExportingMetricReader, 545 | "__init__") as mock_reader: 546 | with patch.object(_DynatraceMetricsExporter, 547 | "__init__") as mock_exporter: 548 | mock_reader.return_value = None 549 | 
mock_exporter.return_value = None 550 | self.assertIsInstance(configure_dynatrace_metrics_export( 551 | endpoint_url="endpoint.url", 552 | export_dynatrace_metadata=True, 553 | export_interval_millis=100, 554 | api_token="dt.APItoken", 555 | prefix="otel.python.test", 556 | default_dimensions={"defaultKey": "defaultValue"} 557 | ), 558 | PeriodicExportingMetricReader) 559 | mock_exporter.assert_called_once_with( 560 | endpoint_url="endpoint.url", 561 | api_token="dt.APItoken", 562 | prefix="otel.python.test", 563 | default_dimensions={"defaultKey": "defaultValue"}, 564 | export_dynatrace_metadata=True, 565 | ) 566 | mock_reader.assert_called_once_with( 567 | export_interval_millis=100, 568 | exporter=mock.ANY, 569 | ) 570 | _, kwargs = mock_reader.call_args 571 | self.assertIsInstance(kwargs.get("exporter"), 572 | _DynatraceMetricsExporter) 573 | 574 | @parameterized.expand([ 575 | ("int gauge", "gauge,20"), 576 | ("float gauge", "gauge,1.23"), 577 | ("non-monotonic cumulative int sum", "gauge,10"), 578 | ("non-monotonic cumulative float sum", "gauge,1.23"), 579 | ("monotonic delta int sum", "count,delta=10"), 580 | ("monotonic delta float sum", "count,delta=1.23"), 581 | ("histogram", "gauge,min=-3,max=12,sum=87,count=12"), 582 | ]) 583 | @patch.object(requests.Session, 'post') 584 | def test_invalid_attribute_value(self, instrument_type, expected, 585 | mock_post): 586 | mock_post.return_value = self._get_session_response() 587 | 588 | attributes = { 589 | "string": "value", 590 | "bool": True, 591 | "int": 1, 592 | "float": 2.3, 593 | "string_array": ["a", "b", "c"], 594 | "bool_array": [True, False, True], 595 | "int_array": [1, 2, 3], 596 | "float_array": [1.2, 2.3, 3.4], 597 | "dict": {"a": "b"}, 598 | } 599 | 600 | self._assert_lines_created_correctly(instrument_type, attributes, 601 | expected, mock_post) 602 | 603 | @parameterized.expand([ 604 | ("int gauge", "gauge,20"), 605 | ("float gauge", "gauge,1.23"), 606 | ("non-monotonic cumulative int sum", 
"gauge,10"), 607 | ("non-monotonic cumulative float sum", "gauge,1.23"), 608 | ("monotonic delta int sum", "count,delta=10"), 609 | ("monotonic delta float sum", "count,delta=1.23"), 610 | ("histogram", "gauge,min=-3,max=12,sum=87,count=12"), 611 | ]) 612 | @patch.object(requests.Session, 'post') 613 | def test_invalid_attribute_keys(self, instrument_type, expected, 614 | mock_post): 615 | mock_post.return_value = self._get_session_response() 616 | 617 | attributes = { 618 | "string": "value", 619 | True: "bool", 620 | 1: "int", 621 | 3.2: "float", 622 | } 623 | 624 | self._assert_lines_created_correctly(instrument_type, attributes, 625 | expected, mock_post) 626 | 627 | def _assert_lines_created_correctly(self, instrument_type, attributes, 628 | expected, mock_post): 629 | expected = \ 630 | "my.instr,string=value,dt.metrics.source=opentelemetry {0} {1}" \ 631 | .format(expected, int(self._test_timestamp_millis)) 632 | 633 | data = [self._get_data_for_type(instrument_type, attributes)] 634 | 635 | exporter = _DynatraceMetricsExporter() 636 | result = exporter.export(self._metrics_data_from_data(data)) 637 | self.assertEqual(MetricExportResult.SUCCESS, result) 638 | mock_post.assert_called_once_with( 639 | self._ingest_endpoint, 640 | data=expected, 641 | headers=self._headers) 642 | 643 | def _get_data_for_type(self, instrument_type, attributes): 644 | if instrument_type == "int gauge": 645 | return self._create_gauge(value=20, attributes=attributes) 646 | elif instrument_type == "float gauge": 647 | return self._create_gauge(value=1.23, attributes=attributes) 648 | elif instrument_type == "non-monotonic cumulative int sum": 649 | return self._create_sum( 650 | value=10, 651 | attributes=attributes, 652 | monotonic=False, 653 | aggregation_temporality=AggregationTemporality.CUMULATIVE) 654 | elif instrument_type == "non-monotonic cumulative float sum": 655 | return self._create_sum( 656 | value=1.23, 657 | attributes=attributes, 658 | monotonic=False, 659 | 
aggregation_temporality=AggregationTemporality.CUMULATIVE) 660 | elif instrument_type == "monotonic delta int sum": 661 | return self._create_sum( 662 | value=10, 663 | attributes=attributes, 664 | monotonic=True, 665 | aggregation_temporality=AggregationTemporality.DELTA) 666 | elif instrument_type == "monotonic delta float sum": 667 | return self._create_sum( 668 | value=1.23, 669 | attributes=attributes, 670 | monotonic=True, 671 | aggregation_temporality=AggregationTemporality.DELTA) 672 | elif instrument_type == "histogram": 673 | return self._create_histogram( 674 | bucket_counts=[1, 2, 4, 5], 675 | explicit_bounds=[0, 5, 10], 676 | histogram_sum=87, 677 | histogram_min=-3, 678 | histogram_max=12, 679 | attributes=attributes 680 | ) 681 | 682 | def _metrics_data_from_metrics(self, 683 | metrics: Sequence[Metric]) -> MetricsData: 684 | return MetricsData(resource_metrics=[ 685 | ResourceMetrics( 686 | resource=Resource({}), 687 | schema_url="http://schema.url/resource", 688 | scope_metrics=[ScopeMetrics( 689 | scope=InstrumentationScope( 690 | name="dynatrace.opentelemetry.metrics.export", 691 | version="0.0.1"), 692 | schema_url="http://schema.url/scope", 693 | metrics=metrics 694 | )] 695 | )]) 696 | 697 | def _metrics_data_from_data(self, data: Sequence[DataT]) -> MetricsData: 698 | return self._metrics_data_from_metrics( 699 | [self._metric_from_data(item) for item in data] 700 | ) 701 | 702 | def _metric_from_data(self, data: DataT, instrument_name=None): 703 | return Metric( 704 | name=instrument_name 705 | if instrument_name is not None 706 | else self._instrument_name, 707 | description="", 708 | unit="1", 709 | data=data 710 | ) 711 | 712 | def _create_sum(self, value: Union[int, float], monotonic=True, 713 | aggregation_temporality: AggregationTemporality = AggregationTemporality.DELTA, 714 | attributes: dict = None) -> Sum: 715 | if not attributes: 716 | attributes = self._attributes 717 | 718 | return Sum( 719 | is_monotonic=monotonic, 720 | 
aggregation_temporality=aggregation_temporality, 721 | data_points=[ 722 | NumberDataPoint( 723 | start_time_unix_nano=self._test_timestamp_nanos, 724 | time_unix_nano=self._test_timestamp_nanos, 725 | value=value, 726 | attributes=attributes 727 | ) 728 | ]) 729 | 730 | def _create_gauge(self, value: Union[int, float], 731 | attributes: dict = None) -> Gauge: 732 | if not attributes: 733 | attributes = self._attributes 734 | return Gauge( 735 | data_points=[ 736 | NumberDataPoint( 737 | start_time_unix_nano=self._test_timestamp_nanos, 738 | time_unix_nano=self._test_timestamp_nanos, 739 | value=value, 740 | attributes=attributes 741 | ) 742 | ]) 743 | 744 | def _create_histogram(self, 745 | bucket_counts: Sequence[int], 746 | explicit_bounds: Sequence[int], 747 | histogram_sum: Union[int, float] = 0, 748 | histogram_min: Union[int, float] = 0, 749 | histogram_max: Union[int, float] = 0, 750 | aggregation_temporality: AggregationTemporality = AggregationTemporality.DELTA, 751 | attributes: dict = None) -> Histogram: 752 | if not attributes: 753 | attributes = self._attributes 754 | return Histogram( 755 | data_points=[ 756 | HistogramDataPoint( 757 | attributes=attributes, 758 | bucket_counts=bucket_counts, 759 | explicit_bounds=explicit_bounds, 760 | count=sum(bucket_counts), 761 | sum=histogram_sum, 762 | min=histogram_min, 763 | max=histogram_max, 764 | time_unix_nano=self._test_timestamp_nanos, 765 | start_time_unix_nano=self._test_timestamp_nanos, 766 | ) 767 | ], 768 | aggregation_temporality=aggregation_temporality, 769 | ) 770 | 771 | @staticmethod 772 | def _get_session_response(error: bool = False) -> requests.Response: 773 | r = requests.Response() 774 | if error: 775 | r.status_code = 500 776 | else: 777 | r.status_code = 200 778 | r._content = str.encode('{}') 779 | return r 780 | 781 | 782 | if __name__ == '__main__': 783 | unittest.main() 784 | -------------------------------------------------------------------------------- 
/test/test_histogram_utils.py: -------------------------------------------------------------------------------- 1 | import math 2 | import unittest 3 | 4 | from typing import List, Union 5 | 6 | from parameterized import parameterized 7 | from opentelemetry.sdk.metrics.export import HistogramDataPoint 8 | 9 | from dynatrace.opentelemetry.metrics.export._histogram_utils import ( 10 | _get_histogram_min, 11 | _get_histogram_max, 12 | ) 13 | 14 | 15 | def create_histogram(explicit_bounds: List[int], bucket_counts: List[int], 16 | histogram_sum: Union[float, int]): 17 | start_time = 1619687639000000000 18 | end_time = 1619687639000000000 19 | return HistogramDataPoint(bucket_counts=bucket_counts, 20 | explicit_bounds=explicit_bounds, 21 | sum=histogram_sum, 22 | min=+math.inf, 23 | max=-math.inf, 24 | time_unix_nano=end_time, 25 | start_time_unix_nano=start_time, 26 | attributes=dict(), 27 | count=sum(bucket_counts)) 28 | 29 | 30 | class TestMin(unittest.TestCase): 31 | @parameterized.expand([ 32 | # Values between the first two boundaries. 33 | ([1, 2, 3, 4, 5], [0, 1, 0, 3, 2, 0], 21.2, 1), 34 | # First bucket has value, use the first boundary 35 | # as estimation instead of Inf. 36 | ([1, 2, 3, 4, 5], [1, 0, 0, 3, 0, 4], 34.5, 1), 37 | # Only the first bucket has values, use the mean 38 | # (0.25) Otherwise, the min would be estimated as 39 | # 1, and min <= avg would be violated. 40 | ([1, 2, 3, 4, 5], [3, 0, 0, 0, 0, 0], 0.75, 0.25), 41 | # Just one bucket from -Inf to Inf, calculate the 42 | # mean as min value. 43 | ([], [4], 8.8, 2.2), 44 | # Just one bucket from -Inf to Inf, calculate the 45 | # mean as min value. 46 | ([], [1], 1.2, 1.2), 47 | # Only the last bucket has a value, use the lower 48 | # bound. 49 | ([1, 2, 3, 4, 5], [0, 0, 0, 0, 0, 3], 15.6, 5), 50 | ]) 51 | def test_get_min(self, boundaries, buckets, histogram_sum, expected_min): 52 | # Values between the first two boundaries. 
53 | self.assertEqual(expected_min, 54 | _get_histogram_min( 55 | create_histogram(boundaries, 56 | buckets, 57 | histogram_sum))) 58 | 59 | 60 | class TestMax(unittest.TestCase): 61 | @parameterized.expand([ 62 | # Values spread across several buckets; use the upper 63 | # bound of the highest non-empty bucket. 64 | ([1, 2, 3, 4, 5], [0, 1, 0, 3, 2, 0], 21.2, 5), 65 | # Last bucket has value, use the last boundary as 66 | # estimation instead of Inf. 67 | ([1, 2, 3, 4, 5], [1, 0, 0, 3, 0, 4], 34.5, 5), 68 | # Only the last bucket has values, use the 69 | # mean (10.1) Otherwise, the max would be 70 | # estimated as 5, and max >= avg would be 71 | # violated. 72 | ([1, 2, 3, 4, 5], [0, 0, 0, 0, 0, 2], 20.2, 10.1), 73 | # Just one bucket from -Inf to Inf, calculate 74 | # the mean as max value. 75 | ([], [4], 8.8, 2.2), 76 | # Just one bucket from -Inf to Inf, calculate 77 | # the mean as max value. 78 | ([], [1], 1.2, 1.2), 79 | # Max is larger than the sum, use the 80 | # estimated boundary. 81 | ([0, 5], [0, 2, 0], 2.3, 5), 82 | # Only the first bucket has a value, use its upper 83 | # bound (the first boundary). 84 | ([1, 2, 3, 4, 5], [3, 0, 0, 0, 0, 0], 1.5, 1), 85 | ]) 86 | def test_get_max(self, boundaries, buckets, histogram_sum, expected_max): 87 | # Assert the estimated maximum for each parameterized case. 
87 | self.assertEqual(expected_max, 88 | _get_histogram_max( 89 | create_histogram(boundaries, 90 | buckets, 91 | histogram_sum))) 92 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = 3 | py37 4 | py38 5 | py39 6 | py310 7 | pypy37 8 | pypy38 9 | pypy39 10 | lint 11 | 12 | [testenv] 13 | deps = 14 | pytest 15 | parameterized 16 | commands = 17 | pytest -v 18 | 19 | [testenv:lint] 20 | basepython = python3 21 | skip_install = true 22 | deps = 23 | setuptools 24 | flake8 25 | commands = 26 | flake8 src 27 | python setup.py check --strict 28 | --------------------------------------------------------------------------------