├── .github
│   ├── actions
│   │   └── get-release-version
│   │       ├── action.yml
│   │       └── get-release-version.js
│   ├── pull_request_template.md
│   └── workflows
│       ├── main.yml
│       └── release.yml
├── .gitignore
├── CHANGELOG.md
├── LICENSE-APACHE
├── LICENSE-MIT
├── README.md
├── Tiltfile
├── configs
│   ├── compose
│   │   ├── examples.yaml
│   │   └── infra.yaml
│   ├── docker
│   │   └── base.Dockerfile
│   ├── grafana
│   │   ├── config.ini
│   │   ├── dashboards
│   │   │   ├── Autometrics Function Explorer.json
│   │   │   ├── Autometrics Overview.json
│   │   │   └── Autometrics Service Level Objectives (SLOs).json
│   │   └── provisioning
│   │       ├── dashboards
│   │       │   └── dashboards.yml
│   │       └── datasources
│   │           └── datasource.yml
│   └── otel-collector-config.yaml
├── docker-compose.yaml
├── examples
│   ├── README.md
│   ├── caller-example.py
│   ├── django_example
│   │   ├── Readme.md
│   │   ├── django_example
│   │   │   ├── __init__.py
│   │   │   ├── asgi.py
│   │   │   ├── settings.py
│   │   │   ├── urls.py
│   │   │   ├── views
│   │   │   │   ├── __init__.py
│   │   │   │   ├── concurrency.py
│   │   │   │   ├── error.py
│   │   │   │   ├── latency.py
│   │   │   │   ├── metrics.py
│   │   │   │   └── simple.py
│   │   │   └── wsgi.py
│   │   ├── locustfile.py
│   │   ├── manage.py
│   │   ├── mypy.ini
│   │   └── run_example.sh
│   ├── docs-example.py
│   ├── example.py
│   ├── export_metrics
│   │   ├── otel-prometheus.py
│   │   ├── otlp-grpc.py
│   │   ├── otlp-http.py
│   │   └── prometheus-client.py
│   ├── fastapi-example.py
│   ├── fastapi-with-fly-io
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── app.py
│   │   ├── fly.toml
│   │   └── images
│   │       ├── go_to_imports.jpg
│   │       ├── import_form.jpg
│   │       ├── overview_dashboard.jpg
│   │       └── select_prometheus.jpg
│   └── starlette-otel-exemplars.py
├── poetry.lock
├── poetry.toml
├── prometheus.yaml
├── pyproject.toml
├── pyrightconfig.json
└── src
    ├── autometrics
    │   ├── __init__.py
    │   ├── conftest.py
    │   ├── constants.py
    │   ├── decorator.py
    │   ├── exemplar.py
    │   ├── exposition.py
    │   ├── initialization.py
    │   ├── objectives.py
    │   ├── prometheus_url.py
    │   ├── py.typed
    │   ├── settings.py
    │   ├── test_caller.py
    │   ├── test_decorator.py
    │   ├── test_initialization.py
    │   ├── test_objectives.py
    │   ├── test_prometheus_url.py
    │   ├── test_utils.py
    │   ├── tracker
    │   │   ├── __init__.py
    │   │   ├── opentelemetry.py
    │   │   ├── prometheus.py
    │   │   ├── temporary.py
    │   │   ├── test_concurrency.py
    │   │   ├── test_format.py
    │   │   ├── test_tracker.py
    │   │   ├── tracker.py
    │   │   └── types.py
    │   └── utils.py
    └── py.typed
/.github/actions/get-release-version/action.yml:
--------------------------------------------------------------------------------
1 | name: Get version
2 | runs:
3 | using: "node16"
4 | main: "get-release-version.js"
5 |
--------------------------------------------------------------------------------
/.github/actions/get-release-version/get-release-version.js:
--------------------------------------------------------------------------------
1 | const fs = require("fs");
2 | const path = require("path");
3 | const regex = /version = "([\d.]+)"/gm;
4 |
5 | const file_path = path.join(process.env.GITHUB_WORKSPACE, "pyproject.toml");
6 | const file_contents = fs.readFileSync(file_path, { encoding: "utf8" });
7 | const matches = regex.exec(file_contents);
8 | if (matches && matches.length == 2) {
9 | const [_, version] = matches;
10 |
11 | fs.appendFileSync(process.env.GITHUB_OUTPUT, `version=${version}`, {
12 | encoding: "utf8",
13 | });
14 | } else {
15 | throw new Error(`No version found in ${file_path}`);
16 | }
17 |
--------------------------------------------------------------------------------
/.github/pull_request_template.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | > Please give context of this change for reviewers. **What did you change, and why?**
4 |
5 | ## Checklist
6 |
7 | - [ ] Describe what you're doing, to help give context for reviewer(s)
8 | - [ ] Link to any helpful documentation (GitHub issues, Linear, Slack discussions, etc.)
9 | - [ ] Create test cases
10 | - [ ] Update changelog
11 |
15 |
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: Lint and test
3 |
4 | on:
5 | pull_request:
6 | branches: ["*"]
7 | push:
8 | branches: ["main"]
9 |
10 | jobs:
11 | lint:
12 | runs-on: ubuntu-latest
13 | strategy:
14 | matrix:
15 | python-version: ["3.8", "3.12", "pypy3.10"]
16 | env:
17 | FORCE_COLOR: 1
18 | steps:
19 | - uses: actions/checkout@v3
20 | - name: Install poetry
21 | run: pipx install poetry
22 | - uses: actions/setup-python@v4
23 | with:
24 | python-version: ${{ matrix.python-version }}
25 | cache: poetry
26 | - name: Install dependencies (cpython)
27 | if: ${{ matrix.python-version != 'pypy3.10' }}
28 | run: poetry install --no-interaction --no-root --with dev,examples --all-extras
29 | - name: Install dependencies (pypy)
30 | if: ${{ matrix.python-version == 'pypy3.10' }}
31 | run: poetry install --no-interaction --no-root --with dev,examples --extras=exporter-otlp-proto-http
32 | - name: Check code formatting
33 | run: poetry run black --check .
34 | - name: Lint lib code
35 | run: poetry run mypy src --enable-incomplete-feature=Unpack
36 | - name: Lint lib examples
37 | run: poetry run mypy examples --enable-incomplete-feature=Unpack
38 | - name: Run tests
39 | run: poetry run pytest -n auto
40 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Release and publish
2 | on: [workflow_dispatch]
3 |
4 | permissions:
5 | contents: write
6 |
7 | jobs:
8 | release:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v3
12 | - uses: ./.github/actions/get-release-version
13 | id: release_version
14 | - name: Install poetry
15 | run: pipx install poetry
16 | - uses: actions/setup-python@v4
17 | with:
18 | python-version: 3.11
19 | cache: poetry
20 | - name: Install dependencies
21 | run: poetry install --no-interaction --no-root --with dev
22 | - name: Build
23 | run: poetry build
24 | - name: Tag release
25 | run: |
26 | git config --local user.email "github-actions[bot]@users.noreply.github.com"
27 | git config --local user.name "github-actions[bot]"
28 | git tag ${{ steps.release_version.outputs.version }}
29 | git push origin ${{ steps.release_version.outputs.version }}
30 | - name: Create release
31 | uses: softprops/action-gh-release@v1
32 | with:
33 | files: dist/*
34 | tag_name: ${{ steps.release_version.outputs.version }}
35 | generate_release_notes: true
36 | name: ${{ steps.release_version.outputs.version }}
37 | - name: Publish
38 | run: poetry run twine upload dist/*
39 | env:
40 | TWINE_USERNAME: __token__
41 | TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
42 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | #custom
2 | data/
3 | .vscode/
4 | my_testing/
5 | autometrics-venv/
6 |
7 | # Byte-compiled / optimized / DLL files
8 | __pycache__/
9 | *.py[cod]
10 | *$py.class
11 |
12 | # C extensions
13 | *.so
14 |
15 | # Distribution / packaging
16 | .Python
17 | build/
18 | develop-eggs/
19 | dist/
20 | downloads/
21 | eggs/
22 | .eggs/
23 | lib/
24 | lib64/
25 | parts/
26 | sdist/
27 | var/
28 | wheels/
29 | pip-wheel-metadata/
30 | share/python-wheels/
31 | *.egg-info/
32 | .installed.cfg
33 | *.egg
34 | MANIFEST
35 |
36 | # PyInstaller
37 | # Usually these files are written by a python script from a template
38 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
39 | *.manifest
40 | *.spec
41 |
42 | # Installer logs
43 | pip-log.txt
44 | pip-delete-this-directory.txt
45 |
46 | # Unit test / coverage reports
47 | htmlcov/
48 | .tox/
49 | .nox/
50 | .coverage
51 | .coverage.*
52 | .cache
53 | nosetests.xml
54 | coverage.xml
55 | *.cover
56 | *.py,cover
57 | .hypothesis/
58 | .pytest_cache/
59 |
60 | # Translations
61 | *.mo
62 | *.pot
63 |
64 | # Django stuff:
65 | *.log
66 | local_settings.py
67 | db.sqlite3
68 | db.sqlite3-journal
69 |
70 | # Flask stuff:
71 | instance/
72 | .webassets-cache
73 |
74 | # Scrapy stuff:
75 | .scrapy
76 |
77 | # Sphinx documentation
78 | docs/_build/
79 |
80 | # PyBuilder
81 | target/
82 |
83 | # Jupyter Notebook
84 | .ipynb_checkpoints
85 |
86 | # IPython
87 | profile_default/
88 | ipython_config.py
89 |
90 | # pyenv
91 | .python-version
92 |
93 | # pipenv
94 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
95 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
96 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
97 | # install all needed dependencies.
98 | #Pipfile.lock
99 |
100 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
101 | __pypackages__/
102 |
103 | # Celery stuff
104 | celerybeat-schedule
105 | celerybeat.pid
106 |
107 | # SageMath parsed files
108 | *.sage.py
109 |
110 | # Environments
111 | .env
112 | .venv
113 | env/
114 | venv/
115 | ENV/
116 | env.bak/
117 | venv.bak/
118 |
119 | # Spyder project settings
120 | .spyderproject
121 | .spyproject
122 |
123 | # Rope project settings
124 | .ropeproject
125 |
126 | # mkdocs documentation
127 | /site
128 |
129 | # mypy
130 | .mypy_cache/
131 | .dmypy.json
132 | dmypy.json
133 |
134 | # Pyre type checker
135 | .pyre/
136 |
137 | .DS_Store
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | All notable changes to this project will be documented in this file.
4 |
5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
6 |
7 |
8 |
9 | ---
10 |
11 | ## [Unreleased]
12 |
13 | ### Added
14 |
15 | -
16 |
17 | ### Changed
18 |
19 | -
20 |
21 | ### Deprecated
22 |
23 | -
24 |
25 | ### Removed
26 |
27 | -
28 |
29 | ### Fixed
30 |
31 | -
32 |
33 | ### Security
34 |
35 | -
36 |
37 | ## [1.0.0](https://github.com/autometrics-dev/autometrics-py/releases/tag/1.0.0) - 2023-11-14
38 |
39 | ### Added
40 |
41 | - Added support for `record_error_if` and `record_success_if`
42 | - Added OTLP exporters for OpenTelemetry tracker (#89)
43 | - Added `repository_url` and `repository_provider` labels to `build_info` (#97)
44 | - Added `autometrics.version` label to `build_info` (#101)
45 |
46 | ### Changed
47 |
48 | - [💥 Breaking change] `init` function is now required to be called before using autometrics (#89)
49 | - Prometheus exporters are now configured via `init` function (#89)
50 | - Updated examples to call `init` function (#94)
51 | - Updated `docker compose` / `tilt` config in repo to include grafana with our dashboards (#94)
52 | - `Objective`s will now emit a warning when their name contains characters other than alphanumerics and dashes (#99)
53 |
54 | ### Security
55 |
56 | - Updated FastAPI and Pydantic dependencies in the examples group (#89)
57 | - Updated dependencies in dev and examples groups (#97)
58 |
59 | ## [0.9](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.9) - 2023-09-24
60 |
61 | ### Added
62 |
63 | - Added the `start_http_server`, which starts a separate HTTP server to expose
64 | the metrics instead of using a separate endpoint in the existing server. (#77)
65 | - Added the `init` function that you can use to configure autometrics. (#77)
66 |
67 | ### Changed
68 |
69 | - Renamed the `function.calls.count` metric to `function.calls` (which is exported
70 | to Prometheus as `function_calls_total`) to be in line with OpenTelemetry and
71 | OpenMetrics naming conventions. **Dashboards and alerting rules must be updated.** (#74)
72 | - When the `function.calls.duration` histogram is exported to Prometheus, it now
73 | includes the units (`function_calls_duration_seconds`) to be in line with
74 | Prometheus/OpenMetrics naming conventions. **Dashboards and alerting rules must be updated.** (#74)
75 | - The `caller` label on the `function.calls` metric was replaced with `caller.function`
76 | and `caller.module` (#75)
77 | - All metrics now have a `service.name` label attached. This is set via runtime environment
78 | variable (`AUTOMETRICS_SERVICE_NAME` or `OTEL_SERVICE_NAME`), or falls back to the package name. (#76)
79 | - When running a script outside of a module, the `module` label is now set to the file name (#80)
80 |
81 | ### Security
82 |
83 | - Updated dependencies in examples group (#77)
84 |
85 | ## [0.8](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.8) - 2023-07-24
86 |
87 | ### Added
88 |
89 | - Support for prometheus-client 0.17.x
90 |
91 | ## [0.7](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.7) - 2023-07-19
92 |
93 | ### Added
94 |
95 | - Initialize counter metrics at zero #54
96 |
97 | ### Changed
98 |
99 | - Caller tracking only tracks autometricised functions, as per spec #59
100 | - Function name labels now use the qualified name, and module labels use the module's `__name__` when available #59
101 |
102 | ### Deprecated
103 |
104 | -
105 |
106 | ### Removed
107 |
108 | -
109 |
110 | ### Fixed
111 |
112 | - Fixed calculation of function duration when using OpenTelemetry tracker #66
113 |
114 | ### Security
115 |
116 | -
117 |
118 | ## [0.6](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.6) - 2023-06-23
119 |
120 | ### Added
121 |
122 | - Exemplars support (#51)
123 | - Optional concurrency tracking support (#55)
124 |
125 | ### Changed
126 |
127 | - `build_info` is extended with support for branch labels and now picks up the commit label from `COMMIT_SHA` env var (#52)
128 |
129 | ### Fixed
130 |
131 | - Fixed decorator async function handling (#55)
132 |
133 | ### Security
134 |
135 | - Update requests, starlette, fastapi dependencies used by the examples
136 |
137 | ## [0.5](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.5) - 2023-05-11
138 |
139 | ### Added
140 |
141 | - Support `build_info` metrics for Prometheus tracker (#35)
142 | - **NOTE**: The OpenTelemetry tracker does not accurately track `build_info`, so you will need to set the env var `AUTOMETRICS_TRACKER=PROMETHEUS` to see accurate build info in your metrics (see #38)
143 | - OpenTelemetry Support (#28)
144 | - Fly.io example (#26)
145 | - Django example (#22)
146 |
147 | ### Fixed
148 |
149 | - The `autometrics` decorator now supports async functions (#33)
150 |
151 | ## [0.4](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.4) - 2023-04-13
152 |
153 | ### Added
154 |
155 | - SLO Support (#16)
156 | - Improved documentation and examples (#13 #19)
157 |
158 | ### Changed
159 |
160 | - Development setup (#10 #12)
161 |
162 | ### Fixed
163 |
164 | - Issue with trailing slashes in prometheus url (#14)
165 |
166 | ## [0.3](https://github.com/autometrics-dev/autometrics-py/releases/tag/0.3) - 2023-03-28
167 |
168 | ### Added
169 |
170 | - Implemented caller label for the function calls counter (#9)
171 |
--------------------------------------------------------------------------------
/LICENSE-APACHE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
--------------------------------------------------------------------------------
/LICENSE-MIT:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Fiberplane B.V.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | 
2 |
3 | [](https://github.com/autometrics-dev/autometrics-py/actions/workflows/main.yml)
4 | [](https://discord.gg/kHtwcH8As9)
5 |
6 | > A Python port of the Rust
7 | > [autometrics-rs](https://github.com/fiberplane/autometrics-rs) library
8 |
9 | Metrics are a powerful and cost-efficient tool for understanding the health and performance of your code in production. But it's hard to decide what metrics to track and even harder to write queries to understand the data.
10 |
11 | Autometrics provides a decorator that makes it trivial to instrument any function with the most useful metrics: request rate, error rate, and latency. It standardizes these metrics and then generates powerful Prometheus queries based on your function details to help you quickly identify and debug issues in production.
12 |
13 | See [Why Autometrics?](https://github.com/autometrics-dev#why-autometrics) for more details on the ideas behind autometrics.
14 |
15 | ## Features
16 |
17 | - ✨ `@autometrics` decorator instruments any function or class method to track the
18 | most useful metrics
19 | - 💡 Writes Prometheus queries so you can understand the data generated without
20 | knowing PromQL
21 | - 🔗 Create links to live Prometheus charts directly in each function's docstring
22 | - 🔍 [Identify commits](#build-info) that introduced errors or increased latency
23 | - 🚨 [Define alerts](#alerts--slos) using SLO best practices directly in your source code
24 | - 📊 [Grafana dashboards](#dashboards) work out of the box to visualize the performance of instrumented functions & SLOs
25 | - ⚙️ [Configurable](#settings) metric collection library (`opentelemetry` or `prometheus`)
26 | - 📍 [Attach exemplars](#exemplars) to connect metrics with traces
27 | - ⚡ Minimal runtime overhead
28 |
29 | ## Quickstart
30 |
31 | 1. Add `autometrics` to your project's dependencies:
32 |
33 | ```shell
34 | pip install autometrics
35 | ```
36 |
37 | 2. Instrument your functions with the `@autometrics` decorator
38 |
39 | ```python
40 | from autometrics import autometrics
41 |
42 | @autometrics
43 | def my_function():
44 | # ...
45 | ```
46 |
47 | 3. Configure autometrics by calling the `init` function:
48 |
49 | ```python
50 | from autometrics import init
51 |
52 | init(tracker="prometheus", service_name="my-service")
53 | ```
54 |
55 | 4. Export the metrics for Prometheus
56 |
57 | ```python
58 | # This example uses FastAPI, but you can use any web framework
59 | from fastapi import FastAPI, Response
60 | from prometheus_client import generate_latest
61 | app = FastAPI()
62 | # Set up a metrics endpoint for Prometheus to scrape
63 | # `generate_latest` returns metrics data in the Prometheus text format
64 | @app.get("/metrics")
65 | def metrics():
66 | return Response(generate_latest())
67 | ```
68 |
69 | 5. Run Prometheus locally with the [Autometrics CLI](https://docs.autometrics.dev/local-development#getting-started-with-am) or [configure it manually](https://github.com/autometrics-dev#5-configuring-prometheus) to scrape your metrics endpoint
70 |
71 | ```sh
72 | # Replace `8080` with the port that your app runs on
73 | am start :8080
74 | ```
75 |
76 | 6. (Optional) If you have Grafana, import the [Autometrics dashboards](https://github.com/autometrics-dev/autometrics-shared#dashboards) for an overview and detailed view of all the function metrics you've collected
77 |
78 | ## Using `autometrics-py`
79 |
80 | - You can import the library in your code and use the decorator for any function:
81 |
82 | ```python
83 | from autometrics import autometrics
84 |
85 | @autometrics
86 | def sayHello():
87 | return "hello"
88 |
89 | ```
90 |
91 | - To show tooltips over decorated functions in VSCode, with links to Prometheus queries, try installing [the VSCode extension](https://marketplace.visualstudio.com/items?itemName=Fiberplane.autometrics).
92 |
93 | > **Note**: We cannot support tooltips without a VSCode extension due to the behavior of the [static analyzer](https://github.com/davidhalter/jedi/issues/1921) used in VSCode.
94 |
95 | - You can also track the number of concurrent calls to a function by using the `track_concurrency` argument: `@autometrics(track_concurrency=True)`.
96 |
97 | > **Note**: Concurrency tracking is only supported when you set the environment variable `AUTOMETRICS_TRACKER=prometheus`.
98 |
99 | - To access the PromQL queries for your decorated functions, run `help(yourfunction)` or `print(yourfunction.__doc__)`.
100 |
101 | > For these queries to work, include a `.env` file in your project with your Prometheus endpoint, e.g. `PROMETHEUS_URL=http://your-prometheus-host:9090`. If this is not defined, the default endpoint will be `http://localhost:9090/`
102 |
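For example, a minimal sketch (the function name is illustrative):

```python
from autometrics import autometrics, init

init(service_name="my-service")

@autometrics
def get_users():
    ...

# Prints the generated PromQL queries for this function,
# with links to live charts when PROMETHEUS_URL is set
print(get_users.__doc__)
```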
103 | ## Dashboards
104 |
105 | Autometrics provides [Grafana dashboards](https://github.com/autometrics-dev/autometrics-shared#dashboards) that will work for any project instrumented with the library.
106 |
107 | ## Alerts / SLOs
108 |
109 | Autometrics makes it easy to add intelligent alerting to your code, in order to catch increases in the error rate or latency across multiple functions.
110 |
111 | ```python
112 | from autometrics import autometrics
113 | from autometrics.objectives import Objective, ObjectiveLatency, ObjectivePercentile
114 |
115 | # Create an objective for a high success rate
116 | # Here, we want our API to have a success rate of 99.9%
117 | API_SLO_HIGH_SUCCESS = Objective(
118 | "My API SLO for High Success Rate (99.9%)",
119 | success_rate=ObjectivePercentile.P99_9,
120 | )
121 |
122 | @autometrics(objective=API_SLO_HIGH_SUCCESS)
123 | def api_handler():
124 | # ...
125 | ```
126 |
127 | The library uses the concept of Service-Level Objectives (SLOs) to define the acceptable error rate and latency for groups of functions. Alerts will fire depending on the SLOs you set.
128 |
129 | > Not sure what SLOs are? [Check out our docs](https://docs.autometrics.dev/slo) for an introduction.
130 |
131 | In order to receive alerts, **you need to add a special set of rules to your Prometheus setup**. These are configured automatically when you use the [Autometrics CLI](https://docs.autometrics.dev/local-development#getting-started-with-am) to run Prometheus.
132 |
133 | > Already running Prometheus yourself? [Read about how to load the autometrics alerting rules into Prometheus here](https://github.com/autometrics-dev/autometrics-shared#prometheus-recording--alerting-rules).
134 |
135 | Once the alerting rules are in Prometheus, you're ready to go.
136 |
137 | To use autometrics SLOs and alerts, create one or multiple `Objective`s based on the function(s)' success rate and/or latency, as shown above.
138 |
139 | The `Objective` can be passed as an argument to the `autometrics` decorator, which will include the given function in that objective.
140 |
141 | The example above used a success rate objective. (I.e., we wanted to be alerted when the error rate started to increase.)
142 |
143 | You can also create an objective for the latency of your functions like so:
144 |
145 | ```python
146 | from autometrics import autometrics
147 | from autometrics.objectives import Objective, ObjectiveLatency, ObjectivePercentile
148 |
149 | # Create an objective for low latency
150 | # - Functions with this objective should have a 99th percentile latency of less than 250ms
151 | API_SLO_LOW_LATENCY = Objective(
152 | "My API SLO for Low Latency (99th percentile < 250ms)",
153 | latency=(ObjectiveLatency.Ms250, ObjectivePercentile.P99),
154 | )
155 |
156 | @autometrics(objective=API_SLO_LOW_LATENCY)
157 | def api_handler():
158 | # ...
159 | ```
160 |
161 | ## The `caller` Label
162 |
163 | Autometrics keeps track of instrumented functions that call each other. So, if you have a function `get_users` that calls another function `db.query`, then the metrics for the latter will include a label `caller="get_users"`.
164 |
165 | This allows you to drill down into the metrics for functions that are _called by_ your instrumented functions, provided both of those functions are decorated with `@autometrics`.
166 |
167 | In the example above, this means that you could investigate the latency of the database queries that `get_users` makes, which is rather useful.
168 |
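As a minimal sketch (the function names are hypothetical), note that both functions carry the decorator:

```python
from autometrics import autometrics

@autometrics
def db_query():
    ...

@autometrics
def get_users():
    # Metrics recorded for db_query during this call will
    # include the label caller="get_users"
    return db_query()
```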
169 | ## Settings and Configuration
170 |
171 | Autometrics makes use of a number of environment variables to configure its behavior. All of them are also configurable with keyword arguments to the `init` function.
172 |
173 | - `tracker` - Configure the package that autometrics will use to produce metrics. Default is `opentelemetry`, but you can also use `prometheus`. Look in `pyproject.toml` for the corresponding versions of packages that will be used.
174 | - `histogram_buckets` - Configure the buckets used for latency histograms. Default is `[0.005, 0.01, 0.025, 0.05, 0.075, 0.1, 0.25, 0.5, 0.75, 1.0, 2.5, 5.0, 7.5, 10.0]`.
175 | - `enable_exemplars` - Enable [exemplar collection](#exemplars). Default is `False`.
176 | - `service_name` - Configure the [service name](#service-name).
177 | - `version`, `commit`, `branch`, `repository_url`, `repository_provider` - Used to configure [build_info](#build-info).
178 |
179 | Below is an example of initializing autometrics with build information, as well as the `prometheus` tracker. (Note that you can also accomplish the same configuration with environment variables.)
180 |
181 | ```python
182 | from autometrics import autometrics, init
183 | from git_utils import get_git_commit, get_git_branch
184 |
185 | VERSION = "0.0.1"
186 |
187 | init(
188 | tracker="prometheus",
189 | version=VERSION,
190 | commit=get_git_commit(),
191 | branch=get_git_branch()
192 | )
193 | ```
194 |
195 | ## Identifying commits that introduced problems
196 |
197 | Autometrics makes it easy to identify if a specific version or commit introduced errors or increased latencies.
198 |
199 | > **NOTE** - As of writing, `build_info` will not work correctly when using the default setting of `AUTOMETRICS_TRACKER=opentelemetry`. If you wish to use `build_info`, you must use the `prometheus` tracker instead (`AUTOMETRICS_TRACKER=prometheus`).
200 | >
201 | > The issue will be fixed once the following PR is merged and released on the opentelemetry-python project: https://github.com/open-telemetry/opentelemetry-python/pull/3306
202 | >
203 | > autometrics-py will track support for build_info using the OpenTelemetry tracker via [this issue](https://github.com/autometrics-dev/autometrics-py/issues/38)
204 |
205 | The library uses a separate metric (`build_info`) to track the version and git metadata of your code - repository url, provider name, commit and branch.
206 |
207 | It then writes queries that group metrics by these metadata, so you can spot correlations between code changes and potential issues.
208 |
209 | Configure these labels by setting the following environment variables:
210 |
211 | | Label | Run-Time Environment Variables | Default value |
212 | | --------------------- | ------------------------------------- | ------------- |
213 | | `version` | `AUTOMETRICS_VERSION` | `""` |
214 | | `commit` | `AUTOMETRICS_COMMIT` or `COMMIT_SHA` | `""` |
215 | | `branch` | `AUTOMETRICS_BRANCH` or `BRANCH_NAME` | `""` |
216 | | `repository_url` | `AUTOMETRICS_REPOSITORY_URL` | `""`\* |
217 | | `repository_provider` | `AUTOMETRICS_REPOSITORY_PROVIDER` | `""`\* |
218 |
219 | \* Autometrics will attempt to automagically infer these values from the git config inside your working directory. To disable this behavior, explicitly set the corresponding setting or environment variable to `""`.
220 |
221 | This follows the method outlined in [Exposing the software version to Prometheus](https://www.robustperception.io/exposing-the-software-version-to-prometheus/).
222 |
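For instance, a deployment script might set these labels at startup. This is a sketch, and the git commands are just one way to obtain the values:

```sh
export AUTOMETRICS_VERSION="1.0.0"
export AUTOMETRICS_COMMIT="$(git rev-parse HEAD)"
export AUTOMETRICS_BRANCH="$(git rev-parse --abbrev-ref HEAD)"
```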
223 | ## Service name
224 |
225 | All metrics produced by Autometrics have a label called `service.name` (or `service_name` when exported to Prometheus) attached, in order to identify the logical service they are part of.
226 |
227 | You may want to override the default service name, for example if you are running multiple instances of the same code base as separate services, and you want to differentiate between the metrics produced by each one.
228 |
229 | The service name is loaded from the following environment variables, in this order:
230 |
231 | 1. `AUTOMETRICS_SERVICE_NAME` (at runtime)
232 | 2. `OTEL_SERVICE_NAME` (at runtime)
233 | 3. First part of `__package__` (at runtime)
234 |
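If neither environment variable fits your setup, the service name can also be set in code via `init`. A minimal sketch (the name is illustrative):

```python
from autometrics import init

# Explicitly sets the service name instead of relying on
# AUTOMETRICS_SERVICE_NAME / OTEL_SERVICE_NAME
init(service_name="billing-service")
```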
235 | ## Exemplars
236 |
237 | > **NOTE** - As of writing, exemplars aren't supported by the default tracker (`AUTOMETRICS_TRACKER=opentelemetry`).
238 | > You can track the progress of this feature here: https://github.com/autometrics-dev/autometrics-py/issues/41
239 |
240 | Exemplars are a way to associate a metric sample to a trace by attaching `trace_id` and `span_id` to it. You can then use this information to jump from a metric to a trace in your tracing system (for example Jaeger). If you have an OpenTelemetry tracer configured, autometrics will automatically pick up the current span from it.
241 |
242 | To use exemplars, you need to first switch to a tracker that supports them by setting `AUTOMETRICS_TRACKER=prometheus` and enable
243 | exemplar collection by setting `AUTOMETRICS_EXEMPLARS=true`. You also need to enable exemplars in Prometheus by launching Prometheus with the `--enable-feature=exemplar-storage` flag.
244 |
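In code, that configuration looks roughly like this (a sketch; remember that Prometheus itself must also run with `--enable-feature=exemplar-storage`):

```python
from autometrics import init

# Exemplars are only supported by the prometheus tracker
init(tracker="prometheus", service_name="my-service", enable_exemplars=True)
```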
245 | ## Exporting metrics
246 |
247 | There are multiple ways to export metrics from your application, depending on your setup. You can see examples of how to do this in the [examples/export_metrics](https://github.com/autometrics-dev/autometrics-py/tree/main/examples/export_metrics) directory.
248 |
249 | If you want to export metrics to Prometheus, you have two options with both the `opentelemetry` and `prometheus` trackers:
250 |
251 | 1. Create a route inside your app and respond with `generate_latest()`
252 |
253 | ```python
254 | # This example uses FastAPI, but you can use any web framework
255 | from fastapi import FastAPI, Response
256 | from prometheus_client import generate_latest
257 | app = FastAPI()
258 | # Set up a metrics endpoint for Prometheus to scrape
259 | @app.get("/metrics")
260 | def metrics():
261 | return Response(generate_latest())
262 | ```
263 |
264 | 2. Specify `prometheus` as the exporter type, and a separate server will be started to expose metrics from your app:
265 |
266 | ```python
267 | exporter = {
268 | "type": "prometheus",
269 | "address": "localhost",
270 | "port": 9464
271 | }
272 | init(tracker="prometheus", service_name="my-service", exporter=exporter)
273 | ```
274 |
275 | For the OpenTelemetry tracker, you have more options, including a custom metric reader. You can specify the exporter type to be `otlp-proto-http` or `otlp-proto-grpc`, and metrics will be exported to a remote OpenTelemetry collector via the specified protocol. You will need to install the respective extra dependency in order for this to work, which you can do when you install autometrics:
276 |
277 | ```sh
278 | pip install autometrics[exporter-otlp-proto-http]
279 | pip install autometrics[exporter-otlp-proto-grpc]
280 | ```
281 |
282 | After installing it you can configure the exporter as follows:
283 |
284 | ```python
285 | exporter = {
286 | "type": "otlp-proto-grpc",
287 | "address": "http://localhost:4317",
288 | "insecure": True
289 | }
290 | init(tracker="opentelemetry", service_name="my-service", exporter=exporter)
291 | ```
292 |
293 | To use a custom metric reader, specify the exporter type as `otel-custom` and provide the reader:
294 |
295 | ```python
296 | my_custom_metric_reader = PrometheusMetricReader("")
297 | exporter = {
298 | "type": "otel-custom",
299 | "reader": my_custom_metric_reader
300 | }
301 | init(tracker="opentelemetry", service_name="my-service", exporter=exporter)
302 | ```
303 |
304 | ## Development of the package
305 |
306 | This package uses [poetry](https://python-poetry.org) as a package manager, with all dependencies separated into three groups:
307 |
308 | - root level dependencies, required
309 | - `dev`, everything that is needed for development or in ci
310 | - `examples`, dependencies of everything in `examples/` directory
311 |
312 | By default, poetry will only install the required dependencies. If you want to run the examples, install using this command:
313 |
314 | ```sh
315 | poetry install --with examples
316 | ```
317 |
318 | Code in this repository is:
319 |
320 | - formatted using [black](https://black.readthedocs.io/en/stable/)
321 | - annotated with type definitions (which are checked by [mypy](https://www.mypy-lang.org/))
322 | - tested using [pytest](https://docs.pytest.org/)
323 |
324 | In order to run these tools locally, you have to install them, which you can do using poetry:
325 |
326 | ```sh
327 | poetry install --with dev --all-extras
328 | ```
329 |
330 | After that, you can run the tools individually:
331 |
332 | ```sh
333 | # Formatting using black
334 | poetry run black .
335 | # Lint using mypy
336 | poetry run mypy .
337 | # Run the tests using pytest
338 | poetry run pytest
339 | # Run a single test, and clear the cache
340 | poetry run pytest --cache-clear -k test_tracker
341 | ```
342 |
--------------------------------------------------------------------------------
/Tiltfile:
--------------------------------------------------------------------------------
1 | docker_compose(['configs/compose/infra.yaml', 'configs/compose/examples.yaml'])
2 |
3 | dc_resource('am', labels=["infra"])
4 | dc_resource('grafana', labels=["infra"])
5 | dc_resource('otel-collector', labels=["infra"])
6 | dc_resource('push-gateway', labels=["infra"])
7 | dc_resource('django', labels=["examples"])
8 | dc_resource('fastapi', labels=["examples"])
9 | dc_resource('otlp', labels=["examples"])
10 | dc_resource('starlette', labels=["examples"])
--------------------------------------------------------------------------------
/configs/compose/examples.yaml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 |
3 | services:
4 | django:
5 | container_name: django
6 | build:
7 | context: ../..
8 | dockerfile: configs/docker/base.Dockerfile
9 | args:
10 | PORT: 9464
11 | COPY_PATH: examples/django_example
12 | COMMAND: ./run_example.sh
13 | ports:
14 | - "9464:9464"
15 | fastapi:
16 | container_name: fastapi
17 | build:
18 | context: ../..
19 | dockerfile: configs/docker/base.Dockerfile
20 | args:
21 | PORT: 8080
22 | COPY_PATH: examples/fastapi-example.py
23 | COMMAND: poetry run python3 fastapi-example.py
24 | ports:
25 | - "9465:8080"
26 | starlette:
27 | container_name: starlette
28 | build:
29 | context: ../..
30 | dockerfile: configs/docker/base.Dockerfile
31 | args:
32 | PORT: 8080
33 | COPY_PATH: examples/starlette-otel-exemplars.py
34 | COMMAND: poetry run python3 starlette-otel-exemplars.py
35 | ports:
36 | - "9466:8080"
37 | otlp:
38 | container_name: otlp
39 | build:
40 | context: ../..
41 | dockerfile: configs/docker/base.Dockerfile
42 | args:
43 | COPY_PATH: examples/export_metrics/otlp-http.py
44 | COMMAND: poetry run python3 otlp-http.py
45 |
--------------------------------------------------------------------------------
/configs/compose/infra.yaml:
--------------------------------------------------------------------------------
1 | version: "3.8"
2 |
3 | volumes:
4 | app-logs: {}
5 | grafana-storage: {}
6 |
7 | services:
8 | am:
9 | container_name: am
10 | image: autometrics/am:latest
11 | extra_hosts:
12 | - host.docker.internal:host-gateway
13 | ports:
14 | - "6789:6789"
15 | - "9090:9090"
16 | command: "start http://otel-collector:9464/metrics host.docker.internal:9464 host.docker.internal:9465 host.docker.internal:9466"
17 | environment:
18 | - LISTEN_ADDRESS=0.0.0.0:6789
19 | restart: unless-stopped
20 | volumes:
21 | - app-logs:/var/log
22 | otel-collector:
23 | container_name: otel-collector
24 | image: otel/opentelemetry-collector-contrib:latest
25 | command: ["--config=/etc/otel-collector-config.yaml"]
26 | volumes:
27 | - ../otel-collector-config.yaml:/etc/otel-collector-config.yaml
28 | ports:
29 | - "4317:4317"
30 | - "4318:4318"
31 | - "8888:8888" # expose container metrics in prometheus format
32 | - "55680:55680"
33 | - "55679:55679"
34 | restart: unless-stopped
35 | push-gateway:
36 | container_name: push-gateway
37 | image: ghcr.io/zapier/prom-aggregation-gateway:latest
38 | grafana:
39 | container_name: grafana
40 | image: grafana/grafana-oss
41 | restart: unless-stopped
42 | ports:
43 | - "3000:3000"
44 | volumes:
45 | - grafana-storage:/var/lib/grafana
46 | - ../grafana/config.ini:/etc/grafana/grafana.ini
47 | - ../grafana/dashboards:/var/lib/grafana/dashboards
48 | - ../grafana/provisioning:/etc/grafana/provisioning
49 |
--------------------------------------------------------------------------------
/configs/docker/base.Dockerfile:
--------------------------------------------------------------------------------
1 |
2 | FROM python:latest
3 | ARG COPY_PATH
4 | ARG COMMAND
5 | ARG PORT
6 | WORKDIR /app
7 | RUN apt-get update
8 | RUN pip install poetry
9 | COPY pyproject.toml poetry.lock src ./
10 | RUN poetry config virtualenvs.create false
11 | RUN poetry install --no-interaction --no-root --with examples --extras "exporter-otlp-proto-http"
12 | COPY $COPY_PATH ./
13 | ENV OTEL_EXPORTER_OTLP_ENDPOINT http://host.docker.internal:4318
14 | ENV COMMAND $COMMAND
15 | ENV PORT $PORT
16 | EXPOSE $PORT
17 | CMD ["sh", "-c", "$COMMAND"]
--------------------------------------------------------------------------------
/configs/grafana/config.ini:
--------------------------------------------------------------------------------
1 | [auth.anonymous]
2 | disable_login_form = true
3 | enabled = true
4 | org_role = Admin
--------------------------------------------------------------------------------
/configs/grafana/dashboards/Autometrics Function Explorer.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "datasource",
8 | "uid": "grafana"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "target": {
15 | "limit": 100,
16 | "matchAny": false,
17 | "tags": [],
18 | "type": "dashboard"
19 | },
20 | "type": "dashboard"
21 | }
22 | ]
23 | },
24 | "description": "",
25 | "editable": true,
26 | "fiscalYearStartMonth": 0,
27 | "graphTooltip": 0,
28 | "id": 19,
29 | "links": [],
30 | "liveNow": false,
31 | "panels": [
32 | {
33 | "datasource": {
34 | "type": "prometheus",
35 | "uid": "PBFA97CFB590B2093"
36 | },
37 | "description": "",
38 | "fieldConfig": {
39 | "defaults": {
40 | "color": {
41 | "mode": "palette-classic"
42 | },
43 | "custom": {
44 | "axisCenteredZero": false,
45 | "axisColorMode": "text",
46 | "axisLabel": "Calls per Second",
47 | "axisPlacement": "auto",
48 | "barAlignment": 0,
49 | "drawStyle": "line",
50 | "fillOpacity": 10,
51 | "gradientMode": "none",
52 | "hideFrom": {
53 | "legend": false,
54 | "tooltip": false,
55 | "viz": false
56 | },
57 | "insertNulls": false,
58 | "lineInterpolation": "linear",
59 | "lineWidth": 1,
60 | "pointSize": 5,
61 | "scaleDistribution": {
62 | "type": "linear"
63 | },
64 | "showPoints": "never",
65 | "spanNulls": false,
66 | "stacking": {
67 | "group": "A",
68 | "mode": "none"
69 | },
70 | "thresholdsStyle": {
71 | "mode": "off"
72 | }
73 | },
74 | "mappings": [],
75 | "min": 0,
76 | "thresholds": {
77 | "mode": "absolute",
78 | "steps": [
79 | {
80 | "color": "green",
81 | "value": null
82 | },
83 | {
84 | "color": "red",
85 | "value": 80
86 | }
87 | ]
88 | },
89 | "unit": "none"
90 | },
91 | "overrides": []
92 | },
93 | "gridPos": {
94 | "h": 8,
95 | "w": 24,
96 | "x": 0,
97 | "y": 0
98 | },
99 | "id": 4,
100 | "options": {
101 | "legend": {
102 | "calcs": [
103 | "lastNotNull",
104 | "max"
105 | ],
106 | "displayMode": "table",
107 | "placement": "right",
108 | "showLegend": true,
109 | "sortBy": "Max",
110 | "sortDesc": true
111 | },
112 | "tooltip": {
113 | "mode": "multi",
114 | "sort": "desc"
115 | }
116 | },
117 | "pluginVersion": "9.4.1",
118 | "targets": [
119 | {
120 | "datasource": {
121 | "type": "prometheus",
122 | "uid": "PBFA97CFB590B2093"
123 | },
124 | "editorMode": "code",
125 | "expr": "sum by (function, module, service_name, version, commit) (\n rate(\n {\n __name__=~\"function_calls(_count)?(_total)?\",\n function=~\"${function}\"\n }[$__rate_interval]\n )\n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n)",
126 | "format": "time_series",
127 | "instant": false,
128 | "interval": "",
129 | "legendFormat": "",
130 | "refId": "A"
131 | }
132 | ],
133 | "title": "Request Rate",
134 | "transformations": [],
135 | "type": "timeseries"
136 | },
137 | {
138 | "datasource": {
139 | "type": "prometheus",
140 | "uid": "PBFA97CFB590B2093"
141 | },
142 | "description": "",
143 | "fieldConfig": {
144 | "defaults": {
145 | "color": {
146 | "mode": "palette-classic",
147 | "seriesBy": "max"
148 | },
149 | "custom": {
150 | "axisCenteredZero": false,
151 | "axisColorMode": "text",
152 | "axisLabel": "% of Function Calls That Errored",
153 | "axisPlacement": "auto",
154 | "barAlignment": 0,
155 | "drawStyle": "line",
156 | "fillOpacity": 10,
157 | "gradientMode": "none",
158 | "hideFrom": {
159 | "legend": false,
160 | "tooltip": false,
161 | "viz": false
162 | },
163 | "insertNulls": false,
164 | "lineInterpolation": "linear",
165 | "lineWidth": 1,
166 | "pointSize": 5,
167 | "scaleDistribution": {
168 | "type": "linear"
169 | },
170 | "showPoints": "never",
171 | "spanNulls": false,
172 | "stacking": {
173 | "group": "A",
174 | "mode": "none"
175 | },
176 | "thresholdsStyle": {
177 | "mode": "off"
178 | }
179 | },
180 | "mappings": [],
181 | "thresholds": {
182 | "mode": "absolute",
183 | "steps": [
184 | {
185 | "color": "green",
186 | "value": null
187 | },
188 | {
189 | "color": "red",
190 | "value": 80
191 | }
192 | ]
193 | },
194 | "unit": "percentunit"
195 | },
196 | "overrides": []
197 | },
198 | "gridPos": {
199 | "h": 8,
200 | "w": 24,
201 | "x": 0,
202 | "y": 8
203 | },
204 | "id": 2,
205 | "options": {
206 | "legend": {
207 | "calcs": [
208 | "lastNotNull",
209 | "max"
210 | ],
211 | "displayMode": "table",
212 | "placement": "right",
213 | "showLegend": true,
214 | "sortBy": "Max",
215 | "sortDesc": true
216 | },
217 | "tooltip": {
218 | "mode": "multi",
219 | "sort": "desc"
220 | }
221 | },
222 | "pluginVersion": "9.4.1",
223 | "targets": [
224 | {
225 | "datasource": {
226 | "type": "prometheus",
227 | "uid": "PBFA97CFB590B2093"
228 | },
229 | "editorMode": "code",
230 | "expr": "(\n sum by(function, module, service_name, version, commit) (\n rate(\n {\n __name__=~\"function_calls(_count)?(_total)?\",\n result=\"error\", \n function=~\"${function}\"\n }[$__rate_interval]\n )\n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n )) / (\n sum by(function, module, service_name, version, commit) (\n rate(\n {\n __name__=~\"function_calls(_count)?(_total)?\",\n function=~\"${function}\"\n }[$__rate_interval]\n )\n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n ))",
231 | "interval": "",
232 | "legendFormat": "",
233 | "range": true,
234 | "refId": "A"
235 | }
236 | ],
237 | "title": "Error Ratio",
238 | "type": "timeseries"
239 | },
240 | {
241 | "datasource": {
242 | "type": "prometheus",
243 | "uid": "PBFA97CFB590B2093"
244 | },
245 | "description": "This shows the 99th and 95th percentile latency or response time for the given function.\n\nFor example, if the 99th percentile latency is 500 milliseconds, that means that 99% of calls to the function are handled within 500ms or less.",
246 | "fieldConfig": {
247 | "defaults": {
248 | "color": {
249 | "mode": "palette-classic",
250 | "seriesBy": "max"
251 | },
252 | "custom": {
253 | "axisCenteredZero": false,
254 | "axisColorMode": "text",
255 | "axisLabel": "Function Call Duration",
256 | "axisPlacement": "auto",
257 | "barAlignment": 0,
258 | "drawStyle": "line",
259 | "fillOpacity": 10,
260 | "gradientMode": "none",
261 | "hideFrom": {
262 | "legend": false,
263 | "tooltip": false,
264 | "viz": false
265 | },
266 | "insertNulls": false,
267 | "lineInterpolation": "linear",
268 | "lineWidth": 1,
269 | "pointSize": 5,
270 | "scaleDistribution": {
271 | "type": "linear"
272 | },
273 | "showPoints": "never",
274 | "spanNulls": false,
275 | "stacking": {
276 | "group": "A",
277 | "mode": "none"
278 | },
279 | "thresholdsStyle": {
280 | "mode": "off"
281 | }
282 | },
283 | "mappings": [],
284 | "thresholds": {
285 | "mode": "absolute",
286 | "steps": [
287 | {
288 | "color": "green",
289 | "value": null
290 | },
291 | {
292 | "color": "red",
293 | "value": 80
294 | }
295 | ]
296 | },
297 | "unit": "s"
298 | },
299 | "overrides": []
300 | },
301 | "gridPos": {
302 | "h": 8,
303 | "w": 24,
304 | "x": 0,
305 | "y": 16
306 | },
307 | "id": 5,
308 | "options": {
309 | "legend": {
310 | "calcs": [
311 | "lastNotNull",
312 | "max"
313 | ],
314 | "displayMode": "table",
315 | "placement": "right",
316 | "showLegend": true,
317 | "sortBy": "Max",
318 | "sortDesc": true
319 | },
320 | "tooltip": {
321 | "mode": "multi",
322 | "sort": "desc"
323 | }
324 | },
325 | "pluginVersion": "9.4.1",
326 | "targets": [
327 | {
328 | "datasource": {
329 | "type": "prometheus",
330 | "uid": "PBFA97CFB590B2093"
331 | },
332 | "editorMode": "code",
333 | "expr": "label_replace(\n histogram_quantile(0.99, \n sum by (le, function, module, service_name, commit, version) (\n rate({__name__=~\"function_calls_duration(_seconds)?_bucket\", function=~\"$function\"}[$__rate_interval])\n # Attach the `version` and `commit` labels from the `build_info` metric \n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n )\n ),\n # Add the label {percentile_latency=\"99\"} to the time series\n \"percentile_latency\", \"99\", \"\", \"\"\n)\nor\nlabel_replace(\n histogram_quantile(0.95, \n sum by (le, function, module, service_name, commit, version) (\n rate({__name__=~\"function_calls_duration(_seconds)?_bucket\", function=~\"$function\"}[$__rate_interval])\n # Attach the `version` and `commit` labels from the `build_info` metric \n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n )\n ),\n # Add the label {percentile_latency=\"95\"} to the time series\n \"percentile_latency\", \"95\", \"\", \"\"\n)",
334 | "interval": "",
335 | "legendFormat": "",
336 | "range": true,
337 | "refId": "A"
338 | }
339 | ],
340 | "title": "Latency (95th and 99th Percentile)",
341 | "type": "timeseries"
342 | }
343 | ],
344 | "refresh": "5m",
345 | "revision": 1,
346 | "schemaVersion": 38,
347 | "style": "dark",
348 | "tags": [
349 | "autometrics"
350 | ],
351 | "templating": {
352 | "list": [
353 | {
354 | "allValue": "__none__",
355 | "current": {},
356 | "datasource": {
357 | "type": "prometheus",
358 | "uid": "PBFA97CFB590B2093"
359 | },
360 | "definition": "label_values({__name__=~\"function_calls(_count)?(_total)?\"}, function)",
361 | "hide": 0,
362 | "includeAll": false,
363 | "label": "Show Function(s)",
364 | "multi": true,
365 | "name": "function",
366 | "options": [],
367 | "query": {
368 | "query": "label_values({__name__=~\"function_calls(_count)?(_total)?\"}, function)",
369 | "refId": "StandardVariableQuery"
370 | },
371 | "refresh": 1,
372 | "regex": "",
373 | "skipUrlSync": false,
374 | "sort": 1,
375 | "tagValuesQuery": "",
376 | "tagsQuery": "",
377 | "type": "query",
378 | "useTags": false
379 | }
380 | ]
381 | },
382 | "time": {
383 | "from": "now-6h",
384 | "to": "now"
385 | },
386 | "timepicker": {},
387 | "timezone": "",
388 | "title": "Autometrics Function Explorer",
389 | "uid": "autometrics-function-explorer",
390 | "version": 1,
391 | "weekStart": ""
392 | }
--------------------------------------------------------------------------------
/configs/grafana/dashboards/Autometrics Overview.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": {
7 | "type": "datasource",
8 | "uid": "grafana"
9 | },
10 | "enable": true,
11 | "hide": true,
12 | "iconColor": "rgba(0, 211, 255, 1)",
13 | "name": "Annotations & Alerts",
14 | "target": {
15 | "limit": 100,
16 | "matchAny": false,
17 | "tags": [],
18 | "type": "dashboard"
19 | },
20 | "type": "dashboard"
21 | }
22 | ]
23 | },
24 | "description": "",
25 | "editable": true,
26 | "fiscalYearStartMonth": 0,
27 | "graphTooltip": 0,
28 | "id": 20,
29 | "links": [],
30 | "liveNow": false,
31 | "panels": [
32 | {
33 | "collapsed": false,
34 | "datasource": {
35 | "type": "prometheus",
36 | "uid": "Sc9Taxa4z"
37 | },
38 | "gridPos": {
39 | "h": 1,
40 | "w": 24,
41 | "x": 0,
42 | "y": 0
43 | },
44 | "id": 8,
45 | "panels": [],
46 | "targets": [
47 | {
48 | "datasource": {
49 | "type": "prometheus",
50 | "uid": "Sc9Taxa4z"
51 | },
52 | "refId": "A"
53 | }
54 | ],
55 | "title": "Autometrics-Instrumented Functions",
56 | "type": "row"
57 | },
58 | {
59 | "datasource": {
60 | "type": "prometheus",
61 | "uid": "PBFA97CFB590B2093"
62 | },
63 | "description": "Calls per second is calculated as the average over a 5-minute window",
64 | "fieldConfig": {
65 | "defaults": {
66 | "color": {
67 | "mode": "palette-classic"
68 | },
69 | "custom": {
70 | "axisCenteredZero": false,
71 | "axisColorMode": "text",
72 | "axisLabel": "Calls per Second",
73 | "axisPlacement": "auto",
74 | "barAlignment": 0,
75 | "drawStyle": "line",
76 | "fillOpacity": 10,
77 | "gradientMode": "none",
78 | "hideFrom": {
79 | "legend": false,
80 | "tooltip": false,
81 | "viz": false
82 | },
83 | "insertNulls": false,
84 | "lineInterpolation": "linear",
85 | "lineWidth": 1,
86 | "pointSize": 5,
87 | "scaleDistribution": {
88 | "type": "linear"
89 | },
90 | "showPoints": "never",
91 | "spanNulls": false,
92 | "stacking": {
93 | "group": "A",
94 | "mode": "none"
95 | },
96 | "thresholdsStyle": {
97 | "mode": "off"
98 | }
99 | },
100 | "mappings": [],
101 | "min": 0,
102 | "thresholds": {
103 | "mode": "absolute",
104 | "steps": [
105 | {
106 | "color": "green",
107 | "value": null
108 | },
109 | {
110 | "color": "red",
111 | "value": 80
112 | }
113 | ]
114 | },
115 | "unit": "none"
116 | },
117 | "overrides": []
118 | },
119 | "gridPos": {
120 | "h": 8,
121 | "w": 24,
122 | "x": 0,
123 | "y": 1
124 | },
125 | "id": 4,
126 | "options": {
127 | "legend": {
128 | "calcs": [
129 | "lastNotNull",
130 | "max"
131 | ],
132 | "displayMode": "table",
133 | "placement": "right",
134 | "showLegend": true,
135 | "sortBy": "Max",
136 | "sortDesc": true
137 | },
138 | "tooltip": {
139 | "mode": "multi",
140 | "sort": "desc"
141 | }
142 | },
143 | "pluginVersion": "9.4.1",
144 | "targets": [
145 | {
146 | "datasource": {
147 | "type": "prometheus",
148 | "uid": "PBFA97CFB590B2093"
149 | },
150 | "editorMode": "code",
151 | "expr": "sum by (function, module, service_name, version, commit) (\n rate(\n {\n __name__=~\"function_calls(_count)?(_total)?\", \n function=~\"${functions_top_request_rate}\"\n }[5m]\n )\n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n)",
152 | "format": "time_series",
153 | "instant": false,
154 | "interval": "",
155 | "legendFormat": "",
156 | "refId": "A"
157 | }
158 | ],
159 | "title": "Request Rate (Top $num_function_limit)",
160 | "transformations": [],
161 | "type": "timeseries"
162 | },
163 | {
164 | "datasource": {
165 | "type": "prometheus",
166 | "uid": "PBFA97CFB590B2093"
167 | },
168 | "description": "",
169 | "fieldConfig": {
170 | "defaults": {
171 | "color": {
172 | "mode": "palette-classic",
173 | "seriesBy": "max"
174 | },
175 | "custom": {
176 | "axisCenteredZero": false,
177 | "axisColorMode": "text",
178 | "axisLabel": "Error Rate",
179 | "axisPlacement": "auto",
180 | "axisSoftMax": 1,
181 | "axisSoftMin": 0,
182 | "barAlignment": 0,
183 | "drawStyle": "line",
184 | "fillOpacity": 10,
185 | "gradientMode": "none",
186 | "hideFrom": {
187 | "legend": false,
188 | "tooltip": false,
189 | "viz": false
190 | },
191 | "insertNulls": false,
192 | "lineInterpolation": "linear",
193 | "lineWidth": 1,
194 | "pointSize": 5,
195 | "scaleDistribution": {
196 | "type": "linear"
197 | },
198 | "showPoints": "never",
199 | "spanNulls": false,
200 | "stacking": {
201 | "group": "A",
202 | "mode": "none"
203 | },
204 | "thresholdsStyle": {
205 | "mode": "off"
206 | }
207 | },
208 | "mappings": [],
209 | "thresholds": {
210 | "mode": "absolute",
211 | "steps": [
212 | {
213 | "color": "green",
214 | "value": null
215 | },
216 | {
217 | "color": "red",
218 | "value": 80
219 | }
220 | ]
221 | },
222 | "unit": "percentunit"
223 | },
224 | "overrides": []
225 | },
226 | "gridPos": {
227 | "h": 8,
228 | "w": 24,
229 | "x": 0,
230 | "y": 9
231 | },
232 | "id": 2,
233 | "options": {
234 | "legend": {
235 | "calcs": [
236 | "lastNotNull",
237 | "max"
238 | ],
239 | "displayMode": "table",
240 | "placement": "right",
241 | "showLegend": true,
242 | "sortBy": "Max",
243 | "sortDesc": true
244 | },
245 | "tooltip": {
246 | "mode": "multi",
247 | "sort": "desc"
248 | }
249 | },
250 | "pluginVersion": "9.4.1",
251 | "targets": [
252 | {
253 | "datasource": {
254 | "type": "prometheus",
255 | "uid": "PBFA97CFB590B2093"
256 | },
257 | "editorMode": "code",
258 | "expr": "(\n sum by(function, module, service_name, version, commit) (\n rate(\n {\n __name__=~\"function_calls(_count)?(_total)?\", \n result=\"error\", \n function=~\"${functions_top_error_rate}\"\n }[5m]\n )\n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n )) / (\n sum by(function, module, service_name, version, commit) (\n rate(\n {\n __name__=~\"function_calls(_count)?(_total)?\", \n function=~\"${functions_top_error_rate}\"\n }[5m]\n )\n * on(instance, job) group_left(version, commit) (last_over_time(build_info[$__rate_interval]) or on (instance, job) up)\n ))",
259 | "interval": "",
260 | "legendFormat": "",
261 | "range": true,
262 | "refId": "A"
263 | }
264 | ],
265 | "title": "Error Rate (Top $num_function_limit)",
266 | "type": "timeseries"
267 | }
268 | ],
269 | "refresh": "5m",
270 | "revision": 1,
271 | "schemaVersion": 38,
272 | "style": "dark",
273 | "tags": [
274 | "autometrics"
275 | ],
276 | "templating": {
277 | "list": [
278 | {
279 | "allValue": "",
280 | "current": {},
281 | "datasource": {
282 | "type": "prometheus",
283 | "uid": "PBFA97CFB590B2093"
284 | },
285 | "definition": "query_result(topk($num_function_limit, sum by (function, module, service_name) (rate({__name__=~\"function_calls(_count)?(_total)?\"}[$__range]))))\n",
286 | "hide": 2,
287 | "includeAll": true,
288 | "multi": true,
289 | "name": "functions_top_request_rate",
290 | "options": [],
291 | "query": {
292 | "query": "query_result(topk($num_function_limit, sum by (function, module, service_name) (rate({__name__=~\"function_calls(_count)?(_total)?\"}[$__range]))))\n",
293 | "refId": "StandardVariableQuery"
294 | },
295 | "refresh": 2,
296 | "regex": "/function=\"(\\w+)\"/",
297 | "skipUrlSync": false,
298 | "sort": 4,
299 | "type": "query"
300 | },
301 | {
302 | "current": {
303 | "selected": false,
304 | "text": "10",
305 | "value": "10"
306 | },
307 | "hide": 0,
308 | "label": "Top Functions to Display",
309 | "name": "num_function_limit",
310 | "options": [
311 | {
312 | "selected": true,
313 | "text": "10",
314 | "value": "10"
315 | }
316 | ],
317 | "query": "10",
318 | "skipUrlSync": false,
319 | "type": "textbox"
320 | },
321 | {
322 | "allValue": "",
323 | "current": {},
324 | "datasource": {
325 | "type": "prometheus",
326 | "uid": "PBFA97CFB590B2093"
327 | },
328 | "definition": "query_result(topk($num_function_limit, sum by (function, module, service_name) (rate({__name__=~\"function_calls(_count)?(_total)?\", result=\"error\"}[$__range])) / (sum by (function, module, service_name) (rate({__name__=~\"function_calls(_count)?(_total)?\"}[$__range])))))\n",
329 | "hide": 2,
330 | "includeAll": true,
331 | "multi": true,
332 | "name": "functions_top_error_rate",
333 | "options": [],
334 | "query": {
335 | "query": "query_result(topk($num_function_limit, sum by (function, module, service_name) (rate({__name__=~\"function_calls(_count)?(_total)?\", result=\"error\"}[$__range])) / (sum by (function, module, service_name) (rate({__name__=~\"function_calls(_count)?(_total)?\"}[$__range])))))\n",
336 | "refId": "StandardVariableQuery"
337 | },
338 | "refresh": 2,
339 | "regex": "/function=\"(\\w+)\"/",
340 | "skipUrlSync": false,
341 | "sort": 4,
342 | "type": "query"
343 | }
344 | ]
345 | },
346 | "time": {
347 | "from": "now-6h",
348 | "to": "now"
349 | },
350 | "timepicker": {},
351 | "timezone": "",
352 | "title": "Autometrics Overview",
353 | "uid": "autometrics-overview",
354 | "version": 1,
355 | "weekStart": ""
356 | }
--------------------------------------------------------------------------------
/configs/grafana/provisioning/dashboards/dashboards.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | providers:
4 |   # a unique provider name. Required
5 | - name: ${DS_PROMETHEUS}
6 | # Org id. Default to 1
7 | orgId: 1
8 | # name of the dashboard folder.
9 | folder: 'Autometrics'
10 | # folder UID. will be automatically generated if not specified
11 | folderUid: ''
12 | # provider type. Default to 'file'
13 | type: file
14 | # disable dashboard deletion
15 | disableDeletion: false
16 | # how often Grafana will scan for changed dashboards
17 | updateIntervalSeconds: 10
18 | # allow updating provisioned dashboards from the UI
19 | allowUiUpdates: false
20 | options:
21 | # path to dashboard files on disk. Required when using the 'file' type
22 | path: /var/lib/grafana/dashboards
23 | # use folder names from filesystem to create folders in Grafana
24 | foldersFromFilesStructure: true
--------------------------------------------------------------------------------
/configs/grafana/provisioning/datasources/datasource.yml:
--------------------------------------------------------------------------------
1 | apiVersion: 1
2 |
3 | datasources:
4 | - name: Prometheus
5 | type: prometheus
6 | access: proxy
7 | orgId: 1
8 | # Use the name and container port from docker compose
9 | url: http://am:9090/prometheus
10 | basicAuth: false
11 | isDefault: true
12 | editable: true
13 |
--------------------------------------------------------------------------------
/configs/otel-collector-config.yaml:
--------------------------------------------------------------------------------
1 | receivers:
2 | otlp:
3 | protocols:
4 | grpc:
5 | http:
6 |
7 | exporters:
8 | logging:
9 | loglevel: debug
10 | prometheus:
11 | endpoint: "0.0.0.0:9464" # This is where Prometheus will scrape the metrics from.
12 | # namespace: # Replace with your namespace.
13 |
14 |
15 | processors:
16 | batch:
17 |
18 | service:
19 | pipelines:
20 | metrics:
21 | receivers: [otlp]
22 | processors: []
23 | exporters: [logging, prometheus]
24 |
--------------------------------------------------------------------------------
/docker-compose.yaml:
--------------------------------------------------------------------------------
1 | include:
2 | - configs/compose/infra.yaml
3 | - configs/compose/examples.yaml
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # autometrics-py examples
2 |
3 | You should be able to run each example by:
4 |
5 | - cloning this repository
6 | - running `poetry install --with examples`
7 | - executing `poetry run python examples/<example>.py` from the root of the repo
8 | - for the django and starlette examples, you can find the exact commands below
9 |
10 | You can change the base URL for Prometheus links via the `PROMETHEUS_URL` environment variable. So, if your local Prometheus is running on a non-default port, like 9091, you would run:
11 |
12 | ```sh
13 | PROMETHEUS_URL=http://localhost:9091/ python examples/example.py
14 | ```
15 |
16 | Read more below about each example, and what kind of features they demonstrate.
17 |
18 | Also, for the examples that expose a `/metrics` endpoint, you will need to configure Prometheus to scrape that endpoint. There is an example `prometheus.yaml` file in the root of this project, but here is the relevant part:
19 |
20 | ```yaml
21 | # Example prometheus.yaml
22 | scrape_configs:
23 | - job_name: "python-autometrics-example"
24 | metrics_path: /metrics
25 | static_configs:
26 | - targets: ["localhost:8080"]
27 | # For a real deployment, you would want the scrape interval to be
28 | # longer but for testing, you want the data to show up quickly
29 | scrape_interval: 500ms
30 | ```
31 |
32 | ## `docs-example.py`
33 |
34 | This script shows how the autometrics decorator augments the docstring for a python function.
35 |
36 | We simply decorate a function, then print its docstring to the console using the built-in `help` function.
37 |
38 | ## `example.py`
39 |
40 | This script demonstrates the basic usage of the `autometrics` decorator. When you run `python examples/example.py`, it will output links to metrics in your configured prometheus instance.
41 |
42 | You can read the script for comments on how it works, but the basic idea is that we have a division function (`div_unhandled`) that occasionally divides by zero and does not catch its errors. We can see its error rate in prometheus via the links in its doc string.
43 |
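For instance, a PromQL query along these lines shows the error ratio of `div_unhandled` (illustrative; the metric and label names follow the autometrics conventions used elsewhere in this repo):

```promql
sum by (function, module) (rate(function_calls_count_total{function="div_unhandled", result="error"}[5m]))
/
sum by (function, module) (rate(function_calls_count_total{function="div_unhandled"}[5m]))
```
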
44 | Note that the script starts an HTTP server on port 8080 using the Prometheus client library, which exposes metrics to prometheus (via a `/metrics` endpoint).
45 |
46 | Then, it enters an infinite loop (with a 2 second sleep period), calling methods repeatedly with different input parameters. This should start generating data that you can explore in Prometheus. Just follow the links that are printed to the console!
47 |
48 | > Don't forget to configure Prometheus itself to scrape the metrics endpoint. Refer to the example `prometheus.yaml` file in the root of this project on how to set this up.
49 |
50 | ## `caller-example.py`
51 |
52 | Autometrics also tracks a label, `caller`, which is the name of the function that called the decorated function. The `caller-example.py` script shows how to use that label. It uses the same structure as the `example.py` script, but it prints a PromQL query that you can use to explore the caller data yourself.
53 |
54 | > Don't forget to configure Prometheus itself to scrape the metrics endpoint. Refer to the example `prometheus.yaml` file in the root of this project on how to set this up.
55 |
56 | ## `fastapi-example.py`
57 |
58 | This is an example that shows you how to use autometrics to get metrics on http handlers with FastAPI. In this case, we're setting up the API ourselves, which means we need to expose a `/metrics` endpoint manually.
59 |
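A minimal sketch of what exposing that endpoint looks like (mirroring the code in `fastapi-example.py` below):

```python
from fastapi import FastAPI, Response
from prometheus_client import generate_latest

app = FastAPI()


# Serve the metrics collected by autometrics in the Prometheus text format
@app.get("/metrics")
def metrics():
    return Response(generate_latest())
```
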
60 | > Don't forget to configure Prometheus itself to scrape the metrics endpoint. Refer to the example `prometheus.yaml` file in the root of this project on how to set this up.
61 |
62 | ## `django-example`
63 |
64 | This is a default Django project with autometrics configured. You can find examples of instrumenting function-based and class-based views in `django_example/views`. To run the example, navigate to the `django_example` directory and run the standard command:
65 |
66 | `python manage.py runserver 8080`
67 |
68 | > Don't forget to configure Prometheus itself to scrape the metrics endpoint. Refer to the example `prometheus.yaml` file in the root of this project on how to set this up.
69 |
70 | ## `starlette-otel-exemplars.py`
71 |
72 | This app shows how to use the OpenTelemetry integration to add exemplars to your metrics. In a distributed system, exemplars allow you to track a request as it flows through your system by attaching trace/span ids to it. We can catch these ids from OpenTelemetry and expose them to Prometheus as exemplars. Do note that exemplars are an experimental feature, and you need to enable them in Prometheus with the `--enable-feature=exemplar-storage` flag.
73 |
74 | > Don't forget to configure Prometheus itself to scrape the metrics endpoint. Refer to the example `prometheus.yaml` file in the root of this project on how to set this up.
75 |
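For local testing, that means starting Prometheus with the feature flag enabled, for example:

```sh
# assumes the prometheus.yaml from the root of this repo
prometheus --config.file=prometheus.yaml --enable-feature=exemplar-storage
```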
--------------------------------------------------------------------------------
/examples/caller-example.py:
--------------------------------------------------------------------------------
1 | from autometrics import autometrics, init
2 | import time
3 | import random
4 |
5 |
6 | # This is moana, who would rather explore the ocean than prometheus metrics
7 | @autometrics
8 | def moana():
9 | return "surf's up!"
10 |
11 |
12 | # This is neo, the one (that we'll end up calling)
13 | @autometrics
14 | def neo():
15 | return "i know kung fu"
16 |
17 |
18 | # This is simba. Rawr.
19 | @autometrics
20 | def simba():
21 | return "rawr"
22 |
23 |
24 | # Define a function that randomly calls `moana`, `neo`, or `simba`
25 | @autometrics
26 | def destiny():
27 | random_int = random.randint(0, 2)
28 | if random_int == 0:
29 | return f"Destiny is calling moana. moana says: {moana()}"
30 | elif random_int == 1:
31 | return f"Destiny is calling neo. neo says: {neo()}"
32 | else:
33 | return f"Destiny is calling simba. simba says: {simba()}"
34 |
35 |
36 | # Initialize autometrics and start an HTTP server on port 8080 using
37 | # the Prometheus client library, which exposes our metrics to prometheus
38 | init(exporter={"type": "prometheus", "port": 8080})
39 |
40 | print(f"Try this PromQL query in your Prometheus dashboard:\n")
41 | print(
42 | f"# Rate of calls to the `destiny` function per second, averaged over 5 minute windows\n"
43 | )
44 | print(
45 | 'sum by (function, module) (rate(function_calls_count_total{caller="destiny"}[5m]))'
46 | )
47 |
48 | # Enter an infinite loop (with a 0.3 second sleep period), calling the `destiny` function.
49 | while True:
50 | destiny()
51 | time.sleep(0.3)
52 |
53 |
54 | # NOTE - You will want to open Prometheus and run the query above to explore the caller data.
55 |
--------------------------------------------------------------------------------
/examples/django_example/Readme.md:
--------------------------------------------------------------------------------
1 | # Autometrics Django example
2 |
3 | This example project illustrates how you can integrate autometrics into a Django application.
4 |
5 | ## Running the example
6 |
7 | **Note:** You will need [prometheus](https://prometheus.io/download/) installed locally.
8 |
9 | Install dependencies from the root of the project:
10 |
11 | ```shell
12 | poetry install --with examples
13 | ```
14 |
15 | Next, run the Django server:
16 |
17 | ```shell
18 | poetry run python3 manage.py runserver
19 | ```
20 |
21 | Now when you visit any of the routes marked with `@autometrics`, you should see metrics added to prometheus.
22 |
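To verify that metrics are being recorded, you can hit one of the instrumented routes and then check the metrics endpoint (a quick sanity check; `runserver` listens on port 8000 by default):

```sh
curl http://localhost:8000/simple/
curl http://localhost:8000/metrics/
```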
--------------------------------------------------------------------------------
/examples/django_example/django_example/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/examples/django_example/django_example/__init__.py
--------------------------------------------------------------------------------
/examples/django_example/django_example/asgi.py:
--------------------------------------------------------------------------------
1 | """
2 | ASGI config for django_example project.
3 |
4 | It exposes the ASGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/4.2/howto/deployment/asgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.asgi import get_asgi_application
13 |
14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_example.settings")
15 |
16 | application = get_asgi_application()
17 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for django_example project.
3 |
4 | Generated by 'django-admin startproject' using Django 4.2.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/4.2/topics/settings/
8 |
9 | For the full list of settings and their values, see
10 | https://docs.djangoproject.com/en/4.2/ref/settings/
11 | """
12 |
13 | from pathlib import Path
14 | from autometrics import init
15 |
16 | init(
17 | branch="main",
18 | commit="67a1b3a",
19 | version="0.1.0",
20 | service_name="django",
21 | )
22 |
23 | # Build paths inside the project like this: BASE_DIR / 'subdir'.
24 | BASE_DIR = Path(__file__).resolve().parent.parent
25 |
26 |
27 | # Quick-start development settings - unsuitable for production
28 | # See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
29 |
30 | # SECURITY WARNING: keep the secret key used in production secret!
31 | SECRET_KEY = "django-insecure-4#s6_yrup46u#lgx)5ah%s-&ddn@_7z!t@8)@!#e1ytg=gi8)&"
32 |
33 | # SECURITY WARNING: don't run with debug turned on in production!
34 | DEBUG = True
35 |
36 | ALLOWED_HOSTS = ["host.docker.internal", "localhost"]
37 |
38 |
39 | # Application definition
40 |
41 | INSTALLED_APPS = [
42 | # 'django.contrib.admin',
43 | # 'django.contrib.auth',
44 | # "django.contrib.contenttypes",
45 | # "django.contrib.sessions",
46 | # "django.contrib.messages",
47 | "django.contrib.staticfiles",
48 | ]
49 |
50 | MIDDLEWARE = [
51 | "django.middleware.security.SecurityMiddleware",
52 | # "django.contrib.sessions.middleware.SessionMiddleware",
53 | "django.middleware.common.CommonMiddleware",
54 | "django.middleware.csrf.CsrfViewMiddleware",
55 | # 'django.contrib.auth.middleware.AuthenticationMiddleware',
56 | # "django.contrib.messages.middleware.MessageMiddleware",
57 | "django.middleware.clickjacking.XFrameOptionsMiddleware",
58 | ]
59 |
60 | ROOT_URLCONF = "django_example.urls"
61 |
62 | TEMPLATES = [
63 | {
64 | "BACKEND": "django.template.backends.django.DjangoTemplates",
65 | "DIRS": [],
66 | "APP_DIRS": True,
67 | "OPTIONS": {
68 | "context_processors": [
69 | "django.template.context_processors.debug",
70 | "django.template.context_processors.request",
71 | "django.contrib.auth.context_processors.auth",
72 | "django.contrib.messages.context_processors.messages",
73 | ],
74 | },
75 | },
76 | ]
77 |
78 | WSGI_APPLICATION = "django_example.wsgi.application"
79 |
80 |
81 | # Database
82 | # https://docs.djangoproject.com/en/4.2/ref/settings/#databases
83 |
84 | DATABASES = {
85 | "default": {
86 | "ENGINE": "django.db.backends.sqlite3",
87 | "NAME": BASE_DIR / "db.sqlite3",
88 | }
89 | }
90 |
91 |
92 | # Password validation
93 | # https://docs.djangoproject.com/en/4.2/ref/settings/#auth-password-validators
94 |
95 | AUTH_PASSWORD_VALIDATORS = [
96 | {
97 | "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
98 | },
99 | {
100 | "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
101 | },
102 | {
103 | "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
104 | },
105 | {
106 | "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
107 | },
108 | ]
109 |
110 |
111 | # Internationalization
112 | # https://docs.djangoproject.com/en/4.2/topics/i18n/
113 |
114 | LANGUAGE_CODE = "en-us"
115 |
116 | TIME_ZONE = "UTC"
117 |
118 | USE_I18N = True
119 |
120 | USE_TZ = True
121 |
122 |
123 | # Static files (CSS, JavaScript, Images)
124 | # https://docs.djangoproject.com/en/4.2/howto/static-files/
125 |
126 | STATIC_URL = "static/"
127 |
128 | # Default primary key field type
129 | # https://docs.djangoproject.com/en/4.2/ref/settings/#default-auto-field
130 |
131 | DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
132 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/urls.py:
--------------------------------------------------------------------------------
1 | """
2 | URL configuration for django_example project.
3 |
4 | The `urlpatterns` list routes URLs to views. For more information please see:
5 | https://docs.djangoproject.com/en/4.2/topics/http/urls/
6 | Examples:
7 | Function views
8 | 1. Add an import: from my_app import views
9 | 2. Add a URL to urlpatterns: path('', views.home, name='home')
10 | Class-based views
11 | 1. Add an import: from other_app.views import Home
12 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
13 | Including another URLconf
14 | 1. Import the include() function: from django.urls import include, path
15 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
16 | """
17 | from django.urls import path
18 |
19 | from .views.concurrency import ConcurrencyView
20 | from .views.latency import RandomLatencyView
21 | from .views.metrics import metrics
22 | from .views.simple import simple_handler
23 | from .views.error import ErrorOrOkView
24 |
25 | urlpatterns = [
26 | path("concurrency/", ConcurrencyView.as_view()),
27 | path("latency/", RandomLatencyView.as_view()),
28 | path("error/", ErrorOrOkView.as_view()),
29 | path("simple/", simple_handler),
30 | path("metrics/", metrics),
31 | ]
32 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/views/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/examples/django_example/django_example/views/__init__.py
--------------------------------------------------------------------------------
/examples/django_example/django_example/views/concurrency.py:
--------------------------------------------------------------------------------
1 | import time
2 | from autometrics import autometrics
3 | from django.http import HttpResponse
4 | from django.views import View
5 |
6 |
7 | class ConcurrencyView(View):
8 | """Here you can see how concurrency tracking works in autometrics.
9 | Just add the `track_concurrency=True` argument, and autometrics
10 | will track the number of concurrent requests to this endpoint."""
11 |
12 | @autometrics(track_concurrency=True)
13 | def get(self, request):
14 | time.sleep(0.25)
15 | return HttpResponse("Many clients wait for a reply from this endpoint!")
16 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/views/error.py:
--------------------------------------------------------------------------------
1 | import random
2 | from autometrics import autometrics
3 | from django.http import HttpResponse, HttpResponseServerError
4 | from django.views import View
5 |
6 |
7 | class ErrorOrOkView(View):
8 |     """View that returns an error or an ok response depending on a
9 | coin flip
10 | """
11 |
12 | @autometrics
13 | def get(self, request):
14 | result = random.choice(["error", "ok"])
15 | if result == "error":
16 | raise Exception(result)
17 | return HttpResponse(result)
18 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/views/latency.py:
--------------------------------------------------------------------------------
1 | import random
2 | import time
3 | from autometrics import autometrics
4 | from django.http import HttpResponse
5 | from django.views import View
6 |
7 |
8 | class RandomLatencyView(View):
9 |     """This view responds with a random latency between 100ms and 1 second"""
10 |
11 | @autometrics
12 | def get(self, request):
13 | duration = random.randint(1, 10)
14 |
15 | time.sleep(duration / 10)
16 |
17 |         return HttpResponse("i was waiting for {}ms!".format(duration * 100))
18 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/views/metrics.py:
--------------------------------------------------------------------------------
1 | from prometheus_client import generate_latest
2 | from django.http import HttpResponse
3 |
4 |
5 | def metrics(_request):
6 | return HttpResponse(generate_latest())
7 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/views/simple.py:
--------------------------------------------------------------------------------
1 | from autometrics import autometrics
2 | from django.http import HttpResponse
3 |
4 |
5 | @autometrics
6 | def simple_handler(request):
7 | "This is the simplest possible handler. It just returns a string."
8 | return HttpResponse("Hello World")
9 |
--------------------------------------------------------------------------------
/examples/django_example/django_example/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for django_example project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/4.2/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_example.settings")
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/examples/django_example/locustfile.py:
--------------------------------------------------------------------------------
1 | import time
2 | from locust import HttpUser, task, between
3 |
4 |
5 | class DjangoUser(HttpUser):
6 | wait_time = between(1, 2.5)
7 |
8 | @task(10)
9 | def visit_concurrency_handler(self):
10 | self.client.get("/concurrency/")
11 |
12 | @task
13 | def visit_error_handler(self):
14 | self.client.get("/error/")
15 |
16 | @task
17 | def visit_simple_handler(self):
18 | self.client.get("/simple/")
19 |
20 | @task
21 | def visit_latency_handler(self):
22 | self.client.get("/latency/")
23 |
--------------------------------------------------------------------------------
/examples/django_example/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Django's command-line utility for administrative tasks."""
3 | import os
4 | import sys
5 |
6 |
7 | def main():
8 | """Run administrative tasks."""
9 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_example.settings")
10 | try:
11 | from django.core.management import execute_from_command_line
12 | except ImportError as exc:
13 | raise ImportError(
14 | "Couldn't import Django. Are you sure it's installed and "
15 | "available on your PYTHONPATH environment variable? Did you "
16 | "forget to activate a virtual environment?"
17 | ) from exc
18 | execute_from_command_line(sys.argv)
19 |
20 |
21 | if __name__ == "__main__":
22 | main()
23 |
--------------------------------------------------------------------------------
/examples/django_example/mypy.ini:
--------------------------------------------------------------------------------
1 | [mypy]
2 | plugins =
3 | mypy_django_plugin.main
4 |
5 | [mypy.plugins.django-stubs]
6 | django_settings_module = "django_example.settings"
7 |
--------------------------------------------------------------------------------
/examples/django_example/run_example.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # run the server itself
4 | poetry run python manage.py runserver 0.0.0.0:9464 &
5 | # run the locust load test
6 | poetry run locust --host=http://localhost:9464 --users=100 --headless --skip-log-setup &
7 |
8 | # kill all child processes on exit
9 | trap "trap - SIGTERM && kill -- -$$" INT TERM EXIT
10 | wait
--------------------------------------------------------------------------------
/examples/docs-example.py:
--------------------------------------------------------------------------------
1 | from autometrics import autometrics, init
2 |
3 | init()
4 |
5 |
6 | @autometrics
7 | def hello():
8 | """A function that prints hello"""
9 | print("Hello")
10 |
11 |
12 | # Use the built-in `help` function to print the docstring for `hello`
13 | #
14 | # In your console, you'll see links to prometheus metrics for the `hello` function,
15 | # which were added by the `autometrics` decorator.
16 | help(hello)
17 |
--------------------------------------------------------------------------------
/examples/example.py:
--------------------------------------------------------------------------------
1 | import time
2 | import random
3 | from autometrics import autometrics, init
4 | from autometrics.objectives import Objective, ObjectiveLatency, ObjectivePercentile
5 |
6 |
7 | # Defines a class called `Operations` that has two methods:
8 | # 1. `add` - Perform addition
9 | # 2. `div_handled` - Perform division and handle errors
10 | #
11 | class Operations:
12 | def __init__(self, **args):
13 | self.args = args
14 |
15 | @autometrics
16 | def add(self, num1, num2):
17 | self.num1 = num1
18 | self.num2 = num2
19 | return self.num1 + self.num2
20 |
21 | @autometrics
22 | def div_handled(self, num1, num2):
23 | self.num1 = num1
24 | self.num2 = num2
25 | try:
26 | result = self.num1 / self.num2
27 | except Exception as e:
28 | result = e.__class__.__name__
29 | return result
30 |
31 |
32 | # Perform division without handling errors
33 | @autometrics
34 | def div_unhandled(num1, num2):
35 | result = num1 / num2
36 | return result
37 |
38 |
39 | RANDOM_SLO = Objective(
40 | "random",
41 | success_rate=ObjectivePercentile.P99_9,
42 | latency=(ObjectiveLatency.Ms250, ObjectivePercentile.P99),
43 | )
44 |
45 |
46 | @autometrics(objective=RANDOM_SLO)
47 | def random_error():
48 | """This function will randomly return an error or ok."""
49 |
50 | result = random.choice(["ok", "error"])
51 | if result == "error":
52 | time.sleep(1)
53 | raise RuntimeError("random error")
54 | return result
55 |
56 |
57 | ops = Operations()
58 |
59 | # Show the docstring (with links to prometheus metrics) for the `add` method
60 | print(ops.add.__doc__)
61 |
62 | # Show the docstring (with links to prometheus metrics) for the `div_unhandled` method
63 | print(div_unhandled.__doc__)
64 |
65 | # Initialize autometrics and start an HTTP server on port 8080 using
66 | # the Prometheus client library, which exposes our metrics to prometheus
67 | init(exporter={"type": "prometheus", "port": 8080})
68 |
69 | # Enter an infinite loop (with a 2 second sleep period), calling the "div_handled", "add", "div_unhandled", and "random_error" functions,
70 | # in order to generate metrics.
71 | while True:
72 | try:
73 | ops.div_handled(2, 0)
74 | except Exception:
75 | pass
76 |
77 | ops.add(1, 2)
78 | ops.div_handled(2, 1)
79 |
80 | try:
81 | # Randomly call `div_unhandled` with a 50/50 chance of raising an error
82 | div_unhandled(2, random.randint(0, 1))
83 | except Exception:
84 | pass
85 |
86 | ops.add(1, 2)
87 | time.sleep(2)
88 |
89 | try:
90 | # Call `div_unhandled` such that it raises an error
91 | div_unhandled(2, 0)
92 | except Exception:
93 | pass
94 |
95 | try:
96 | # Call random_error. It will randomly raise an error or return "ok"
97 | random_error()
98 | except Exception:
99 | pass
100 |
--------------------------------------------------------------------------------
/examples/export_metrics/otel-prometheus.py:
--------------------------------------------------------------------------------
1 | import time
2 | from autometrics import autometrics, init
3 |
4 | # Autometrics supports exporting metrics to Prometheus via the OpenTelemetry tracker.
5 | # This example exposes the metrics with the Prometheus Python client; the available
6 | # settings are the same as those of the client. By default, the Prometheus exporter will expose metrics
7 | # on port 9464. If you don't have a Prometheus server running, you can run Tilt or
8 | # Docker Compose from the root of this repo to start one up.
9 |
10 | init(
11 | tracker="opentelemetry",
12 | exporter={
13 | "type": "prometheus",
14 | "port": 9464,
15 | },
16 | service_name="prom-exporter",
17 | )
18 |
19 |
20 | @autometrics
21 | def my_function():
22 | pass
23 |
24 |
25 | while True:
26 | my_function()
27 | time.sleep(1)
28 |
--------------------------------------------------------------------------------
/examples/export_metrics/otlp-grpc.py:
--------------------------------------------------------------------------------
1 | import time
2 | from autometrics import autometrics, init
3 | from opentelemetry.sdk.metrics import Counter
4 | from opentelemetry.sdk.metrics.export import (
5 | AggregationTemporality,
6 | )
7 |
8 | # Autometrics supports exporting metrics to OTLP collectors via gRPC and HTTP transports.
9 | # This example uses the gRPC transport; the available settings are similar to those of
10 | # the OpenTelemetry Python SDK. By default, the OTLP exporter will send metrics to localhost:4317.
11 | # If you don't have an OTLP collector running, you can run Tilt or Docker Compose
12 | # to start one up.
13 |
14 | init(
15 | exporter={
16 | "type": "otlp-proto-grpc",
17 | "endpoint": "http://localhost:4317", # You don't need to set this if you are using the default endpoint
18 |         "insecure": True,  # Use a plaintext (non-TLS) connection to the local collector
19 | "push_interval": 1000,
20 | # Here are some other available settings:
21 | # "timeout": 10,
22 | # "headers": {"x-something": "value"},
23 | # "aggregation_temporality": {
24 | # Counter: AggregationTemporality.CUMULATIVE,
25 | # },
26 | },
27 | service_name="otlp-exporter",
28 | )
29 |
30 |
31 | @autometrics
32 | def my_function():
33 | pass
34 |
35 |
36 | while True:
37 | my_function()
38 | time.sleep(1)
39 |
--------------------------------------------------------------------------------
/examples/export_metrics/otlp-http.py:
--------------------------------------------------------------------------------
1 | import time
2 | from autometrics import autometrics, init
3 | from opentelemetry.sdk.metrics import Counter
4 | from opentelemetry.sdk.metrics.export import (
5 | AggregationTemporality,
6 | )
7 |
8 | # Autometrics supports exporting metrics to OTLP collectors via gRPC and HTTP transports.
9 | # This example uses the HTTP transport, available settings are similar to the OpenTelemetry
10 | # Python SDK. By default, the OTLP exporter will send metrics to localhost:4318.
11 | # If you don't have an OTLP collector running, you can run Tilt or Docker Compose
12 | # to start one up.
13 |
14 | init(
15 | exporter={
16 | "type": "otlp-proto-http",
17 | "endpoint": "http://localhost:4318/", # You don't need to set this if you are using the default endpoint
18 | "push_interval": 1000,
19 | # Here are some other available settings:
20 | # "timeout": 10,
21 | # "headers": {"x-something": "value"},
22 | # "aggregation_temporality": {
23 | # Counter: AggregationTemporality.CUMULATIVE,
24 | # },
25 | },
26 | service_name="otlp-exporter",
27 | )
28 |
29 |
30 | @autometrics
31 | def my_function():
32 | pass
33 |
34 |
35 | while True:
36 | my_function()
37 | time.sleep(1)
38 |
--------------------------------------------------------------------------------
/examples/export_metrics/prometheus-client.py:
--------------------------------------------------------------------------------
1 | import time
2 | from autometrics import autometrics, init
3 |
4 | # Autometrics supports exporting metrics to Prometheus via the Prometheus Python client.
5 | # This example uses the Prometheus tracker; the available settings are the same as those
6 | # of the Prometheus Python client. By default, the Prometheus exporter will expose metrics
7 | # on port 9464. If you don't have a Prometheus server running, you can run Tilt or
8 | # Docker Compose from the root of this repo to start one up.
9 |
10 | init(
11 | tracker="prometheus",
12 | exporter={
13 | "type": "prometheus",
14 | "port": 9464,
15 | },
16 | service_name="prom-exporter",
17 | )
18 |
19 |
20 | @autometrics
21 | def my_function():
22 | pass
23 |
24 |
25 | while True:
26 | my_function()
27 | time.sleep(1)
28 |
--------------------------------------------------------------------------------
/examples/fastapi-example.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import uvicorn
3 |
4 | from autometrics import autometrics, init
5 | from fastapi import FastAPI, Response
6 | from fastapi.responses import JSONResponse
7 | from prometheus_client import generate_latest
8 |
9 | app = FastAPI()
10 |
11 |
12 | # Set up a metrics endpoint for Prometheus to scrape
13 | # `generate_latest` returns the latest metrics data in the Prometheus text format
14 | @app.get("/metrics")
15 | def metrics():
16 | return Response(generate_latest())
17 |
18 |
19 | # Set up the root endpoint of the API
20 | @app.get("/")
21 | @autometrics
22 | def read_root():
23 | do_something()
24 | return {"Hello": "World"}
25 |
26 |
27 | # Set up an async handler
28 | @app.get("/async")
29 | @autometrics
30 | async def async_route():
31 | message = await do_something_async()
32 | return {"Hello": message}
33 |
34 |
35 | @autometrics
36 | def do_something():
37 | print("done")
38 |
39 |
40 | @autometrics
41 | async def do_something_async():
42 | print("async start")
43 | await asyncio.sleep(2.0)
44 | print("async done")
45 | return "async world"
46 |
47 |
48 | def response_is_error(response: Response):
49 | if response.status_code >= 400:
50 | return True
51 |
52 |
53 | @app.get("/not-implemented")
54 | @autometrics(record_error_if=response_is_error)
55 | def not_implemented():
56 | return JSONResponse(
57 | status_code=501, content={"message": "This endpoint is not implemented"}
58 | )
59 |
60 |
61 | @app.get("/flowers/{flower_name}")
62 | def flower(flower_name: str):
63 | try:
64 | return JSONResponse(content={"message": get_pretty_flower(flower_name)})
65 | except NotFoundError as error:
66 | return JSONResponse(status_code=404, content={"message": str(error)})
67 |
68 |
69 | class NotFoundError(Exception):
70 | pass
71 |
72 |
73 | def is_not_found_error(error: Exception):
74 | return isinstance(error, NotFoundError)
75 |
76 |
77 | @autometrics(record_success_if=is_not_found_error)
78 | def get_pretty_flower(flower_name: str):
79 | """Returns whether the flower is pretty"""
80 | print(f"Getting pretty flower for {flower_name}")
81 | flowers = ["rose", "tulip", "daisy"]
82 | if flower_name not in flowers:
83 | raise NotFoundError(
84 | f"Flower {flower_name} not found. Perhaps you meant one of these: {', '.join(flowers)}?"
85 | )
86 | return f"A {flower_name} is pretty"
87 |
88 |
89 | init(service_name="fastapi")
90 |
91 |
92 | if __name__ == "__main__":
93 | uvicorn.run(app, host="0.0.0.0", port=8080)
94 |
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.11
2 | # Install dependencies manually
3 | # This is the easiest way in the current set up (given that this example is nested in
4 | # the autometrics-py repository) but it's in no way a best practice. :)
5 | RUN pip install fastapi autometrics prometheus-client uvicorn
6 |
7 | WORKDIR /code
8 |
9 | # Copy the python code into the container
10 | COPY app.py /code/
11 |
12 | # Expose the port that the app (by default) is running on
13 | EXPOSE 8080
14 |
15 | # Start the app in a flexible way, so it can be run on Heroku, Azure App Services, or locally
16 | # Heroku uses PORT, Azure App Services uses WEBSITES_PORT, Fly.io uses 8080 by default
17 | CMD ["sh", "-c", "uvicorn app:app --host 0.0.0.0 --port ${PORT:-${WEBSITES_PORT:-8080}}"]
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/README.md:
--------------------------------------------------------------------------------
1 | # Getting started FastAPI & Fly.io
2 |
3 | In this tutorial we'll create a simple back-end application that uses autometrics to track several of the API calls. If you're only interested in deploying a small [fastapi](https://fastapi.tiangolo.com/) app and seeing autometrics in action on fly.io, you can just clone this repository and continue at the [deploy & monitor section](#deploy--monitor). This app will report metrics like:
4 |
5 | - success rate
6 | - error ratio
7 | - latency
8 |
9 | ## Requirements
10 |
11 | In order to set this project up locally you need:
12 |
13 | - [poetry](https://python-poetry.org). This is used for managing dependencies
14 | - Python 3.8 or newer
15 | - [Fly.io](https://fly.io/) account and their CLI installed [flyctl](https://fly.io/docs/hands-on/install-flyctl/)
16 | - optionally: [docker](https://www.docker.com/) if you want to build the container locally
17 |
18 | ## Starting the project from scratch
19 |
20 | Let's start off by creating a new directory and changing into it:
21 |
22 | ```sh
23 | mkdir fastapi-fly
24 | cd fastapi-fly
25 | ```
26 |
27 | The next step is to initialize the project using poetry:
28 |
29 | ```sh
30 | poetry init
31 | ```
32 |
33 | This will trigger a wizard asking for:
34 |
35 | - your package name (the default `fastapi-fly` is fine)
36 | - version number (the default `0.1.0` value is also fine)
37 | - description (can be left empty)
38 | - author (you can leave it empty or use the default value)
39 | - license (which I will set to MIT, but can be left empty)
40 | - compatible Python version (should be at least Python 3.8)
41 | - defining dependencies interactively: which we won't do
42 | - defining development dependencies interactively: which we won't do either
43 | - confirmation of the generation (which should be answered with the default yes)
44 |
45 | Let's add our first set of dependencies:
46 |
47 | ```sh
48 | poetry add fastapi autometrics prometheus-client uvicorn
49 | ```
50 |
51 | This will add the needed dependencies and install them.
52 |
53 | After this we're ready to add some code. Create a file named `app.py` in your favorite editor and add the following code:
54 |
55 | ```python
56 | import time
57 | from autometrics import autometrics, init
58 | # Import below is needed for the service level objective (SLO) support
59 | from autometrics.objectives import Objective, ObjectiveLatency, ObjectivePercentile
60 | from fastapi import FastAPI, Response
61 | from prometheus_client import start_http_server
62 | import uvicorn
63 |
64 | app = FastAPI()
65 |
66 |
67 | # Set up the root endpoint of the API
68 | @app.get("/")
69 | # Add the autometrics decorator to enable metrics for this endpoint
70 | # It needs to be added AFTER the fastapi decorator otherwise it won't
71 | # be triggered
72 | @autometrics
73 | def hello_world():
74 | do_something()
75 | return {"Hello": "World"}
76 |
77 |
78 | # Let's set up a service level objective (SLO), so we can check out how SLOs work
79 | ITEM_SLO = Objective(
80 | "sleep",
81 | success_rate=ObjectivePercentile.P99_9,
82 | latency=(ObjectiveLatency.Ms250, ObjectivePercentile.P99),
83 | )
84 |
85 |
86 | @app.get("/sleep/")
87 | @autometrics(objective=ITEM_SLO)
88 | def get_sleep(duration: int = 0):
89 | """A function that takes a duration parameter to determine how much the response
90 | needs to be delayed"""
91 |
92 | time.sleep(duration)
93 | return {"duration": duration}
94 |
95 |
96 | @app.get("/not-implemented")
97 | @autometrics
98 | def not_implemented():
99 | """An endpoint that always throws an exception"""
100 | raise NotImplementedError("Not implemented")
101 |
102 | @autometrics
103 | def do_something():
104 | # This function doesn't do much
105 | print("done")
106 |
107 | # Before starting the server, we need to initialize autometrics
108 | # by calling init(). In order for prometheus to get the data
109 | # we'll also pass the configuration that will set
110 | # up a separate endpoint that exposes data in a format
111 | # that prometheus can understand.
112 | # This metrics server will run on port 8008
113 | init(exporter={"type": "prometheus", "port": 8008})
114 |
115 | # If the app is run directly with python (rather than by fly.io in a
116 | # container), we enter this flow and it runs on port 8080
117 | if __name__ == "__main__":
118 | uvicorn.run(app, host="localhost", port=8080)
119 |
120 | ```
121 |
122 | Technically you can now run this example locally using:
123 |
124 | ```sh
125 | poetry run python app.py
126 | ```
127 |
128 | You can navigate to `localhost:8080` to see a small JSON response:
129 |
130 | ```json
131 | { "Hello": "World" }
132 | ```
133 |
134 | And if you go to `localhost:8008` you can see the metrics that prometheus will scrape:
135 |
136 | ```text
137 | # HELP function_calls_count_total Autometrics counter for tracking function calls
138 | # TYPE function_calls_count_total counter
139 | function_calls_count_total{caller="hello_world",function="do_something",module="app",objective_name="",objective_percentile="",result="ok"} 1.0
140 | function_calls_count_total{caller="run",function="hello_world",module="app",objective_name="",objective_percentile="",result="ok"} 1.0
141 | ```
142 |
143 | The endpoint will expose more data than just the autometrics metrics, but the entries starting with `function_` are the ones generated by autometrics. Now that we've seen the code in action, let's start with the preparations for the deploy.
144 |
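As a quick sanity check (assuming the app is still running locally), you can filter the exporter output for the autometrics entries from the command line:

```sh
curl -s http://localhost:8008 | grep '^function_'
```
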
145 | ## Preparing for deployment
146 |
147 | In order to run on fly.io two more files are needed:
148 |
149 | 1. A Dockerfile (though it's also possible to deploy without using one)
150 | 2. A fly.toml file (a configuration file containing information about which port the app is running on and whether or not custom metrics are used)
151 |
152 | Let's first write the Dockerfile.
153 |
154 | ```dockerfile
155 | FROM python:3.11
156 | # Install poetry
157 | RUN pip install poetry
158 |
159 | # This is the folder where our app will live
160 | WORKDIR /code
161 | COPY ./poetry.lock ./pyproject.toml /code/
162 |
163 | # Generate requirements.txt and install dependencies
164 | RUN poetry export -f requirements.txt --output requirements.txt --without-hashes && pip install -r requirements.txt
165 |
166 | # Copy the python code into the container
167 | COPY app.py /code/
168 |
169 | # Expose the port that the app (by default) is running on
170 | EXPOSE 8080
171 |
172 | # Start the app in a flexible way, so it can be run on Heroku, Azure App Services, or locally
173 | # Heroku uses PORT, Azure App Services uses WEBSITES_PORT, Fly.io uses 8080 by default
174 | CMD ["sh", "-c", "uvicorn app:app --host 0.0.0.0 --port ${PORT:-${WEBSITES_PORT:-8080}}"]
175 | ```
176 |
177 | Next step: prepare for deployment to fly.io by creating a fly.toml file. In this case we do want to create this file before running the `launch` command, because some additional configuration needs to be added so that prometheus on fly.io will scrape our metrics endpoint.
178 |
179 | Here is the minimal fly.toml file:
180 |
181 | ```toml
182 | [http_service]
183 | internal_port = 8080
184 | force_https = true
185 |
186 | [metrics]
187 | port = 8008
188 | path = "/metrics"
189 | ```
190 |
191 | ### Deploy & monitor
192 |
193 | Now we're ready to deploy to fly.io. You can do this using the launch command:
194 |
195 | ```bash
196 | flyctl launch --now
197 | ```
198 |
199 | This will launch a wizard that will ask a few questions:
200 |
201 | 1. An existing fly.toml file was found. Would you like to copy its configuration to the new app? This should be answered with `yes`
202 | 2. Choose an app name. Among other things, this will determine the URL under which you can access the app. You can leave it empty and use a generated one
203 | 3. Choose a region for deployment. For me the Amsterdam region is suggested, but other regions should work fine.
204 | 4. Create .dockerignore from .gitignore? The default option No is fine.
205 |
206 | After that the actual deploy will happen. If this is your first application, a builder machine might be launched first:
207 |
208 | ```
209 | Wrote config file fly.toml
210 | ==> Building image
211 | Remote builder fly-builder-divine-shadow-9546 ready
212 | ==> Creating build context
213 | --> Creating build context done
214 | ==> Building image with Docker
215 | --> docker host: 20.10.12 linux x86_64
216 | [+] Building 3.2s (0/1)
217 | ```
218 |
219 | After that, the Dockerfile will be built & pushed to the fly.io registry:
220 |
221 | ```
222 | => [internal] load remote build context 0.0s
223 | => copy /context / 0.3s
224 | => [internal] load metadata for docker.io/library/python:3.11 0.7s
225 | => CACHED [1/6] FROM docker.io/library/python:3.11@sha256:f7382f4f9dbc51183c72d621b9c196c1565f713a1fe40c119d215c961fa22815 0.0s
226 | => [2/6] RUN pip install poetry 11.5s
227 | => [3/6] WORKDIR /code 0.0s
228 | => [4/6] COPY ./poetry.lock ./pyproject.toml /code/ 0.0s
229 | => [5/6] RUN poetry install 2.4s
230 | => [6/6] COPY . /code/ 0.2s
231 | => exporting to image 0.8s
232 | => => exporting layers 0.8s
233 | => => writing image sha256:59b6775d0d880a37c84527171f527987bbf96391b04725d1b4e2a17cfb8fa0e1 0.0s
234 | => => naming to registry.fly.io/black-lake-4279:deployment-01GYFM65WZ6MVDS3FHR0CMKJHV 0.0s
235 | --> Building image done
236 | ==> Pushing image to fly
237 | The push refers to repository [registry.fly.io/black-lake-4279]
238 | ```
239 |
240 | After that, the app is launched. Note: the name `black-lake-4279` is randomly generated, and information like IP addresses will likely differ from what you're seeing.
241 |
242 | ```
243 | --> Pushing image done
244 | image: registry.fly.io/black-lake-4279:deployment-01GYFM65WZ6MVDS3FHR0CMKJHV
245 | image size: 1.1 GB
246 | Provisioning ips for black-lake-4279
247 | Dedicated ipv6: 2a09:8280:1::24:fc4
248 | Shared ipv4: 66.241.125.89
249 | Add a dedicated ipv4 with: fly ips allocate-v4
250 | Process groups have changed. This will:
251 | * create 1 "app" machine
252 |
253 | No machines in group 'app', launching one new machine
254 | [1/1] Waiting for 1781949b255d48 [app] to become healthy: 0/1
255 | ```
256 |
257 | Once everything is done, the fly.toml file will be updated with information like the region you've deployed to and the name of the app. It's good to check its contents, because it's easy to deploy the app in a way that metrics aren't picked up (i.e. if you use the default answer to the `would you like to copy its configuration` question).
258 |
259 | It should look something like:
260 |
261 | ```toml
262 | # fly.toml file generated for black-lake-4279 on 2023-04-21T09:58:50+02:00
263 |
264 | app = "black-lake-4279"
265 | primary_region = "ams"
266 |
267 | [http_service]
268 | internal_port = 8080
269 | force_https = true
270 |
271 | [metrics]
272 | port = 8008
273 | path = "/metrics"
274 | ```
275 |
276 | It's also good to verify that everything is running:
277 |
278 | ```bash
279 | flyctl status
280 | ```
281 |
282 | ```
283 | App
284 | Name = black-lake-4279
285 | Owner = personal
286 | Hostname = black-lake-4279.fly.dev
287 | Image = black-lake-4279:deployment-01GYHMK6F9RC78MAGR852F19PM
288 | Platform = machines
289 |
290 | Machines
291 | PROCESS ID VERSION REGION STATE CHECKS LAST UPDATED
292 | app 6e82d492f65308 1 ams started 2023-04-21T10:08:55Z
293 | ```
294 |
295 | If it's not in a started state (e.g. stopped), something went wrong and you may want to check the logs to see why the app isn't starting (for instance using `flyctl logs`).
296 |
297 | Time to open the website in a browser by running `flyctl open`. This will also trigger some metrics to be generated. Besides visiting the root of the site, you may also want to navigate to `/not-implemented` (which will result in an exception/internal server error) as well as `/sleep`, whose response is delayed by a `duration` you can pass in as a query string parameter (i.e. `/sleep?duration=1` will delay the response by one second). The sleep endpoint also has a decorator that ties an SLO to the function. Right now there should be data in Prometheus, and to access it we can use the managed Grafana (currently available as an early preview).
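
If you prefer the command line, a few `curl` requests will generate the same traffic. This is a quick sketch; substitute your own app name for `black-lake-4279`:

```bash
# Record a couple of successful calls
curl https://black-lake-4279.fly.dev/
# Trigger an error so the error ratio metrics have data
curl https://black-lake-4279.fly.dev/not-implemented
# Produce some latency data with a one second delay
curl "https://black-lake-4279.fly.dev/sleep/?duration=1"
```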
298 |
299 | Autometrics comes with several dashboards that you can import and that allow you to easily explore the metrics. The dashboards can be found in the [autometrics-shared repository](https://github.com/autometrics-dev/autometrics-shared). Let's add them to Grafana!
300 |
301 | 1. Go to your app's dashboard by running `flyctl dashboard`, navigate to the `metrics` page, and click on the `open in grafana` button.
302 | 2. Now that you're in Grafana, navigate to the imports page by clicking the dashboards icon in the side menu and then clicking `+ Import`
303 |    ![Go to imports](images/go_to_imports.jpg)
304 | 3. Go to the [autometrics-shared repository](https://github.com/autometrics-dev/autometrics-shared) and download the `Autometrics overview.json`
305 | 4. Click on the `Upload JSON file` button and select the JSON file you just downloaded.
306 |    ![Import form](images/import_form.jpg)
307 | 5. You will see a new form that is mostly filled in, except for the `Prometheus` field, where `Prometheus on Fly (default)` needs to be selected.
308 |    ![Select Prometheus](images/select_prometheus.jpg)
309 | 6. Finally: click `import` and you will be redirected to the dashboard.
310 |
311 | It should look something like this:
312 |
313 | ![Overview dashboard](images/overview_dashboard.jpg)
314 |
315 | Feel free to import the other dashboards and explore more detailed information around SLOs and function-specific behavior.
316 |
317 | That's it! If you have ideas for improving autometrics or feedback about this example, feel free to open a ticket on our GitHub repository: https://github.com/autometrics-dev/autometrics-py or contact us on [discord](https://discord.gg/kHtwcH8As9).
318 |
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/app.py:
--------------------------------------------------------------------------------
1 | import time
2 | from autometrics import autometrics, init
3 | from autometrics.objectives import Objective, ObjectiveLatency, ObjectivePercentile
4 | from fastapi import FastAPI, Response
5 | import uvicorn
6 |
7 | app = FastAPI()
8 |
9 |
10 | # Set up the root endpoint of the API
11 | @app.get("/")
12 | # Add the autometrics decorator to enable metrics for this endpoint
13 | # It needs to be added AFTER the fastapi decorator, otherwise it won't
14 | # be triggered
15 | @autometrics
16 | def hello_world():
17 | do_something()
18 | return {"Hello": "World"}
19 |
20 |
21 | # Let's set up an SLO, so we can check out the SLO dashboards later on
22 | ITEM_SLO = Objective(
23 | "sleep",
24 | success_rate=ObjectivePercentile.P99_9,
25 | latency=(ObjectiveLatency.Ms250, ObjectivePercentile.P99),
26 | )
27 |
28 |
29 | @app.get("/sleep/")
30 | @autometrics(objective=ITEM_SLO)
31 | def get_sleep(duration: int = 0):
32 |     """A function that takes a duration parameter to determine how long the response
33 | needs to be delayed"""
34 |
35 | time.sleep(duration)
36 | return {"duration": duration}
37 |
38 |
39 | @app.get("/not-implemented")
40 | @autometrics
41 | def not_implemented():
42 | """An endpoint that always throws an exception"""
43 | raise NotImplementedError("Not implemented")
44 |
45 |
46 | @autometrics
47 | def do_something():
48 | # This function doesn't do much
49 | print("done")
50 |
51 |
52 | # Before starting the server, we need to initialize the autometrics
53 | # by calling init(). In order for prometheus to get the data
54 | # we'll also pass the configuration that will set
55 | # up a separate endpoint that exposes data in a format
56 | # that prometheus can understand.
57 | # This metrics server will run on port 8008
58 | init(exporter={"type": "prometheus", "port": 8008})
59 |
60 |
61 | # If the app is not run in a container by fly.io but directly using python,
62 | # we enter this flow and the app runs on port 8080
63 | if __name__ == "__main__":
64 | uvicorn.run(app, host="localhost", port=8080)
65 |
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/fly.toml:
--------------------------------------------------------------------------------
1 | [http_service]
2 | internal_port = 8080
3 | force_https = true
4 |
5 | [metrics]
6 | port = 8008
7 | path = "/metrics"
8 |
9 |
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/images/go_to_imports.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/examples/fastapi-with-fly-io/images/go_to_imports.jpg
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/images/import_form.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/examples/fastapi-with-fly-io/images/import_form.jpg
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/images/overview_dashboard.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/examples/fastapi-with-fly-io/images/overview_dashboard.jpg
--------------------------------------------------------------------------------
/examples/fastapi-with-fly-io/images/select_prometheus.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/examples/fastapi-with-fly-io/images/select_prometheus.jpg
--------------------------------------------------------------------------------
/examples/starlette-otel-exemplars.py:
--------------------------------------------------------------------------------
1 | import uvicorn
2 |
3 | from autometrics import autometrics, init
4 | from opentelemetry import trace
5 | from opentelemetry.sdk.trace import TracerProvider
6 | from opentelemetry.sdk.trace.export import (
7 | BatchSpanProcessor,
8 | ConsoleSpanExporter,
9 | )
10 | from prometheus_client import REGISTRY
11 | from prometheus_client.openmetrics.exposition import generate_latest
12 | from starlette import applications
13 | from starlette.responses import PlainTextResponse
14 | from starlette.routing import Route
15 |
16 | # Let's start by setting up the OpenTelemetry SDK with some defaults
17 | provider = TracerProvider()
18 | processor = BatchSpanProcessor(ConsoleSpanExporter())
19 | provider.add_span_processor(processor)
20 | trace.set_tracer_provider(provider)
21 |
22 | # Now we can instrument our Starlette application
23 | tracer = trace.get_tracer(__name__)
24 |
25 | # Exemplars support requires some additional configuration on autometrics,
26 | # so we need to initialize it with the proper settings
27 | init(tracker="prometheus", enable_exemplars=True, service_name="starlette")
28 |
29 |
30 | # We need to add the tracer decorator before autometrics so that we can see the spans
31 | @tracer.start_as_current_span("request")
32 | @autometrics
33 | def outer_function(request):
34 | response = inner_function()
35 | return PlainTextResponse(response)
36 |
37 |
38 | # This function will also get an exemplar because it is called within
39 | # the span of the outer_function
40 | @autometrics
41 | def inner_function():
42 | return "Hello world!"
43 |
44 |
45 | def metrics(request):
46 |     # Exemplars are not supported by the default Prometheus format, so we specifically
47 |     # make an endpoint that uses the OpenMetrics format, which supports exemplars.
48 | body = generate_latest(REGISTRY)
49 | return PlainTextResponse(body, media_type="application/openmetrics-text")
50 |
51 |
52 | app = applications.Starlette(
53 | routes=[Route("/", outer_function), Route("/metrics", metrics)]
54 | )
55 |
56 | if __name__ == "__main__":
57 | uvicorn.run(app, host="0.0.0.0", port=8080)
58 |
59 | # Start the app and make some requests to http://127.0.0.1:8080/, you should see the spans in the console.
60 | # With the autometrics extension installed, you can now hover over the outer_function
61 | # handler and see the charts and queries associated with it. Open one of the queries
62 | # in Prometheus and you should see exemplars added to the metrics. Don't forget
63 | # to click the Show Exemplars button in Prometheus to see them!
64 |
--------------------------------------------------------------------------------
/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/prometheus.yaml:
--------------------------------------------------------------------------------
1 | global:
2 | scrape_interval: 15s
3 | evaluation_interval: 15s
4 |
5 | scrape_configs:
6 | # Use prometheus to scrape prometheus :)
7 | - job_name: "prometheus"
8 | scrape_interval: 5s
9 | static_configs:
10 | - targets: ["localhost:9090"]
11 |
12 | - job_name: "python-autometrics-example"
13 | # For a real deployment, you would want the scrape interval to be
14 |     # longer, but for testing you want the data to show up quickly
15 | scrape_interval: 500ms
16 | static_configs:
17 | - targets: ["localhost:8080"]
18 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.poetry]
2 | name = "autometrics"
3 | version = "1.0.0"
4 | description = "Easily add metrics to your system – and actually understand them using automatically customized Prometheus queries"
5 | authors = ["Fiberplane "]
6 | license = "MIT OR Apache-2.0"
7 | readme = "README.md"
8 | repository = "https://github.com/autometrics-dev/autometrics-py"
9 | homepage = "https://github.com/autometrics-dev/autometrics-py"
10 | keywords = [
11 | "metrics",
12 | "telemetry",
13 | "prometheus",
14 | "monitoring",
15 | "observability",
16 | "instrumentation",
17 | "tracing",
18 | ]
19 | classifiers = [
20 | "Topic :: Software Development :: Build Tools",
21 | "Topic :: Software Development :: Libraries :: Python Modules",
22 | "Topic :: System :: Monitoring",
23 | "Typing :: Typed",
24 | ]
25 | packages = [{ include = "autometrics", from = "src" }]
26 |
27 | [tool.poetry.dependencies]
28 | # The prometheus exporter is pinned to a beta version because of how opentelemetry-python has been releasing it.
29 | # Technically, the version 1.12.0rc1 is the "latest" on pypi, but it's not the latest release.
30 | # 0.41b0 includes the fix for exporting gauge values (previously they were always turned into counters).
31 | opentelemetry-exporter-prometheus = "0.41b0"
32 | opentelemetry-exporter-otlp-proto-http = { version = "^1.20.0", optional = true }
33 | opentelemetry-exporter-otlp-proto-grpc = { version = "^1.20.0", optional = true }
34 | opentelemetry-sdk = "^1.17.0"
35 | prometheus-client = "^0.16.0 || ^0.17.0"
36 | pydantic = "^2.4.1"
37 | python = "^3.8"
38 | python-dotenv = "^1.0.0"
39 | typing-extensions = "^4.5.0"
40 |
41 | [tool.poetry.extras]
42 | exporter-otlp-proto-http = ["opentelemetry-exporter-otlp-proto-http"]
43 | exporter-otlp-proto-grpc = ["opentelemetry-exporter-otlp-proto-grpc"]
44 |
45 | [tool.poetry.group.dev]
46 | optional = true
47 |
48 | [tool.mypy]
49 | namespace_packages = true
50 | mypy_path = "src"
51 | enable_incomplete_feature = "Unpack"
52 |
53 | # This override is needed because with certain flavors of python and
54 | # mypy you can get the following error:
55 | # opentelemetry/attributes/__init__.py:14: error: invalid syntax
56 | # Which at the time of writing is a line that states ignore types:
57 | # `# type: ignore`
58 | [[tool.mypy.overrides]]
59 | module = ["opentelemetry.attributes"]
60 | follow_imports = "skip"
61 |
62 | [tool.pytest.ini_options]
63 | usefixtures = "reset_environment"
64 |
65 | [tool.poetry.group.dev.dependencies]
66 | pytest = "^7.3.0"
67 | pytest-asyncio = "^0.21.0"
68 | black = "^23.3.0"
69 | pytest-xdist = "^3.3.1"
70 | mypy = "^1.5.1"
71 | twine = "4.0.2"
72 |
73 |
74 | [tool.poetry.group.examples]
75 | optional = true
76 |
77 | [tool.poetry.group.examples.dependencies]
78 | anyio = "3.7.1"
79 | bleach = "6.0.0"
80 | build = "0.10.0"
81 | certifi = "2023.7.22"
82 | charset-normalizer = "3.1.0"
83 | click = "8.1.3"
84 | django = "^4.2"
85 | docutils = "0.19"
86 | fastapi = "^0.103.1"
87 | h11 = "0.14.0"
88 | idna = "3.4"
89 | # pinned importlib-metadata to version ~6.0.0 because of opentelemetry-api
90 | importlib-metadata = "~6.0.0"
91 | jaraco-classes = "3.2.3"
92 | keyring = "23.13.1"
93 | markdown-it-py = "2.2.0"
94 | mdurl = "0.1.2"
95 | more-itertools = "9.1.0"
96 | packaging = "23.0"
97 | pkginfo = "1.9.6"
98 | pygments = "2.16.1"
99 | pyproject-hooks = "1.0.0"
100 | readme-renderer = "37.3"
101 | requests = "2.31.0"
102 | requests-toolbelt = "0.10.1"
103 | rfc3986 = "2.0.0"
104 | rich = "13.3.2"
105 | six = "1.16.0"
106 | sniffio = "1.3.0"
107 | starlette = ">=0.27.0,<0.28.0"
108 | urllib3 = "1.26.18"
109 | uvicorn = "0.21.1"
110 | webencodings = "0.5.1"
111 | zipp = "3.15.0"
112 | locust = "^2.15.1"
113 | django-stubs = "4.2.3"
114 |
115 |
116 | [tool.poetry.group.development.dependencies]
117 | types-requests = "^2.31.0.2"
118 | django-stubs = "^4.2.3"
119 |
120 | [build-system]
121 | requires = ["poetry-core"]
122 | build-backend = "poetry.core.masonry.api"
123 |
--------------------------------------------------------------------------------
/pyrightconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "include": ["src"],
3 |
4 | "exclude": [
5 | "**/node_modules",
6 | "**/__pycache__",
7 | "src/experimental",
8 | "src/typestubs"
9 | ],
10 |
11 | "defineConstant": {
12 | "DEBUG": true
13 | },
14 |
15 | "reportMissingImports": true,
16 | "reportMissingTypeStubs": false,
17 |
18 | "pythonVersion": "3.8",
19 | "pythonPlatform": "Linux",
20 |
21 | "executionEnvironments": [
22 | {
23 | "root": "src"
24 | }
25 | ]
26 | }
27 |
--------------------------------------------------------------------------------
/src/autometrics/__init__.py:
--------------------------------------------------------------------------------
1 | from .decorator import *
2 | from .initialization import init
3 |
--------------------------------------------------------------------------------
/src/autometrics/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 | import pytest
3 |
4 | in_ci = os.getenv("CI", "false") == "true"
5 |
6 |
7 | @pytest.fixture()
8 | def reset_environment(monkeypatch):
9 | import importlib
10 | import opentelemetry
11 | import prometheus_client
12 | from . import initialization
13 | from .tracker import tracker
14 |
15 | importlib.reload(opentelemetry)
16 | importlib.reload(prometheus_client)
17 | importlib.reload(initialization)
18 | importlib.reload(tracker)
19 |     # we'll set debug to true to ensure calling init more than once will fail the whole test
20 | monkeypatch.setenv("AUTOMETRICS_DEBUG", "true")
21 |
22 |     # GitHub CI uses HTTPS, so for the tests to pass we force the SSH url
23 | if in_ci:
24 | monkeypatch.setenv(
25 | "AUTOMETRICS_REPOSITORY_URL",
26 | "git@github.com:autometrics-dev/autometrics-py.git",
27 | )
28 |
--------------------------------------------------------------------------------
/src/autometrics/constants.py:
--------------------------------------------------------------------------------
1 | """Constants used by autometrics"""
2 |
3 | SPEC_VERSION = "1.0.0"
4 |
5 | COUNTER_NAME = "function.calls"
6 | HISTOGRAM_NAME = "function.calls.duration"
7 | CONCURRENCY_NAME = "function.calls.concurrent"
8 | # NOTE - The Rust implementation does not use `build.info`; it opts for just `build_info`
9 | BUILD_INFO_NAME = "build_info"
10 | SERVICE_NAME = "service.name"
11 | REPOSITORY_URL = "repository.url"
12 | REPOSITORY_PROVIDER = "repository.provider"
13 | AUTOMETRICS_VERSION = "autometrics.version"
14 |
15 |
16 | COUNTER_NAME_PROMETHEUS = COUNTER_NAME.replace(".", "_")
17 | HISTOGRAM_NAME_PROMETHEUS = HISTOGRAM_NAME.replace(".", "_")
18 | CONCURRENCY_NAME_PROMETHEUS = CONCURRENCY_NAME.replace(".", "_")
19 | SERVICE_NAME_PROMETHEUS = SERVICE_NAME.replace(".", "_")
20 | REPOSITORY_URL_PROMETHEUS = REPOSITORY_URL.replace(".", "_")
21 | REPOSITORY_PROVIDER_PROMETHEUS = REPOSITORY_PROVIDER.replace(".", "_")
22 | AUTOMETRICS_VERSION_PROMETHEUS = AUTOMETRICS_VERSION.replace(".", "_")
23 |
24 | COUNTER_DESCRIPTION = "Autometrics counter for tracking function calls"
25 | HISTOGRAM_DESCRIPTION = "Autometrics histogram for tracking function call duration"
26 | CONCURRENCY_DESCRIPTION = "Autometrics gauge for tracking function call concurrency"
27 | BUILD_INFO_DESCRIPTION = (
28 | "Autometrics info metric for tracking software version and build details"
29 | )
30 |
31 | # The following constants are used to create the labels
32 | OBJECTIVE_NAME = "objective.name"
33 | OBJECTIVE_PERCENTILE = "objective.percentile"
34 | OBJECTIVE_LATENCY_THRESHOLD = "objective.latency_threshold"
35 | VERSION_KEY = "version"
36 | COMMIT_KEY = "commit"
37 | BRANCH_KEY = "branch"
38 |
39 | # The values are updated to use underscores instead of periods to avoid issues with prometheus.
40 | # A similar thing is done in the rust library, which supports multiple exporters
41 | OBJECTIVE_NAME_PROMETHEUS = OBJECTIVE_NAME.replace(".", "_")
42 | OBJECTIVE_PERCENTILE_PROMETHEUS = OBJECTIVE_PERCENTILE.replace(".", "_")
43 | OBJECTIVE_LATENCY_THRESHOLD_PROMETHEUS = OBJECTIVE_LATENCY_THRESHOLD.replace(".", "_")
44 |
--------------------------------------------------------------------------------
/src/autometrics/decorator.py:
--------------------------------------------------------------------------------
1 | """Autometrics module."""
2 | import time
3 | import inspect
4 |
5 | from contextvars import ContextVar, Token
6 | from functools import wraps
7 | from typing import overload, TypeVar, Callable, Optional, Awaitable, Union, Coroutine
8 | from typing_extensions import ParamSpec
9 |
10 | from .objectives import Objective
11 | from .tracker import get_tracker, Result
12 | from .utils import (
13 | get_function_name,
14 | get_module_name,
15 | append_docs_to_docstring,
16 | )
17 |
18 | Params = ParamSpec("Params")
19 | R = TypeVar("R")
20 | Y = TypeVar("Y")
21 | S = TypeVar("S")
22 |
23 | caller_module_var: ContextVar[str] = ContextVar("caller.module", default="")
24 | caller_function_var: ContextVar[str] = ContextVar("caller.function", default="")
25 |
26 |
27 | # Decorator with arguments (where decorated function returns an awaitable)
28 | @overload
29 | def autometrics(
30 | func: None = None,
31 | *,
32 | objective: Optional[Objective] = None,
33 | track_concurrency: Optional[bool] = False,
34 | record_error_if: Callable[[R], bool],
35 | record_success_if: Optional[Callable[[Exception], bool]] = None,
36 | ) -> Union[
37 | Callable[
38 | [Callable[Params, Coroutine[Y, S, R]]], Callable[Params, Coroutine[Y, S, R]]
39 | ],
40 | Callable[[Callable[Params, R]], Callable[Params, R]],
41 | ]:
42 | ...
43 |
44 |
45 | # Decorator with arguments (without the record_error_if parameter)
46 | @overload
47 | def autometrics(
48 | func: None = None,
49 | *,
50 | objective: Optional[Objective] = None,
51 | track_concurrency: Optional[bool] = False,
52 | record_success_if: Optional[Callable[[Exception], bool]] = None,
53 | ) -> Callable[[Callable[Params, R]], Callable[Params, R]]:
54 | ...
55 |
56 |
57 | # Passing the function directly as func
58 | # i.e. using bare @autometrics (without parentheses)
59 | @overload
60 | def autometrics(
61 | func: Callable[Params, R],
62 | ) -> Callable[Params, R]:
63 | ...
64 |
65 |
66 | def autometrics(
67 | func=None,
68 | objective=None,
69 | track_concurrency=None,
70 | record_error_if=None,
71 | record_success_if=None,
72 | ):
73 | """Decorator for tracking function calls and duration. Supports synchronous and async functions."""
74 |
75 | def register_function_info(
76 | function: str,
77 | module: str,
78 | ):
79 | get_tracker().initialize_counters(
80 | function=function, module=module, objective=objective
81 | )
82 |
83 | def track_start(function: str, module: str):
84 | get_tracker().start(
85 | function=function, module=module, track_concurrency=track_concurrency
86 | )
87 |
88 | def track_result_ok(
89 | duration: float,
90 | function: str,
91 | module: str,
92 | caller_module: str,
93 | caller_function: str,
94 | ):
95 | get_tracker().finish(
96 | duration,
97 | function=function,
98 | module=module,
99 | caller_module=caller_module,
100 | caller_function=caller_function,
101 | objective=objective,
102 | track_concurrency=track_concurrency,
103 | result=Result.OK,
104 | )
105 |
106 | def track_result_error(
107 | duration: float,
108 | function: str,
109 | module: str,
110 | caller_module: str,
111 | caller_function: str,
112 | ):
113 | get_tracker().finish(
114 | duration,
115 | function=function,
116 | module=module,
117 | caller_module=caller_module,
118 | caller_function=caller_function,
119 | objective=objective,
120 | track_concurrency=track_concurrency,
121 | result=Result.ERROR,
122 | )
123 |
124 | def sync_decorator(func: Callable[Params, R]) -> Callable[Params, R]:
125 | """Helper for decorating synchronous functions, to track calls and duration."""
126 |
127 | module_name = get_module_name(func)
128 | func_name = get_function_name(func)
129 | register_function_info(func_name, module_name)
130 |
131 | @wraps(func)
132 | def sync_wrapper(*args: Params.args, **kwds: Params.kwargs) -> R:
133 | caller_module = caller_module_var.get()
134 | caller_function = caller_function_var.get()
135 | context_token_module: Optional[Token] = None
136 | context_token_function: Optional[Token] = None
137 | start_time = time.time()
138 |
139 | try:
140 | context_token_module = caller_module_var.set(module_name)
141 | context_token_function = caller_function_var.set(func_name)
142 | if track_concurrency:
143 | track_start(module=module_name, function=func_name)
144 | result = func(*args, **kwds)
145 | duration = time.time() - start_time
146 | if record_error_if and record_error_if(result):
147 | track_result_error(
148 | duration,
149 | function=func_name,
150 | module=module_name,
151 | caller_module=caller_module,
152 | caller_function=caller_function,
153 | )
154 | else:
155 | track_result_ok(
156 | duration,
157 | function=func_name,
158 | module=module_name,
159 | caller_module=caller_module,
160 | caller_function=caller_function,
161 | )
162 |
163 | except Exception as exception:
164 | duration = time.time() - start_time
165 | if record_success_if and record_success_if(exception):
166 | track_result_ok(
167 | duration,
168 | function=func_name,
169 | module=module_name,
170 | caller_module=caller_module,
171 | caller_function=caller_function,
172 | )
173 | else:
174 | track_result_error(
175 | duration,
176 | function=func_name,
177 | module=module_name,
178 | caller_module=caller_module,
179 | caller_function=caller_function,
180 | )
181 | # Reraise exception
182 | raise exception
183 |
184 | finally:
185 | if context_token_module is not None:
186 | caller_module_var.reset(context_token_module)
187 | if context_token_function is not None:
188 | caller_function_var.reset(context_token_function)
189 |
190 | return result
191 |
192 | sync_wrapper.__doc__ = append_docs_to_docstring(func, func_name, module_name)
193 | return sync_wrapper
194 |
195 | def async_decorator(
196 | func: Callable[Params, Awaitable[R]]
197 | ) -> Callable[Params, Awaitable[R]]:
198 | """Helper for decorating async functions, to track calls and duration."""
199 |
200 | module_name = get_module_name(func)
201 | func_name = get_function_name(func)
202 | register_function_info(func_name, module_name)
203 |
204 | @wraps(func)
205 | async def async_wrapper(*args: Params.args, **kwds: Params.kwargs) -> R:
206 | caller_module = caller_module_var.get()
207 | caller_function = caller_function_var.get()
208 | context_token_module: Optional[Token] = None
209 | context_token_function: Optional[Token] = None
210 | start_time = time.time()
211 |
212 | try:
213 | context_token_module = caller_module_var.set(module_name)
214 | context_token_function = caller_function_var.set(func_name)
215 | if track_concurrency:
216 | track_start(module=module_name, function=func_name)
217 | result = await func(*args, **kwds)
218 | duration = time.time() - start_time
219 | if record_error_if and record_error_if(result):
220 | track_result_error(
221 | duration,
222 | function=func_name,
223 | module=module_name,
224 | caller_module=caller_module,
225 | caller_function=caller_function,
226 | )
227 | else:
228 | track_result_ok(
229 | duration,
230 | function=func_name,
231 | module=module_name,
232 | caller_module=caller_module,
233 | caller_function=caller_function,
234 | )
235 |
236 | except Exception as exception:
237 | duration = time.time() - start_time
238 | if record_success_if and record_success_if(exception):
239 | track_result_ok(
240 | duration,
241 | function=func_name,
242 | module=module_name,
243 | caller_module=caller_module,
244 | caller_function=caller_function,
245 | )
246 | else:
247 | track_result_error(
248 | duration,
249 | function=func_name,
250 | module=module_name,
251 | caller_module=caller_module,
252 | caller_function=caller_function,
253 | )
254 | # Reraise exception
255 | raise exception
256 |
257 | finally:
258 | if context_token_module is not None:
259 | caller_module_var.reset(context_token_module)
260 | if context_token_function is not None:
261 | caller_function_var.reset(context_token_function)
262 |
263 | return result
264 |
265 | async_wrapper.__doc__ = append_docs_to_docstring(func, func_name, module_name)
266 | return async_wrapper
267 |
268 | def pick_decorator(func):
269 | """Pick the correct decorator based on the function type."""
270 | if inspect.iscoroutinefunction(func):
271 | return async_decorator(func)
272 | return sync_decorator(func)
273 |
274 | if func is None:
275 | return pick_decorator
276 | elif inspect.iscoroutinefunction(func):
277 | return async_decorator(func)
278 | else:
279 | return sync_decorator(func)
280 |
--------------------------------------------------------------------------------
/src/autometrics/exemplar.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import trace
2 |
3 |
4 | def get_exemplar():
5 | """Generates an exemplar dictionary from the current implicit OTel context if available"""
6 | span_context = trace.get_current_span().get_span_context()
7 |
8 | # Only include the exemplar if it is valid and sampled
9 | if span_context.is_valid and span_context.trace_flags.sampled:
10 | # You must set the trace_id and span_id exemplar labels like this to link OTel and
11 | # Prometheus. They must be formatted as hexadecimal strings.
12 | return {
13 | "trace_id": trace.format_trace_id(span_context.trace_id),
14 | "span_id": trace.format_span_id(span_context.span_id),
15 | }
16 |
17 | return None
18 |
--------------------------------------------------------------------------------
/src/autometrics/exposition.py:
--------------------------------------------------------------------------------
1 | from opentelemetry.sdk.metrics.export import (
2 | AggregationTemporality,
3 | MetricReader,
4 | PeriodicExportingMetricReader,
5 | )
6 | from opentelemetry.exporter.prometheus import PrometheusMetricReader
7 | from prometheus_client import start_http_server
8 | from pydantic import ConfigDict, TypeAdapter
9 | from typing import Dict, Literal, Optional, Union
10 | from typing_extensions import TypedDict
11 |
12 | # GRPC is optional so we'll only type it if it's available
13 | try:
14 | from grpc import ChannelCredentials # type: ignore
15 | except ImportError:
16 | ChannelCredentials = None
17 |
18 |
19 | # All of these are split into two parts because having
20 | # a wall of Optional[...] is not very readable and Required[...] is 3.11+
21 | class OtlpGrpcExporterBase(TypedDict):
22 | """Base type for OTLP GRPC exporter configuration."""
23 |
24 | type: Literal["otlp-proto-grpc"]
25 |
26 |
27 | class OtlpGrpcExporterOptions(OtlpGrpcExporterBase, total=False):
28 | """Configuration for OTLP GRPC exporter."""
29 |
30 | __pydantic_config__ = ConfigDict(arbitrary_types_allowed=True) # type: ignore
31 | endpoint: str
32 | insecure: bool
33 | headers: Dict[str, str]
34 | credentials: ChannelCredentials
35 | push_interval: int
36 | timeout: int
37 | preferred_temporality: Dict[type, AggregationTemporality]
38 |
39 |
40 | OtlpGrpcExporterValidator = TypeAdapter(OtlpGrpcExporterOptions)
41 |
42 |
43 | class OtlpHttpExporterBase(TypedDict):
44 | """Base type for OTLP HTTP exporter configuration."""
45 |
46 | type: Literal["otlp-proto-http"]
47 |
48 |
49 | class OtlpHttpExporterOptions(OtlpHttpExporterBase, total=False):
50 | """Configuration for OTLP HTTP exporter."""
51 |
52 | endpoint: str
53 | headers: Dict[str, str]
54 | push_interval: int
55 | timeout: int
56 | preferred_temporality: Dict[type, AggregationTemporality]
57 |
58 |
59 | OtlpHttpExporterValidator = TypeAdapter(OtlpHttpExporterOptions)
60 |
61 |
62 | class PrometheusExporterBase(TypedDict):
63 |     """Base type for Prometheus exporter configuration."""
64 |
65 | type: Literal["prometheus"]
66 |
67 |
68 | class PrometheusExporterOptions(PrometheusExporterBase, total=False):
69 | """Configuration for Prometheus exporter."""
70 |
71 | address: str
72 | port: int
73 |
74 |
75 | PrometheusValidator = TypeAdapter(PrometheusExporterOptions)
76 |
77 |
78 | class OtelCustomExporterBase(TypedDict):
79 |     """Base type for custom OpenTelemetry exporter configuration."""
80 |
81 | type: Literal["otel-custom"]
82 |
83 |
84 | class OtelCustomExporterOptions(OtelCustomExporterBase, total=False):
85 |     """Configuration for a custom OpenTelemetry exporter."""
86 |
87 | __pydantic_config__ = ConfigDict(arbitrary_types_allowed=True) # type: ignore
88 | exporter: MetricReader
89 |
90 |
91 | OtelCustomValidator = TypeAdapter(OtelCustomExporterOptions)
92 |
93 |
94 | ExporterOptions = Union[
95 | OtlpGrpcExporterOptions,
96 | OtlpHttpExporterOptions,
97 | PrometheusExporterOptions,
98 | OtelCustomExporterOptions,
99 | ]
100 |
101 |
102 | def create_exporter(config: ExporterOptions) -> Optional[MetricReader]:
103 | """Create an exporter based on the configuration."""
104 | if config["type"] == "prometheus":
105 | config = PrometheusValidator.validate_python(config)
106 | start_http_server(
107 | config.get("port", 9464),
108 | config.get("address", "0.0.0.0"),
109 | )
110 | return PrometheusMetricReader()
111 | if config["type"] == "otlp-proto-http":
112 | config = OtlpHttpExporterValidator.validate_python(config)
113 | try:
114 | from opentelemetry.exporter.otlp.proto.http.metric_exporter import (
115 | OTLPMetricExporter as OTLPHTTPMetricExporter,
116 | )
117 |
118 | http_exporter = OTLPHTTPMetricExporter(
119 | endpoint=config.get("endpoint", None),
120 | headers=config.get("headers", None),
121 | timeout=config.get("timeout", None),
122 | preferred_temporality=config.get("preferred_temporality", {}),
123 | )
124 | http_reader = PeriodicExportingMetricReader(
125 | http_exporter,
126 | export_interval_millis=config.get("push_interval", None),
127 | export_timeout_millis=config.get("timeout", None),
128 | )
129 | return http_reader
130 | except ImportError:
131 | raise ImportError("OTLP exporter (HTTP) not installed")
132 | if config["type"] == "otlp-proto-grpc":
133 | config = OtlpGrpcExporterValidator.validate_python(config)
134 | try:
135 | from opentelemetry.exporter.otlp.proto.grpc.metric_exporter import ( # type: ignore
136 | OTLPMetricExporter as OTLPGRPCMetricExporter,
137 | )
138 |
139 | grpc_exporter = OTLPGRPCMetricExporter(
140 | endpoint=config.get("endpoint", None),
141 | insecure=config.get("insecure", None),
142 | credentials=config.get("credentials", None),
143 | headers=config.get("headers", None),
144 | timeout=config.get("timeout", None),
145 | preferred_temporality=config.get("preferred_temporality", {}),
146 | )
147 | grpc_reader = PeriodicExportingMetricReader(
148 | grpc_exporter,
149 | export_interval_millis=config.get("push_interval", None),
150 | export_timeout_millis=config.get("timeout", None),
151 | )
152 | return grpc_reader
153 | except ImportError:
154 | raise ImportError("OTLP exporter (GRPC) not installed")
155 | if config["type"] == "otel-custom":
156 | config = OtelCustomValidator.validate_python(config)
157 | return config.get("exporter", None)
158 | else:
159 | raise ValueError("Invalid exporter type")
160 |
--------------------------------------------------------------------------------
/src/autometrics/initialization.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 |
4 | from typing_extensions import Unpack
5 |
6 |
7 | from .tracker import init_tracker, get_tracker
8 | from .tracker.temporary import TemporaryTracker
9 | from .settings import AutometricsOptions, init_settings
10 |
11 | has_inited = False
12 | DOUBLE_INIT_ERROR = "Cannot call init() more than once."
13 | NOT_TEMP_TRACKER_ERROR = "Expected tracker to be TemporaryTracker."
14 |
15 |
16 | def init(**kwargs: Unpack[AutometricsOptions]):
17 | """Initialization function that is used to configure autometrics. This function should be called
18 | immediately after starting your app. You cannot call this function more than once.
19 | """
20 | global has_inited
21 | if has_inited:
22 | if os.environ.get("AUTOMETRICS_DEBUG") == "true":
23 | raise RuntimeError(DOUBLE_INIT_ERROR)
24 | else:
25 |             logging.warning(f"{DOUBLE_INIT_ERROR} This init() call will be ignored.")
26 | return
27 | has_inited = True
28 |
29 | temp_tracker = get_tracker()
30 | if not isinstance(temp_tracker, TemporaryTracker):
31 | if os.environ.get("AUTOMETRICS_DEBUG") == "true":
32 | raise RuntimeError(NOT_TEMP_TRACKER_ERROR)
33 | else:
34 |             logging.warning(f"{NOT_TEMP_TRACKER_ERROR} This init() call will be ignored.")
35 | return
36 | settings = init_settings(**kwargs)
37 | tracker = init_tracker(settings["tracker"], settings)
38 | temp_tracker.replay_queue(tracker)
39 |
--------------------------------------------------------------------------------
/src/autometrics/objectives.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from enum import Enum
4 | from re import match
5 | from typing import Optional, Tuple
6 |
7 |
8 | class ObjectivePercentile(Enum):
9 | """The percentage of requests that must meet the given criteria (success rate or latency)."""
10 |
11 | P90 = "90"
12 | P95 = "95"
13 | P99 = "99"
14 | P99_9 = "99.9"
15 |
16 |
17 | class ObjectiveLatency(Enum):
18 | """The latency threshold for the given percentile."""
19 |
20 | Ms5 = "0.005"
21 | Ms10 = "0.01"
22 | Ms25 = "0.025"
23 | Ms50 = "0.05"
24 | Ms75 = "0.075"
25 | Ms100 = "0.1"
26 | Ms250 = "0.25"
27 | Ms500 = "0.5"
28 | Ms750 = "0.75"
29 | Ms1000 = "1.0"
30 | Ms2500 = "2.5"
31 | Ms5000 = "5.0"
32 | Ms7500 = "7.5"
33 | Ms10000 = "10.0"
34 |
35 |
36 | # This represents a Service-Level Objective (SLO) for a function or group of functions.
37 | # The objective should be given a descriptive name and can represent
38 | # a success rate and/or latency objective.
39 | #
40 | # For details on SLOs, see the SRE literature on service-level objectives.
41 | #
42 | # Example:
43 | # ```python
44 | # from autometrics import autometrics
45 | # from autometrics.objectives import Objective, ObjectivePercentile, ObjectiveLatency
46 | # API_SLO = Objective(
47 | # "api",
48 | # success_rate=ObjectivePercentile.P99_9,
49 | # latency=(ObjectiveLatency.Ms250, ObjectivePercentile.P99),
50 | # )
51 | #
52 | # @autometrics(objective = API_SLO)
53 | # def api_handler() :
54 | # # ...
55 | # ```
56 | #
57 | # ## How this works
58 | #
59 | # When an objective is added to a function, the metrics for that function will
60 | # have additional labels attached to specify the SLO details.
61 | #
62 | # Autometrics comes with a set of Prometheus [recording rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)
63 | # and [alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)
64 | # that will fire alerts when the given objective is being violated.
65 | #
66 | # By default, these recording rules will effectively lay dormant.
67 | # However, they are enabled when the special labels are present on certain metrics.
68 | class Objective:
69 | """A Service-Level Objective (SLO) for a function or group of functions."""
70 |
71 | name: str
72 | """name: The name of the objective. This should be something descriptive of the function or group of functions it covers."""
73 | success_rate: Optional[ObjectivePercentile]
74 | """Specify the success rate for this objective.
75 |
76 | This means that the function or group of functions that are part of this objective
77 | should return an `Ok` result at least this percentage of the time."""
78 | latency: Optional[Tuple[ObjectiveLatency, ObjectivePercentile]]
79 |
80 | def __init__(
81 | self,
82 | name: str,
83 | success_rate: Optional[ObjectivePercentile] = None,
84 | latency: Optional[Tuple[ObjectiveLatency, ObjectivePercentile]] = None,
85 | ):
86 | """Create a new objective with the given name.
87 |
88 | The name should be something descriptive of the function or group of functions it covers.
89 | For example, if you have an objective covering all of the HTTP handlers in your API you might call it "api".
90 | """
91 |
92 | self.name = name
93 | self.success_rate = success_rate
94 | self.latency = latency
95 |
96 | # Check that name only contains alphanumeric characters and hyphens
97 | if match(r"^[\w-]+$", name) is None:
98 | logging.getLogger().warning(
99 | f"Objective name '{name}' contains invalid characters. Only alphanumeric characters and hyphens are allowed."
100 | )
101 |
--------------------------------------------------------------------------------
/src/autometrics/prometheus_url.py:
--------------------------------------------------------------------------------
1 | import urllib.parse
2 | import os
3 | from typing import Optional
4 | from dotenv import load_dotenv
5 |
6 | ADD_BUILD_INFO_LABELS = "* on (instance, job) group_left(version, commit) (last_over_time(build_info[1s]) or on (instance, job) up)"
7 |
8 |
9 | def cleanup_url(url: str) -> str:
10 | """Remove the trailing slash if there is one."""
11 | if url[-1] == "/":
12 | url = url[:-1]
13 | return url
14 |
15 |
16 | class Generator:
17 | """Generate prometheus query urls for a given function/module."""
18 |
19 | def __init__(
20 | self, function_name: str, module_name: str, base_url: Optional[str] = None
21 | ):
22 | load_dotenv()
23 | self.function_name = function_name
24 | self.module_name = module_name
25 |
26 | url = base_url or os.getenv("PROMETHEUS_URL") or "http://localhost:9090"
27 | self.base_url = cleanup_url(url)
28 |
29 | def create_urls(self):
30 | """Create the prometheus query urls for the function and module."""
31 | request_rate_query = f'sum by (function, module, commit, version) (rate (function_calls_count_total{{function="{self.function_name}",module="{self.module_name}"}}[5m]) {ADD_BUILD_INFO_LABELS})'
32 | latency_query = f'sum by (le, function, module, commit, version) (rate(function_calls_duration_bucket{{function="{self.function_name}",module="{self.module_name}"}}[5m]) {ADD_BUILD_INFO_LABELS})'
33 | error_ratio_query = f'sum by (function, module, commit, version) (rate (function_calls_count_total{{function="{self.function_name}",module="{self.module_name}", result="error"}}[5m]) {ADD_BUILD_INFO_LABELS}) / {request_rate_query}'
34 |
35 | queries = {
36 | "Request rate URL": request_rate_query,
37 | "Latency URL": latency_query,
38 | "Error Ratio URL": error_ratio_query,
39 | }
40 |
41 | urls = {}
42 |         for name, query in queries.items():
43 |             # Generate the full prometheus url for each named query
44 | generated_url = self.create_prometheus_url(query)
45 | urls[name] = generated_url
46 | return urls
47 |
48 | def create_prometheus_url(self, query: str):
49 |         """Create the full query url for a given query."""
50 | encoded_query = urllib.parse.quote(query)
51 | url = f"{self.base_url}/graph?g0.expr={encoded_query}&g0.tab=0"
52 | return url
53 |
--------------------------------------------------------------------------------
/src/autometrics/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/src/autometrics/py.typed
--------------------------------------------------------------------------------
/src/autometrics/settings.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from typing import cast, Dict, List, TypedDict, Optional, Any
4 | from typing_extensions import Unpack
5 |
6 | from .tracker.types import TrackerType
7 | from .exposition import ExporterOptions
8 | from .objectives import ObjectiveLatency
9 | from .utils import extract_repository_provider, read_repository_url_from_fs
10 |
11 |
12 | class AutometricsSettings(TypedDict):
13 | """Settings for autometrics."""
14 |
15 | histogram_buckets: List[float]
16 | tracker: TrackerType
17 | exporter: Optional[ExporterOptions]
18 | enable_exemplars: bool
19 | service_name: str
20 | commit: str
21 | version: str
22 | branch: str
23 | repository_url: str
24 | repository_provider: str
25 |
26 |
27 | class AutometricsOptions(TypedDict, total=False):
28 | """User supplied overrides for autometrics settings."""
29 |
30 | histogram_buckets: List[float]
31 | tracker: str
32 | exporter: Dict[str, Any]
33 | enable_exemplars: bool
34 | service_name: str
35 | commit: str
36 | version: str
37 | branch: str
38 | repository_url: str
39 | repository_provider: str
40 |
41 |
42 | def get_objective_boundaries():
43 | """Get the objective latency boundaries as float values in seconds (instead of strings)"""
44 | return list(map(lambda c: float(c.value), ObjectiveLatency))
45 |
46 |
47 | settings: Optional[AutometricsSettings] = None
48 |
49 |
50 | def init_settings(**overrides: Unpack[AutometricsOptions]) -> AutometricsSettings:
51 | tracker_setting = (
52 | overrides.get("tracker") or os.getenv("AUTOMETRICS_TRACKER") or "opentelemetry"
53 | )
54 | tracker_type = (
55 | TrackerType.PROMETHEUS
56 | if tracker_setting.lower() == "prometheus"
57 | else TrackerType.OPENTELEMETRY
58 | )
59 |
60 | exporter: Optional[ExporterOptions] = None
61 | exporter_option = overrides.get("exporter")
62 | if exporter_option:
63 | exporter = cast(ExporterOptions, exporter_option)
64 |
65 | repository_url: Optional[str] = overrides.get(
66 | "repository_url", os.getenv("AUTOMETRICS_REPOSITORY_URL")
67 | )
68 | if repository_url is None:
69 | repository_url = read_repository_url_from_fs()
70 |
71 | repository_provider: Optional[str] = overrides.get(
72 | "repository_provider", os.getenv("AUTOMETRICS_REPOSITORY_PROVIDER")
73 | )
74 | if repository_provider is None and repository_url is not None:
75 | repository_provider = extract_repository_provider(repository_url)
76 |
77 | config: AutometricsSettings = {
78 | "histogram_buckets": overrides.get("histogram_buckets")
79 | or get_objective_boundaries(),
80 | "enable_exemplars": overrides.get(
81 | "enable_exemplars", os.getenv("AUTOMETRICS_EXEMPLARS") == "true"
82 | ),
83 | "tracker": tracker_type,
84 | "exporter": exporter,
85 | "service_name": overrides.get(
86 | "service_name",
87 | os.getenv(
88 | "AUTOMETRICS_SERVICE_NAME",
89 | os.getenv("OTEL_SERVICE_NAME", __package__.rsplit(".", 1)[0]),
90 | ),
91 | ),
92 | "commit": overrides.get(
93 | "commit", os.getenv("AUTOMETRICS_COMMIT", os.getenv("COMMIT_SHA", ""))
94 | ),
95 | "branch": overrides.get(
96 | "branch", os.getenv("AUTOMETRICS_BRANCH", os.getenv("BRANCH_NAME", ""))
97 | ),
98 | "version": overrides.get("version", os.getenv("AUTOMETRICS_VERSION", "")),
99 | "repository_url": repository_url or "",
100 | "repository_provider": repository_provider or "",
101 | }
102 | validate_settings(config)
103 |
104 | global settings
105 | settings = config
106 | return settings
107 |
108 |
109 | def get_settings() -> AutometricsSettings:
110 | """Get the current settings."""
111 | global settings
112 | if settings is None:
113 | settings = init_settings()
114 | return settings
115 |
116 |
117 | def validate_settings(settings: AutometricsSettings):
118 | """Ensure that the settings are valid. For example, we don't support OpenTelemetry exporters with Prometheus tracker."""
119 | if settings["exporter"]:
120 | exporter_type = settings["exporter"]["type"]
121 | if settings["tracker"] == TrackerType.PROMETHEUS:
122 | if exporter_type != "prometheus":
123 | raise ValueError(
124 | f"Exporter type {exporter_type} is not supported with Prometheus tracker."
125 | )
126 |
--------------------------------------------------------------------------------
/src/autometrics/test_caller.py:
--------------------------------------------------------------------------------
1 | """Tests for caller tracking."""
2 | from functools import wraps
3 | from prometheus_client.exposition import generate_latest
4 |
5 | from .decorator import autometrics
6 | from .initialization import init
7 |
8 |
9 | def test_caller_detection():
10 | """This is a test to see if the caller is properly detected."""
11 | init()
12 |
13 | def dummy_decorator(func):
14 | @wraps(func)
15 | def dummy_wrapper(*args, **kwargs):
16 | return func(*args, **kwargs)
17 |
18 | return dummy_wrapper
19 |
20 | def another_decorator(func):
21 | @wraps(func)
22 | def another_wrapper(*args, **kwargs):
23 | return func(*args, **kwargs)
24 |
25 | return another_wrapper
26 |
27 | @dummy_decorator
28 | @autometrics
29 | @another_decorator
30 | def foo():
31 | pass
32 |
33 | @autometrics
34 | def bar():
35 | foo()
36 |
37 | bar()
38 |
39 | blob = generate_latest()
40 | assert blob is not None
41 | data = blob.decode("utf-8")
42 |
43 |     expected = """function_calls_total{caller_function="test_caller_detection.<locals>.bar",caller_module="autometrics.test_caller",function="test_caller_detection.<locals>.foo",module="autometrics.test_caller",objective_name="",objective_percentile="",result="ok",service_name="autometrics"} 1.0"""
44 | assert "wrapper" not in data
45 | assert expected in data
46 |
--------------------------------------------------------------------------------
/src/autometrics/test_initialization.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from autometrics import init
4 | from autometrics.exposition import PrometheusExporterOptions
5 | from autometrics.tracker.opentelemetry import OpenTelemetryTracker
6 | from autometrics.tracker.prometheus import PrometheusTracker
7 | from autometrics.tracker.tracker import get_tracker
8 | from autometrics.tracker.types import TrackerType
9 | from autometrics.settings import get_settings
10 |
11 |
12 | def test_init():
13 | """Test that the default settings are set correctly"""
14 | init()
15 | settings = get_settings()
16 | assert settings == {
17 | "histogram_buckets": [
18 | 0.005,
19 | 0.01,
20 | 0.025,
21 | 0.05,
22 | 0.075,
23 | 0.1,
24 | 0.25,
25 | 0.5,
26 | 0.75,
27 | 1.0,
28 | 2.5,
29 | 5.0,
30 | 7.5,
31 | 10.0,
32 | ],
33 | "enable_exemplars": False,
34 | "tracker": TrackerType.OPENTELEMETRY,
35 | "exporter": None,
36 | "service_name": "autometrics",
37 | "commit": "",
38 | "branch": "",
39 | "version": "",
40 | "repository_url": "git@github.com:autometrics-dev/autometrics-py.git",
41 | "repository_provider": "github",
42 | }
43 | tracker = get_tracker()
44 | assert isinstance(tracker, OpenTelemetryTracker)
45 |
46 |
47 | def test_init_custom():
48 | """Test that setting custom settings works correctly"""
49 | init(
50 | tracker="prometheus",
51 | service_name="test",
52 | enable_exemplars=True,
53 | version="1.0.0",
54 | commit="123456",
55 | branch="main",
56 | )
57 | settings = get_settings()
58 | assert settings == {
59 | "histogram_buckets": [
60 | 0.005,
61 | 0.01,
62 | 0.025,
63 | 0.05,
64 | 0.075,
65 | 0.1,
66 | 0.25,
67 | 0.5,
68 | 0.75,
69 | 1.0,
70 | 2.5,
71 | 5.0,
72 | 7.5,
73 | 10.0,
74 | ],
75 | "enable_exemplars": True,
76 | "tracker": TrackerType.PROMETHEUS,
77 | "exporter": None,
78 | "service_name": "test",
79 | "commit": "123456",
80 | "branch": "main",
81 | "version": "1.0.0",
82 | "repository_url": "git@github.com:autometrics-dev/autometrics-py.git",
83 | "repository_provider": "github",
84 | }
85 | tracker = get_tracker()
86 | assert isinstance(tracker, PrometheusTracker)
87 |
88 |
89 | def test_init_env_vars(monkeypatch):
90 | """Test that setting custom settings via environment variables works correctly"""
91 | monkeypatch.setenv("AUTOMETRICS_TRACKER", "prometheus")
92 | monkeypatch.setenv("AUTOMETRICS_SERVICE_NAME", "test")
93 | monkeypatch.setenv("AUTOMETRICS_EXEMPLARS", "true")
94 | monkeypatch.setenv("AUTOMETRICS_VERSION", "1.0.0")
95 | monkeypatch.setenv("AUTOMETRICS_COMMIT", "123456")
96 | monkeypatch.setenv("AUTOMETRICS_BRANCH", "main")
97 | init()
98 | settings = get_settings()
99 |
100 | assert settings == {
101 | "histogram_buckets": [
102 | 0.005,
103 | 0.01,
104 | 0.025,
105 | 0.05,
106 | 0.075,
107 | 0.1,
108 | 0.25,
109 | 0.5,
110 | 0.75,
111 | 1.0,
112 | 2.5,
113 | 5.0,
114 | 7.5,
115 | 10.0,
116 | ],
117 | "enable_exemplars": True,
118 | "tracker": TrackerType.PROMETHEUS,
119 | "exporter": None,
120 | "service_name": "test",
121 | "commit": "123456",
122 | "branch": "main",
123 | "version": "1.0.0",
124 | "repository_url": "git@github.com:autometrics-dev/autometrics-py.git",
125 | "repository_provider": "github",
126 | }
127 |
128 |
129 | def test_double_init():
130 | """Test that calling init twice fails"""
131 | init()
132 | with pytest.raises(RuntimeError):
133 | init()
134 |
135 |
136 | def test_init_with_exporter():
137 | """Test that setting exporter works correctly"""
138 | init(
139 | tracker="prometheus",
140 | exporter={
141 | "type": "prometheus",
142 | },
143 | )
144 | settings = get_settings()
145 | assert settings == {
146 | "histogram_buckets": [
147 | 0.005,
148 | 0.01,
149 | 0.025,
150 | 0.05,
151 | 0.075,
152 | 0.1,
153 | 0.25,
154 | 0.5,
155 | 0.75,
156 | 1.0,
157 | 2.5,
158 | 5.0,
159 | 7.5,
160 | 10.0,
161 | ],
162 | "enable_exemplars": False,
163 | "tracker": TrackerType.PROMETHEUS,
164 | "exporter": PrometheusExporterOptions(type="prometheus"),
165 | "service_name": "autometrics",
166 | "commit": "",
167 | "branch": "",
168 | "version": "",
169 | "repository_url": "git@github.com:autometrics-dev/autometrics-py.git",
170 | "repository_provider": "github",
171 | }
172 | tracker = get_tracker()
173 | assert isinstance(tracker, PrometheusTracker)
174 |
175 |
176 | def test_init_exporter_validation():
177 | with pytest.raises(ValueError):
178 | init(
179 | tracker="prometheus",
180 | exporter={
181 | "type": "otel-custom",
182 | },
183 | )
184 |
185 |
186 | def test_init_repo_meta_suppress_detection():
187 | init(repository_url="", repository_provider="")
188 | settings = get_settings()
189 |     assert settings["repository_provider"] == ""
190 |     assert settings["repository_url"] == ""
191 |
--------------------------------------------------------------------------------
/src/autometrics/test_objectives.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from autometrics.objectives import Objective
4 |
5 |
6 | def test_objective_name_warning(caplog):
7 | """Test that a warning is logged when an objective name contains invalid characters."""
8 | caplog.set_level(logging.WARNING)
9 | caplog.clear()
10 | Objective("Incorrect name.")
11 | assert len(caplog.records) == 1
12 | assert caplog.records[0].levelname == "WARNING"
13 | assert "contains invalid characters" in caplog.records[0].message
14 | caplog.clear()
15 | Objective("correct-name-123")
16 | assert len(caplog.records) == 0
17 |
--------------------------------------------------------------------------------
/src/autometrics/test_prometheus_url.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from .prometheus_url import Generator
4 |
5 |
6 | # Defaults to localhost:9090
7 | @pytest.fixture(name="default_url_generator", autouse=True)
8 | def fixture_default_url() -> Generator:
9 | """Create a generator that uses the default url."""
10 | return Generator("myFunction", "myModule")
11 |
12 |
13 | def test_create_prometheus_url_with_default_url(default_url_generator: Generator):
14 | """Test that the prometheus url is created correctly."""
15 | url = default_url_generator.create_prometheus_url("myQuery")
16 |
17 | # Make sure the base URL is correct
18 | assert url.startswith("http://localhost:9090/graph?g0.expr=")
19 |
20 | # Make sure the query is included in the URL
21 | assert "myQuery" in url
22 |
23 |
24 | def test_create_urls_with_default_url(default_url_generator: Generator):
25 | urls = default_url_generator.create_urls()
26 |
27 | print(urls)
28 |
29 | result = {
30 | "Request rate URL": "http://localhost:9090/graph?g0.expr=sum%20by%20%28function%2C%20module%2C%20commit%2C%20version%29%20%28rate%20%28function_calls_count_total%7Bfunction%3D%22myFunction%22%2Cmodule%3D%22myModule%22%7D%5B5m%5D%29%20%2A%20on%20%28instance%2C%20job%29%20group_left%28version%2C%20commit%29%20%28last_over_time%28build_info%5B1s%5D%29%20or%20on%20%28instance%2C%20job%29%20up%29%29&g0.tab=0",
31 | "Latency URL": "http://localhost:9090/graph?g0.expr=sum%20by%20%28le%2C%20function%2C%20module%2C%20commit%2C%20version%29%20%28rate%28function_calls_duration_bucket%7Bfunction%3D%22myFunction%22%2Cmodule%3D%22myModule%22%7D%5B5m%5D%29%20%2A%20on%20%28instance%2C%20job%29%20group_left%28version%2C%20commit%29%20%28last_over_time%28build_info%5B1s%5D%29%20or%20on%20%28instance%2C%20job%29%20up%29%29&g0.tab=0",
32 | "Error Ratio URL": "http://localhost:9090/graph?g0.expr=sum%20by%20%28function%2C%20module%2C%20commit%2C%20version%29%20%28rate%20%28function_calls_count_total%7Bfunction%3D%22myFunction%22%2Cmodule%3D%22myModule%22%2C%20result%3D%22error%22%7D%5B5m%5D%29%20%2A%20on%20%28instance%2C%20job%29%20group_left%28version%2C%20commit%29%20%28last_over_time%28build_info%5B1s%5D%29%20or%20on%20%28instance%2C%20job%29%20up%29%29%20/%20sum%20by%20%28function%2C%20module%2C%20commit%2C%20version%29%20%28rate%20%28function_calls_count_total%7Bfunction%3D%22myFunction%22%2Cmodule%3D%22myModule%22%7D%5B5m%5D%29%20%2A%20on%20%28instance%2C%20job%29%20group_left%28version%2C%20commit%29%20%28last_over_time%28build_info%5B1s%5D%29%20or%20on%20%28instance%2C%20job%29%20up%29%29&g0.tab=0",
33 | }
34 | assert result == urls
35 |
36 |
37 | @pytest.fixture(name="custom_url_generator", autouse=True)
38 | def fixture_custom_url():
39 | return Generator("myFunction", "myModule", base_url="http://localhost:9091")
40 |
41 |
42 | def test_create_prometheus_url_with_custom_url(custom_url_generator: Generator):
43 | """Test the prometheus url generator with a custom base URL."""
44 | url = custom_url_generator.create_prometheus_url("myQuery")
45 |
46 | # Make sure the base URL is correct
47 | assert url.startswith("http://localhost:9091/graph?g0.expr=")
48 |
49 | # Make sure the query is included in the URL
50 | assert "myQuery" in url
51 |
--------------------------------------------------------------------------------
/src/autometrics/test_utils.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from autometrics.utils import get_repository_url
4 |
5 | config1 = """
6 | [core]
7 | key = value
8 | [remote "origin"]
9 | key2 = value2
10 | url = https://github.com/autometrics/autometrics-py.git
11 | key3 = value3
12 | [branch "main"]
13 | some-key = some-value
14 | """
15 |
16 | config2 = """
17 | [core]
18 | key = value
19 | """
20 |
21 | config3 = """
22 | [core]
23 | key = value
24 | [remote.origin]
25 | key2 = value2
26 | url = ssh://git@github.com:autometrics-dev/autometrics-py.git
27 | key3 = value3
28 | """
29 |
30 | config4 = """
31 | [remote "upstream"]
32 | url = "git@autometrics.dev/autometrics-ts.git"
33 |
34 | [remote "origin"]
35 | url = "git@github.com:autometrics-dev/autometrics-py.git"
36 | """
37 |
38 |
39 | @pytest.fixture(
40 | params=[
41 | (config1, "https://github.com/autometrics/autometrics-py.git"),
42 | (config2, None),
43 | (config3, "ssh://git@github.com:autometrics-dev/autometrics-py.git"),
44 | (config4, "git@github.com:autometrics-dev/autometrics-py.git"),
45 | ]
46 | )
47 | def git_config(request):
48 | return request.param
49 |
50 |
51 | def test_read_repository_url(monkeypatch, git_config):
52 | """Test that the repository url is read correctly from git config."""
53 | (config, expected_url) = git_config
54 | url = get_repository_url(config)
55 | assert url == expected_url
56 |
--------------------------------------------------------------------------------
/src/autometrics/tracker/__init__.py:
--------------------------------------------------------------------------------
1 | from .tracker import *
2 | from .types import *
3 |
--------------------------------------------------------------------------------
/src/autometrics/tracker/opentelemetry.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Dict, Optional, Mapping
3 |
4 | from opentelemetry.exporter.prometheus import PrometheusMetricReader
5 | from opentelemetry.metrics import (
6 | Counter,
7 | Histogram,
8 | UpDownCounter,
9 | set_meter_provider,
10 | )
11 | from opentelemetry.semconv.resource import ResourceAttributes
12 | from opentelemetry.sdk.metrics import MeterProvider
13 | from opentelemetry.sdk.metrics.view import View, ExplicitBucketHistogramAggregation
14 | from opentelemetry.sdk.metrics.export import MetricReader
15 | from opentelemetry.sdk.resources import Resource
16 | from opentelemetry.util.types import AttributeValue
17 |
18 | from ..exemplar import get_exemplar
19 | from .types import Result
20 | from ..objectives import Objective, ObjectiveLatency
21 | from ..constants import (
22 | AUTOMETRICS_VERSION,
23 | CONCURRENCY_NAME,
24 | CONCURRENCY_DESCRIPTION,
25 | COUNTER_DESCRIPTION,
26 | COUNTER_NAME,
27 | HISTOGRAM_DESCRIPTION,
28 | HISTOGRAM_NAME,
29 | BUILD_INFO_NAME,
30 | BUILD_INFO_DESCRIPTION,
31 | REPOSITORY_PROVIDER,
32 | REPOSITORY_URL,
33 | SERVICE_NAME,
34 | OBJECTIVE_NAME,
35 | OBJECTIVE_PERCENTILE,
36 | OBJECTIVE_LATENCY_THRESHOLD,
37 | SPEC_VERSION,
38 | )
39 | from ..settings import get_settings
40 |
41 | LabelValue = AttributeValue
42 | Attributes = Dict[str, LabelValue]
43 |
44 |
45 | def get_resource_attrs() -> Attributes:
46 | attrs: Attributes = {}
47 | if get_settings()["service_name"] is not None:
48 | attrs[ResourceAttributes.SERVICE_NAME] = get_settings()["service_name"]
49 | if get_settings()["version"] is not None:
50 | attrs[ResourceAttributes.SERVICE_VERSION] = get_settings()["version"]
51 | return attrs
52 |
53 |
54 | class OpenTelemetryTracker:
55 | """Tracker for OpenTelemetry."""
56 |
57 | __counter_instance: Counter
58 | __histogram_instance: Histogram
59 | __up_down_counter_build_info_instance: UpDownCounter
60 | __up_down_counter_concurrency_instance: UpDownCounter
61 |
62 | def __init__(self, reader: Optional[MetricReader] = None):
63 | view = View(
64 | name=HISTOGRAM_NAME,
65 | description=HISTOGRAM_DESCRIPTION,
66 | instrument_name=HISTOGRAM_NAME,
67 | aggregation=ExplicitBucketHistogramAggregation(
68 | boundaries=get_settings()["histogram_buckets"]
69 | ),
70 | )
71 | resource = Resource.create(get_resource_attrs())
72 | readers = [reader or PrometheusMetricReader()]
73 | meter_provider = MeterProvider(
74 | views=[view],
75 | resource=resource,
76 | metric_readers=readers,
77 | )
78 | set_meter_provider(meter_provider)
79 | meter = meter_provider.get_meter(name="autometrics")
80 | self.__counter_instance = meter.create_counter(
81 | name=COUNTER_NAME, description=COUNTER_DESCRIPTION
82 | )
83 | self.__histogram_instance = meter.create_histogram(
84 | name=HISTOGRAM_NAME,
85 | description=HISTOGRAM_DESCRIPTION,
86 | unit="seconds",
87 | )
88 | self.__up_down_counter_build_info_instance = meter.create_up_down_counter(
89 | name=BUILD_INFO_NAME,
90 | description=BUILD_INFO_DESCRIPTION,
91 | )
92 | self.__up_down_counter_concurrency_instance = meter.create_up_down_counter(
93 | name=CONCURRENCY_NAME,
94 | description=CONCURRENCY_DESCRIPTION,
95 | )
96 | self._has_set_build_info = False
97 |
98 | def __count(
99 | self,
100 | function: str,
101 | module: str,
102 | caller_module: str,
103 | caller_function: str,
104 | objective: Optional[Objective],
105 | exemplar: Optional[dict],
106 | result: Result,
107 | inc_by: int = 1,
108 | ):
109 | objective_name = "" if objective is None else objective.name
110 | percentile = (
111 | ""
112 | if objective is None or objective.success_rate is None
113 | else objective.success_rate.value
114 | )
115 | self.__counter_instance.add(
116 | inc_by,
117 | attributes={
118 | "function": function,
119 | "module": module,
120 | "result": result.value,
121 | "caller.module": caller_module,
122 | "caller.function": caller_function,
123 | OBJECTIVE_NAME: objective_name,
124 | OBJECTIVE_PERCENTILE: percentile,
125 | SERVICE_NAME: get_settings()["service_name"],
126 | },
127 | )
128 |
129 | def __histogram(
130 | self,
131 | function: str,
132 | module: str,
133 | duration: float,
134 | objective: Optional[Objective],
135 | exemplar: Optional[dict],
136 | ):
137 | objective_name = "" if objective is None else objective.name
138 | latency = None if objective is None else objective.latency
139 | percentile = ""
140 | threshold = ""
141 |
142 | if latency is not None:
143 | threshold = latency[0].value
144 | percentile = latency[1].value
145 |
146 | self.__histogram_instance.record(
147 | duration,
148 | attributes={
149 | "function": function,
150 | "module": module,
151 | SERVICE_NAME: get_settings()["service_name"],
152 | OBJECTIVE_NAME: objective_name,
153 | OBJECTIVE_PERCENTILE: percentile,
154 | OBJECTIVE_LATENCY_THRESHOLD: threshold,
155 | },
156 | )
157 |
158 | def set_build_info(self, commit: str, version: str, branch: str):
159 | if not self._has_set_build_info:
160 | self._has_set_build_info = True
161 | self.__up_down_counter_build_info_instance.add(
162 | 1.0,
163 | attributes={
164 | "commit": commit,
165 | "version": version,
166 | "branch": branch,
167 | SERVICE_NAME: get_settings()["service_name"],
168 | REPOSITORY_URL: get_settings()["repository_url"],
169 | REPOSITORY_PROVIDER: get_settings()["repository_provider"],
170 | AUTOMETRICS_VERSION: SPEC_VERSION,
171 | },
172 | )
173 |
174 | def start(
175 | self,
176 | function: str,
177 | module: str,
178 | track_concurrency: Optional[bool] = False,
179 | ):
180 | """Start tracking metrics for a function call."""
181 | if track_concurrency:
182 | self.__up_down_counter_concurrency_instance.add(
183 | 1.0,
184 | attributes={
185 | "function": function,
186 | "module": module,
187 | SERVICE_NAME: get_settings()["service_name"],
188 | },
189 | )
190 |
191 | def finish(
192 | self,
193 | duration: float,
194 | function: str,
195 | module: str,
196 | caller_module: str,
197 | caller_function: str,
198 | result: Result = Result.OK,
199 | objective: Optional[Objective] = None,
200 | track_concurrency: Optional[bool] = False,
201 | ):
202 | """Finish tracking metrics for a function call."""
203 | exemplar = None
204 | # Currently, exemplars are only supported by prometheus-client
205 | # https://github.com/autometrics-dev/autometrics-py/issues/41
206 | # if get_settings()["exemplars"]:
207 | # exemplar = get_exemplar()
208 | self.__count(
209 | function,
210 | module,
211 | caller_module,
212 | caller_function,
213 | objective,
214 | exemplar,
215 | result,
216 | )
217 | self.__histogram(function, module, duration, objective, exemplar)
218 | if track_concurrency:
219 | self.__up_down_counter_concurrency_instance.add(
220 | -1.0,
221 | attributes={
222 | "function": function,
223 | "module": module,
224 | SERVICE_NAME: get_settings()["service_name"],
225 | },
226 | )
227 |
228 | def initialize_counters(
229 | self,
230 | function: str,
231 | module: str,
232 | objective: Optional[Objective] = None,
233 | ):
234 | """Initialize tracking metrics for a function call at zero."""
235 | caller_module = ""
236 | caller_function = ""
237 | self.__count(
238 | function,
239 | module,
240 | caller_module,
241 | caller_function,
242 | objective,
243 | None,
244 | Result.OK,
245 | 0,
246 | )
247 | self.__count(
248 | function,
249 | module,
250 | caller_module,
251 | caller_function,
252 | objective,
253 | None,
254 | Result.ERROR,
255 | 0,
256 | )
257 |
--------------------------------------------------------------------------------
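
A minimal sketch of exercising OpenTelemetryTracker in isolation with the SDK's InMemoryMetricReader; the function and module names are illustrative, and it assumes the autometrics settings fall back to their defaults when init() has not been called:

    from opentelemetry.sdk.metrics.export import InMemoryMetricReader
    from autometrics.tracker.opentelemetry import OpenTelemetryTracker

    reader = InMemoryMetricReader()
    tracker = OpenTelemetryTracker(reader)
    tracker.finish(0.25, "my_fn", "my_module", caller_module="", caller_function="")
    # The reader now holds the recorded counter and histogram data points.
    metrics = reader.get_metrics_data()
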
/src/autometrics/tracker/prometheus.py:
--------------------------------------------------------------------------------
1 | import time
2 | from typing import Optional
3 | from prometheus_client import Counter, Histogram, Gauge
4 |
5 | from ..constants import (
6 | AUTOMETRICS_VERSION_PROMETHEUS,
7 | COUNTER_NAME_PROMETHEUS,
8 | HISTOGRAM_NAME_PROMETHEUS,
9 | CONCURRENCY_NAME_PROMETHEUS,
10 | REPOSITORY_PROVIDER_PROMETHEUS,
11 | REPOSITORY_URL_PROMETHEUS,
12 | SERVICE_NAME_PROMETHEUS,
13 | BUILD_INFO_NAME,
14 | COUNTER_DESCRIPTION,
15 | HISTOGRAM_DESCRIPTION,
16 | CONCURRENCY_DESCRIPTION,
17 | BUILD_INFO_DESCRIPTION,
18 | OBJECTIVE_NAME_PROMETHEUS,
19 | OBJECTIVE_PERCENTILE_PROMETHEUS,
20 | OBJECTIVE_LATENCY_THRESHOLD_PROMETHEUS,
21 | COMMIT_KEY,
22 | SPEC_VERSION,
23 | VERSION_KEY,
24 | BRANCH_KEY,
25 | )
26 |
27 | from ..exemplar import get_exemplar
28 | from .types import Result
29 | from ..objectives import Objective
30 | from ..settings import get_settings
31 |
32 |
33 | class PrometheusTracker:
34 | """A tracker for Prometheus metrics."""
35 |
36 | prom_counter = Counter(
37 | COUNTER_NAME_PROMETHEUS,
38 | COUNTER_DESCRIPTION,
39 | [
40 | "function",
41 | "module",
42 | SERVICE_NAME_PROMETHEUS,
43 | "result",
44 | "caller_module",
45 | "caller_function",
46 | OBJECTIVE_NAME_PROMETHEUS,
47 | OBJECTIVE_PERCENTILE_PROMETHEUS,
48 | ],
49 | )
50 | prom_histogram = Histogram(
51 | HISTOGRAM_NAME_PROMETHEUS,
52 | HISTOGRAM_DESCRIPTION,
53 | [
54 | "function",
55 | "module",
56 | SERVICE_NAME_PROMETHEUS,
57 | OBJECTIVE_NAME_PROMETHEUS,
58 | OBJECTIVE_PERCENTILE_PROMETHEUS,
59 | OBJECTIVE_LATENCY_THRESHOLD_PROMETHEUS,
60 | ],
61 | buckets=get_settings()["histogram_buckets"],
62 | unit="seconds",
63 | )
64 | prom_gauge_build_info = Gauge(
65 | BUILD_INFO_NAME,
66 | BUILD_INFO_DESCRIPTION,
67 | [
68 | COMMIT_KEY,
69 | VERSION_KEY,
70 | BRANCH_KEY,
71 | SERVICE_NAME_PROMETHEUS,
72 | REPOSITORY_URL_PROMETHEUS,
73 | REPOSITORY_PROVIDER_PROMETHEUS,
74 | AUTOMETRICS_VERSION_PROMETHEUS,
75 | ],
76 | )
77 | prom_gauge_concurrency = Gauge(
78 | CONCURRENCY_NAME_PROMETHEUS,
79 | CONCURRENCY_DESCRIPTION,
80 | [
81 | "function",
82 | "module",
83 | SERVICE_NAME_PROMETHEUS,
84 | ],
85 | )
86 |
87 | def __init__(self) -> None:
88 | self._has_set_build_info = False
89 |
90 | def _count(
91 | self,
92 | func_name: str,
93 | module_name: str,
94 | caller_module: str,
95 | caller_function: str,
96 | objective: Optional[Objective] = None,
97 | exemplar: Optional[dict] = None,
98 | result: Result = Result.OK,
99 | inc_by: int = 1,
100 | ):
101 | """Increment the counter for the function call."""
102 | objective_name = "" if objective is None else objective.name
103 | percentile = (
104 | ""
105 | if objective is None or objective.success_rate is None
106 | else objective.success_rate.value
107 | )
108 | service_name = get_settings()["service_name"]
109 |
110 | self.prom_counter.labels(
111 | func_name,
112 | module_name,
113 | service_name,
114 | result.value,
115 | caller_module,
116 | caller_function,
117 | objective_name,
118 | percentile,
119 | ).inc(inc_by, exemplar)
120 |
121 | def _histogram(
122 | self,
123 | func_name: str,
124 | module_name: str,
125 | duration: float,
126 | objective: Optional[Objective] = None,
127 | exemplar: Optional[dict] = None,
128 | ):
129 | """Observe the duration of the function call."""
130 |
131 | objective_name = "" if objective is None else objective.name
132 | latency = None if objective is None else objective.latency
133 | percentile = ""
134 | threshold = ""
135 | if latency is not None:
136 | threshold = latency[0].value
137 | percentile = latency[1].value
138 | service_name = get_settings()["service_name"]
139 |
140 | self.prom_histogram.labels(
141 | func_name,
142 | module_name,
143 | service_name,
144 | objective_name,
145 | percentile,
146 | threshold,
147 | ).observe(duration, exemplar)
148 |
149 | def set_build_info(self, commit: str, version: str, branch: str):
150 | if not self._has_set_build_info:
151 | self._has_set_build_info = True
152 | service_name = get_settings()["service_name"]
153 | repository_url = get_settings()["repository_url"]
154 | repository_provider = get_settings()["repository_provider"]
155 | self.prom_gauge_build_info.labels(
156 | commit,
157 | version,
158 | branch,
159 | service_name,
160 | repository_url,
161 | repository_provider,
162 | SPEC_VERSION,
163 | ).set(1)
164 |
165 | def start(
166 | self, function: str, module: str, track_concurrency: Optional[bool] = False
167 | ):
168 | """Start tracking metrics for a function call."""
169 | if track_concurrency:
170 | service_name = get_settings()["service_name"]
171 | self.prom_gauge_concurrency.labels(function, module, service_name).inc()
172 |
173 | def finish(
174 | self,
175 | duration: float,
176 | function: str,
177 | module: str,
178 | caller_module: str,
179 | caller_function: str,
180 | result: Result = Result.OK,
181 | objective: Optional[Objective] = None,
182 | track_concurrency: Optional[bool] = False,
183 | ):
184 | """Finish tracking metrics for a function call."""
185 | exemplar = None
186 | if get_settings()["enable_exemplars"]:
187 | exemplar = get_exemplar()
188 |
189 | self._count(
190 | function,
191 | module,
192 | caller_module,
193 | caller_function,
194 | objective,
195 | exemplar,
196 | result,
197 | )
198 | self._histogram(function, module, duration, objective, exemplar)
199 |
200 | if track_concurrency:
201 | service_name = get_settings()["service_name"]
202 | self.prom_gauge_concurrency.labels(function, module, service_name).dec()
203 |
204 | def initialize_counters(
205 | self,
206 | function: str,
207 | module: str,
208 | objective: Optional[Objective] = None,
209 | ):
210 | """Initialize tracking metrics for a function call at zero."""
211 | caller_module = ""
212 | caller_function = ""
213 | self._count(
214 | function,
215 | module,
216 | caller_module,
217 | caller_function,
218 | objective,
219 | None,
220 | Result.OK,
221 | 0,
222 | )
223 | self._count(
224 | function,
225 | module,
226 | caller_module,
227 | caller_function,
228 | objective,
229 | None,
230 | Result.ERROR,
231 | 0,
232 | )
233 |
--------------------------------------------------------------------------------
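
A sketch of how an Objective flows into the counter and histogram labels above; it assumes ObjectivePercentile is exported by autometrics.objectives (as the use of objective.success_rate.value suggests), and the names are illustrative:

    from prometheus_client.exposition import generate_latest
    from autometrics.objectives import Objective, ObjectivePercentile
    from autometrics.tracker.prometheus import PrometheusTracker

    slo = Objective("api", success_rate=ObjectivePercentile.P99)
    tracker = PrometheusTracker()
    tracker.finish(0.1, "handler", "api", "", "", objective=slo)
    # The exposition now carries the objective name and percentile labels.
    print(generate_latest().decode("utf-8"))
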
/src/autometrics/tracker/temporary.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from typing import Optional
4 |
5 | from .types import Result, TrackerMessage, MessageQueue, TrackMetrics
6 | from ..objectives import Objective
7 |
8 |
9 | class TemporaryTracker:
10 | """A tracker that temporarily stores metrics only to hand them off to another tracker."""
11 |
12 | _queue: MessageQueue = []
13 | _is_closed: bool = False
14 | _new_tracker: Optional[TrackMetrics] = None
15 |
16 | def set_build_info(self, commit: str, version: str, branch: str):
17 | """Observe the build info. Should only be called once per tracker instance"""
18 | pass
19 |
20 | def start(
21 | self, function: str, module: str, track_concurrency: Optional[bool] = False
22 | ):
23 | """Start tracking metrics for a function call."""
24 | self.append_to_queue(("start", function, module, track_concurrency))
25 |
26 | def finish(
27 | self,
28 | duration: float,
29 | function: str,
30 | module: str,
31 | caller_module: str,
32 | caller_function: str,
33 | result: Result = Result.OK,
34 | objective: Optional[Objective] = None,
35 | track_concurrency: Optional[bool] = False,
36 | ):
37 | """Finish tracking metrics for a function call."""
38 | self.append_to_queue(
39 | (
40 | "finish",
41 | duration,
42 | function,
43 | module,
44 | caller_module,
45 | caller_function,
46 | result,
47 | objective,
48 | track_concurrency,
49 | )
50 | )
51 |
52 | def initialize_counters(
53 | self,
54 | function: str,
55 | module: str,
56 | objective: Optional[Objective] = None,
57 | ):
58 | """Initialize (counter) metrics for a function at zero."""
59 | self.append_to_queue(("initialize_counters", function, module, objective))
60 |
61 | def append_to_queue(self, message: TrackerMessage):
62 | """Append a message to the queue."""
63 | if not self._is_closed:
64 | self._queue.append(message)
65 | if len(self._queue) > 999:
66 | self._is_closed = True
67 | elif self._new_tracker is not None:
68 | function_name, *args = message
69 | function = getattr(self._new_tracker, function_name)
70 | function(*args)
71 | else:
72 | logging.error(
73 | "Temporary tracker queue is filled, this metric will be dropped. Please run init() when your application starts."
74 | )
75 |
76 | def replay_queue(self, tracker: TrackMetrics):
77 | """Replay a queue of messages on a different tracker."""
78 | self._new_tracker = tracker
79 | self._is_closed = True
80 | for function_name, *args in self._queue:
81 | function = getattr(tracker, function_name)
82 | function(*args)
83 |
--------------------------------------------------------------------------------
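
A sketch of the hand-off implemented above: calls made before init() are queued by the TemporaryTracker and replayed onto the real tracker once one exists. The names are illustrative:

    from autometrics.tracker.temporary import TemporaryTracker
    from autometrics.tracker.prometheus import PrometheusTracker

    temp = TemporaryTracker()
    temp.start("my_fn", "my_module")                 # queued, nothing recorded yet
    temp.finish(0.05, "my_fn", "my_module", "", "")  # queued as well

    real = PrometheusTracker()
    temp.replay_queue(real)  # both messages are now applied to the real tracker
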
/src/autometrics/tracker/test_concurrency.py:
--------------------------------------------------------------------------------
1 | from prometheus_client.exposition import generate_latest
2 | import asyncio
3 | import pytest
4 |
5 | from ..decorator import autometrics
6 | from ..initialization import init
7 |
8 |
9 | @autometrics(track_concurrency=True)
10 | async def sleep(time: float):
11 | await asyncio.sleep(time)
12 |
13 |
14 | @pytest.mark.asyncio
15 | async def test_concurrency_tracking_prometheus(monkeypatch):
16 | init(tracker="prometheus")
17 |
18 | # Create a 200ms async task
19 | loop = asyncio.get_event_loop()
20 | task = loop.create_task(sleep(0.2))
21 |
22 | # Await a separate 100ms async task.
23 |     # This way, the 200ms task will still be running when this task is done.
24 |     # We have to do this to ensure that the 200ms task is kicked off before we call `generate_latest`.
25 | await sleep(0.1)
26 | blob = generate_latest()
27 | await task
28 | assert blob is not None
29 | data = blob.decode("utf-8")
30 |
31 | assert (
32 | f"""# TYPE function_calls_concurrent gauge\nfunction_calls_concurrent{{function="sleep",module="autometrics.tracker.test_concurrency",service_name="autometrics"}} 1.0"""
33 | in data
34 | )
35 |
36 |
37 | @pytest.mark.asyncio
38 | async def test_concurrency_tracking_opentelemetry(monkeypatch):
39 | init(tracker="opentelemetry")
40 |
41 | # Create a 200ms async task
42 | loop = asyncio.get_event_loop()
43 | task = loop.create_task(sleep(0.2))
44 |
45 | # Await a separate 100ms async task.
46 |     # This way, the 200ms task will still be running when this task is done.
47 |     # We have to do this to ensure that the 200ms task is kicked off before we call `generate_latest`.
48 | await sleep(0.1)
49 | blob = generate_latest()
50 | await task
51 | assert blob is not None
52 | data = blob.decode("utf-8")
53 | assert (
54 | f"""# TYPE function_calls_concurrent gauge\nfunction_calls_concurrent{{function="sleep",module="autometrics.tracker.test_concurrency",service_name="autometrics"}} 1.0"""
55 | in data
56 | )
57 |
--------------------------------------------------------------------------------
/src/autometrics/tracker/test_format.py:
--------------------------------------------------------------------------------
1 | from prometheus_client.exposition import generate_latest
2 | import pytest
3 |
4 | from . import TrackerType
5 | from ..decorator import autometrics
6 | from ..initialization import init
7 |
8 |
9 | @pytest.mark.parametrize("tracker", TrackerType)
10 | def test_metrics_format(tracker):
11 | """Test that the metrics are formatted correctly."""
12 | init(tracker=tracker.value, version="1.0")
13 |
14 | @autometrics
15 | def test_function():
16 | pass
17 |
18 | test_function()
19 |
20 | blob = generate_latest()
21 | assert blob is not None
22 | data = blob.decode("utf-8")
23 |
24 | assert "function_calls_total{" in data
25 | assert "function_calls_duration_seconds_bucket{" in data
26 |
--------------------------------------------------------------------------------
/src/autometrics/tracker/test_tracker.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 | from prometheus_client.exposition import generate_latest
4 |
5 | from .opentelemetry import OpenTelemetryTracker
6 | from .prometheus import PrometheusTracker
7 | from .tracker import get_tracker
8 |
9 | from ..initialization import init
10 |
11 |
12 | @pytest.fixture(
13 | params=[
14 | (None, OpenTelemetryTracker),
15 | ("prometheus", PrometheusTracker),
16 | ("PROMETHEUS", PrometheusTracker),
17 | ("something_else", OpenTelemetryTracker),
18 | ]
19 | )
20 | def tracker_var(request):
21 | return request.param
22 |
23 |
24 | def test_default_tracker(monkeypatch, tracker_var):
25 | """Test that the default tracker is set correctly."""
26 | (env_value, Tracker) = tracker_var
27 | if env_value is not None:
28 | monkeypatch.setenv("AUTOMETRICS_TRACKER", env_value)
29 | else:
30 | monkeypatch.delenv("AUTOMETRICS_TRACKER", raising=False)
31 | init()
32 | tracker = get_tracker()
33 | assert isinstance(tracker, Tracker)
34 |
35 |
36 | def test_init_prometheus_tracker_set_build_info(monkeypatch):
37 | """Test that init_tracker (for a Prometheus tracker) calls set_build_info using env vars."""
38 |
39 | commit = "d6abce3"
40 | version = "1.0.1"
41 | branch = "main"
42 | tracker = "prometheus"
43 |
44 | monkeypatch.setenv("AUTOMETRICS_COMMIT", commit)
45 | monkeypatch.setenv("AUTOMETRICS_VERSION", version)
46 | monkeypatch.setenv("AUTOMETRICS_BRANCH", branch)
47 | monkeypatch.setenv("AUTOMETRICS_TRACKER", tracker)
48 | init()
49 |
50 | prom_tracker = get_tracker()
51 | assert isinstance(prom_tracker, PrometheusTracker)
52 |
53 | blob = generate_latest()
54 | assert blob is not None
55 | data = blob.decode("utf-8")
56 |
57 | prom_build_info = f"""build_info{{autometrics_version="1.0.0",branch="{branch}",commit="{commit}",repository_provider="github",repository_url="git@github.com:autometrics-dev/autometrics-py.git",service_name="autometrics",version="{version}"}} 1.0"""
58 | assert prom_build_info in data
59 |
60 | monkeypatch.delenv("AUTOMETRICS_VERSION", raising=False)
61 | monkeypatch.delenv("AUTOMETRICS_COMMIT", raising=False)
62 | monkeypatch.delenv("AUTOMETRICS_BRANCH", raising=False)
63 | monkeypatch.delenv("AUTOMETRICS_TRACKER", raising=False)
64 |
65 |
66 | def test_init_otel_tracker_set_build_info(monkeypatch):
67 | """
68 | Test that init_tracker (for an OTEL tracker) calls set_build_info using env vars.
69 |     Note that the OTel tracker's default PrometheusMetricReader exposes these metrics in Prometheus format.
70 | """
71 |
72 | commit = "a29a178"
73 | version = "0.0.1"
74 | branch = "main"
75 | tracker = "opentelemetry"
76 |
77 | monkeypatch.setenv("AUTOMETRICS_COMMIT", commit)
78 | monkeypatch.setenv("AUTOMETRICS_VERSION", version)
79 | monkeypatch.setenv("AUTOMETRICS_BRANCH", branch)
80 | monkeypatch.setenv("AUTOMETRICS_TRACKER", tracker)
81 | init()
82 |
83 | otel_tracker = get_tracker()
84 | assert isinstance(otel_tracker, OpenTelemetryTracker)
85 |
86 | blob = generate_latest()
87 | assert blob is not None
88 | data = blob.decode("utf-8")
89 |
90 | otel_build_info = f"""build_info{{autometrics_version="1.0.0",branch="{branch}",commit="{commit}",repository_provider="github",repository_url="git@github.com:autometrics-dev/autometrics-py.git",service_name="autometrics",version="{version}"}} 1.0"""
91 | assert otel_build_info in data
92 |
93 | monkeypatch.delenv("AUTOMETRICS_VERSION", raising=False)
94 | monkeypatch.delenv("AUTOMETRICS_COMMIT", raising=False)
95 | monkeypatch.delenv("AUTOMETRICS_BRANCH", raising=False)
96 | monkeypatch.delenv("AUTOMETRICS_TRACKER", raising=False)
97 |
--------------------------------------------------------------------------------
/src/autometrics/tracker/tracker.py:
--------------------------------------------------------------------------------
1 | from typing import Optional, cast
2 | from opentelemetry.sdk.metrics.export import MetricReader
3 |
4 | from .types import TrackerType, TrackMetrics
5 | from .temporary import TemporaryTracker
6 | from ..exposition import create_exporter
7 | from ..settings import AutometricsSettings
8 |
9 |
10 | _tracker: TrackMetrics = TemporaryTracker()
11 |
12 |
13 | def get_tracker() -> TrackMetrics:
14 | """Get the tracker type."""
15 | global _tracker
16 | return _tracker
17 |
18 |
19 | def set_tracker(new_tracker: TrackMetrics):
20 | """Set the tracker type."""
21 | global _tracker
22 | _tracker = new_tracker
23 |
24 |
25 | def init_tracker(
26 | tracker_type: TrackerType, settings: AutometricsSettings
27 | ) -> TrackMetrics:
28 | """Create a tracker"""
29 |
30 | tracker_instance: TrackMetrics
31 | if tracker_type == TrackerType.OPENTELEMETRY:
32 | # pylint: disable=import-outside-toplevel
33 | from .opentelemetry import OpenTelemetryTracker
34 |
35 | exporter: Optional[MetricReader] = None
36 | if settings["exporter"]:
37 | exporter = create_exporter(settings["exporter"])
38 | tracker_instance = OpenTelemetryTracker(exporter)
39 | elif tracker_type == TrackerType.PROMETHEUS:
40 | # pylint: disable=import-outside-toplevel
41 | from .prometheus import PrometheusTracker
42 |
43 | if settings["exporter"]:
44 | exporter = create_exporter(settings["exporter"])
45 | tracker_instance = PrometheusTracker()
46 | # NOTE - Only set the build info when the tracker is initialized
47 | tracker_instance.set_build_info(
48 | commit=settings["commit"],
49 | version=settings["version"],
50 | branch=settings["branch"],
51 | )
52 |
53 | set_tracker(tracker_instance)
54 | return tracker_instance
55 |
--------------------------------------------------------------------------------
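
Sketch: init() normally selects and installs the tracker via init_tracker, but the same swap can be done manually through the module-level accessor pair:

    from autometrics.tracker.tracker import get_tracker, set_tracker
    from autometrics.tracker.prometheus import PrometheusTracker

    set_tracker(PrometheusTracker())
    assert isinstance(get_tracker(), PrometheusTracker)
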
/src/autometrics/tracker/types.py:
--------------------------------------------------------------------------------
1 | from enum import Enum
2 | from typing import Union, Optional, Protocol, List, Literal, Tuple
3 |
4 | from ..objectives import Objective
5 |
6 |
7 | class Result(Enum):
8 | """Result of the function call."""
9 |
10 | OK = "ok"
11 | ERROR = "error"
12 |
13 |
14 | class TrackMetrics(Protocol):
15 | """Protocol for tracking metrics."""
16 |
17 | def set_build_info(self, commit: str, version: str, branch: str):
18 | """Observe the build info. Should only be called once per tracker instance"""
19 |
20 | def start(
21 | self, function: str, module: str, track_concurrency: Optional[bool] = False
22 | ):
23 | """Start tracking metrics for a function call."""
24 |
25 | def finish(
26 | self,
27 | duration: float,
28 | function: str,
29 | module: str,
30 | caller_module: str,
31 | caller_function: str,
32 | result: Result = Result.OK,
33 | objective: Optional[Objective] = None,
34 | track_concurrency: Optional[bool] = False,
35 | ):
36 | """Finish tracking metrics for a function call."""
37 |
38 | def initialize_counters(
39 | self,
40 | function: str,
41 | module: str,
42 | objective: Optional[Objective] = None,
43 | ):
44 | """Initialize (counter) metrics for a function at zero."""
45 |
46 |
47 | class TrackerType(Enum):
48 | """Type of tracker."""
49 |
50 | OPENTELEMETRY = "opentelemetry"
51 | PROMETHEUS = "prometheus"
52 |
53 |
54 | TrackerMessage = Union[
55 | Tuple[Literal["start"], str, str, Optional[bool]],
56 | Tuple[
57 | Literal["finish"],
58 | float,
59 | str,
60 | str,
61 | str,
62 | str,
63 | Result,
64 | Optional[Objective],
65 | Optional[bool],
66 | ],
67 | Tuple[Literal["initialize_counters"], str, str, Optional[Objective]],
68 | ]
69 | MessageQueue = List[TrackerMessage]
70 |
--------------------------------------------------------------------------------
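
Because TrackMetrics is a Protocol, any structurally compatible class satisfies it without inheriting from anything; a sketch of a no-op tracker that type-checks, useful when metrics should be discarded:

    from typing import Optional

    from autometrics.objectives import Objective
    from autometrics.tracker.types import Result, TrackMetrics

    class NullTracker:
        def set_build_info(self, commit: str, version: str, branch: str):
            pass

        def start(self, function: str, module: str,
                  track_concurrency: Optional[bool] = False):
            pass

        def finish(self, duration: float, function: str, module: str,
                   caller_module: str, caller_function: str,
                   result: Result = Result.OK,
                   objective: Optional[Objective] = None,
                   track_concurrency: Optional[bool] = False):
            pass

        def initialize_counters(self, function: str, module: str,
                                objective: Optional[Objective] = None):
            pass

    tracker: TrackMetrics = NullTracker()  # structural typing, no inheritance needed
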
/src/autometrics/utils.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import os
3 |
4 | from collections.abc import Callable
5 | from typing import Optional
6 | from urllib.parse import urlparse
7 | from prometheus_client import start_wsgi_server, REGISTRY, CollectorRegistry
8 |
9 | from .prometheus_url import Generator
10 |
11 |
12 | def get_module_name(func: Callable) -> str:
13 | """Get the name of the module that contains the function."""
14 | module = inspect.getmodule(func)
15 | if module is None or module.__name__ == "__main__":
16 | return get_filename_as_module(func)
17 | return module.__name__
18 |
19 |
20 | def get_filename_as_module(func: Callable) -> str:
21 | """Get the filename of the module that contains the function."""
22 | fullpath = inspect.getsourcefile(func)
23 | if fullpath is None:
24 | return ""
25 |
26 | filename = os.path.basename(fullpath)
27 | module_part = os.path.splitext(filename)[0]
28 | return module_part
29 |
30 |
31 | def get_function_name(func: Callable) -> str:
32 | """Get the name of the function."""
33 | return func.__qualname__ or func.__name__
34 |
35 |
36 | def write_docs(func_name: str, module_name: str):
37 | """Write the prometheus query urls to the function docstring."""
38 | generator = Generator(func_name, module_name)
39 | docs = f"Prometheus Query URLs for Function - {func_name} and Module - {module_name}: \n\n"
40 |
41 | urls = generator.create_urls()
42 | for key, value in urls.items():
43 | docs = f"{docs}{key} : {value} \n\n"
44 |
45 | docs = f"{docs}-------------------------------------------\n"
46 | return docs
47 |
48 |
49 | def append_docs_to_docstring(func, func_name, module_name):
50 | """Helper for appending docs to a function's docstring."""
51 | if func.__doc__ is None:
52 | return write_docs(func_name, module_name)
53 | else:
54 | return f"{func.__doc__}\n{write_docs(func_name, module_name)}"
55 |
56 |
57 | def start_http_server(
58 | port: int = 9464, addr: str = "0.0.0.0", registry: CollectorRegistry = REGISTRY
59 | ):
60 | """Starts a WSGI server for prometheus metrics as a daemon thread."""
61 | start_wsgi_server(port, addr, registry)
62 |
63 |
64 | def read_repository_url_from_fs() -> Optional[str]:
65 | """Read the repository url from git config."""
66 | try:
67 | with open(".git/config", "r") as f:
68 | git_config = f.read()
69 | return get_repository_url(git_config)
70 |     except Exception:
71 | return None
72 |
73 |
74 | def get_repository_url(git_config: str) -> Optional[str]:
75 | """Get the repository url from git config."""
76 | lines = git_config.split("\n")
77 | is_in_remote_origin = False
78 | for line in lines:
79 | stripped_line = line.strip()
80 | # skip empty lines and comments
81 | if (
82 |             stripped_line == ""
83 | or stripped_line.startswith("#")
84 | or stripped_line.startswith(";")
85 | ):
86 | continue
87 |
88 | if not is_in_remote_origin:
89 | lower_line = stripped_line.lower()
90 | # we are looking for the remote origin section
91 | # and skip everything else
92 | if lower_line in ['[remote "origin"]', "[remote.origin]"]:
93 | is_in_remote_origin = True
94 | continue
95 |
96 | # once inside the remote origin section, we are looking for key/value pairs
97 | # they are required to have '=' in them
98 | equal_index = stripped_line.find("=")
99 | if equal_index == -1:
100 | if stripped_line.startswith("["):
101 | # we have reached the end of the remote origin section
102 | is_in_remote_origin = False
103 | continue
104 |
105 | key = stripped_line[:equal_index].strip()
106 | # we are looking for the url key
107 | if key != "url":
108 | continue
109 |
110 | value = stripped_line[equal_index + 1 :].strip()
111 | # remove quotes and escape sequences
112 | if value.startswith('"') and value.endswith('"'):
113 | value = value[1:-1].replace("\\\\", "\\").replace('\\"', '"')
114 |
115 | return value
116 |
117 | return None
118 |
119 |
120 | def extract_repository_provider(url: str) -> Optional[str]:
121 |     # We assume remote urls come in two shapes:
122 |     # https and ssh. urlparse can handle the first shape directly,
123 |     # so we try that first.
124 | parsed_url = urlparse(url)
125 | if parsed_url.scheme == "https" and parsed_url.hostname:
126 | return get_provider_by_hostname(parsed_url.hostname)
127 |     # If that fails, we try the second shape, but we need to
128 |     # prepend a scheme to the url for urlparse to work.
129 | parsed_url = urlparse(f"ssh://{url}")
130 | if parsed_url.hostname:
131 | return get_provider_by_hostname(parsed_url.hostname)
132 | return None
133 |
134 |
135 | def get_provider_by_hostname(hostname: str) -> str:
136 | if hostname in ["github.com", "gitlab.com", "bitbucket.org"]:
137 | return hostname.split(".")[0]
138 | return hostname
139 |
--------------------------------------------------------------------------------
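
A quick sketch of the two remote-URL shapes extract_repository_provider handles (https and scp-style ssh); the URLs are illustrative:

    from autometrics.utils import extract_repository_provider

    assert extract_repository_provider("https://github.com/org/repo.git") == "github"
    assert extract_repository_provider("git@gitlab.com:org/repo.git") == "gitlab"
    # Unknown hosts fall through to the bare hostname:
    assert extract_repository_provider("https://git.example.com/org/repo.git") == "git.example.com"
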
/src/py.typed:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/autometrics-dev/autometrics-py/3b0e10c6975937ced1c1a7056aa8e8cbdecb9b20/src/py.typed
--------------------------------------------------------------------------------