├── .gitignore
├── LICENSE
├── README.md
├── Vagrantfile
├── demo1
├── Analysis.ipynb
├── Dockerfile
├── Dockerfile-client
├── README.md
├── docker-compose.yml
├── make-requests.sh
└── src
│ ├── app.py
│ ├── helpers
│ └── middleware.py
│ ├── metrics.csv
│ └── requirements.txt
├── demo2
├── Analysis.ipynb
├── Dockerfile
├── Dockerfile-client
├── README.md
├── Untitled.ipynb
├── docker-compose.yml
├── make-requests.sh
└── src
│ ├── app.py
│ ├── helpers
│ └── middleware.py
│ ├── metrics.csv
│ └── requirements.txt
├── django_prometheus
├── Dockerfile
├── README.md
├── config
│ ├── grafana
│ │ ├── dashboards
│ │ │ └── demo.json
│ │ └── datasources
│ │ │ └── prometheus.yml
│ └── prometheus
│ │ └── prometheus.yml
├── docker-compose-infra.yml
├── docker-compose.yml
└── src
│ ├── README.md
│ ├── manage.py
│ ├── requirements.txt
│ ├── start.sh
│ ├── til
│ ├── __init__.py
│ ├── settings.py
│ ├── urls.py
│ └── wsgi.py
│ └── tilweb
│ ├── __init__.py
│ ├── admin.py
│ ├── apps.py
│ ├── forms.py
│ ├── migrations
│ ├── 0001_initial.py
│ └── __init__.py
│ ├── models.py
│ ├── templates
│ └── tilweb
│ │ ├── base.html
│ │ ├── create_post.html
│ │ ├── index.html
│ │ ├── login.html
│ │ ├── me.html
│ │ ├── signup.html
│ │ ├── tag_view.html
│ │ └── view_post.html
│ ├── tests.py
│ ├── urls.py
│ └── views.py
├── django_statsd_prometheus
├── Dockerfile
├── README.md
├── config
│ ├── grafana
│ │ ├── dashboards
│ │ │ └── demo.json
│ │ └── datasources
│ │ │ └── prometheus.yml
│ ├── prometheus
│ │ └── prometheus.yml
│ └── statsd
│ │ └── mapping.yml
├── docker-compose-infra.yml
├── docker-compose.yml
└── src
│ ├── README.md
│ ├── db.sqlite3
│ ├── manage.py
│ ├── requirements.txt
│ ├── start.sh
│ ├── til
│ ├── __init__.py
│ ├── metrics_middleware.py
│ ├── settings.py
│ ├── urls.py
│ └── wsgi.py
│ └── tilweb
│ ├── __init__.py
│ ├── admin.py
│ ├── apps.py
│ ├── forms.py
│ ├── migrations
│ ├── 0001_initial.py
│ └── __init__.py
│ ├── models.py
│ ├── templates
│ └── tilweb
│ │ ├── base.html
│ │ ├── create_post.html
│ │ ├── index.html
│ │ ├── login.html
│ │ ├── me.html
│ │ ├── signup.html
│ │ ├── tag_view.html
│ │ └── view_post.html
│ ├── tests.py
│ ├── urls.py
│ └── views.py
├── flask_prometheus
├── Dockerfile
├── Dockerfile-client
├── README.md
├── config
│ └── prometheus
│ │ └── prometheus.yml
├── docker-compose-infra.yml
├── docker-compose.yml
├── make-requests.sh
└── src
│ ├── flask_app.py
│ ├── helpers
│ └── middleware.py
│ └── requirements.txt
├── flask_statsd
├── Dockerfile
├── Dockerfile-client
├── README.md
├── config
│ ├── graphite
│ │ └── storage-aggregation.conf
│ └── statsd
│ │ └── config_udp.js
├── docker-compose-infra.yml
├── docker-compose.yml
├── make-requests.sh
└── src
│ ├── app.py
│ ├── helpers
│ └── middleware.py
│ └── requirements.txt
├── flask_statsd_prometheus
├── Dockerfile
├── Dockerfile-client
├── README.md
├── config
│ └── prometheus
│ │ └── prometheus.yml
├── docker-compose-infra.yml
├── docker-compose.yml
├── make-requests.sh
└── src
│ ├── flask_app.py
│ ├── helpers
│ └── middleware.py
│ └── requirements.txt
├── miscellaneous-notes.md
├── open-telemetry
├── trace-metrics-otel-pipeline-multiple-process
│ ├── Dockerfile-client
│ ├── docker-compose.yml
│ ├── make-requests.sh
│ ├── mysql-init
│ │ ├── 01-create-table.sql
│ │ └── 02-insert-data.sql
│ ├── otel-collector-agent-config.yml
│ ├── otel-collector-config.yml
│ ├── prometheus.yml
│ ├── service1
│ │ ├── Dockerfile
│ │ ├── app.py
│ │ └── requirements.txt
│ └── service2
│ │ ├── Dockerfile
│ │ ├── app.py
│ │ └── requirements.txt
├── trace-metrics-otel-pipeline-single-process
│ ├── Dockerfile-client
│ ├── docker-compose.yml
│ ├── make-requests.sh
│ ├── mysql-init
│ │ ├── 01-create-table.sql
│ │ └── 02-insert-data.sql
│ ├── otel-collector-agent-config.yml
│ ├── otel-collector-config.yml
│ ├── prometheus.yml
│ ├── service1
│ │ ├── Dockerfile
│ │ ├── app.py
│ │ └── requirements.txt
│ └── service2
│ │ ├── Dockerfile
│ │ ├── app.py
│ │ └── requirements.txt
└── tracing-jaeger
│ ├── Dockerfile-client
│ ├── docker-compose.yml
│ ├── make-requests.sh
│ ├── mysql-init
│ ├── 01-create-table.sql
│ └── 02-insert-data.sql
│ ├── service1
│ ├── Dockerfile
│ ├── app.py
│ └── requirements.txt
│ └── service2
│ ├── Dockerfile
│ ├── app.py
│ └── requirements.txt
├── opensource-com-article
├── README.md
├── article.md
├── counter-graph.png
├── cumulative-histogram.png
├── gauge-graph.png
├── histogram-graph.png
├── histogram.png
└── pull_push_model.png
├── scripts
├── Figure_1.png
├── counter_demo.py
├── cumulative_histogram.py
├── gauge_demo.py
├── histogram.png
├── histogram.py
├── marks.txt
├── metrics_as_dataframes.py
├── percentile_score.py
├── pull_model.monopic
├── pull_model.png
├── pull_model_workers.monopic
├── pull_model_workers.png
├── pull_push_model.monopic
├── pull_push_model.png
├── push_model.monopic
├── statsd_prometheus copy.monopic
├── statsd_prometheus.monopic
└── statsd_prometheus.png
└── slides
├── Django-monitoring-with-prometheus.pdf
├── pycon-2018.pdf
└── sypy.pdf
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | env/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 |
49 | # Translations
50 | *.mo
51 | *.pot
52 |
53 | # Django stuff:
54 | *.log
55 | local_settings.py
56 |
57 | # Flask stuff:
58 | instance/
59 | .webassets-cache
60 |
61 | # Scrapy stuff:
62 | .scrapy
63 |
64 | # Sphinx documentation
65 | docs/_build/
66 |
67 | # PyBuilder
68 | target/
69 |
70 | # Jupyter Notebook
71 | .ipynb_checkpoints
72 |
73 | # pyenv
74 | .python-version
75 |
76 | # celery beat schedule file
77 | celerybeat-schedule
78 |
79 | # SageMath parsed files
80 | *.sage.py
81 |
82 | # dotenv
83 | .env
84 |
85 | # virtualenv
86 | .venv
87 | venv/
88 | ENV/
89 |
90 | # Spyder project settings
91 | .spyderproject
92 | .spyproject
93 |
94 | # Rope project settings
95 | .ropeproject
96 |
97 | # mkdocs documentation
98 | /site
99 |
100 | # mypy
101 | .mypy_cache/
102 |
103 | # vagrant
104 | .vagrant
105 |
106 | .DS_Store
107 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018 Amit Saha
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Table of contents
2 |
3 | This repo contains the materials that I have prepared and referred to while exploring monitoring in Python.
4 |
5 |
6 | * [Materials for my talks/articles on Python monitoring](./README.md#materials-for-my-talksarticles-on-python-monitoring)
7 | * [Videos](./README.md#videos)
8 | * [Slides](./README.md#slides)
9 | * [Articles](./README.md#articles)
10 | * [Tips and Tricks](./README.md#tips-and-tricks)
11 | * [Playing with the demos](./README.md#playing-with-the-demos)
12 | * [VM Setup on Windows 10 with Hyper-V](./README.md#vm-setup-on-windows-10-with-hyper-v)
13 | * [VM Setup on Windows/Linux/OS X - VirtualBox](./README.md#vm-setup-on-windowslinuxos-x---virtualbox)
14 | * [Learn more](./README.md#learn-more)
15 | * [General](./README.md#general)
16 | * [Statsd/Graphite](./README.md#statsdgraphite)
17 | * [Prometheus](./README.md#prometheus)
18 | * [Readings: Doing things right](./README.md#doing-things-right)
19 |
20 |
21 | # Materials for my talks/articles on Python monitoring
22 |
23 | ## Videos
24 |
25 | - [PyCon 2018](https://www.youtube.com/watch?reload=9&v=R4kMwckrUlg)
26 |
27 | ## Slides
28 |
29 | - [Sydney Django Meetup - August 2019](./slides/Django-monitoring-with-prometheus.pdf)
30 | - [PyCon 2018](./slides/pycon-2018.pdf)
31 | - [Sydney Python Meetup - March, 2018](./slides/sypy.pdf)
32 |
33 |
34 | ## Articles
35 |
36 | - [Understanding metrics and monitoring with Python](https://opensource.com/article/18/4/metrics-monitoring-and-python)
37 | - [Exploring Security, Metrics, and Error-handling with gRPC in Python](https://blog.codeship.com/exploring-security-metrics-and-error-handling-with-grpc-in-python/)
38 | - [Your options for monitoring multi-process Python applications with Prometheus](http://echorand.me/your-options-for-monitoring-multi-process-python-applications-with-prometheus.html)
39 | - [Monitoring Your Synchronous Python Web Applications Using Prometheus](https://blog.codeship.com/monitoring-your-synchronous-python-web-applications-using-prometheus/)
40 | - [Monitoring Your Asynchronous Python Web Applications Using Prometheus](https://blog.codeship.com/monitoring-your-asynchronous-python-web-applications-using-prometheus/)
41 |
42 | ## Tips and Tricks
43 |
44 | - [Miscellaneous notes](./miscellaneous-notes.md)
45 |
46 | ## Playing with the demos
47 |
48 | I recommend using a VM to play with the demos. This repo ships with a [Vagrantfile](./Vagrantfile)
49 | for installing Ubuntu 16.04. Please install [Vagrant](https://vagrantup.com) for your operating system and then:
50 |
51 | ### VM Setup on Windows 10 with Hyper-V
52 |
53 | You will need to open a PowerShell session as Administrator and do the following from a clone of
54 | the git repository:
55 |
56 | ```
57 | C:\> ~\work\github.com\amitsaha\python-monitoring-talk [master ≡]> vagrant up --provider=hyperv
58 | Bringing machine 'default' up with 'hyperv' provider...
59 | ==> default: Verifying Hyper-V is enabled...
60 | ==> default: Importing a Hyper-V instance
61 | default: Please choose a switch to attach to your Hyper-V instance.
62 | default: If none of these are appropriate, please open the Hyper-V manager
63 | default: to create a new virtual switch.
64 | default:
65 | default: 1) Default Switch
66 | default: 2) nat
67 | default: 3) minikube-virtualswitch
68 | default:
69 | default: What switch would you like to use? 1
70 | default: Cloning virtual hard drive...
71 | default: Creating and registering the VM...
72 | default: Setting VM Integration Services
73 | default: Successfully imported a VM with name: ubuntu-18.04-amd64_1
74 | ==> default: Starting the machine...
75 | ==> default: Waiting for the machine to report its IP address...
76 | default: Timeout: 120 seconds
77 | ```
78 |
79 | Then, we will `ssh` into the VM using:
80 |
81 | ```
82 | C:\> ~\work\github.com\amitsaha\python-monitoring-talk [master ≡]> vagrant ssh
83 | ```
84 |
85 | ### VM Setup on Windows/Linux/OS X - VirtualBox
86 |
87 | ```
88 | $ vagrant up
89 | ...
90 | $ vagrant ssh
91 | ```
92 |
93 |
94 | Now, that we are in the VM, the `/vagrant` directory has a copy of the entire repository from where you
95 | can play with the demos:
96 |
97 | ```
98 | $ cd /vagrant
99 | $ ls
100 | demo1 LICENSE prometheus scripts statsd Vagrantfile
101 | demo2 opensource-com-article README.md slides statsd_prometheus
102 | ```
103 |
104 | Demos:
105 |
106 | - [demo1](./demo1)
107 | - [demo2](./demo2)
108 | - [statsd](./statsd)
109 | - [prometheus](./prometheus)
110 | - [statsd_prometheus](./statsd_prometheus)
111 |
112 | Each demo directory above has a README explaining the instructions of playing with the demo. In general,
113 | to access a network port running in the virtual machine, use the following address in your browser:
114 |
115 | ```
116 | $ 127.0.0.1:port
117 | ```
118 |
119 | If it doesn't work, please file an issue with your OS + VM details.
120 |
121 |
122 | # Learn more
123 |
124 | The following resources are some of the ones that I found very useful:
125 |
126 | ## General
127 |
128 | - [Monitoring Distributed Systems](https://landing.google.com/sre/book/chapters/monitoring-distributed-systems.html)
129 | - [Monitoring best practices](http://www.integralist.co.uk/posts/monitoring-best-practices/?imm_mid=0fbebf&cmp=em-webops-na-na-newsltr_20180309)
130 | - [Who wants seconds?](https://www.robustperception.io/who-wants-seconds/)
131 | - [Monitoring: Not just for outages](https://www.robustperception.io/monitoring-not-just-for-outages)
132 | - [Avoid the wall of graphs](https://www.robustperception.io/avoid-the-wall-of-graphs)
133 | - [Latency primer](https://igor.io/latency/)
134 |
135 | ## Statsd/Graphite
136 |
137 | - [statsd metric types](https://github.com/etsy/statsd/blob/master/docs/metric_types.md)
138 |
139 | ## Prometheus
140 |
141 | - [Prometheus metric types](https://prometheus.io/docs/concepts/metric_types/)
142 | - [How does a prometheus gauge work?](https://www.robustperception.io/how-does-a-prometheus-gauge-work/)
143 | - [Why are prometheus histograms cumulative?](https://www.robustperception.io/why-are-prometheus-histograms-cumulative/)
144 | - [Monitoring batch jobs in Python](https://www.robustperception.io/monitoring-batch-jobs-in-python/)
145 | - [Prometheus monitoring at SoundCloud](https://developers.soundcloud.com/blog/prometheus-monitoring-at-soundcloud)
146 | - [Why are Prometheus histograms cumulative?](https://www.robustperception.io/why-are-prometheus-histograms-cumulative/)
147 | - [Capturing spikes](https://utcc.utoronto.ca/~cks/space/blog/sysadmin/PrometheusSubqueriesForSpikes)
148 | - [Prometheus for developers](https://github.com/danielfm/prometheus-for-developers)
149 | ## Doing things right
150 |
151 |
152 | - [How not to measure latency](https://www.youtube.com/watch?v=lJ8ydIuPFeU&feature=youtu.be)
153 | - [Histograms with Prometheus: A Tale of Woe](http://linuxczar.net/blog/2017/06/15/prometheus-histogram-2/)
154 | - [Why Averages Suck and Percentiles are Great](https://www.dynatrace.com/news/blog/why-averages-suck-and-percentiles-are-great/)
155 | - [Everything you know about latency is wrong](https://bravenewgeek.com/everything-you-know-about-latency-is-wrong/)
156 | - [Who moved my 99th percentile latency](https://engineering.linkedin.com/performance/who-moved-my-99th-percentile-latency)
157 | - [Logs and metrics and graphs](https://grafana.com/blog/2016/01/05/logs-and-metrics-and-graphs-oh-my/)
158 | - [HdrHistogram: A better latency capture method ](http://psy-lob-saw.blogspot.com.au/2015/02/hdrhistogram-better-latency-capture.html)
159 |
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
8 | Vagrant.configure("2") do |config|
9 | # The most common configuration options are documented and commented below.
10 | # For a complete reference, please see the online documentation at
11 | # https://docs.vagrantup.com.
12 |
13 | # Every Vagrant development environment requires a box. You can search for
14 | # boxes at https://vagrantcloud.com/search.
15 | config.vm.box = "bento/ubuntu-16.04"
16 |
17 | # Disable automatic box update checking. If you disable this, then
18 | # boxes will only be checked for updates when the user runs
19 | # `vagrant box outdated`. This is not recommended.
20 | # config.vm.box_check_update = false
21 |
22 | # Create a forwarded port mapping which allows access to a specific port
23 | # within the machine from a port on the host machine. In the example below,
24 | # accessing "localhost:8080" will access port 80 on the guest machine.
25 | # NOTE: This will enable public access to the opened port
26 | # config.vm.network "forwarded_port", guest: 80, host: 8080
27 |
28 | # Create a forwarded port mapping which allows access to a specific port
29 | # within the machine from a port on the host machine and only allow access
30 | # via 127.0.0.1 to disable public access
31 | config.vm.network "forwarded_port", guest: 8888, host: 8888, host_ip: "127.0.0.1"
32 |
33 | # Create a private network, which allows host-only access to the machine
34 | # using a specific IP.
35 | # config.vm.network "private_network", ip: "192.168.33.10"
36 |
37 | # Create a public network, which generally matches bridged networks.
38 | # Bridged networks make the machine appear as another physical device on
39 | # your network.
40 | # config.vm.network "public_network"
41 |
42 | # Share an additional folder to the guest VM. The first argument is
43 | # the path on the host to the actual folder. The second argument is
44 | # the path on the guest to mount the folder. And the optional third
45 | # argument is a set of non-required options.
46 | # config.vm.synced_folder "../data", "/vagrant_data"
47 |
48 | # Provider-specific configuration so you can fine-tune various
49 | # backing providers for Vagrant. These expose provider-specific options.
50 | # Example for VirtualBox:
51 | #
52 | # config.vm.provider "virtualbox" do |vb|
53 | # # Display the VirtualBox GUI when booting the machine
54 | # vb.gui = true
55 | #
56 | # # Customize the amount of memory on the VM:
57 | # vb.memory = "1024"
58 | # end
59 | #
60 | # View the documentation for the provider you are using for more
61 | # information on available options.
62 |
63 | # Enable provisioning with a shell script. Additional provisioners such as
64 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
65 | # documentation for more information about their specific syntax and use.
66 | config.vm.provision "shell", inline: <<-SHELL
67 | apt-get update
68 | apt-get install -y docker docker-compose
69 | SHELL
70 | end
71 |
--------------------------------------------------------------------------------
/demo1/Analysis.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": 1,
6 | "metadata": {},
7 | "outputs": [
8 | {
9 | "data": {
10 | "text/html": [
11 | "
\n",
12 | "\n",
25 | "
\n",
26 | " \n",
27 | " \n",
28 | " \n",
29 | " request_latency \n",
30 | " \n",
31 | " \n",
32 | " timestamp \n",
33 | " \n",
34 | " \n",
35 | " \n",
36 | " \n",
37 | " \n",
38 | " 1522219451 \n",
39 | " 0.111103 \n",
40 | " \n",
41 | " \n",
42 | " 1522219451 \n",
43 | " 0.056744 \n",
44 | " \n",
45 | " \n",
46 | " 1522219451 \n",
47 | " 0.052452 \n",
48 | " \n",
49 | " \n",
50 | " 1522219451 \n",
51 | " 0.051022 \n",
52 | " \n",
53 | " \n",
54 | " 1522219451 \n",
55 | " 0.053883 \n",
56 | " \n",
57 | " \n",
58 | " 1522219451 \n",
59 | " 0.057459 \n",
60 | " \n",
61 | " \n",
62 | " 1522219451 \n",
63 | " 0.052214 \n",
64 | " \n",
65 | " \n",
66 | " 1522219451 \n",
67 | " 0.050306 \n",
68 | " \n",
69 | " \n",
70 | " 1522219451 \n",
71 | " 0.053883 \n",
72 | " \n",
73 | " \n",
74 | " 1522219451 \n",
75 | " 0.058651 \n",
76 | " \n",
77 | " \n",
78 | " 1522219451 \n",
79 | " 0.058174 \n",
80 | " \n",
81 | " \n",
82 | " 1522219451 \n",
83 | " 0.051498 \n",
84 | " \n",
85 | " \n",
86 | " 1522219451 \n",
87 | " 0.051022 \n",
88 | " \n",
89 | " \n",
90 | " 1522219451 \n",
91 | " 0.051022 \n",
92 | " \n",
93 | " \n",
94 | " 1522219451 \n",
95 | " 0.051260 \n",
96 | " \n",
97 | " \n",
98 | " 1522219451 \n",
99 | " 0.050545 \n",
100 | " \n",
101 | " \n",
102 | " 1522219451 \n",
103 | " 0.051022 \n",
104 | " \n",
105 | " \n",
106 | " 1522219451 \n",
107 | " 0.050783 \n",
108 | " \n",
109 | " \n",
110 | " 1522219451 \n",
111 | " 0.050783 \n",
112 | " \n",
113 | " \n",
114 | " 1522219451 \n",
115 | " 0.044346 \n",
116 | " \n",
117 | " \n",
118 | " 1522219451 \n",
119 | " 0.043869 \n",
120 | " \n",
121 | " \n",
122 | " 1522219451 \n",
123 | " 0.044823 \n",
124 | " \n",
125 | " \n",
126 | " 1522219451 \n",
127 | " 0.139236 \n",
128 | " \n",
129 | " \n",
130 | " 1522219451 \n",
131 | " 0.044584 \n",
132 | " \n",
133 | " \n",
134 | " 1522219451 \n",
135 | " 0.043869 \n",
136 | " \n",
137 | " \n",
138 | " 1522219451 \n",
139 | " 0.043869 \n",
140 | " \n",
141 | " \n",
142 | " 1522219451 \n",
143 | " 0.043869 \n",
144 | " \n",
145 | " \n",
146 | " 1522219451 \n",
147 | " 0.039816 \n",
148 | " \n",
149 | " \n",
150 | " 1522219451 \n",
151 | " 0.039577 \n",
152 | " \n",
153 | " \n",
154 | " 1522219451 \n",
155 | " 0.039339 \n",
156 | " \n",
157 | " \n",
158 | "
\n",
159 | "
"
160 | ],
161 | "text/plain": [
162 | " request_latency\n",
163 | "timestamp \n",
164 | "1522219451 0.111103\n",
165 | "1522219451 0.056744\n",
166 | "1522219451 0.052452\n",
167 | "1522219451 0.051022\n",
168 | "1522219451 0.053883\n",
169 | "1522219451 0.057459\n",
170 | "1522219451 0.052214\n",
171 | "1522219451 0.050306\n",
172 | "1522219451 0.053883\n",
173 | "1522219451 0.058651\n",
174 | "1522219451 0.058174\n",
175 | "1522219451 0.051498\n",
176 | "1522219451 0.051022\n",
177 | "1522219451 0.051022\n",
178 | "1522219451 0.051260\n",
179 | "1522219451 0.050545\n",
180 | "1522219451 0.051022\n",
181 | "1522219451 0.050783\n",
182 | "1522219451 0.050783\n",
183 | "1522219451 0.044346\n",
184 | "1522219451 0.043869\n",
185 | "1522219451 0.044823\n",
186 | "1522219451 0.139236\n",
187 | "1522219451 0.044584\n",
188 | "1522219451 0.043869\n",
189 | "1522219451 0.043869\n",
190 | "1522219451 0.043869\n",
191 | "1522219451 0.039816\n",
192 | "1522219451 0.039577\n",
193 | "1522219451 0.039339"
194 | ]
195 | },
196 | "execution_count": 1,
197 | "metadata": {},
198 | "output_type": "execute_result"
199 | }
200 | ],
201 | "source": [
202 | "import pandas as pd\n",
203 | "import numpy as np\n",
204 | "metrics = pd.read_csv('./src/metrics.csv', index_col=0)\n",
205 | "metrics[:30]\n"
206 | ]
207 | },
208 | {
209 | "cell_type": "code",
210 | "execution_count": null,
211 | "metadata": {},
212 | "outputs": [],
213 | "source": []
214 | }
215 | ],
216 | "metadata": {
217 | "kernelspec": {
218 | "display_name": "Python 3",
219 | "language": "python",
220 | "name": "python3"
221 | },
222 | "language_info": {
223 | "codemirror_mode": {
224 | "name": "ipython",
225 | "version": 3
226 | },
227 | "file_extension": ".py",
228 | "mimetype": "text/x-python",
229 | "name": "python",
230 | "nbconvert_exporter": "python",
231 | "pygments_lexer": "ipython3",
232 | "version": "3.6.4"
233 | }
234 | },
235 | "nbformat": 4,
236 | "nbformat_minor": 2
237 | }
238 |
--------------------------------------------------------------------------------
/demo1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | ADD . /application
3 | WORKDIR /application
4 | RUN set -e; \
5 | apk add --no-cache --virtual .build-deps \
6 | gcc \
7 | libc-dev \
8 | linux-headers \
9 | apache2-utils \
10 | ; \
11 | pip install -r src/requirements.txt; \
12 | apk del .build-deps;
13 | EXPOSE 5000
14 | WORKDIR /application
15 | CMD ["python", "app.py"]
16 |
--------------------------------------------------------------------------------
/demo1/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | RUN apk add --update curl apache2-utils && rm -rf /var/cache/apk/*
3 | ADD ./make-requests.sh /make-requests.sh
4 | VOLUME /data
5 | CMD /make-requests.sh
6 |
--------------------------------------------------------------------------------
/demo1/README.md:
--------------------------------------------------------------------------------
1 | # Demo 1
2 |
3 | This demo has two objectives:
4 |
5 | - Demonstrate using middleware to calculate metrics
6 | - Demonstrate metric calculation, reporting and analysis
7 |
8 | ## Using middleware to calculate metrics
9 |
10 | Depending on your underlying application framework, the mechanism for executing some code *before* a request is processed
11 | and *after* a request is processed will differ. Usually, such code is executed as *middleware*. The Flask framework
12 | supplies two decorator functions for this purpose:
13 |
14 | - [before_request](http://flask.pocoo.org/docs/0.12/api/#flask.Flask.before_request) allows executing code before a request
15 | is processed
16 | - [after_request](http://flask.pocoo.org/docs/0.12/api/#flask.Flask.after_request) allows executing code after a request is
17 | processed (but before a response is returned)
18 |
19 | The [src](./src) sub-directory contains the application code. The main application is defined in [app.py](./src/app.py) with
20 | the middleware functions defined in [helpers/middleware.py](./src/helpers/middleware.py).
21 |
22 | ### Key snippets from `app.py`
23 |
24 | The `setup_metrics()` function defined in the `middleware.py` file is called with the `app` instance we created for our
25 | application to register the appropriate functions to be called:
26 |
27 | ```
28 | from helpers.middleware import setup_metrics
29 | ..
30 | app = Flask(__name__)
31 | setup_metrics(app)
32 | ..
33 | ```
34 |
35 | As far as our metrics reporting is concerned, that's the only change we make to our application.
36 |
37 | ### Key snippets from `middleware.py`
38 |
39 | The `setup_metrics()` function which is called by our application above is defined in this module
40 | which has the following relevant code:
41 |
42 | ```
43 | from flask import request
44 | import time
45 |
46 |
47 | def start_timer():
48 | request.start_time = time.time()
49 |
50 |
51 | def stop_timer(response):
52 | # convert this into milliseconds for statsd
53 | resp_time = (time.time() - request.start_time)*1000
54 | ...
55 | return response
56 |
57 |
58 | def setup_metrics(app):
59 | app.before_request(start_timer)
60 | app.after_request(stop_timer)
61 | ```
62 |
63 | Using the `before_request()` function, the `start_timer()` function is registered to run before each request is processed, and using the `after_request()` function, `stop_timer()` is registered to run after each request has been processed.
64 |
65 |
66 | ## Run demo
67 |
68 | - Install `docker` and `docker-compose`
69 | - `$ sudo docker-compose up`
70 |
71 | ## Play with the data
72 |
73 | `docker-compose` run will print a URL which you can copy-paste into the browser on
74 | your host.
75 |
76 | Then, open the `Analysis` Jupyter Notebook by navigating to the `demo1` directory.
77 |
--------------------------------------------------------------------------------
/demo1/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | services:
3 | webapp:
4 | build: ./
5 | image: amitsaha/flask_app:demo1
6 | container_name: webapp
7 | expose:
8 | - 5000
9 | volumes:
10 | - ./src:/application
11 | client:
12 | depends_on:
13 | - webapp
14 | build:
15 | context: ./
16 | dockerfile: Dockerfile-client
17 | image: amitsaha/flask_app:client
18 | container_name: client
19 | analyzer:
20 | depends_on:
21 | - client
22 | expose:
23 | - 8888
24 | ports:
25 | - 8888:8888
26 | image: jupyter/datascience-notebook
27 | container_name: analyser
28 | volumes:
29 | - ./src:/src
30 |
--------------------------------------------------------------------------------
/demo1/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ab -n 100 -c 3 http://webapp:5000/test/  # 100 ApacheBench requests at concurrency 3 against the webapp service
3 | 
--------------------------------------------------------------------------------
/demo1/src/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from helpers.middleware import setup_metrics
3 | import csv
4 | 
5 | app = Flask(__name__)
6 | setup_metrics(app)  # install the before/after-request hooks that record per-request latency
7 | 
8 | 
9 | @app.route('/test/')
10 | def test():
11 |     return 'rest'  # always succeeds with HTTP 200
12 | 
13 | 
14 | @app.route('/test1/')
15 | def test1():
16 |     1/0  # deliberately raise ZeroDivisionError so the request ends in HTTP 500
17 |     return 'rest'  # unreachable
18 | 
19 | 
20 | @app.errorhandler(500)
21 | def handle_500(error):
22 |     return str(error), 500  # render the error text with a 500 status code
23 | 
24 | 
25 | if __name__ == '__main__':
26 |     with open('metrics.csv', 'w', newline='') as f:
27 |         csvwriter = csv.writer(f)
28 |         csvwriter.writerow(['timestamp', 'request_latency'])  # header row; middleware appends one data row per request
29 | 
30 |     app.run(host="0.0.0.0")  # listen on all interfaces so the containerised client can reach us
31 | 
--------------------------------------------------------------------------------
/demo1/src/helpers/middleware.py:
--------------------------------------------------------------------------------
1 | from flask import request
2 | import csv
3 | import time
4 | 
5 | 
6 | def start_timer():
7 |     request.start_time = time.time()  # stash the start time on the request proxy for stop_timer()
8 | 
9 | 
10 | def stop_timer(response):
11 |     # convert the elapsed wall-clock time into milliseconds
12 |     resp_time = (time.time() - request.start_time)*1000
13 |     with open('metrics.csv', 'a', newline='') as f:
14 |         csvwriter = csv.writer(f)
15 |         csvwriter.writerow([str(int(time.time())), str(resp_time)])  # one row per request: unix timestamp, latency (ms)
16 | 
17 |     return response  # after_request hooks must return the response
18 | 
19 | 
20 | def setup_metrics(app):
21 |     app.before_request(start_timer)
22 |     app.after_request(stop_timer)
23 | 
--------------------------------------------------------------------------------
/demo1/src/metrics.csv:
--------------------------------------------------------------------------------
1 | timestamp,request_latency
2 | 1522219451,0.11110305786132812
3 | 1522219451,0.056743621826171875
4 | 1522219451,0.05245208740234375
5 | 1522219451,0.051021575927734375
6 | 1522219451,0.053882598876953125
7 | 1522219451,0.05745887756347656
8 | 1522219451,0.05221366882324219
9 | 1522219451,0.05030632019042969
10 | 1522219451,0.053882598876953125
11 | 1522219451,0.058650970458984375
12 | 1522219451,0.05817413330078125
13 | 1522219451,0.0514984130859375
14 | 1522219451,0.051021575927734375
15 | 1522219451,0.051021575927734375
16 | 1522219451,0.05125999450683594
17 | 1522219451,0.05054473876953125
18 | 1522219451,0.051021575927734375
19 | 1522219451,0.05078315734863281
20 | 1522219451,0.05078315734863281
21 | 1522219451,0.044345855712890625
22 | 1522219451,0.0438690185546875
23 | 1522219451,0.04482269287109375
24 | 1522219451,0.1392364501953125
25 | 1522219451,0.04458427429199219
26 | 1522219451,0.0438690185546875
27 | 1522219451,0.0438690185546875
28 | 1522219451,0.0438690185546875
29 | 1522219451,0.03981590270996094
30 | 1522219451,0.039577484130859375
31 | 1522219451,0.03933906555175781
32 | 1522219451,0.03886222839355469
33 | 1522219451,0.03910064697265625
34 | 1522219451,0.038623809814453125
35 | 1522219451,0.03910064697265625
36 | 1522219451,0.039577484130859375
37 | 1522219451,0.039577484130859375
38 | 1522219451,0.038623809814453125
39 | 1522219451,0.03910064697265625
40 | 1522219451,0.038623809814453125
41 | 1522219451,0.03814697265625
42 | 1522219451,0.03886222839355469
43 | 1522219451,0.03933906555175781
44 | 1522219451,0.03838539123535156
45 | 1522219451,0.03886222839355469
46 | 1522219451,0.03814697265625
47 | 1522219451,0.03838539123535156
48 | 1522219451,0.03910064697265625
49 | 1522219451,0.03886222839355469
50 | 1522219451,0.03886222839355469
51 | 1522219451,0.038623809814453125
52 | 1522219451,0.041484832763671875
53 | 1522219451,0.04291534423828125
54 | 1522219451,0.043392181396484375
55 | 1522219451,0.04458427429199219
56 | 1522219451,0.04315376281738281
57 | 1522219451,0.043392181396484375
58 | 1522219451,0.055789947509765625
59 | 1522219451,0.04792213439941406
60 | 1522219451,0.0476837158203125
61 | 1522219451,0.044345855712890625
62 | 1522219451,0.045299530029296875
63 | 1522219451,0.04172325134277344
64 | 1522219451,0.044345855712890625
65 | 1522219451,0.04315376281738281
66 | 1522219451,0.05340576171875
67 | 1522219451,0.05793571472167969
68 | 1522219451,0.23293495178222656
69 | 1522219451,0.04673004150390625
70 | 1522219451,0.043392181396484375
71 | 1522219451,0.041961669921875
72 | 1522219451,0.0400543212890625
73 | 1522219451,0.03886222839355469
74 | 1522219451,0.03886222839355469
75 | 1522219451,0.038623809814453125
76 | 1522219451,0.03814697265625
77 | 1522219451,0.03933906555175781
78 | 1522219451,0.039577484130859375
79 | 1522219451,0.03933906555175781
80 | 1522219451,0.03886222839355469
81 | 1522219451,0.03838539123535156
82 | 1522219451,0.03981590270996094
83 | 1522219451,0.04029273986816406
84 | 1522219451,0.0400543212890625
85 | 1522219451,0.03910064697265625
86 | 1522219451,0.04029273986816406
87 | 1522219451,0.0400543212890625
88 | 1522219451,0.03933906555175781
89 | 1522219451,0.03933906555175781
90 | 1522219451,0.04029273986816406
91 | 1522219451,0.03886222839355469
92 | 1522219451,0.041484832763671875
93 | 1522219451,0.044345855712890625
94 | 1522219451,0.04792213439941406
95 | 1522219451,0.04315376281738281
96 | 1522219451,0.044345855712890625
97 | 1522219451,0.04315376281738281
98 | 1522219451,0.06270408630371094
99 | 1522219451,0.04887580871582031
100 | 1522219451,0.049114227294921875
101 | 1522219451,0.046253204345703125
102 |
--------------------------------------------------------------------------------
/demo1/src/requirements.txt:
--------------------------------------------------------------------------------
1 | flask==1.0
2 |
--------------------------------------------------------------------------------
/demo2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | ADD . /application
3 | WORKDIR /application
4 | RUN set -e; \
5 | apk add --no-cache --virtual .build-deps \
6 | gcc \
7 | libc-dev \
8 | linux-headers \
9 | apache2-utils \
10 | ; \
11 | pip install -r src/requirements.txt; \
12 | apk del .build-deps;
13 | EXPOSE 5000
14 | WORKDIR /application
15 | CMD ["python", "app.py"]
16 |
--------------------------------------------------------------------------------
/demo2/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | RUN apk add --update curl apache2-utils && rm -rf /var/cache/apk/*
3 | ADD ./make-requests.sh /make-requests.sh
4 | VOLUME /data
5 | CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/demo2/README.md:
--------------------------------------------------------------------------------
1 | # Demo 2
2 |
3 | This demo builds upon [Demo 1](../demo1). It demonstrates:
4 |
5 | - Adding *characteristics* to metrics
6 | - Analyzing the metrics using [pandas](https://pandas.pydata.org/pandas-docs/stable/index.html)
7 |
8 | ## Adding characteristics to metrics
9 |
10 | We update the [middleware.py](./src/helpers/middleware.py) to add characteristics to our data as follows:
11 |
12 |
13 | ```
14 | node_ids = ['10.0.1.1', '10.1.3.4']
15 |
16 |
17 | def start_timer():
18 | request.start_time = time.time()
19 |
20 |
21 | def stop_timer(response):
22 | # convert this into milliseconds for statsd
23 | resp_time = (time.time() - request.start_time)*1000
24 | node_id = node_ids[random.choice(range(len(node_ids)))]
25 | with open('metrics.csv', 'a', newline='') as f:
26 | csvwriter = csv.writer(f)
27 | csvwriter.writerow([
28 | str(int(time.time())), 'webapp1', node_id,
29 | request.endpoint, request.method, str(response.status_code),
30 | str(resp_time)
31 | ])
32 |
33 | return response
34 | ...
35 | ```
36 |
37 |
38 | ## Run demo
39 |
40 | - `$ sudo docker-compose up`
41 |
42 | ## Play with the data
43 |
44 | `docker-compose` run will print a URL which you can copy-paste into the browser on
45 | your host.
46 |
47 | Then, open the `Analysis` Jupyter Notebook by navigating to the `demo2` directory.
48 |
--------------------------------------------------------------------------------
/demo2/Untitled.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {},
7 | "outputs": [],
8 | "source": [
9 | "import pandas as pd\n",
10 | "metrics = pd.read_csv('./src/metrics.csv', index_col=0)"
11 | ]
12 | }
13 | ],
14 | "metadata": {
15 | "kernelspec": {
16 | "display_name": "Python 3",
17 | "language": "python",
18 | "name": "python3"
19 | },
20 | "language_info": {
21 | "codemirror_mode": {
22 | "name": "ipython",
23 | "version": 3
24 | },
25 | "file_extension": ".py",
26 | "mimetype": "text/x-python",
27 | "name": "python",
28 | "nbconvert_exporter": "python",
29 | "pygments_lexer": "ipython3",
30 | "version": "3.6.4"
31 | }
32 | },
33 | "nbformat": 4,
34 | "nbformat_minor": 2
35 | }
36 |
--------------------------------------------------------------------------------
/demo2/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | services:
3 | webapp:
4 | build: ./
5 | image: amitsaha/flask_app:demo2
6 | container_name: webapp
7 | expose:
8 | - 5000
9 | volumes:
10 | - ./src:/application
11 | client:
12 | depends_on:
13 | - webapp
14 | build:
15 | context: ./
16 | dockerfile: Dockerfile-client
17 | image: amitsaha/flask_app:client
18 | container_name: client
19 | analyzer:
20 | depends_on:
21 | - client
22 | expose:
23 | - 8888
24 | ports:
25 | - 8888:8888
26 | image: jupyter/datascience-notebook
27 | container_name: analyser
28 | volumes:
29 | - ./src:/src/
30 |
--------------------------------------------------------------------------------
/demo2/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sleep 30  # give the webapp container time to start accepting connections
3 | ab -n 100 -c 3 http://webapp:5000/test/  # mostly-successful endpoint (~80% HTTP 200)
4 | ab -n 100 -c 3 http://webapp:5000/test1/  # always-failing endpoint (HTTP 500)
5 | 
--------------------------------------------------------------------------------
/demo2/src/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | from helpers.middleware import setup_metrics
3 | import csv
4 | import random
5 | 
6 | app = Flask(__name__)
7 | setup_metrics(app)  # install the before/after-request hooks that record per-request metrics
8 | 
9 | 
10 | @app.route('/test/')
11 | def test():
12 |     if random.random() < 0.8:  # ~80% of requests succeed
13 |         return 'rest'
14 |     else:
15 |         return 'Bad Request', 400  # ~20% simulate a client error
16 | 
17 | 
18 | @app.route('/test1/')
19 | def test1():
20 |     1/0  # deliberately raise ZeroDivisionError so the request ends in HTTP 500
21 |     return 'rest'  # unreachable
22 | 
23 | 
24 | @app.errorhandler(500)
25 | def handle_500(error):
26 |     return str(error), 500  # render the error text with a 500 status code
27 | 
28 | 
29 | if __name__ == '__main__':
30 |     with open('metrics.csv', 'w', newline='') as f:
31 |         csvwriter = csv.writer(f)
32 |         csvwriter.writerow(['timestamp', 'app_prefix', 'node_id',
33 |                             'http_endpoint', 'http_method', 'http_status',
34 |                             'latency'])  # header row; middleware appends one data row per request
35 | 
36 |     app.run(host="0.0.0.0")  # listen on all interfaces so the containerised client can reach us
37 | 
--------------------------------------------------------------------------------
/demo2/src/helpers/middleware.py:
--------------------------------------------------------------------------------
1 | from flask import request
2 | import csv
3 | import time
4 | import random
5 | 
6 | node_ids = ['10.0.1.1', '10.1.3.4']  # fake node IDs attached to each metric row as a characteristic
7 | 
8 | 
9 | def start_timer():
10 |     request.start_time = time.time()  # stash the start time on the request proxy for stop_timer()
11 | 
12 | 
13 | def stop_timer(response):
14 |     # convert the elapsed wall-clock time into milliseconds
15 |     resp_time = (time.time() - request.start_time)*1000
16 |     node_id = node_ids[random.choice(range(len(node_ids)))]  # pick a node at random (equivalent to random.choice(node_ids))
17 |     with open('metrics.csv', 'a', newline='') as f:
18 |         csvwriter = csv.writer(f)
19 |         csvwriter.writerow([
20 |             str(int(time.time())), 'webapp1', node_id,
21 |             request.endpoint, request.method, str(response.status_code),
22 |             str(resp_time)
23 |         ])
24 | 
25 |     return response  # after_request hooks must return the response
26 | 
27 | 
28 | def setup_metrics(app):
29 |     app.before_request(start_timer)
30 |     app.after_request(stop_timer)
31 | 
--------------------------------------------------------------------------------
/demo2/src/requirements.txt:
--------------------------------------------------------------------------------
1 | flask==1.0
2 |
--------------------------------------------------------------------------------
/django_prometheus/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.7-alpine
2 | ADD src /application
3 | WORKDIR /application
4 | RUN set -e; \
5 | apk add --no-cache --virtual .build-deps \
6 | gcc \
7 | libc-dev \
8 | linux-headers \
9 | ; \
10 | pip install -r requirements.txt; \
11 | apk del .build-deps;
12 | EXPOSE 8000
13 |
14 | RUN chmod +x /application/start.sh
15 | CMD ["/application/start.sh"]
16 |
--------------------------------------------------------------------------------
/django_prometheus/README.md:
--------------------------------------------------------------------------------
1 | # Example Django application
2 |
3 | See `src` for the application code and top level README for the description of this repo from a functionality
4 | point of view.
5 |
6 |
7 | ## Building Docker image
8 |
9 | The Python 3 based [Dockerfile](Dockerfile) uses an Alpine Linux base image
10 | and copies the source code to the image:
11 |
12 | ```
13 | FROM python:3.7-alpine
14 | ADD src /application
15 | WORKDIR /application
16 | RUN set -e; \
17 | apk add --no-cache --virtual .build-deps \
18 | gcc \
19 | libc-dev \
20 | linux-headers \
21 | ; \
22 | pip install -r requirements.txt; \
23 | apk del .build-deps;
24 | EXPOSE 8000
25 |
26 | RUN chmod +x /application/start.sh
27 | CMD ["/application/start.sh"]
28 |
29 | ```
30 |
31 | The `/start.sh` script runs the Django DB migrations and then uses `gunicorn` to run our
32 | application using 5 worker processes.
33 |
34 | To build the image:
35 |
36 | ```
37 | $ docker build -t amitsaha/til:prometheus .
38 | ```
39 |
40 | ## Running the application
41 |
42 | We can just run the web application as follows:
43 |
44 | ```
45 | $ docker run -ti -p 8000:8000 amitsaha/til:prometheus
46 | ```
47 |
48 | ## Bringing up the web application, along with prometheus
49 |
50 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp.example.com` service which is our web application
51 | using the image `amitsaha/til:prometheus` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml)
52 | file brings up `prometheus` service and also starts the `grafana` service which
53 | is available on port 3000. The config directory contains a `prometheus.yml` file
54 | which sets up the targets for prometheus to scrape. The scrape configuration
55 | looks as follows:
56 |
57 | ```
58 | # my global config
59 | global:
60 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
61 | evaluation_interval: 15s # By default, scrape targets every 15 seconds.
62 | # scrape_timeout is set to the global default (10s).
63 |
64 | # Attach these labels to any time series or alerts when communicating with
65 | # external systems (federation, remote storage, Alertmanager).
66 | external_labels:
67 | monitor: 'my-project'
68 |
69 | # A scrape configuration containing exactly one endpoint to scrape:
70 | # Here it's Prometheus itself.
71 | scrape_configs:
72 | # The job name is added as a label `job=` to any timeseries scraped from this config.
73 | - job_name: 'prometheus'
74 |
75 | # Override the global default and scrape targets from this job every 5 seconds.
76 | scrape_interval: 5s
77 |
78 | # metrics_path defaults to '/metrics'
79 | # scheme defaults to 'http'.
80 |
81 | static_configs:
82 | - targets: ['localhost:9090']
83 | - job_name: 'webapp'
84 |
85 | # Override the global default and scrape targets from this job every 5 seconds.
86 | scrape_interval: 5s
87 |
88 | # metrics_path defaults to '/metrics'
89 | # scheme defaults to 'http'.
90 | static_configs:
91 | - targets: ['webapp.example.com:8000']
92 |
93 |
94 | ```
95 |
96 | Prometheus scrapes itself, which is the first target above. The second target
97 | is the webapp itself.
98 |
99 | Since these services are running via `docker-compose`, `webapp.example.com` automatically resolves to the IP of the django web application.
100 |
101 | To bring up all the services:
102 |
103 | ```
104 | $ docker-compose build
105 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up
106 | ```
107 |
108 | Then, create the following `/etc/hosts/` entry:
109 |
110 | ```
111 | 127.0.0.1 webapp.example.com
112 | ```
113 |
114 | Now, visit `http://webapp.example.com:8000` in your browser and you should see the web application.
115 |
116 | Go to `http://127.0.0.1:3000` to access the Grafana instance and login with `admin` as username and
117 | `foobar` as password.
--------------------------------------------------------------------------------
/django_prometheus/config/grafana/dashboards/demo.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": "-- Grafana --",
7 | "enable": true,
8 | "hide": true,
9 | "iconColor": "rgba(0, 211, 255, 1)",
10 | "name": "Annotations & Alerts",
11 | "type": "dashboard"
12 | }
13 | ]
14 | },
15 | "editable": true,
16 | "gnetId": null,
17 | "graphTooltip": 0,
18 | "id": 1,
19 | "links": [],
20 | "panels": [
21 | {
22 | "aliasColors": {},
23 | "bars": false,
24 | "dashLength": 10,
25 | "dashes": false,
26 | "datasource": "prometheus",
27 | "fill": 1,
28 | "gridPos": {
29 | "h": 11,
30 | "w": 13,
31 | "x": 0,
32 | "y": 0
33 | },
34 | "id": 2,
35 | "legend": {
36 | "avg": false,
37 | "current": false,
38 | "max": false,
39 | "min": false,
40 | "show": true,
41 | "total": false,
42 | "values": false
43 | },
44 | "lines": true,
45 | "linewidth": 1,
46 | "links": [],
47 | "nullPointMode": "null",
48 | "percentage": false,
49 | "pointradius": 2,
50 | "points": false,
51 | "renderer": "flot",
52 | "seriesOverrides": [],
53 | "spaceLength": 10,
54 | "stack": false,
55 | "steppedLine": false,
56 | "targets": [
57 | {
58 | "expr": "django_http_responses_total_by_status_total",
59 | "format": "time_series",
60 | "hide": false,
61 | "intervalFactor": 1,
62 | "refId": "A"
63 | }
64 | ],
65 | "thresholds": [],
66 | "timeFrom": null,
67 | "timeRegions": [],
68 | "timeShift": null,
69 | "title": "HTTP Status Count",
70 | "tooltip": {
71 | "shared": true,
72 | "sort": 0,
73 | "value_type": "individual"
74 | },
75 | "type": "graph",
76 | "xaxis": {
77 | "buckets": null,
78 | "mode": "time",
79 | "name": null,
80 | "show": true,
81 | "values": []
82 | },
83 | "yaxes": [
84 | {
85 | "format": "short",
86 | "label": null,
87 | "logBase": 1,
88 | "max": null,
89 | "min": null,
90 | "show": true
91 | },
92 | {
93 | "format": "short",
94 | "label": null,
95 | "logBase": 1,
96 | "max": null,
97 | "min": null,
98 | "show": true
99 | }
100 | ],
101 | "yaxis": {
102 | "align": false,
103 | "alignLevel": null
104 | }
105 | },
106 | {
107 | "aliasColors": {},
108 | "bars": false,
109 | "dashLength": 10,
110 | "dashes": false,
111 | "datasource": "prometheus",
112 | "fill": 1,
113 | "gridPos": {
114 | "h": 11,
115 | "w": 11,
116 | "x": 13,
117 | "y": 0
118 | },
119 | "id": 3,
120 | "legend": {
121 | "avg": false,
122 | "current": false,
123 | "max": false,
124 | "min": false,
125 | "show": true,
126 | "total": false,
127 | "values": false
128 | },
129 | "lines": true,
130 | "linewidth": 1,
131 | "links": [],
132 | "nullPointMode": "null",
133 | "percentage": false,
134 | "pointradius": 2,
135 | "points": false,
136 | "renderer": "flot",
137 | "seriesOverrides": [],
138 | "spaceLength": 10,
139 | "stack": false,
140 | "steppedLine": false,
141 | "targets": [
142 | {
143 | "expr": "django_http_requests_latency_seconds_by_view_method_count",
144 | "format": "time_series",
145 | "hide": false,
146 | "intervalFactor": 1,
147 | "refId": "A"
148 | }
149 | ],
150 | "thresholds": [],
151 | "timeFrom": null,
152 | "timeRegions": [],
153 | "timeShift": null,
154 | "title": "View method count",
155 | "tooltip": {
156 | "shared": true,
157 | "sort": 0,
158 | "value_type": "individual"
159 | },
160 | "type": "graph",
161 | "xaxis": {
162 | "buckets": null,
163 | "mode": "time",
164 | "name": null,
165 | "show": true,
166 | "values": []
167 | },
168 | "yaxes": [
169 | {
170 | "format": "short",
171 | "label": null,
172 | "logBase": 1,
173 | "max": null,
174 | "min": null,
175 | "show": true
176 | },
177 | {
178 | "format": "short",
179 | "label": null,
180 | "logBase": 1,
181 | "max": null,
182 | "min": null,
183 | "show": true
184 | }
185 | ],
186 | "yaxis": {
187 | "align": false,
188 | "alignLevel": null
189 | }
190 | }
191 | ],
192 | "schemaVersion": 18,
193 | "style": "dark",
194 | "tags": [],
195 | "templating": {
196 | "list": []
197 | },
198 | "time": {
199 | "from": "now-15m",
200 | "to": "now"
201 | },
202 | "timepicker": {
203 | "refresh_intervals": [
204 | "5s",
205 | "10s",
206 | "30s",
207 | "1m",
208 | "5m",
209 | "15m",
210 | "30m",
211 | "1h",
212 | "2h",
213 | "1d"
214 | ],
215 | "time_options": [
216 | "5m",
217 | "15m",
218 | "1h",
219 | "6h",
220 | "12h",
221 | "24h",
222 | "2d",
223 | "7d",
224 | "30d"
225 | ]
226 | },
227 | "timezone": "",
228 | "title": "Django Metrics",
229 | "uid": "FUSpFMdWk",
230 | "version": 5
231 | }
--------------------------------------------------------------------------------
/django_prometheus/config/grafana/datasources/prometheus.yml:
--------------------------------------------------------------------------------
1 | # config file version
2 | apiVersion: 1
3 |
4 |
5 | datasources:
6 | - name: prometheus
7 | type: prometheus
8 | access: proxy
9 | orgId: 1
10 | url: http://prometheus:9090
--------------------------------------------------------------------------------
/django_prometheus/config/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | # my global config
2 | global:
3 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds.
5 | # scrape_timeout is set to the global default (10s).
6 |
7 | # Attach these labels to any time series or alerts when communicating with
8 | # external systems (federation, remote storage, Alertmanager).
9 | external_labels:
10 | monitor: 'my-project'
11 |
12 | # A scrape configuration containing exactly one endpoint to scrape:
13 | # Here it's Prometheus itself.
14 | scrape_configs:
15 | # The job name is added as a label `job=` to any timeseries scraped from this config.
16 | - job_name: 'prometheus'
17 |
18 | # Override the global default and scrape targets from this job every 5 seconds.
19 | scrape_interval: 5s
20 |
21 | # metrics_path defaults to '/metrics'
22 | # scheme defaults to 'http'.
23 |
24 | static_configs:
25 | - targets: ['localhost:9090']
26 | - job_name: 'webapp'
27 |
28 | # Override the global default and scrape targets from this job every 5 seconds.
29 | scrape_interval: 5s
30 |
31 | # metrics_path defaults to '/metrics'
32 | # scheme defaults to 'http'.
33 | static_configs:
34 | - targets: ['webapp.example.com:8000']
35 |
--------------------------------------------------------------------------------
/django_prometheus/docker-compose-infra.yml:
--------------------------------------------------------------------------------
1 | # Based off https://github.com/vegasbrianc/prometheus
2 | version: '2'
3 |
4 | volumes:
5 | prometheus_data: {}
6 | grafana_data: {}
7 |
8 |
9 | services:
10 | prometheus:
11 | image: prom/prometheus
12 | container_name: prometheus
13 | volumes:
14 | - "./config/prometheus/:/etc/prometheus/:Z"
15 | - prometheus_data:/prometheus
16 | command:
17 | - '--config.file=/etc/prometheus/prometheus.yml'
18 | expose:
19 | - 9090
20 | ports:
21 | - 9090:9090
22 | grafana:
23 | image: grafana/grafana
24 | depends_on:
25 | - prometheus
26 | ports:
27 | - 3000:3000
28 | volumes:
29 | - grafana_data:/var/lib/grafana
30 | - "./config/grafana/datasources/:/etc/grafana/provisioning/datasources:Z"
31 | environment:
32 | - GF_SECURITY_ADMIN_PASSWORD=foobar
33 | - GF_USERS_ALLOW_SIGN_UP=false
34 |
--------------------------------------------------------------------------------
/django_prometheus/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | webapp.example.com:
5 | image: amitsaha/til:prometheus
6 | build: .
7 | container_name: webapp
8 | expose:
9 | - 8000
10 | ports:
11 | - 8000:8000
12 |
--------------------------------------------------------------------------------
/django_prometheus/src/README.md:
--------------------------------------------------------------------------------
1 | ## Today I Learned - Demo application
2 |
3 |
4 | ## Model Setup
5 |
6 | ```
7 | $ pip install -r requirements.txt
8 | $ python manage.py migrate
9 | ```
10 |
11 | ## Start web application
12 |
13 | ```
14 | $ python manage.py runserver 0.0.0.0:8000
15 | ```
16 |
17 | Go to `127.0.0.1:8000`
18 |
19 |
--------------------------------------------------------------------------------
/django_prometheus/src/manage.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks (runserver,
migrate, ...) for the ``til`` project."""
import os
import sys

if __name__ == "__main__":
    # Point Django at this project's settings unless the caller already
    # set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "til.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
23 |
--------------------------------------------------------------------------------
/django_prometheus/src/requirements.txt:
--------------------------------------------------------------------------------
1 | Django
2 | django-bootstrap3
3 | django-prometheus
4 | gunicorn
5 |
--------------------------------------------------------------------------------
/django_prometheus/src/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | python3 manage.py migrate
3 | gunicorn --workers 5 --bind 0.0.0.0:8000 til.wsgi
4 |
--------------------------------------------------------------------------------
/django_prometheus/src/til/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_prometheus/src/til/__init__.py
--------------------------------------------------------------------------------
/django_prometheus/src/til/settings.py:
--------------------------------------------------------------------------------
"""
Django settings for til project.

Generated by 'django-admin startproject' using Django 1.11.4.

For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""

import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; acceptable for a
# demo only — rotate it and load from the environment for any real deploy.
SECRET_KEY = 'y127*dm#z8u8@g+l&sf&3g5jx0ntfh0_6^h1-83tds%cmhbqxo'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Host name under which docker-compose exposes the app; the demo README
# adds a matching /etc/hosts entry on the host machine.
ALLOWED_HOSTS = ["webapp.example.com"]


# Application definition

INSTALLED_APPS = [
    'bootstrap3',
    # django-prometheus: exposes /metrics and model/db/middleware metrics.
    'django_prometheus',
    'tilweb.apps.TilwebConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    # The Prometheus middlewares must bracket the stack — "Before" first
    # and "After" last — so request latency covers all other middleware.
    'django_prometheus.middleware.PrometheusBeforeMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django_prometheus.middleware.PrometheusAfterMiddleware',
]

ROOT_URLCONF = 'til.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'til.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases

DATABASES = {
    'default': {
        # django-prometheus wrapper around the stock sqlite3 backend;
        # adds query/connection counters while delegating to sqlite3.
        'ENGINE': 'django_prometheus.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'Australia/Sydney'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/

STATIC_URL = '/static/'
126 |
--------------------------------------------------------------------------------
/django_prometheus/src/til/urls.py:
--------------------------------------------------------------------------------
"""til URL Configuration

The `urlpatterns` list routes URLs to views. For more information please see:
    https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
    1. Add an import:  from my_app import views
    2. Add a URL to urlpatterns:  url(r'^$', views.home, name='home')
Class-based views
    1. Add an import:  from other_app.views import Home
    2. Add a URL to urlpatterns:  url(r'^$', Home.as_view(), name='home')
Including another URLconf
    1. Import the include() function: from django.conf.urls import url, include
    2. Add a URL to urlpatterns:  url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    # The empty prefix is safe before /admin/: tilweb's own patterns are
    # anchored, and on no match Django falls through to the entries below.
    url(r'', include('tilweb.urls')),
    url(r'^admin/', admin.site.urls),
    # Exposes the /metrics endpoint scraped by Prometheus (django-prometheus).
    url('', include('django_prometheus.urls')),
]
24 |
--------------------------------------------------------------------------------
/django_prometheus/src/til/wsgi.py:
--------------------------------------------------------------------------------
"""
WSGI config for til project.

It exposes the WSGI callable as a module-level variable named ``application``.
``start.sh`` serves this module via gunicorn (``gunicorn ... til.wsgi``).

For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

# Default the settings module; an explicit environment value wins.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "til.settings")

application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_prometheus/src/tilweb/__init__.py
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/admin.py:
--------------------------------------------------------------------------------
from django.contrib import admin

# Register your models here.
# No models are currently exposed in the Django admin; register the
# tilweb models (Post, Tag, PostTag) with admin.site.register() as needed.
4 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/apps.py:
--------------------------------------------------------------------------------
from django.apps import AppConfig


class TilwebConfig(AppConfig):
    """App configuration for the TIL web app (referenced in INSTALLED_APPS)."""
    name = 'tilweb'
6 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/forms.py:
--------------------------------------------------------------------------------
from django import forms


class TilForm(forms.Form):
    """Form for creating a new TIL post (see tilweb.views.create_post)."""
    subject = forms.CharField(label='Title', max_length=160)
    content = forms.CharField(label='What did I learn today?',
                              widget=forms.Textarea, max_length=800)
    # four tags separated by a comma
    # NOTE(review): the 4-tag limit is only implied by max_length=43
    # (4 x 10-char tags plus separators); the tag *count* is not validated.
    tags = forms.CharField(label='Tags (comma separated, maximum: 4)',
                           required=False,
                           max_length=43)
    public = forms.BooleanField(label='Public', required=False)
13 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/migrations/0001_initial.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-01 02:15
#
# Auto-generated initial schema for the tilweb app (Post, Tag, PostTag).
# NOTE(review): do not edit an applied migration by hand; create a
# follow-up migration instead. The FKs here use CASCADE while the current
# models.py declares DO_NOTHING on PostTag — a later migration reflecting
# that change appears to be missing; verify with `makemigrations --check`.
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        # Depend on whichever user model the project swaps in for AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=160)),
                ('content', models.CharField(max_length=800)),
                ('public', models.BooleanField(default=False)),
                ('post_date', models.DateTimeField(auto_now_add=True)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='PostTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tilweb.Post')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('tag', models.CharField(max_length=10, unique=True)),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        # Added separately because Tag is created after PostTag above.
        migrations.AddField(
            model_name='posttag',
            name='tag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tilweb.Tag'),
        ),
    ]
51 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/migrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_prometheus/src/tilweb/migrations/__init__.py
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/models.py:
--------------------------------------------------------------------------------
from django.db import models
from django.contrib.auth.models import User


from django_prometheus.models import ExportModelOperationsMixin

class Post(ExportModelOperationsMixin('post'), models.Model):
    """A single TIL entry. The mixin exports per-model insert/update/delete
    Prometheus counters labelled 'post'."""
    subject = models.CharField(max_length=160)
    content = models.CharField(max_length=800)
    author = models.ForeignKey(User, on_delete=models.CASCADE)
    # Non-public posts are only visible to their author (see views.show_post).
    public = models.BooleanField(default=False)
    post_date = models.DateTimeField(auto_now_add=True)


class Tag(ExportModelOperationsMixin('tag'), models.Model):
    # We will use the implcit id
    tag = models.CharField(max_length=10, unique=True)
    creation_date = models.DateTimeField(auto_now_add=True)


class PostTag(ExportModelOperationsMixin('post_tag'), models.Model):
    """Join table linking posts to tags (many-to-many by hand)."""
    # NOTE(review): migration 0001 created these FKs with CASCADE but the
    # model now says DO_NOTHING, and no follow-up migration is visible —
    # run makemigrations to reconcile, or deletes may raise IntegrityError.
    post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
    tag = models.ForeignKey(Tag, on_delete=models.DO_NOTHING)
24 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/base.html:
--------------------------------------------------------------------------------
1 | {# Load the tag library #}
2 | {% load bootstrap3 %}
3 |
4 | {# Load CSS and JavaScript #}
5 | {% bootstrap_css %}
6 | {% bootstrap_javascript %}
7 |
8 |
9 |
16 |
17 |
18 | {% block content %}
19 | {% endblock %}
20 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/create_post.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 | {% load bootstrap3 %}
3 |
4 | {% block content %}
5 |
7 |
Create your post
8 |
TIL: Your personal learning journal
9 |
10 |
11 |
12 |
21 |
22 |
23 |
24 |
Your recent posts
25 | {% for post in posts %}
26 |
{{ post.subject }}
27 | {%endfor %}
28 | {% endblock %}
29 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/index.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
6 |
TIL: Your personal learning journal
7 |
8 |
9 |
10 |
11 |
Already a member?
12 |
13 |
Login
14 |
Join TIL today!
15 |
16 |
Signup
17 |
Recent community posts
18 | {% for post in posts %}
19 |
{{ post.subject }}
20 | {%endfor %}
21 |
22 |
23 | {% endblock %}
24 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/login.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 | {% block content %}
3 |
4 |
5 |
6 |
Login
7 |
8 |
9 | {% if form.errors %}
10 | {% for field in form %}
11 | {% for error in field.errors %}
12 |
13 | {{ error|escape }}
14 |
15 | {% endfor %}
16 | {% endfor %}
17 | {% endif %}
18 |
19 | {% if form.non_field_errors %}
20 | {% for error in form.non_field_errors %}
21 |
22 | {{ error|escape }}
23 |
24 | {% endfor %}
25 | {% endif %}
26 |
27 |
32 |
33 | {% endblock %}
34 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/me.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
6 |
Hello, {{user.username}}
7 |
TIL: Your personal learning journal
8 |
9 |
10 |
11 |
12 |
Create a new post
13 |
Your posts
14 | {% for post in posts %}
15 |
{{ post.subject }}
16 | {%endfor %}
17 |
18 |
Tags you have used
19 | {% for tag in tags %}
20 |
{{ tag.tag }}
21 | {%endfor %}
22 |
23 | {% endblock %}
24 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/signup.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
5 |
6 |
7 |
Sign up
8 |
9 |
10 | {% if form.errors %}
11 | {% for field in form %}
12 | {% for error in field.errors %}
13 |
14 | {{ error|escape }}
15 |
16 | {% endfor %}
17 | {% endfor %}
18 | {% for error in form.non_field_errors %}
19 |
20 | {{ error|escape }}
21 |
22 | {% endfor %}
23 | {% endif %}
24 |
25 |
30 |
31 | {% endblock %}
32 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/tag_view.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
6 |
Posts tagged {{ tag }}
7 |
TIL: Your personal learning journal
8 |
9 |
10 |
11 |
12 | {% for post in posts %}
13 |
{{ post.subject }}
14 | {%endfor %}
15 | {% endblock %}
16 |
17 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/templates/tilweb/view_post.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
5 |
{{post.subject|title}}
6 |
By {{post.author.username}}
7 | {% if tags %}
8 |
Tags:
9 | {% for tag in tags %}
10 | {{tag.tag.tag}}
11 |
12 | {% endfor %}
13 | {% endif %}
14 |
15 |
16 |
Published on {{ post.post_date }}
17 |
18 |
{{post.content}}
19 |
20 | {% endblock %}
21 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/tests.py:
--------------------------------------------------------------------------------
from django.test import TestCase

# Create your tests here.
# No tests yet; the views in tilweb/views.py (auth flow, post creation,
# tag filtering) are the natural first targets.
4 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/urls.py:
--------------------------------------------------------------------------------
from django.conf.urls import url


from . import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^login/$', views.login_view, name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
    url(r'^me/', views.me, name='me'),
    url(r'^post/$', views.create_post, name='create-post'),
    # Named groups are passed to the views as keyword arguments, matching
    # the view signatures show_post(request, post_id=...) and
    # tag_view(request, tag). The group names were lost/garbled in the
    # previous version of this file, which made the regexes invalid.
    url(r'^post/(?P<post_id>\d+)/$', views.show_post, name='show-post'),
    url(r'^tag/(?P<tag>\w+)/$', views.tag_view, name='tag-view'),
    url(r'^signup/$', views.signup, name='signup'),
]
16 |
--------------------------------------------------------------------------------
/django_prometheus/src/tilweb/views.py:
--------------------------------------------------------------------------------
1 | from django.shortcuts import render
2 | from django.http import (
3 | HttpResponseNotAllowed, HttpResponseRedirect,
4 | HttpResponseForbidden, HttpResponseNotFound,
5 | )
6 | from .models import Post, Tag, PostTag
7 | from .forms import TilForm
8 |
9 |
10 | from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
11 | from django.contrib.auth import login, authenticate, logout
12 |
13 |
def index(request):
    """Landing page: show the five newest public posts to anonymous
    visitors; authenticated users are redirected to their personal page."""
    if request.user.is_authenticated:
        return HttpResponseRedirect('/me/')
    recent_public = Post.objects.filter(public=True).order_by('-post_date')[:5]
    return render(request, 'tilweb/index.html', {'posts': recent_public})
23 |
24 |
def me(request):
    """Personal dashboard: all posts, newest first, plus the distinct set
    of Tag objects attached to them.

    Anonymous visitors are redirected to /login/.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    latest_posts = Post.objects.order_by('-post_date')
    # A queryset can be used directly in an __in lookup; the previous
    # list comprehension forced an unnecessary extra evaluation.
    post_tags = PostTag.objects.filter(post__in=latest_posts).distinct()
    return render(
        request, 'tilweb/me.html', {
            'user': request.user,
            'posts': latest_posts,
            # Deduplicate Tag objects shared by multiple posts.
            'tags': {post_tag.tag for post_tag in post_tags},
        })
39 |
40 |
def login_view(request):
    """Log a user in.

    GET renders the login form; POST validates credentials and redirects
    to /post/ on success. Already-authenticated users are redirected
    straight to /post/. Other methods get an explicit 405.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect('/post/')
    if request.method == 'GET':
        form = AuthenticationForm()
        return render(request, 'tilweb/login.html', {'form': form})
    if request.method == 'POST':
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username, password=password)
            if user is not None:
                login(request, user)
                return HttpResponseRedirect('/post/')
            # Bug fix: this path previously fell through and returned
            # None (HTTP 500). Re-render the form so the visitor can retry.
            return render(request, 'tilweb/login.html', {'form': form})
        # If there were errors, we render the form with these errors.
        return render(request, 'tilweb/login.html', {'form': form})
    # Bug fix: other HTTP methods also used to return None.
    return HttpResponseNotAllowed(['GET', 'POST'])
63 |
64 |
def logout_view(request):
    """Log the current user out (a no-op for anonymous visitors) and
    redirect to the landing page."""
    logout(request)
    return HttpResponseRedirect('/')
68 |
def signup(request):
    """Create a new account.

    GET renders the signup form; POST creates the user, logs them in and
    redirects to /post/. Authenticated users are redirected immediately;
    other HTTP methods get an explicit 405.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect('/post/')
    if request.method == 'GET':
        form = UserCreationForm()
        return render(request, 'tilweb/signup.html', {'form': form})
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # https://docs.djangoproject.com/en/1.11/topics/forms/modelforms/#the-save-method
            form.save()
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password1')
            # Re-authenticate with the just-saved credentials so the
            # session is established for the new user.
            user = authenticate(username=username, password=password)
            login(request, user)
            return HttpResponseRedirect('/post/')
        # If there were errors, we render the form with these errors.
        return render(request, 'tilweb/signup.html', {'form': form})
    # Consistency with create_post/login_view: reject other methods
    # explicitly instead of implicitly returning None (HTTP 500).
    return HttpResponseNotAllowed(['GET', 'POST'])
89 |
90 |
def create_post(request):
    """Create a TIL post for the logged-in user.

    GET renders the creation form plus the author's five most recent
    posts; POST validates the form, saves the post and its tags, then
    redirects to the post's detail page.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    if request.method == 'GET':
        form = TilForm()
        latest_posts = Post.objects.filter(
            author=request.user).order_by('-post_date')[:5]
        return render(
            request, 'tilweb/create_post.html', {
                'form': form, 'posts': latest_posts
            })
    if request.method == 'POST':
        form = TilForm(request.POST)
        if not form.is_valid():
            # Re-render the form so validation errors are shown.
            return render(
                request, 'tilweb/create_post.html', {
                    'form': form,
                })
        post = Post(
            subject=form.cleaned_data.get('subject'),
            content=form.cleaned_data.get('content'),
            author=request.user,
            public=form.cleaned_data.get('public'),
        )
        post.save()
        tags = form.cleaned_data.get('tags')
        if tags:
            for raw_tag in tags.split(','):
                # get_or_create replaces the manual filter-then-save
                # sequence and avoids the duplicate-tag race between the
                # existence check and the insert.
                tag_obj, _created = Tag.objects.get_or_create(
                    tag=raw_tag.strip())
                PostTag(post=post, tag=tag_obj).save()
        return HttpResponseRedirect('/post/{0}/'.format(post.id))
    # Bug fix: HttpResponseNotAllowed expects the iterable of permitted
    # methods (used to build the Allow header), not a message string.
    return HttpResponseNotAllowed(['GET', 'POST'])
133 |
134 |
def show_post(request, post_id=None):
    """Display a single post.

    Non-public posts are visible only to their author. A missing post now
    yields 404 — previously ``filter(id=post_id)[0]`` raised IndexError
    (HTTP 500) for unknown ids.
    """
    post = Post.objects.filter(id=post_id).first()
    if post is None:
        return HttpResponseNotFound('Post Not Found')
    # if the post is not public, only viewable by the author
    if not post.public and post.author != request.user:
        return HttpResponseForbidden()
    post_tags = PostTag.objects.filter(post=post)
    return render(request, 'tilweb/view_post.html', {
        'post': post,
        # The template checks `{% if tags %}`; pass None when empty.
        'tags': post_tags if len(post_tags) else None,
    })
146 |
147 |
def tag_view(request, tag):
    """List posts carrying ``tag`` that are either public or authored by
    the current user; 404 when the tag does not exist."""
    # .first() replaces the unidiomatic `if qs.all(): qs[0]` pattern,
    # which evaluated the queryset twice.
    tag_obj = Tag.objects.filter(tag=tag).first()
    if tag_obj is None:
        return HttpResponseNotFound(' Tag Not Found ')
    post_tags = PostTag.objects.filter(tag=tag_obj)
    # Query all the public posts or the posts by the currently logged in
    # user with the given tag.
    posts = Post.objects.filter(id__in=[
        pt.post.id for pt in post_tags
        if pt.post.public or pt.post.author == request.user
    ])
    return render(request, 'tilweb/tag_view.html', {
        'tag': tag_obj.tag,
        'posts': posts,
    })
166 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.7-alpine
ADD src /application
WORKDIR /application
# The toolchain (.build-deps) is needed only to compile C extensions from
# requirements.txt; it is removed afterwards to keep the image small.
RUN set -e; \
	apk add --no-cache --virtual .build-deps \
	gcc \
	libc-dev \
	linux-headers \
	; \
	pip install -r requirements.txt; \
	apk del .build-deps;
EXPOSE 8000

# start.sh runs migrations and then serves the app with gunicorn.
RUN chmod +x /application/start.sh
CMD ["/application/start.sh"]
16 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/README.md:
--------------------------------------------------------------------------------
1 | # Example Django application
2 |
3 | See `src` for the application code and top level README for the description of this repo from a functionality
4 | point of view. This demo shows how we can use the statsd exporter to monitor a Django application using
5 | prometheus.
6 |
7 |
8 | ## Building Docker image
9 |
10 | The Python 3 based [Dockerfile](Dockerfile) uses an Alpine Linux base image
11 | and copies the source code to the image:
12 |
13 | ```
14 | FROM python:3.7-alpine
15 | ADD src /application
16 | WORKDIR /application
17 | RUN set -e; \
18 | apk add --no-cache --virtual .build-deps \
19 | gcc \
20 | libc-dev \
21 | linux-headers \
22 | ; \
23 | pip install -r requirements.txt; \
24 | apk del .build-deps;
25 | EXPOSE 8000
26 |
27 | RUN chmod +x /application/start.sh
28 | CMD ["/application/start.sh"]
29 |
30 | ```
31 |
32 | The `/start.sh` script runs the Django DB migrations and then uses `gunicorn` to run our
33 | application using 5 worker processes.
34 |
35 | To build the image:
36 |
37 | ```
38 | $ docker build -t amitsaha/til:statsd-prometheus .
39 | ```
40 |
41 | ## Running the application
42 |
43 | We can just run the web application as follows:
44 |
45 | ```
46 | $ docker run -ti -p 8000:8000 amitsaha/til:statsd-prometheus
47 | ```
48 |
49 | ## Bringing up the web application, along with prometheus
50 |
51 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp.example.com` service which is our web application
52 | using the image `amitsaha/til:statsd-prometheus` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml)
53 | file brings up `prometheus` service, `statsd` service (running the statsd exporter) and also starts the `grafana` service which
54 | is available on port 3000. The config directory contains a `prometheus.yml` file
55 | which sets up the targets for prometheus to scrape. The scrape configuration
56 | looks as follows:
57 |
58 | ```
59 | # my global config
60 | global:
61 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
62 | evaluation_interval: 15s # By default, scrape targets every 15 seconds.
63 | # scrape_timeout is set to the global default (10s).
64 |
65 | # Attach these labels to any time series or alerts when communicating with
66 | # external systems (federation, remote storage, Alertmanager).
67 | external_labels:
68 | monitor: 'my-project'
69 |
70 | # A scrape configuration containing exactly one endpoint to scrape:
71 | # Here it's Prometheus itself.
72 | scrape_configs:
73 | - job_name: 'prometheus'
74 | scrape_interval: 5s
75 | static_configs:
76 | - targets: ['localhost:9090']
77 | - job_name: 'webapp'
78 | scrape_interval: 5s
79 | static_configs:
80 | - targets: ['statsd:9102']
81 |
82 |
83 | ```
84 |
85 | Prometheus scrapes itself, which is the first target above. The second target
86 | is the statsd exporter.
87 |
88 | Since these services are running via `docker-compose`, `webapp.example.com` automatically resolves to the IP of the django web application.
89 |
90 | To bring up all the services:
91 |
92 | ```
93 | $ docker-compose build
94 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up
95 | ```
96 |
97 | Then, create the following `/etc/hosts` entry:
98 |
99 | ```
100 | 127.0.0.1 webapp.example.com
101 | ```
102 |
103 | Now, in your browser visit `http://webapp.example.com:8000` and you should see the web application.
104 |
105 | Go to `http://127.0.0.1:3000` to access the Grafana instance and login with `admin` as username and
106 | `foobar` as password.
107 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/config/grafana/dashboards/demo.json:
--------------------------------------------------------------------------------
1 | {
2 | "annotations": {
3 | "list": [
4 | {
5 | "builtIn": 1,
6 | "datasource": "-- Grafana --",
7 | "enable": true,
8 | "hide": true,
9 | "iconColor": "rgba(0, 211, 255, 1)",
10 | "name": "Annotations & Alerts",
11 | "type": "dashboard"
12 | }
13 | ]
14 | },
15 | "editable": true,
16 | "gnetId": null,
17 | "graphTooltip": 0,
18 | "id": 1,
19 | "links": [],
20 | "panels": [
21 | {
22 | "aliasColors": {},
23 | "bars": false,
24 | "dashLength": 10,
25 | "dashes": false,
26 | "datasource": "prometheus",
27 | "fill": 1,
28 | "gridPos": {
29 | "h": 9,
30 | "w": 12,
31 | "x": 0,
32 | "y": 0
33 | },
34 | "id": 2,
35 | "legend": {
36 | "avg": false,
37 | "current": false,
38 | "max": false,
39 | "min": false,
40 | "show": true,
41 | "total": false,
42 | "values": false
43 | },
44 | "lines": true,
45 | "linewidth": 1,
46 | "links": [],
47 | "nullPointMode": "null",
48 | "percentage": false,
49 | "pointradius": 2,
50 | "points": false,
51 | "renderer": "flot",
52 | "seriesOverrides": [
53 | {
54 | "alias": "Request Latency*",
55 | "yaxis": 2
56 | },
57 | {}
58 | ],
59 | "spaceLength": 10,
60 | "stack": false,
61 | "steppedLine": false,
62 | "targets": [
63 | {
64 | "expr": "django_exceptions",
65 | "format": "time_series",
66 | "hide": false,
67 | "intervalFactor": 1,
68 | "refId": "A"
69 | },
70 | {
71 | "expr": "django_request_count",
72 | "format": "time_series",
73 | "hide": false,
74 | "intervalFactor": 1,
75 | "refId": "B"
76 | },
77 | {
78 | "expr": " rate(django_request_latency_ms{quantile=\"0.99\"}[5m])\n/\n rate(django_request_latency_ms{quantile=\"0.99\"}[5m])",
79 | "format": "time_series",
80 | "hide": false,
81 | "intervalFactor": 1,
82 | "legendFormat": "Request Latency-{{endpoint}}-{{status}}",
83 | "refId": "D"
84 | }
85 | ],
86 | "thresholds": [],
87 | "timeFrom": null,
88 | "timeRegions": [],
89 | "timeShift": null,
90 | "title": "Django Request Operations",
91 | "tooltip": {
92 | "shared": true,
93 | "sort": 0,
94 | "value_type": "individual"
95 | },
96 | "type": "graph",
97 | "xaxis": {
98 | "buckets": null,
99 | "mode": "time",
100 | "name": null,
101 | "show": true,
102 | "values": []
103 | },
104 | "yaxes": [
105 | {
106 | "format": "short",
107 | "label": null,
108 | "logBase": 1,
109 | "max": null,
110 | "min": null,
111 | "show": true
112 | },
113 | {
114 | "format": "ms",
115 | "label": null,
116 | "logBase": 1,
117 | "max": null,
118 | "min": null,
119 | "show": true
120 | }
121 | ],
122 | "yaxis": {
123 | "align": false,
124 | "alignLevel": null
125 | }
126 | },
127 | {
128 | "aliasColors": {},
129 | "bars": false,
130 | "dashLength": 10,
131 | "dashes": false,
132 | "datasource": "prometheus",
133 | "fill": 1,
134 | "gridPos": {
135 | "h": 9,
136 | "w": 12,
137 | "x": 12,
138 | "y": 0
139 | },
140 | "id": 4,
141 | "legend": {
142 | "avg": false,
143 | "current": false,
144 | "max": false,
145 | "min": false,
146 | "show": true,
147 | "total": false,
148 | "values": false
149 | },
150 | "lines": true,
151 | "linewidth": 1,
152 | "links": [],
153 | "nullPointMode": "null",
154 | "percentage": false,
155 | "pointradius": 2,
156 | "points": false,
157 | "renderer": "flot",
158 | "seriesOverrides": [
159 | {
160 | "alias": "Request Latency*",
161 | "yaxis": 2
162 | },
163 | {}
164 | ],
165 | "spaceLength": 10,
166 | "stack": false,
167 | "steppedLine": false,
168 | "targets": [
169 | {
170 | "expr": "django_exceptions",
171 | "format": "time_series",
172 | "hide": false,
173 | "intervalFactor": 1,
174 | "refId": "A"
175 | },
176 | {
177 | "expr": "django_request_count",
178 | "format": "time_series",
179 | "hide": false,
180 | "intervalFactor": 1,
181 | "refId": "B"
182 | },
183 | {
184 | "expr": " rate(django_request_latency_ms{quantile=\"0.99\"}[5m])\n/\n rate(django_request_latency_ms{quantile=\"0.99\"}[5m])",
185 | "format": "time_series",
186 | "hide": false,
187 | "intervalFactor": 1,
188 | "legendFormat": "Request Latency-{{endpoint}}-{{status}}",
189 | "refId": "D"
190 | }
191 | ],
192 | "thresholds": [],
193 | "timeFrom": null,
194 | "timeRegions": [],
195 | "timeShift": null,
196 | "title": "Django Request Operations",
197 | "tooltip": {
198 | "shared": true,
199 | "sort": 0,
200 | "value_type": "individual"
201 | },
202 | "type": "graph",
203 | "xaxis": {
204 | "buckets": null,
205 | "mode": "time",
206 | "name": null,
207 | "show": true,
208 | "values": []
209 | },
210 | "yaxes": [
211 | {
212 | "format": "short",
213 | "label": null,
214 | "logBase": 1,
215 | "max": null,
216 | "min": null,
217 | "show": true
218 | },
219 | {
220 | "format": "ms",
221 | "label": null,
222 | "logBase": 1,
223 | "max": null,
224 | "min": null,
225 | "show": true
226 | }
227 | ],
228 | "yaxis": {
229 | "align": false,
230 | "alignLevel": null
231 | }
232 | },
233 | {
234 | "aliasColors": {},
235 | "bars": false,
236 | "dashLength": 10,
237 | "dashes": false,
238 | "datasource": "prometheus",
239 | "fill": 1,
240 | "gridPos": {
241 | "h": 9,
242 | "w": 12,
243 | "x": 0,
244 | "y": 9
245 | },
246 | "id": 3,
247 | "legend": {
248 | "avg": false,
249 | "current": false,
250 | "max": false,
251 | "min": false,
252 | "show": true,
253 | "total": false,
254 | "values": false
255 | },
256 | "lines": true,
257 | "linewidth": 1,
258 | "links": [],
259 | "nullPointMode": "null",
260 | "percentage": false,
261 | "pointradius": 2,
262 | "points": false,
263 | "renderer": "flot",
264 | "seriesOverrides": [],
265 | "spaceLength": 10,
266 | "stack": false,
267 | "steppedLine": false,
268 | "targets": [
269 | {
270 | "expr": "django_user_login",
271 | "format": "time_series",
272 | "intervalFactor": 1,
273 | "legendFormat": "{{operation}}",
274 | "refId": "C"
275 | }
276 | ],
277 | "thresholds": [],
278 | "timeFrom": null,
279 | "timeRegions": [],
280 | "timeShift": null,
281 | "title": "User login metrics",
282 | "tooltip": {
283 | "shared": true,
284 | "sort": 0,
285 | "value_type": "individual"
286 | },
287 | "type": "graph",
288 | "xaxis": {
289 | "buckets": null,
290 | "mode": "time",
291 | "name": null,
292 | "show": true,
293 | "values": []
294 | },
295 | "yaxes": [
296 | {
297 | "format": "short",
298 | "label": null,
299 | "logBase": 1,
300 | "max": null,
301 | "min": null,
302 | "show": true
303 | },
304 | {
305 | "format": "short",
306 | "label": null,
307 | "logBase": 1,
308 | "max": null,
309 | "min": null,
310 | "show": true
311 | }
312 | ],
313 | "yaxis": {
314 | "align": false,
315 | "alignLevel": null
316 | }
317 | },
318 | {
319 | "aliasColors": {},
320 | "bars": false,
321 | "dashLength": 10,
322 | "dashes": false,
323 | "datasource": "prometheus",
324 | "fill": 1,
325 | "gridPos": {
326 | "h": 9,
327 | "w": 12,
328 | "x": 12,
329 | "y": 9
330 | },
331 | "id": 5,
332 | "legend": {
333 | "avg": false,
334 | "current": false,
335 | "max": false,
336 | "min": false,
337 | "show": true,
338 | "total": false,
339 | "values": false
340 | },
341 | "lines": true,
342 | "linewidth": 1,
343 | "links": [],
344 | "nullPointMode": "null",
345 | "percentage": false,
346 | "pointradius": 2,
347 | "points": false,
348 | "renderer": "flot",
349 | "seriesOverrides": [],
350 | "spaceLength": 10,
351 | "stack": false,
352 | "steppedLine": false,
353 | "targets": [
354 | {
355 | "expr": "django_cache_localmem",
356 | "format": "time_series",
357 | "intervalFactor": 1,
358 | "legendFormat": "{{operation}}",
359 | "refId": "C"
360 | }
361 | ],
362 | "thresholds": [],
363 | "timeFrom": null,
364 | "timeRegions": [],
365 | "timeShift": null,
366 | "title": "Cache metrics",
367 | "tooltip": {
368 | "shared": true,
369 | "sort": 0,
370 | "value_type": "individual"
371 | },
372 | "type": "graph",
373 | "xaxis": {
374 | "buckets": null,
375 | "mode": "time",
376 | "name": null,
377 | "show": true,
378 | "values": []
379 | },
380 | "yaxes": [
381 | {
382 | "format": "short",
383 | "label": null,
384 | "logBase": 1,
385 | "max": null,
386 | "min": null,
387 | "show": true
388 | },
389 | {
390 | "format": "short",
391 | "label": null,
392 | "logBase": 1,
393 | "max": null,
394 | "min": null,
395 | "show": true
396 | }
397 | ],
398 | "yaxis": {
399 | "align": false,
400 | "alignLevel": null
401 | }
402 | }
403 | ],
404 | "schemaVersion": 18,
405 | "style": "dark",
406 | "tags": [],
407 | "templating": {
408 | "list": []
409 | },
410 | "time": {
411 | "from": "now-15m",
412 | "to": "now"
413 | },
414 | "timepicker": {
415 | "refresh_intervals": [
416 | "5s",
417 | "10s",
418 | "30s",
419 | "1m",
420 | "5m",
421 | "15m",
422 | "30m",
423 | "1h",
424 | "2h",
425 | "1d"
426 | ],
427 | "time_options": [
428 | "5m",
429 | "15m",
430 | "1h",
431 | "6h",
432 | "12h",
433 | "24h",
434 | "2d",
435 | "7d",
436 | "30d"
437 | ]
438 | },
439 | "timezone": "",
440 | "title": "Django Metrics Demo",
441 | "uid": "BxmiR7dWz",
442 | "version": 3
443 | }
--------------------------------------------------------------------------------
/django_statsd_prometheus/config/grafana/datasources/prometheus.yml:
--------------------------------------------------------------------------------
1 | # config file version
2 | apiVersion: 1
3 |
4 |
5 | datasources:
6 | - name: prometheus
7 | type: prometheus
8 | access: proxy
9 | orgId: 1
10 | url: http://prometheus:9090
--------------------------------------------------------------------------------
/django_statsd_prometheus/config/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | # my global config
2 | global:
3 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # Evaluate rules every 15 seconds.
5 | # scrape_timeout is set to the global default (10s).
6 |
7 | # Attach these labels to any time series or alerts when communicating with
8 | # external systems (federation, remote storage, Alertmanager).
9 | external_labels:
10 | monitor: 'my-project'
11 |
12 | # A scrape configuration containing exactly one endpoint to scrape:
13 | # Here it's Prometheus itself.
14 | scrape_configs:
15 | - job_name: 'prometheus'
16 | scrape_interval: 5s
17 | static_configs:
18 | - targets: ['localhost:9090']
19 | - job_name: 'webapp'
20 | scrape_interval: 5s
21 | static_configs:
22 | - targets: ['statsd:9102']
23 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/config/statsd/mapping.yml:
--------------------------------------------------------------------------------
1 | mappings:
2 | - match: "timer.django_request_latency_ms"
3 | timer_type: summary
4 | name: "django_request_latency_ms"
--------------------------------------------------------------------------------
/django_statsd_prometheus/docker-compose-infra.yml:
--------------------------------------------------------------------------------
1 | # Based off https://github.com/vegasbrianc/prometheus
2 | version: '2'
3 |
4 | volumes:
5 | prometheus_data: {}
6 | grafana_data: {}
7 |
8 | services:
  statsd:
10 | image: prom/statsd-exporter:v0.12.2
11 | container_name: statsd
12 | command:
13 | - --log.level=debug
14 | expose:
15 | - 9125
16 | - 9102
17 | ports:
18 | - 9102:9102
19 | prometheus:
20 | image: prom/prometheus
21 | container_name: prometheus
22 | volumes:
23 | - "./config/prometheus/:/etc/prometheus/:Z"
24 | - prometheus_data:/prometheus
25 | command:
26 | - '--config.file=/etc/prometheus/prometheus.yml'
27 | - '--log.level=debug'
28 | expose:
29 | - 9090
30 | ports:
31 | - 9090:9090
32 | grafana:
33 | image: grafana/grafana
34 | depends_on:
35 | - prometheus
36 | ports:
37 | - 3000:3000
38 | volumes:
39 | - grafana_data:/var/lib/grafana
40 | - "./config/grafana/datasources/:/etc/grafana/provisioning/datasources:Z"
41 | environment:
42 | - GF_SECURITY_ADMIN_PASSWORD=foobar
43 | - GF_USERS_ALLOW_SIGN_UP=false
44 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | webapp.example.com:
5 | image: amitsaha/til:statsd-prometheus
6 | build: .
7 | container_name: webapp
8 | expose:
9 | - 8000
10 | ports:
11 | - 8000:8000
12 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/README.md:
--------------------------------------------------------------------------------
1 | ## Today I Learned - Demo application
2 |
3 |
4 | ## Model Setup
5 |
6 | ```
7 | $ pip install -r requirements.txt
8 | $ python manage.py migrate
9 | ```
10 |
11 | ## Start web application
12 |
13 | ```
14 | $ python manage.py runserver 0.0.0.0:8000
15 | ```
16 |
17 | Go to `127.0.0.1:8000`
18 |
19 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/db.sqlite3:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_statsd_prometheus/src/db.sqlite3
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/manage.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | """Django's command-line utility for administrative tasks."""
3 | import os
4 | import sys
5 |
6 |
7 | def main():
8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'til.settings')
9 | try:
10 | from django.core.management import execute_from_command_line
11 | except ImportError as exc:
12 | raise ImportError(
13 | "Couldn't import Django. Are you sure it's installed and "
14 | "available on your PYTHONPATH environment variable? Did you "
15 | "forget to activate a virtual environment?"
16 | ) from exc
17 | execute_from_command_line(sys.argv)
18 |
19 |
20 | if __name__ == '__main__':
21 | main()
22 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/requirements.txt:
--------------------------------------------------------------------------------
1 | Django
2 | django-bootstrap3
3 | datadog
4 | gunicorn
5 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/start.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | python3 manage.py migrate
3 | gunicorn --workers 5 --bind 0.0.0.0:8000 til.wsgi
4 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/til/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_statsd_prometheus/src/til/__init__.py
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/til/metrics_middleware.py:
--------------------------------------------------------------------------------
1 | import time
2 | from datadog import DogStatsd
3 | import time
4 | import sys
5 | from django.db.models.signals import post_save
6 | from django.db.backends.signals import connection_created
7 | from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
8 |
9 | from django.dispatch import receiver
10 | from django.core.cache.backends import locmem
11 |
12 |
13 | statsd = DogStatsd(host="statsd", use_ms=True, port=9125)
14 |
15 | REQUEST_LATENCY_METRIC_NAME = 'django_request_latency_ms'
16 | REQUEST_COUNT_METRIC_NAME = 'django_request_count'
17 | DJANGO_EXCEPTION_COUNTER = 'django_exceptions'
18 | DJANGO_MODELS_NEW_ROW_METRIC_NAME = 'django_models_create_count'
19 | DJANGO_MODELS_UPDATE_ROW_METRIC_NAME = 'django_models_update_count'
20 | DJANGO_DB_CONNECTIONS_CREATED_METRIC_NAME = 'django_database_connections_count'
21 | DJANGO_USER_LOGIN_METRIC_NAME = 'django_user_login'
22 | DJANGO_LOCAL_CACHE_METRIC_NAME = 'django_cache_localmem'
23 |
24 | @receiver(post_save)
25 | def update_models_save_counter(sender, instance, created, raw, using, update_fields, **kwargs):
26 | if created:
27 | statsd.increment(DJANGO_MODELS_NEW_ROW_METRIC_NAME,
28 | tags=[
29 | 'model:%s' % sender,
30 | 'using:%s' % using,
31 | ]
32 | )
33 | else:
34 | statsd.increment(DJANGO_MODELS_UPDATE_ROW_METRIC_NAME,
35 | tags=[
36 | 'model:%s' % sender,
37 | 'using:%s' % using,
38 | ]
39 | )
40 |
41 | @receiver(connection_created)
42 | def update_connection_created_metric(sender, connection, **kwargs):
43 |
44 | statsd.increment(DJANGO_DB_CONNECTIONS_CREATED_METRIC_NAME,
45 | tags=[
46 | 'sender:%s' % sender,
47 | 'connection:%s' % connection.display_name,
48 | ]
49 | )
50 |
51 |
52 | @receiver(user_logged_in)
53 | def update_user_logged_in_metric(sender, request, user, **kwargs):
54 |
55 | statsd.increment(DJANGO_USER_LOGIN_METRIC_NAME,
56 | tags=[
57 | 'operation:login',
58 | ]
59 | )
60 |
61 | @receiver(user_logged_out)
62 | def update_user_logged_out_metric(sender, request, user, **kwargs):
63 |
64 | statsd.increment(DJANGO_USER_LOGIN_METRIC_NAME,
65 | tags=[
66 | 'operation:logout',
67 | ]
68 | )
69 |
70 | @receiver(user_login_failed)
71 | def update_user_failed_login_metric(sender, credentials, request, **kwargs):
72 |
73 | statsd.increment(DJANGO_USER_LOGIN_METRIC_NAME,
74 | tags=[
75 | 'operation:login_failed',
76 | ]
77 | )
78 |
79 | class InstrumentedLocMemCache(locmem.LocMemCache):
80 |
81 | def get(self, key, default=None, version=None):
82 | statsd.increment(DJANGO_LOCAL_CACHE_METRIC_NAME,
83 | tags=[
84 | 'operation:get',
85 | 'item:%s' % key,
86 | ]
87 | )
88 | v = super(InstrumentedLocMemCache, self).get(key, default=None, version=version)
89 | if v is not None:
90 | statsd.increment(DJANGO_LOCAL_CACHE_METRIC_NAME,
91 | tags=[
92 | 'operation:hit',
93 | 'item:%s' % key,
94 | ]
95 | )
96 | else:
97 | statsd.increment(DJANGO_LOCAL_CACHE_METRIC_NAME,
98 | tags=[
99 | 'operation:miss',
100 | 'item:%s' % key,
101 | ]
102 | )
103 | return v or default
104 |
105 |
106 | class StatsdReporter():
107 |
108 | def __init__(self, get_response):
109 | self.get_response = get_response
110 |
111 | def __call__(self, request):
112 | request.start_time = time.time()
113 | statsd.increment(REQUEST_COUNT_METRIC_NAME,
114 | tags=[
115 | 'endpoint:%s' % request.path_info,
116 | 'method:%s' % request.method,
117 | ]
118 | )
119 |
120 | response = self.get_response(request)
121 | if response:
122 | resp_time = (time.time() - request.start_time)*1000
123 | statsd.timing(REQUEST_LATENCY_METRIC_NAME,
124 | resp_time,
125 | tags=[
126 | 'endpoint:%s' % request.path_info,
127 | 'view_name:%s' % getattr(request, 'view_func_name', 'unknown'),
128 | 'method:%s' % request.method,
129 | 'status:%s' % str(response.status_code)
130 | ]
131 | )
132 | return response
133 |
134 | def process_view(self, request, view_func, view_args, view_kwargs):
135 | if hasattr(request, 'resolver_match') and request.resolver_match is not None:
136 | request.view_func_name = request.resolver_match.func.__name__
137 | else:
138 | request.view_func_name = "unknown"
139 |
140 | return None
141 |
142 | def process_exception(self, request, exception):
143 | statsd.increment(DJANGO_EXCEPTION_COUNTER,
144 | tags=[
145 | 'endpoint:%s' % request.path_info,
146 | 'method:%s' % request.method,
147 | 'exception_class:%s' % exception.__class__,
148 | ]
149 | )
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/til/settings.py:
--------------------------------------------------------------------------------
1 | """
2 | Django settings for til project.
3 |
4 | Generated by 'django-admin startproject' using Django 1.11.4.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/1.11/topics/settings/
8 |
9 | For the full list of settings and their values, see
10 | https://docs.djangoproject.com/en/1.11/ref/settings/
11 | """
12 |
13 | import os
14 |
15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
17 |
18 |
19 | # Quick-start development settings - unsuitable for production
20 | # See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
21 |
22 | # SECURITY WARNING: keep the secret key used in production secret!
23 | SECRET_KEY = 'y127*dm#z8u8@g+l&sf&3g5jx0ntfh0_6^h1-83tds%cmhbqxo'
24 |
25 | # SECURITY WARNING: don't run with debug turned on in production!
26 | DEBUG = True
27 |
28 | ALLOWED_HOSTS = ["webapp.example.com"]
29 |
30 |
31 | # Application definition
32 |
33 | INSTALLED_APPS = [
34 | 'bootstrap3',
35 | 'tilweb.apps.TilwebConfig',
36 | 'django.contrib.admin',
37 | 'django.contrib.auth',
38 | 'django.contrib.contenttypes',
39 | 'django.contrib.sessions',
40 | 'django.contrib.messages',
41 | 'django.contrib.staticfiles',
42 | ]
43 |
44 | MIDDLEWARE = [
45 | 'til.metrics_middleware.StatsdReporter',
46 | 'django.middleware.security.SecurityMiddleware',
47 | 'django.contrib.sessions.middleware.SessionMiddleware',
48 | 'django.middleware.common.CommonMiddleware',
49 | 'django.middleware.csrf.CsrfViewMiddleware',
50 | 'django.contrib.auth.middleware.AuthenticationMiddleware',
51 | 'django.contrib.messages.middleware.MessageMiddleware',
52 | 'django.middleware.clickjacking.XFrameOptionsMiddleware',
53 | ]
54 |
55 | ROOT_URLCONF = 'til.urls'
56 |
57 | TEMPLATES = [
58 | {
59 | 'BACKEND': 'django.template.backends.django.DjangoTemplates',
60 | 'DIRS': [],
61 | 'APP_DIRS': True,
62 | 'OPTIONS': {
63 | 'context_processors': [
64 | 'django.template.context_processors.debug',
65 | 'django.template.context_processors.request',
66 | 'django.contrib.auth.context_processors.auth',
67 | 'django.contrib.messages.context_processors.messages',
68 | ],
69 | },
70 | },
71 | ]
72 |
73 | WSGI_APPLICATION = 'til.wsgi.application'
74 |
75 | DATABASES = {
76 | 'default': {
77 | 'ENGINE': 'django.db.backends.sqlite3',
78 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
79 | }
80 | }
81 |
82 | CACHES = {
83 | 'default': {
84 | 'BACKEND': 'til.metrics_middleware.InstrumentedLocMemCache',
85 | }
86 | }
87 |
88 |
89 | # Password validation
90 | # https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
91 |
92 | AUTH_PASSWORD_VALIDATORS = [
93 | {
94 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
95 | },
96 | {
97 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
98 | },
99 | {
100 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
101 | },
102 | {
103 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
104 | },
105 | ]
106 |
107 |
108 | # Internationalization
109 | # https://docs.djangoproject.com/en/1.11/topics/i18n/
110 |
111 | LANGUAGE_CODE = 'en-us'
112 |
113 | TIME_ZONE = 'Australia/Sydney'
114 |
115 | USE_I18N = True
116 |
117 | USE_L10N = True
118 |
119 | USE_TZ = True
120 |
121 |
122 | # Static files (CSS, JavaScript, Images)
123 | # https://docs.djangoproject.com/en/1.11/howto/static-files/
124 |
125 | STATIC_URL = '/static/'
126 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/til/urls.py:
--------------------------------------------------------------------------------
1 | """til URL Configuration
2 |
3 | The `urlpatterns` list routes URLs to views. For more information please see:
4 | https://docs.djangoproject.com/en/1.11/topics/http/urls/
5 | Examples:
6 | Function views
7 | 1. Add an import: from my_app import views
8 | 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
9 | Class-based views
10 | 1. Add an import: from other_app.views import Home
11 | 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
12 | Including another URLconf
13 | 1. Import the include() function: from django.conf.urls import url, include
14 | 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
15 | """
16 | from django.conf.urls import url, include
17 | from django.contrib import admin
18 |
19 | urlpatterns = [
20 | url(r'', include('tilweb.urls')),
21 | url(r'^admin/', admin.site.urls),
22 | ]
23 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/til/wsgi.py:
--------------------------------------------------------------------------------
1 | """
2 | WSGI config for til project.
3 |
4 | It exposes the WSGI callable as a module-level variable named ``application``.
5 |
6 | For more information on this file, see
7 | https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
8 | """
9 |
10 | import os
11 |
12 | from django.core.wsgi import get_wsgi_application
13 |
14 | os.environ.setdefault("DJANGO_SETTINGS_MODULE", "til.settings")
15 |
16 | application = get_wsgi_application()
17 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_statsd_prometheus/src/tilweb/__init__.py
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/admin.py:
--------------------------------------------------------------------------------
1 | from django.contrib import admin
2 |
3 | # Register your models here.
4 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/apps.py:
--------------------------------------------------------------------------------
1 | from django.apps import AppConfig
2 |
3 |
4 | class TilwebConfig(AppConfig):
5 | name = 'tilweb'
6 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/forms.py:
--------------------------------------------------------------------------------
1 | from django import forms
2 |
3 |
4 | class TilForm(forms.Form):
5 | subject = forms.CharField(label='Title', max_length=160)
6 | content = forms.CharField(label='What did I learn today?',
7 | widget=forms.Textarea, max_length=800)
8 | # four tags separated by a comma
9 | tags = forms.CharField(label='Tags (comma separated, maximum: 4)',
10 | required=False,
11 | max_length=43)
12 | public = forms.BooleanField(label='Public', required=False)
13 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/migrations/0001_initial.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | # Generated by Django 1.11.4 on 2017-09-01 02:15
3 | from __future__ import unicode_literals
4 |
5 | from django.conf import settings
6 | from django.db import migrations, models
7 | import django.db.models.deletion
8 |
9 |
10 | class Migration(migrations.Migration):
11 |
12 | initial = True
13 |
14 | dependencies = [
15 | migrations.swappable_dependency(settings.AUTH_USER_MODEL),
16 | ]
17 |
18 | operations = [
19 | migrations.CreateModel(
20 | name='Post',
21 | fields=[
22 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
23 | ('subject', models.CharField(max_length=160)),
24 | ('content', models.CharField(max_length=800)),
25 | ('public', models.BooleanField(default=False)),
26 | ('post_date', models.DateTimeField(auto_now_add=True)),
27 | ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
28 | ],
29 | ),
30 | migrations.CreateModel(
31 | name='PostTag',
32 | fields=[
33 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
34 | ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tilweb.Post')),
35 | ],
36 | ),
37 | migrations.CreateModel(
38 | name='Tag',
39 | fields=[
40 | ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
41 | ('tag', models.CharField(max_length=10, unique=True)),
42 | ('creation_date', models.DateTimeField(auto_now_add=True)),
43 | ],
44 | ),
45 | migrations.AddField(
46 | model_name='posttag',
47 | name='tag',
48 | field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tilweb.Tag'),
49 | ),
50 | ]
51 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/migrations/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/django_statsd_prometheus/src/tilweb/migrations/__init__.py
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/models.py:
--------------------------------------------------------------------------------
1 | from django.db import models
2 | from django.contrib.auth.models import User
3 |
4 |
5 | class Post(models.Model):
6 | subject = models.CharField(max_length=160)
7 | content = models.CharField(max_length=800)
8 | author = models.ForeignKey(User, on_delete=models.CASCADE)
9 | public = models.BooleanField(default=False)
10 | post_date = models.DateTimeField(auto_now_add=True)
11 |
12 |
13 | class Tag(models.Model):
14 | # We will use the implcit id
15 | tag = models.CharField(max_length=10, unique=True)
16 | creation_date = models.DateTimeField(auto_now_add=True)
17 |
18 |
19 | class PostTag(models.Model):
20 | post = models.ForeignKey(Post, on_delete=models.DO_NOTHING)
21 | tag = models.ForeignKey(Tag, on_delete=models.DO_NOTHING)
22 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/base.html:
--------------------------------------------------------------------------------
1 | {# Load the tag library #}
2 | {% load bootstrap3 %}
3 |
4 | {# Load CSS and JavaScript #}
5 | {% bootstrap_css %}
6 | {% bootstrap_javascript %}
7 |
8 |
9 |
16 |
17 |
18 | {% block content %}
19 | {% endblock %}
20 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/create_post.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 | {% load bootstrap3 %}
3 |
4 | {% block content %}
5 |
7 |
Create your post
8 |
TIL: Your personal learning journal
9 |
10 |
11 |
12 |
21 |
22 |
23 |
24 |
Your recent posts
25 | {% for post in posts %}
26 |
{{ post.subject }}
27 | {%endfor %}
28 | {% endblock %}
29 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/index.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
6 |
TIL: Your personal learning journal
7 |
8 |
9 |
10 |
11 |
Already a member?
12 |
13 |
Login
14 |
Join TIL today!
15 |
16 |
Signup
17 |
Recent community posts
18 | {% for post in posts %}
19 |
{{ post.subject }}
20 | {%endfor %}
21 |
22 |
23 | {% endblock %}
24 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/login.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 | {% block content %}
3 |
4 |
5 |
6 |
Login
7 |
8 |
9 | {% if form.errors %}
10 | {% for field in form %}
11 | {% for error in field.errors %}
12 |
13 | {{ error|escape }}
14 |
15 | {% endfor %}
16 | {% endfor %}
17 | {% endif %}
18 |
19 | {% if form.non_field_errors %}
20 | {% for error in form.non_field_errors %}
21 |
22 | {{ error|escape }}
23 |
24 | {% endfor %}
25 | {% endif %}
26 |
27 |
32 |
33 | {% endblock %}
34 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/me.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
6 |
Hello, {{user.username}}
7 |
TIL: Your personal learning journal
8 |
9 |
10 |
11 |
12 |
Create a new post
13 |
Your posts
14 | {% for post in posts %}
15 |
{{ post.subject }}
16 | {%endfor %}
17 |
18 |
Tags you have used
19 | {% for tag in tags %}
20 |
{{ tag.tag }}
21 | {%endfor %}
22 |
23 | {% endblock %}
24 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/signup.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
5 |
6 |
7 |
Sign up
8 |
9 |
10 | {% if form.errors %}
11 | {% for field in form %}
12 | {% for error in field.errors %}
13 |
14 | {{ error|escape }}
15 |
16 | {% endfor %}
17 | {% endfor %}
18 | {% for error in form.non_field_errors %}
19 |
20 | {{ error|escape }}
21 |
22 | {% endfor %}
23 | {% endif %}
24 |
25 |
30 |
31 | {% endblock %}
32 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/tag_view.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
6 |
Posts tagged {{ tag }}
7 |
TIL: Your personal learning journal
8 |
9 |
10 |
11 |
12 | {% for post in posts %}
13 |
{{ post.subject }}
14 | {%endfor %}
15 | {% endblock %}
16 |
17 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/templates/tilweb/view_post.html:
--------------------------------------------------------------------------------
1 | {% extends 'tilweb/base.html' %}
2 |
3 | {% block content %}
4 |
5 |
{{post.subject|title}}
6 |
By {{post.author.username}}
7 | {% if tags %}
8 |
Tags:
9 | {% for tag in tags %}
10 | {{tag.tag.tag}}
11 |
12 | {% endfor %}
13 | {% endif %}
14 |
15 |
16 |
Published on {{ post.post_date }}
17 |
18 |
{{post.content}}
19 |
20 | {% endblock %}
21 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/tests.py:
--------------------------------------------------------------------------------
1 | from django.test import TestCase
2 |
3 | # Create your tests here.
4 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/urls.py:
--------------------------------------------------------------------------------
from django.conf.urls import url


from . import views

urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^login/$', views.login_view, name='login'),
    url(r'^logout/$', views.logout_view, name='logout'),
    url(r'^me/', views.me, name='me'),
    url(r'^post/$', views.create_post, name='create-post'),
    # The named-group syntax below was mangled in the extracted source (the
    # `<name>` part of `(?P<name>...)` was stripped, leaving invalid regexes);
    # reconstructed here. TODO(review): confirm the group names match the
    # keyword parameters of views.show_post and views.tag_view.
    url(r'^post/(?P<pk>\d+)/$', views.show_post, name='show-post'),
    url(r'^tag/(?P<tag>\w+)/$', views.tag_view, name='tag-view'),
    url(r'^signup/$', views.signup, name='signup'),
]
16 |
--------------------------------------------------------------------------------
/django_statsd_prometheus/src/tilweb/views.py:
--------------------------------------------------------------------------------
1 | from django.shortcuts import render
2 | from django.http import (
3 | HttpResponseNotAllowed, HttpResponseRedirect,
4 | HttpResponseForbidden, HttpResponseNotFound,
5 | )
6 | from .models import Post, Tag, PostTag
7 | from .forms import TilForm
8 |
9 |
10 | from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
11 | from django.contrib.auth import login, authenticate, logout
12 |
13 | from django.views.decorators.cache import cache_page
14 |
15 |
def index(request):
    """Landing page: show the five newest public posts to anonymous
    visitors; send logged-in users to their dashboard at /me/."""
    if request.user.is_authenticated:
        return HttpResponseRedirect('/me/')
    public_posts = Post.objects.filter(public=True).order_by('-post_date')[:5]
    context = {'posts': public_posts}
    return render(request, 'tilweb/index.html', context)
25 |
26 |
def me(request):
    """Dashboard for a logged-in user: posts newest-first plus the
    distinct tags attached to them; anonymous users go to /login/.

    NOTE(review): the post query is not filtered by author, so every
    post in the database is listed — confirm that is intended.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    posts = Post.objects.order_by('-post_date')
    tag_links = PostTag.objects.filter(post__in=list(posts)).distinct()
    context = {
        'user': request.user,
        'posts': posts,
        'tags': {link.tag for link in tag_links},
    }
    return render(request, 'tilweb/me.html', context)
41 |
42 |
def login_view(request):
    """Log a user in.

    GET renders an empty AuthenticationForm; POST validates the
    credentials and, on success, starts a session and redirects to
    /post/.  Already-authenticated users are redirected immediately.

    Bug fix: the original returned None (a Django 500) when the form
    validated but authenticate() returned no user, and for any method
    other than GET/POST — every path now returns a response.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect('/post/')
    if request.method == 'POST':
        form = AuthenticationForm(request=request, data=request.POST)
        if form.is_valid():
            user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password'),
            )
            if user is not None:
                login(request, user)
                return HttpResponseRedirect('/post/')
            # Form validated but authenticate() still failed (e.g.
            # inactive account): fall through and re-render the form.
        # Render the bound form so any validation errors are shown.
        return render(request, 'tilweb/login.html', {'form': form})
    # GET (or any other method): show an empty login form.
    form = AuthenticationForm()
    return render(request, 'tilweb/login.html', {'form': form})
65 |
66 |
def logout_view(request):
    """Terminate the current session (a no-op for anonymous users) and
    redirect to the landing page."""
    logout(request)
    return HttpResponseRedirect('/')
70 |
def signup(request):
    """Create a new account.

    GET renders an empty UserCreationForm; POST validates it, creates
    the user, logs them in, and redirects to /post/.  Authenticated
    users are redirected straight away.

    Bug fix: the original returned None (a Django 500) for any method
    other than GET/POST — every path now returns a response.
    """
    if request.user.is_authenticated:
        return HttpResponseRedirect('/post/')
    if request.method == 'POST':
        form = UserCreationForm(request.POST)
        if form.is_valid():
            # ModelForm.save() creates the User row:
            # https://docs.djangoproject.com/en/1.11/topics/forms/modelforms/#the-save-method
            form.save()
            user = authenticate(
                username=form.cleaned_data.get('username'),
                password=form.cleaned_data.get('password1'),
            )
            login(request, user)
            return HttpResponseRedirect('/post/')
        # Re-render the bound form so its validation errors are shown.
        return render(request, 'tilweb/signup.html', {'form': form})
    # GET (or any other method): show an empty signup form.
    form = UserCreationForm()
    return render(request, 'tilweb/signup.html', {'form': form})
91 |
92 |
def create_post(request):
    """Create a new TIL post for the logged-in user.

    GET renders the post form plus the author's five latest posts;
    POST validates the form, saves the post, attaches any
    comma-separated tags (creating Tag rows on demand) and redirects
    to the new post's page.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect('/login/')
    if request.method == 'GET':
        form = TilForm()
        latest_posts = Post.objects.filter(
            author=request.user).order_by('-post_date')[:5]
        return render(
            request, 'tilweb/create_post.html', {
                'form': form, 'posts': latest_posts
            })
    if request.method == 'POST':
        form = TilForm(request.POST)
        if not form.is_valid():
            # Re-render the bound form so validation errors are shown.
            return render(
                request, 'tilweb/create_post.html', {
                    'form': form,
                })
        p = Post(
            subject=form.cleaned_data.get('subject'),
            content=form.cleaned_data.get('content'),
            author=request.user,
            public=form.cleaned_data.get('public'),
        )
        p.save()
        tags = form.cleaned_data.get('tags')
        if tags:
            for raw_tag in tags.split(','):
                tag = raw_tag.strip()
                if not tag:
                    # Skip empty segments such as "a,,b" or a trailing
                    # comma, which previously created an empty Tag row.
                    continue
                # get_or_create avoids the filter-then-save race and
                # duplicate Tag rows of the original filter/if pattern.
                t, _ = Tag.objects.get_or_create(tag=tag)
                PostTag(post=p, tag=t).save()
        return HttpResponseRedirect('/post/{0}/'.format(p.id))
    # HttpResponseNotAllowed takes the list of permitted methods (used
    # for the Allow header), not a message string.
    return HttpResponseNotAllowed(['GET', 'POST'])
135 |
136 |
@cache_page(60*15)
def show_post(request, post_id=None):
    """Display a single post (response cached for 15 minutes).

    Bug fix: ``filter(id=post_id)[0]`` raised IndexError (a 500) for an
    unknown id; now returns a 404 instead.

    NOTE(review): cache_page caches per-URL, not per-user — a non-public
    post rendered for its author may later be served from cache to
    another user; confirm whether that is acceptable.
    """
    post = Post.objects.filter(id=post_id).first()
    if post is None:
        return HttpResponseNotFound('Post Not Found')
    # If the post is not public, it is only viewable by the author.
    if not post.public and post.author != request.user:
        return HttpResponseForbidden()
    post_tags = PostTag.objects.filter(post=post)
    return render(request, 'tilweb/view_post.html', {
        'post': post,
        'tags': post_tags if len(post_tags) else None,
    })
149 |
150 |
def tag_view(request, tag):
    """List the posts carrying ``tag`` that the requester may see:
    public posts plus the current user's own posts."""
    matches = Tag.objects.filter(tag=tag)
    if not matches.all():
        return HttpResponseNotFound(' Tag Not Found ')
    tag_obj = matches[0]
    links = PostTag.objects.filter(tag=tag_obj)
    # Keep only public posts or those authored by the requester.
    visible_ids = [
        link.post.id for link in links
        if link.post.public or link.post.author == request.user
    ]
    posts = Post.objects.filter(id__in=visible_ids)
    return render(request, 'tilweb/tag_view.html', {
        'tag': tag_obj.tag,
        'posts': posts,
    })
169 |
--------------------------------------------------------------------------------
/flask_prometheus/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | ADD . /application
3 | WORKDIR /application
4 | RUN set -e; \
5 | apk add --no-cache --virtual .build-deps \
6 | gcc \
7 | libc-dev \
8 | linux-headers \
9 | ; \
10 | pip install -r src/requirements.txt; \
11 | apk del .build-deps;
12 | EXPOSE 5000
13 | VOLUME /application
14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 1
15 |
--------------------------------------------------------------------------------
/flask_prometheus/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | RUN apk add --update curl apache2-utils && rm -rf /var/cache/apk/*
3 | ADD ./make-requests.sh /make-requests.sh
4 | VOLUME /data
5 | CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/flask_prometheus/README.md:
--------------------------------------------------------------------------------
1 | # Example Flask application
2 |
3 | See ``src`` for the application code.
4 |
5 | ## Building Docker image
6 |
7 | The Python 3 based [Dockerfile](Dockerfile) uses an Alpine Linux base image
8 | and expects the application source code to be volume mounted at `/application`
9 | when run:
10 |
11 | ```
12 | FROM python:3.6.1-alpine
13 | ADD . /application
14 | WORKDIR /application
15 | RUN set -e; \
16 | apk add --no-cache --virtual .build-deps \
17 | gcc \
18 | libc-dev \
19 | linux-headers \
20 | ; \
21 | pip install -r src/requirements.txt; \
22 | apk del .build-deps;
23 | EXPOSE 5000
24 | VOLUME /application
25 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5
26 | ```
27 |
28 | The last statement shows how we are running the application via `uwsgi` with 5
29 | worker processes.
30 |
31 | To build the image:
32 |
33 | ```
34 | $ docker build -t amitsaha/flask_app -f Dockerfile .
35 | ```
36 |
37 | ## Running the application
38 |
39 | We can just run the web application as follows:
40 |
41 | ```
42 | $ docker run -ti -p 5000:5000 -v `pwd`/src:/application amitsaha/flask_app
43 | ```
44 |
45 | ## Bringing up the web application, along with prometheus
46 |
47 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp` service which is our web application
48 | using the image `amitsaha/flask_app` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml)
49 | file brings up the `prometheus` service and also starts the `grafana` service which
50 | is available on port 3000. The config directory contains a `prometheus.yml` file
51 | which sets up the targets for prometheus to scrape. The scrape configuration
52 | looks as follows:
53 |
54 | ```
55 | # A scrape configuration containing exactly one endpoint to scrape:
56 | # Here it's Prometheus itself.
57 | scrape_configs:
58 | # The job name is added as a label `job=` to any timeseries scraped from this config.
59 | - job_name: 'prometheus'
60 |
61 | # Override the global default and scrape targets from this job every 5 seconds.
62 | scrape_interval: 5s
63 |
64 | # metrics_path defaults to '/metrics'
65 | # scheme defaults to 'http'.
66 |
67 | static_configs:
68 | - targets: ['localhost:9090']
69 | - job_name: 'webapp'
70 |
71 | # Override the global default and scrape targets from this job every 5 seconds.
72 | scrape_interval: 5s
73 |
74 | # metrics_path defaults to '/metrics'
75 | # scheme defaults to 'http'.
76 | static_configs:
77 | - targets: ['webapp:5000']
78 | ```
79 |
80 | Prometheus scrapes itself, which is the first target above. The second target
81 | is our web application on port 5000.
82 | Since these services are running via `docker-compose`, `webapp` automatically resolves to the IP of the webapp container.
83 |
84 | To bring up all the services:
85 |
86 | ```
87 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up
88 | ```
89 |
90 |
--------------------------------------------------------------------------------
/flask_prometheus/config/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | # my global config
2 | global:
3 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds.
5 | # scrape_timeout is set to the global default (10s).
6 |
7 | # Attach these labels to any time series or alerts when communicating with
8 | # external systems (federation, remote storage, Alertmanager).
9 | external_labels:
10 | monitor: 'my-project'
11 |
12 | # A scrape configuration containing exactly one endpoint to scrape:
13 | # Here it's Prometheus itself.
14 | scrape_configs:
15 | # The job name is added as a label `job=` to any timeseries scraped from this config.
16 | - job_name: 'prometheus'
17 |
18 | # Override the global default and scrape targets from this job every 5 seconds.
19 | scrape_interval: 5s
20 |
21 | # metrics_path defaults to '/metrics'
22 | # scheme defaults to 'http'.
23 |
24 | static_configs:
25 | - targets: ['localhost:9090']
26 | - job_name: 'webapp'
27 |
28 | # Override the global default and scrape targets from this job every 5 seconds.
29 | scrape_interval: 5s
30 |
31 | # metrics_path defaults to '/metrics'
32 | # scheme defaults to 'http'.
33 | static_configs:
34 | - targets: ['webapp:5000']
35 |
--------------------------------------------------------------------------------
/flask_prometheus/docker-compose-infra.yml:
--------------------------------------------------------------------------------
1 | # Based off https://github.com/vegasbrianc/prometheus
2 | version: '2'
3 |
4 | volumes:
5 | prometheus_data: {}
6 | grafana_data: {}
7 |
8 | services:
9 | prometheus:
10 | image: prom/prometheus
11 | container_name: prometheus
12 | volumes:
13 | - ./config/prometheus/:/etc/prometheus/
14 | - prometheus_data:/prometheus
15 | command:
16 | - '--config.file=/etc/prometheus/prometheus.yml'
17 | expose:
18 | - 9090
19 | ports:
20 | - 9090:9090
21 | grafana:
22 | image: grafana/grafana
23 | depends_on:
24 | - prometheus
25 | ports:
26 | - 3000:3000
27 | volumes:
28 | - grafana_data:/var/lib/grafana
29 | environment:
30 | - GF_SECURITY_ADMIN_PASSWORD=foobar
31 | - GF_USERS_ALLOW_SIGN_UP=false
32 |
--------------------------------------------------------------------------------
/flask_prometheus/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | webapp:
5 | build: ./
6 | image: amitsaha/flask_app_2
7 | container_name: webapp
8 | expose:
9 | - 5000
10 | ports:
11 | - 5000:5000
12 | volumes:
13 | - ./src:/application
14 | client:
15 | depends_on:
16 | - webapp
17 | build:
18 | context: ./
19 | dockerfile: Dockerfile-client
20 | image: amitsaha/flask_app:client
21 | container_name: client
22 |
--------------------------------------------------------------------------------
/flask_prometheus/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ab -n 1000 -c 3 http://webapp:5000/test/
3 | ab -n 1000 -c 3 http://webapp:5000/test1/
4 | top
5 |
--------------------------------------------------------------------------------
/flask_prometheus/src/flask_app.py:
--------------------------------------------------------------------------------
from flask import Flask, Response
from helpers.middleware import setup_metrics
import prometheus_client

# Content type of the Prometheus text exposition format.
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')


app = Flask(__name__)
# Install before/after-request hooks that record latency and counts.
setup_metrics(app)

@app.route('/test/')
def test():
    # Simple 200 endpoint hit by make-requests.sh load generation.
    return 'rest'

@app.route('/test1/')
def test1():
    # Deliberate ZeroDivisionError so the demo produces HTTP 500s.
    1/0
    return 'rest'

@app.errorhandler(500)
def handle_500(error):
    # Render unhandled exceptions as a plain-text 500 response.
    return str(error), 500

@app.route('/metrics')
def metrics():
    # Expose everything registered with the default prometheus_client
    # registry in the text format declared above.
    return Response(prometheus_client.generate_latest(), mimetype=CONTENT_TYPE_LATEST)

if __name__ == '__main__':
    app.run()
30 |
--------------------------------------------------------------------------------
/flask_prometheus/src/helpers/middleware.py:
--------------------------------------------------------------------------------
from flask import request
from prometheus_client import Counter, Histogram
import time
import sys

# Count of finished requests, labelled by app, verb, path and status.
REQUEST_COUNT = Counter(
    'request_count', 'App Request Count',
    ['app_name', 'method', 'endpoint', 'http_status']
)
# Request duration histogram, labelled by app and path.
REQUEST_LATENCY = Histogram('request_latency_seconds', 'Request latency',
    ['app_name', 'endpoint']
)

def start_timer():
    # Stamp the request so stop_timer can compute the duration.
    request.start_time = time.time()

def stop_timer(response):
    # Seconds elapsed since start_timer ran for this request.
    elapsed = time.time() - request.start_time
    REQUEST_LATENCY.labels('webapp', request.path).observe(elapsed)
    REQUEST_COUNT.labels(
        'webapp', request.method, request.path, response.status_code
    ).inc()
    return response

def setup_metrics(app):
    """Attach the timing and counting hooks to a Flask app."""
    app.before_request(start_timer)
    app.after_request(stop_timer)
28 |
--------------------------------------------------------------------------------
/flask_prometheus/src/requirements.txt:
--------------------------------------------------------------------------------
1 | flask==1.0.0
2 | prometheus_client==0.1.1
3 | uwsgi==2.0.15
4 |
--------------------------------------------------------------------------------
/flask_statsd/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | ADD . /application
3 | WORKDIR /application
4 | RUN set -e; \
5 | apk add --no-cache --virtual .build-deps \
6 | gcc \
7 | libc-dev \
8 | linux-headers \
9 | ; \
10 | pip install -r src/requirements.txt; \
11 | apk del .build-deps;
12 | EXPOSE 5000
13 | VOLUME /application
14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=app:app --enable-threads --processes 5
15 |
--------------------------------------------------------------------------------
/flask_statsd/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | RUN apk add --update curl apache2-utils && rm -rf /var/cache/apk/*
3 | ADD ./make-requests.sh /make-requests.sh
4 | VOLUME /data
5 | CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/flask_statsd/README.md:
--------------------------------------------------------------------------------
1 | # Reporting metrics to statsd
2 |
3 | This demo builds upon Demos 1 and 2 to switch out writing metrics to a CSV file with
4 | reporting to a `statsd` instance. It demonstrates:
5 |
6 | - Adding *characteristics* to metrics by using nested metric keys
7 | - Graphite is setup as the storage backend for statsd
8 |
9 | ## Adding characteristics to metrics
10 |
11 | We update the [middlware.py](./src/helpers/middleware.py) to report metrics to `statsd` as follows:
12 |
13 |
14 | ```
15 | statsd = statsd.StatsClient(host='statsd', port=8125, prefix='webapp1')
16 |
17 | # instance1.<endpoint>.<method>.http_<status>.latency
18 | REQUEST_LATENCY_METRIC_KEY_PATTERN = 'instance1.{0}.{1}.http_{2}.latency'
19 |
20 | # instance1.request.<endpoint>.<method>.http_<status>.count
21 | REQUEST_COUNT_METRIC_KEY_PATTERN = 'instance1.request.{0}.{1}.http_{2}.count'
22 |
23 | def start_timer():
24 | request.start_time = time.time()
25 |
26 | def stop_timer(response):
27 | # convert this into milliseconds for statsd
28 | resp_time = (time.time() - request.start_time)*1000
29 | key = REQUEST_LATENCY_METRIC_KEY_PATTERN.format(
30 | request.endpoint,
31 | request.method,
32 | response.status_code,
33 | )
34 | statsd.timing(key, resp_time)
35 |
36 | key = REQUEST_COUNT_METRIC_KEY_PATTERN.format(
37 | request.endpoint,
38 | request.method,
39 | response.status_code,
40 | )
41 | statsd.incr(key)
42 | return response
43 | ..
44 | ```
45 |
46 |
47 | ## Run demo
48 |
49 | ```
50 | $ sudo docker-compose -f docker-compose.yml -f docker-compose-infra.yml up
51 | ```
52 |
53 | ## Play with the data
54 |
55 | If we now go to the address `http://<host-ip>` on your host machine, you will
56 | see a [graphite browser window](http://graphite.readthedocs.io/en/latest/overview.html).
57 |
58 | Once you are there, we can play with the metrics that our application pushed.
59 |
--------------------------------------------------------------------------------
/flask_statsd/config/graphite/storage-aggregation.conf:
--------------------------------------------------------------------------------
1 | # Aggregation methods for whisper files. Entries are scanned in order,
2 | # and first match wins. This file is scanned for changes every 60 seconds
3 | #
4 | # [name]
5 | # pattern =
6 | # xFilesFactor =
7 | # aggregationMethod =
8 | #
9 | # name: Arbitrary unique name for the rule
10 | # pattern: Regex pattern to match against the metric name
11 | # xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur
12 | # aggregationMethod: function to apply to data points for aggregation
13 | #
14 | [min]
15 | pattern = \.lower$
16 | xFilesFactor = 0.1
17 | aggregationMethod = min
18 |
19 | [max]
20 | pattern = \.upper(_\d+)?$
21 | xFilesFactor = 0.1
22 | aggregationMethod = max
23 |
24 | [sum]
25 | pattern = \.sum$
26 | xFilesFactor = 0
27 | aggregationMethod = sum
28 |
29 |
30 | [count_legacy]
31 | pattern = stats_counts.*
32 | xFilesFactor = 0
33 | aggregationMethod = sum
34 |
35 | [default_average]
36 | pattern = .*
37 | xFilesFactor = 0.3
38 | aggregationMethod = average
39 |
--------------------------------------------------------------------------------
/flask_statsd/config/statsd/config_udp.js:
--------------------------------------------------------------------------------
1 | {
2 | "graphiteHost": "127.0.0.1",
3 | "graphitePort": 2003,
4 | "port": 8125,
5 | "deleteIdleStats": true,
6 | "flushInterval": 10000,
7 | "servers": [
8 | { server: "./servers/udp", address: "0.0.0.0", port: 8125 }
9 | ]
10 | }
11 |
--------------------------------------------------------------------------------
/flask_statsd/docker-compose-infra.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | services:
3 | statsd:
4 | image: graphiteapp/graphite-statsd
5 | container_name: graphitestatsd
6 | volumes:
7 | - ./config/statsd/config_udp.js:/opt/statsd/config_udp.js
8 | - ./config/graphite/storage-aggregation.conf:/opt/graphite/conf/storage-aggregation.conf
9 | ports:
10 | - "80:80"
11 | - "2003-2004:2003-2004"
12 | - "2023-2024:2023-2024"
13 | - "8125:8125/udp"
14 | - "8126:8126"
15 |
--------------------------------------------------------------------------------
/flask_statsd/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "2"
2 | services:
3 | webapp:
4 | build: ./
5 | image: amitsaha/flask_app_1
6 | container_name: webapp
7 | expose:
8 | - 5000
9 | ports:
10 | - 5000:5000
11 | volumes:
12 | - ./src:/application
13 | client:
14 | depends_on:
15 | - webapp
16 | build:
17 | context: ./
18 | dockerfile: Dockerfile-client
19 | image: amitsaha/flask_app:client
20 | container_name: client
21 |
22 |
--------------------------------------------------------------------------------
/flask_statsd/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ab -n 1000 -c 3 http://webapp:5000/test/
3 | ab -n 1000 -c 3 http://webapp:5000/test1/
4 | top
5 |
--------------------------------------------------------------------------------
/flask_statsd/src/app.py:
--------------------------------------------------------------------------------
from flask import Flask, Response
from helpers.middleware import setup_metrics

app = Flask(__name__)
# Install the statsd timing/counting request hooks.
setup_metrics(app)

@app.route('/test/')
def test():
    # Simple 200 endpoint hit by make-requests.sh load generation.
    return 'rest'

@app.route('/test1/')
def test1():
    # Deliberate ZeroDivisionError so the demo produces HTTP 500s.
    1/0
    return 'rest'

@app.errorhandler(500)
def handle_500(error):
    # Render unhandled exceptions as a plain-text 500 response.
    return str(error), 500

if __name__ == '__main__':
    app.run()
22 |
--------------------------------------------------------------------------------
/flask_statsd/src/helpers/middleware.py:
--------------------------------------------------------------------------------
from flask import request
import statsd
import time
import sys

# NOTE(review): this rebinding shadows the imported ``statsd`` module;
# it works because the module is not referenced again, but a distinct
# client name would be clearer.
statsd = statsd.StatsClient(host='statsd', port=8125, prefix='webapp1')

# instance1.<endpoint>.<method>.http_<status>.latency
REQUEST_LATENCY_METRIC_KEY_PATTERN = 'instance1.{0}.{1}.http_{2}.latency'

# instance1.request.<endpoint>.<method>.http_<status>.count
REQUEST_COUNT_METRIC_KEY_PATTERN = 'instance1.request.{0}.{1}.http_{2}.count'

def start_timer():
    # Stamp the request so stop_timer can compute elapsed time.
    request.start_time = time.time()

def stop_timer(response):
    # statsd timers are expressed in milliseconds.
    elapsed_ms = (time.time() - request.start_time) * 1000
    key_parts = (request.endpoint, request.method, response.status_code)
    statsd.timing(REQUEST_LATENCY_METRIC_KEY_PATTERN.format(*key_parts),
                  elapsed_ms)
    statsd.incr(REQUEST_COUNT_METRIC_KEY_PATTERN.format(*key_parts))
    return response

def setup_metrics(app):
    """Attach the statsd timing and counting hooks to a Flask app."""
    app.before_request(start_timer)
    app.after_request(stop_timer)
38 |
--------------------------------------------------------------------------------
/flask_statsd/src/requirements.txt:
--------------------------------------------------------------------------------
1 | uwsgi==2.0.15
2 | statsd==3.2.2
3 | flask==1.0
4 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | ADD . /application
3 | WORKDIR /application
4 | RUN set -e; \
5 | apk add --no-cache --virtual .build-deps \
6 | gcc \
7 | libc-dev \
8 | linux-headers \
9 | ; \
10 | pip install -r src/requirements.txt; \
11 | apk del .build-deps;
12 | EXPOSE 5000
13 | VOLUME /application
14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5
15 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.6.1-alpine
2 | RUN apk add --update curl apache2-utils && rm -rf /var/cache/apk/*
3 | ADD ./make-requests.sh /make-requests.sh
4 | VOLUME /data
5 | CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/README.md:
--------------------------------------------------------------------------------
1 | # Example Flask application
2 |
3 | See ``src`` for the application code.
4 |
5 | ## Building Docker image
6 |
7 | The Python 3 based [Dockerfile](Dockerfile) uses an Alpine Linux base image
8 | and expects the application source code to be volume mounted at `/application`
9 | when run:
10 |
11 | ```
12 | FROM python:3.6.1-alpine
13 | ADD . /application
14 | WORKDIR /application
15 | RUN set -e; \
16 | apk add --no-cache --virtual .build-deps \
17 | gcc \
18 | libc-dev \
19 | linux-headers \
20 | ; \
21 | pip install -r src/requirements.txt; \
22 | apk del .build-deps;
23 | EXPOSE 5000
24 | VOLUME /application
25 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app_1:app --enable-threads --processes 5
26 | ```
27 |
28 | The last statement shows how we are running the application via `uwsgi` with 5
29 | worker processes.
30 |
31 | To build the image:
32 |
33 | ```
34 | $ docker build -t amitsaha/flask_app_1 -f Dockerfile .
35 | ```
36 |
37 | ## Running the application
38 |
39 | We can just run the web application as follows:
40 |
41 | ```
42 | $ docker run -ti -p 5000:5000 -v `pwd`/src:/application amitsaha/flask_app_1
43 | ```
44 |
45 | ## Bringing up the web application, along with prometheus
46 |
47 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp` service which is our web application
48 | using the image `amitsaha/flask_app_1` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml)
49 | file brings up the `statsd` service which is the statsd exporter, `prometheus` service and also starts the `grafana` service which
50 | is available on port 3000. The config directory contains a `prometheus.yml` file
51 | which sets up the targets for prometheus to scrape. The scrape configuration
52 | looks as follows:
53 |
54 | ```
55 | # A scrape configuration containing exactly one endpoint to scrape:
56 | # Here it's Prometheus itself.
57 | scrape_configs:
58 | # The job name is added as a label `job=` to any timeseries scraped from this config.
59 | - job_name: 'prometheus'
60 |
61 | # Override the global default and scrape targets from this job every 5 seconds.
62 | scrape_interval: 5s
63 |
64 | # metrics_path defaults to '/metrics'
65 | # scheme defaults to 'http'.
66 |
67 | static_configs:
68 | - targets: ['localhost:9090']
69 | - job_name: 'webapp'
70 |
71 | # Override the global default and scrape targets from this job every 5 seconds.
72 | scrape_interval: 5s
73 |
74 | # metrics_path defaults to '/metrics'
75 | # scheme defaults to 'http'.
76 | static_configs:
77 | - targets: ['statsd:9102']
78 |
79 | ```
80 |
81 | Prometheus scrapes itself, which is the first target above. The second target
82 | is the statsd exporter on port 9102.
83 |
84 | Since these services are running via `docker-compose`, `statsd` automatically resolves to the IP of the statsd exporter container.
85 |
86 | To bring up all the services:
87 |
88 | ```
89 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up
90 | ```
91 |
92 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/config/prometheus/prometheus.yml:
--------------------------------------------------------------------------------
1 | # my global config
2 | global:
3 | scrape_interval: 15s # By default, scrape targets every 15 seconds.
4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds.
5 | # scrape_timeout is set to the global default (10s).
6 |
7 | # Attach these labels to any time series or alerts when communicating with
8 | # external systems (federation, remote storage, Alertmanager).
9 | external_labels:
10 | monitor: 'my-project'
11 |
12 | # A scrape configuration containing exactly one endpoint to scrape:
13 | # Here it's Prometheus itself.
14 | scrape_configs:
15 | # The job name is added as a label `job=` to any timeseries scraped from this config.
16 | - job_name: 'prometheus'
17 |
18 | # Override the global default and scrape targets from this job every 5 seconds.
19 | scrape_interval: 5s
20 |
21 | # metrics_path defaults to '/metrics'
22 | # scheme defaults to 'http'.
23 |
24 | static_configs:
25 | - targets: ['localhost:9090']
26 | - job_name: 'webapp'
27 |
28 | # Override the global default and scrape targets from this job every 5 seconds.
29 | scrape_interval: 5s
30 |
31 | # metrics_path defaults to '/metrics'
32 | # scheme defaults to 'http'.
33 | static_configs:
34 | - targets: ['statsd:9102']
35 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/docker-compose-infra.yml:
--------------------------------------------------------------------------------
1 | # Based off https://github.com/vegasbrianc/prometheus
2 | version: '2'
3 |
4 | volumes:
5 | prometheus_data: {}
6 | grafana_data: {}
7 |
8 | services:
9 |   statsd:
10 | image: prom/statsd-exporter
11 | container_name: statsd
12 | expose:
13 | - 9125
14 | - 9102
15 | prometheus:
16 | image: prom/prometheus
17 | container_name: prometheus
18 | volumes:
19 | - ./config/prometheus/:/etc/prometheus/
20 | - prometheus_data:/prometheus
21 | command:
22 | - '--config.file=/etc/prometheus/prometheus.yml'
23 | expose:
24 | - 9090
25 | ports:
26 | - 9090:9090
27 | grafana:
28 | image: grafana/grafana
29 | depends_on:
30 | - prometheus
31 | ports:
32 | - 3000:3000
33 | volumes:
34 | - grafana_data:/var/lib/grafana
35 | environment:
36 | - GF_SECURITY_ADMIN_PASSWORD=foobar
37 | - GF_USERS_ALLOW_SIGN_UP=false
38 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '2'
2 |
3 | services:
4 | webapp:
5 | image: amitsaha/flask_app_1
6 | container_name: webapp
7 | expose:
8 | - 5000
9 | ports:
10 | - 5000:5000
11 | volumes:
12 | - ./src:/application
13 | client:
14 | depends_on:
15 | - webapp
16 | build:
17 | context: ./
18 | dockerfile: Dockerfile-client
19 | image: amitsaha/flask_app:client
20 | container_name: client
21 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | ab -n 1000 -c 3 http://webapp:5000/test/
3 | ab -n 1000 -c 3 http://webapp:5000/test1/
4 | top
5 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/src/flask_app.py:
--------------------------------------------------------------------------------
from flask import Flask, Response
from helpers.middleware import setup_metrics

app = Flask(__name__)
# Install the DogStatsd timing/counting request hooks.
setup_metrics(app)

@app.route('/test/')
def test():
    # Simple 200 endpoint hit by make-requests.sh load generation.
    return 'rest'

@app.route('/test1/')
def test1():
    # Deliberate ZeroDivisionError so the demo produces HTTP 500s.
    1/0
    return 'rest'

@app.errorhandler(500)
def handle_500(error):
    # Render unhandled exceptions as a plain-text 500 response.
    return str(error), 500

if __name__ == '__main__':
    app.run()
22 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/src/helpers/middleware.py:
--------------------------------------------------------------------------------
1 | from flask import request
2 | from datadog import DogStatsd
3 | import time
4 | import sys
5 |
6 |
7 | statsd = DogStatsd(host="statsd", port=9125)
8 | REQUEST_LATENCY_METRIC_NAME = 'request_latency_seconds'
9 | REQUEST_COUNT_METRIC_NAME = 'request_count'
10 |
11 | def start_timer():
12 | request.start_time = time.time()
13 |
14 | def stop_timer(response):
15 | resp_time = time.time() - request.start_time
16 | statsd.histogram(REQUEST_LATENCY_METRIC_NAME,
17 | resp_time,
18 | tags=[
19 | 'service:webapp',
20 | 'endpoint: %s' % request.path,
21 | ]
22 | )
23 | return response
24 |
25 | def record_request_data(response):
26 | statsd.increment(REQUEST_COUNT_METRIC_NAME,
27 | tags=[
28 | 'service:webapp',
29 | 'method:%s' % request.method,
30 | 'endpoint:%s' % request.path,
31 | 'status:%s' % str(response.status_code)
32 | ]
33 | )
34 | return response
35 |
36 | def setup_metrics(app):
37 | app.before_request(start_timer)
38 | # The order here matters since we want stop_timer
39 | # to be executed first
40 | app.after_request(record_request_data)
41 | app.after_request(stop_timer)
42 |
--------------------------------------------------------------------------------
/flask_statsd_prometheus/src/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==1.0
2 | datadog==0.16.0
3 | uwsgi==2.0.15
4 |
--------------------------------------------------------------------------------
/miscellaneous-notes.md:
--------------------------------------------------------------------------------
1 | # Python prometheus client
2 |
3 | ## Gauge and Python multiprocess mode
4 |
5 | If you look at https://github.com/prometheus/client_python#multiprocess-mode-gunicorn, there are various modes for gauge:
6 |
7 | > Gauges have several modes they can run in, which can be selected with the multiprocess_mode parameter.
8 |
9 | > 'all': Default. Return a timeseries per process alive or dead.
10 | > 'liveall': Return a timeseries per process that is still alive.
11 | > 'livesum': Return a single timeseries that is the sum of the values of alive processes.
12 | > 'max': Return a single timeseries that is the maximum of the values of all processes, alive or dead.
13 | > 'min': Return a single timeseries that is the minimum of the values of all processes, alive or dead.
14 |
15 | If you see your gauge metrics being reported with a PID label, try using one of the other modes (based on https://github.com/prometheus/client_python/blob/master/prometheus_client/multiprocess.py)
16 |
17 | # StatsD Exporter
18 |
19 |
20 | ## Things to keep in mind
21 |
22 | The following are things to keep in mind while using the statsd exporter:
23 |
24 | - There is no persistent storage in the statsd exporter: if it dies, you lose your metrics
25 | - When using multiple statsd exporter instances, you may want to use a prometheus histogram rather than
26 | summary (This [blog post](https://signoz.io/blog/quantile-aggregation-for-statsd-exporter/) explains why)
27 |
28 | If you are using the DogStatsd Python client:
29 |
30 | - Report all timing metrics in milliseconds
31 | - If you are using the `timed()` context manager, initialize the statsd client with `use_ms=True`
32 |
33 | For timing data, the statsd exporter converts the values into seconds before exporting. So when you plot it in Grafana,
34 | the unit for timing data is always seconds. ([PR updating README for statsd exporter](https://github.com/prometheus/statsd_exporter/pull/262))
35 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | RUN apt-get -y update && apt-get -y install apache2-utils
3 | ADD ./make-requests.sh /make-requests.sh
4 |
5 | CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | service1:
3 | build: ./service1
4 | depends_on:
5 | - service2
6 | - otel-agent
7 | links:
8 | - db
9 | - otel-agent
10 | environment:
11 | OTEL_AGENT: otel-agent
12 | service1-2:
13 | build: ./service1
14 | depends_on:
15 | - service2
16 | - otel-agent
17 | links:
18 | - db
19 | - otel-agent
20 | environment:
21 | OTEL_AGENT: otel-agent
22 | service2:
23 | build: ./service2
24 | depends_on:
25 | - db
26 | - otel-agent
27 | environment:
28 | OTEL_AGENT: otel-agent
29 | db:
30 | image: mysql
31 | command: --default-authentication-plugin=mysql_native_password
32 | restart: always
33 | environment:
34 | MYSQL_ROOT_PASSWORD: rootpassword
35 | MYSQL_DATABASE: service2
36 | MYSQL_USER: joe
37 | MYSQL_PASSWORD: password
38 | volumes:
39 | - ./mysql-init:/docker-entrypoint-initdb.d
40 | otel-agent:
41 | image: otel/opentelemetry-collector
42 | command: ["--config=/etc/otel-collector-agent-config.yml"]
43 | volumes:
44 | - ./otel-collector-agent-config.yml:/etc/otel-collector-agent-config.yml
45 | links:
46 | - otel-collector
47 | depends_on:
48 | - otel-collector
49 | otel-collector:
50 | image: otel/opentelemetry-collector
51 | command: ["--config=/etc/otel-collector-config.yml"]
52 | volumes:
53 | - ./otel-collector-config.yml:/etc/otel-collector-config.yml
54 | ports:
55 | - "8888:8888" # Prometheus metrics exposed by the collector
56 | - "8889:8889" # Prometheus exporter metrics
57 | depends_on:
58 | - jaeger-all-in-one
59 | links:
60 | - jaeger-all-in-one
61 | jaeger-all-in-one:
62 | image: jaegertracing/all-in-one
63 | ports:
64 | - "16686:16686"
65 | - "14268:14268"
66 | - "6831:6831/udp"
67 | client:
68 | links:
69 | - service1
70 | - service1-2
71 | depends_on:
72 | - service1
73 | - service1-2
74 | build:
75 | context: ./
76 | dockerfile: Dockerfile-client
77 | prometheus-server:
78 | image: prom/prometheus:latest
79 | links:
80 | - otel-collector
81 | volumes:
82 | - ./prometheus.yml:/etc/prometheus/prometheus.yml
83 | ports:
84 | - "9090:9090"
85 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sleep 20
3 | ab -n 55 -c 1 http://service1:5000/
4 | sleep 10
5 | ab -n 20 -c 1 http://service1:5000/
6 | sleep 10
7 | ab -n 5 -c 1 http://service1-2:5000/
8 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/mysql-init/01-create-table.sql:
--------------------------------------------------------------------------------
1 | use service2;
2 |
3 | CREATE TABLE users (
4 | id INT(6) AUTO_INCREMENT PRIMARY KEY,
5 | first_name VARCHAR(30) NOT NULL,
6 | last_name VARCHAR(30) NOT NULL
7 | )
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/mysql-init/02-insert-data.sql:
--------------------------------------------------------------------------------
1 | use service2;
2 | INSERT INTO users (first_name, last_name) VALUES("joe", "cool")
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/otel-collector-agent-config.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | opencensus:
3 | otlp:
4 | protocols:
5 | grpc:
6 | processors:
7 | attributes:
8 | actions:
9 | - key: environment
10 | value: test
11 | action: insert
12 | exporters:
13 | opencensus:
14 | endpoint: "otel-collector:55678"
15 | insecure: true
16 |
17 | extensions:
18 | pprof:
19 | endpoint: :1777
20 | zpages:
21 | endpoint: :55679
22 | health_check:
23 |
24 | service:
25 | extensions: [health_check, pprof, zpages]
26 | pipelines:
27 | traces:
28 | receivers: [otlp]
29 | processors: [attributes]
30 | exporters: [opencensus]
31 | metrics:
32 | receivers: [opencensus]
33 | exporters: [opencensus]
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/otel-collector-config.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | opencensus:
3 |
4 | exporters:
5 | logging:
6 | jaeger:
7 | endpoint: jaeger-all-in-one:14250
8 | insecure: true
9 | otlp:
10 | endpoint: "api.honeycomb.io:443"
11 | headers:
12 | "x-honeycomb-team": "SDK-KEY"
13 | "x-honeycomb-dataset": "otel-demo-python"
14 | prometheus:
15 | endpoint: "0.0.0.0:8889"
16 |
17 | processors:
18 | batch:
19 | queued_retry:
20 |
21 | extensions:
22 | health_check:
23 | pprof:
24 | endpoint: :1888
25 | zpages:
26 | endpoint: :55679
27 |
28 | service:
29 | extensions: [pprof, zpages, health_check]
30 | pipelines:
31 | traces:
32 | receivers: [opencensus]
33 | processors: [batch, queued_retry]
34 | exporters: [jaeger, otlp]
35 | metrics:
36 | receivers: [opencensus]
37 | exporters: [prometheus]
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/prometheus.yml:
--------------------------------------------------------------------------------
1 | scrape_configs:
2 | - job_name: 'otel-collector'
3 | scrape_interval: 10s
4 | static_configs:
5 | - targets: ['otel-collector:8889']
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/service1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | ADD . /application
3 | WORKDIR /application
4 | RUN pip install -r requirements.txt --use-deprecated=legacy-resolver
5 | CMD ["uwsgi", "--http", ":5000", "--mount", "/myapplication=app:app", "--processes", "10"]
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/service1/app.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import trace
2 |
3 | from opentelemetry.sdk.trace import TracerProvider
4 | from opentelemetry.sdk.trace import sampling
5 | from opentelemetry.sdk.trace.export import (
6 | ConsoleSpanExporter,
7 | SimpleExportSpanProcessor,
8 | )
9 |
10 | from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
11 | from opentelemetry.sdk.resources import Resource
12 |
13 | from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
14 |
15 | from opentelemetry.instrumentation.flask import FlaskInstrumentor
16 | from opentelemetry.instrumentation.requests import RequestsInstrumentor
17 |
18 |
19 | from opentelemetry import metrics, trace
20 | from opentelemetry.exporter.opencensus.metrics_exporter import (
21 | OpenCensusMetricsExporter,
22 | )
23 | from opentelemetry.sdk.metrics import Counter, MeterProvider
24 | from opentelemetry.sdk.metrics.export.controller import PushController
25 |
26 | from flask import Flask, request
27 | import requests
28 | import os
29 |
30 | resource = Resource({"service.name": "service1"})
31 |
32 | trace.set_tracer_provider(TracerProvider(resource=resource))
33 | tracer = trace.get_tracer(__name__)
34 |
35 | OTEL_AGENT = os.getenv('OTEL_AGENT', "otel-agent")
36 |
37 | otlp_exporter = OTLPSpanExporter(endpoint=OTEL_AGENT + ":4317", insecure=True)
38 | span_processor = BatchExportSpanProcessor(otlp_exporter)
39 | trace.get_tracer_provider().add_span_processor(span_processor)
40 |
41 |
42 | metric_exporter = OpenCensusMetricsExporter(
43 | endpoint=OTEL_AGENT + ":55678",
44 | service_name="service1",
45 | )
46 |
47 |
48 | # Meter is responsible for creating and recording metrics
49 | metrics.set_meter_provider(MeterProvider(resource=resource))
50 | meter = metrics.get_meter(__name__)
51 | # controller collects metrics created from meter and exports it via the
52 | # exporter every interval
53 | controller = PushController(meter, metric_exporter, 5)
54 |
55 | requests_counter = meter.create_counter(
56 | name="requests_count",
57 | description="number of requests",
58 | unit="1",
59 | value_type=int,
60 | )
61 | # Labels are used to identify key-values that are associated with a specific
62 | # metric that you want to record. These are useful for pre-aggregation and can
63 | # be used to store custom dimensions pertaining to a metric
64 | labels = {"service_id": "service1"}
65 |
66 |
67 | app = Flask(__name__)
68 | FlaskInstrumentor().instrument_app(app)
69 | RequestsInstrumentor().instrument()
70 |
71 | def do_stuff():
72 | return requests.get('http://service2:5000')
73 |
74 | @app.route('/')
75 | def index():
76 | requests_counter.add(1, labels)
77 | with tracer.start_as_current_span("service2-request"):
78 | data = do_stuff()
79 | return data.text, 200
80 |
81 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/service1/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==1.1.2
2 | uwsgi
3 | opentelemetry-instrumentation-flask
4 | opentelemetry-instrumentation-requests
5 | opentelemetry-exporter-otlp
6 | opentelemetry-exporter-opencensus
7 | grpcio
8 | opencensus-proto
9 | opentelemetry-api
10 | opentelemetry-sdk
11 |
12 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/service2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | RUN apt-get -y update && apt-get -y install libprotobuf17 python-pkg-resources python-protobuf python-six
3 | RUN pip install mysql-connector
4 |
5 | ADD . /application
6 | WORKDIR /application
7 | RUN pip install -r requirements.txt
8 | CMD ["uwsgi", "--http", ":5000", "--mount", "/myapplication=app:app", "--processes", "10"]
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/service2/app.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import metrics, trace
2 |
3 | from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
4 | from opentelemetry.sdk.resources import Resource
5 |
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace import sampling
8 | from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
9 | from opentelemetry.sdk.trace.export import (
10 | ConsoleSpanExporter,
11 | SimpleExportSpanProcessor,
12 | )
13 |
14 | from opentelemetry.exporter.opencensus.metrics_exporter import (
15 | OpenCensusMetricsExporter,
16 | )
17 | from opentelemetry.sdk.metrics import Counter, MeterProvider
18 | from opentelemetry.sdk.metrics.export.controller import PushController
19 |
20 | from opentelemetry.instrumentation.flask import FlaskInstrumentor
21 | from flask import Flask, request
22 |
23 | import mysql.connector
24 | from opentelemetry.instrumentation.mysql import MySQLInstrumentor
25 |
26 | import os
27 |
28 |
29 | resource = Resource({"service.name": "service2"})
30 |
31 | trace.set_tracer_provider(TracerProvider(resource=resource))
32 | tracer = trace.get_tracer(__name__)
33 |
34 | OTEL_AGENT = os.getenv('OTEL_AGENT', "otel-agent")
35 |
36 | otlp_exporter = OTLPSpanExporter(endpoint=OTEL_AGENT + ":4317", insecure=True)
37 | span_processor = BatchExportSpanProcessor(otlp_exporter)
38 | trace.get_tracer_provider().add_span_processor(span_processor)
39 |
40 |
41 | metric_exporter = OpenCensusMetricsExporter(
42 | endpoint=OTEL_AGENT + ":55678",
43 | service_name="service2",
44 | )
45 |
46 | # Meter is responsible for creating and recording metrics
47 | metrics.set_meter_provider(MeterProvider(resource=resource))
48 | meter = metrics.get_meter(__name__)
49 | # controller collects metrics created from meter and exports it via the
50 | # exporter every interval
51 | controller = PushController(meter, metric_exporter, 5)
52 |
53 | # TODO: We use a different metric name here due to:
54 | # https://github.com/open-telemetry/opentelemetry-python/issues/1510
55 | requests_counter = meter.create_counter(
56 | name="requests_count_service2",
57 | description="number of requests",
58 | unit="1",
59 | value_type=int,
60 | )
61 | # Labels are used to identify key-values that are associated with a specific
62 | # metric that you want to record. These are useful for pre-aggregation and can
63 | # be used to store custom dimensions pertaining to a metric
64 | labels = {"service_id": "service2"}
65 |
66 |
67 | app = Flask(__name__)
68 |
69 | FlaskInstrumentor().instrument_app(app)
70 | MySQLInstrumentor().instrument()
71 |
72 | @app.route('/')
73 | def index():
74 | requests_counter.add(1, labels)
75 | with tracer.start_as_current_span("service2-db"):
76 | # TODO - Move this to app initialization rather than per request
77 | cnx = mysql.connector.connect(user='joe', password='password',
78 | host='db',
79 | database='service2')
80 | data = "Data {0}"
81 | cursor = cnx.cursor()
82 | cursor.execute("SELECT first_name, last_name from users")
83 | rows = ""
84 | for first_name, last_name in cursor:
85 | rows += '{0} {1} '.format(first_name, last_name)
86 | return data.format(rows), 200
87 |
88 | if __name__ == '__main__':
89 | app.run(debug=True)
90 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-multiple-process/service2/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
2 | uwsgi
3 | opentelemetry-api
4 | opentelemetry-sdk
5 | opentelemetry-instrumentation-flask
6 | opentelemetry-instrumentation-requests
7 | opentelemetry-instrumentation-mysql
8 | opentelemetry-exporter-otlp
9 | opentelemetry-exporter-opencensus
10 | grpcio
11 | opencensus-proto
12 | opentelemetry-api
13 | opentelemetry-sdk
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/Dockerfile-client:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | RUN apt-get -y update && apt-get -y install apache2-utils
3 | ADD ./make-requests.sh /make-requests.sh
4 |
5 | CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | service1:
3 | build: ./service1
4 | depends_on:
5 | - service2
6 | - otel-agent
7 | links:
8 | - db
9 | - otel-agent
10 | environment:
11 | OTEL_AGENT: otel-agent
12 | service1-2:
13 | build: ./service1
14 | depends_on:
15 | - service2
16 | - otel-agent
17 | links:
18 | - db
19 | - otel-agent
20 | environment:
21 | OTEL_AGENT: otel-agent
22 | service2:
23 | build: ./service2
24 | depends_on:
25 | - db
26 | - otel-agent
27 | environment:
28 | OTEL_AGENT: otel-agent
29 | db:
30 | image: mysql
31 | command: --default-authentication-plugin=mysql_native_password
32 | restart: always
33 | environment:
34 | MYSQL_ROOT_PASSWORD: rootpassword
35 | MYSQL_DATABASE: service2
36 | MYSQL_USER: joe
37 | MYSQL_PASSWORD: password
38 | volumes:
39 | - ./mysql-init:/docker-entrypoint-initdb.d
40 | otel-agent:
41 | image: otel/opentelemetry-collector
42 | command: ["--config=/etc/otel-collector-agent-config.yml"]
43 | volumes:
44 | - ./otel-collector-agent-config.yml:/etc/otel-collector-agent-config.yml
45 | links:
46 | - otel-collector
47 | depends_on:
48 | - otel-collector
49 | otel-collector:
50 | image: otel/opentelemetry-collector
51 | command: ["--config=/etc/otel-collector-config.yml"]
52 | volumes:
53 | - ./otel-collector-config.yml:/etc/otel-collector-config.yml
54 | ports:
55 | - "8888:8888" # Prometheus metrics exposed by the collector
56 | - "8889:8889" # Prometheus exporter metrics
57 | depends_on:
58 | - jaeger-all-in-one
59 | links:
60 | - jaeger-all-in-one
61 | jaeger-all-in-one:
62 | image: jaegertracing/all-in-one
63 | ports:
64 | - "16686:16686"
65 | - "14268:14268"
66 | - "6831:6831/udp"
67 | client:
68 | links:
69 | - service1
70 | - service1-2
71 | depends_on:
72 | - service1
73 | - service1-2
74 | build:
75 | context: ./
76 | dockerfile: Dockerfile-client
77 | prometheus-server:
78 | image: prom/prometheus:latest
79 | links:
80 | - otel-collector
81 | volumes:
82 | - ./prometheus.yml:/etc/prometheus/prometheus.yml
83 | ports:
84 | - "9090:9090"
85 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/make-requests.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | sleep 20
3 | ab -n 35 -c 1 http://service1:5000/
4 | sleep 10
5 | ab -n 20 -c 1 http://service1:5000/
6 | sleep 10
7 | ab -n 5 -c 1 http://service1-2:5000/
8 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/mysql-init/01-create-table.sql:
--------------------------------------------------------------------------------
1 | use service2;
2 |
3 | CREATE TABLE users (
4 | id INT(6) AUTO_INCREMENT PRIMARY KEY,
5 | first_name VARCHAR(30) NOT NULL,
6 | last_name VARCHAR(30) NOT NULL
7 | )
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/mysql-init/02-insert-data.sql:
--------------------------------------------------------------------------------
1 | use service2;
2 | INSERT INTO users (first_name, last_name) VALUES("joe", "cool")
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/otel-collector-agent-config.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | opencensus:
3 | otlp:
4 | protocols:
5 | grpc:
6 | processors:
7 | attributes:
8 | actions:
9 | - key: environment
10 | value: test
11 | action: insert
12 | exporters:
13 | opencensus:
14 | endpoint: "otel-collector:55678"
15 | insecure: true
16 |
17 | extensions:
18 | pprof:
19 | endpoint: :1777
20 | zpages:
21 | endpoint: :55679
22 | health_check:
23 |
24 | service:
25 | extensions: [health_check, pprof, zpages]
26 | pipelines:
27 | traces:
28 | receivers: [otlp]
29 | processors: [attributes]
30 | exporters: [opencensus]
31 | metrics:
32 | receivers: [opencensus]
33 | exporters: [opencensus]
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/otel-collector-config.yml:
--------------------------------------------------------------------------------
1 | receivers:
2 | opencensus:
3 |
4 | exporters:
5 | logging:
6 | jaeger:
7 | endpoint: jaeger-all-in-one:14250
8 | insecure: true
9 | otlp:
10 | endpoint: "api.honeycomb.io:443"
11 | headers:
12 | "x-honeycomb-team": "SDK-KEY"
13 | "x-honeycomb-dataset": "otel-demo-python"
14 | prometheus:
15 | endpoint: "0.0.0.0:8889"
16 |
17 | processors:
18 | batch:
19 | queued_retry:
20 |
21 | extensions:
22 | health_check:
23 | pprof:
24 | endpoint: :1888
25 | zpages:
26 | endpoint: :55679
27 |
28 | service:
29 | extensions: [pprof, zpages, health_check]
30 | pipelines:
31 | traces:
32 | receivers: [opencensus]
33 | processors: [batch, queued_retry]
34 | exporters: [jaeger, otlp]
35 | metrics:
36 | receivers: [opencensus]
37 | exporters: [prometheus]
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/prometheus.yml:
--------------------------------------------------------------------------------
1 | scrape_configs:
2 | - job_name: 'otel-collector'
3 | scrape_interval: 10s
4 | static_configs:
5 | - targets: ['otel-collector:8889']
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/service1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | ADD . /application
3 | WORKDIR /application
4 | RUN pip install -r requirements.txt --use-deprecated=legacy-resolver
5 | CMD ["uwsgi", "--http", ":5000", "--mount", "/myapplication=app:app", "--processes", "1"]
6 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/service1/app.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import trace
2 |
3 | from opentelemetry.sdk.trace import TracerProvider
4 | from opentelemetry.sdk.trace import sampling
5 | from opentelemetry.sdk.trace.export import (
6 | ConsoleSpanExporter,
7 | SimpleExportSpanProcessor,
8 | )
9 |
10 | from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
11 | from opentelemetry.sdk.resources import Resource
12 |
13 | from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
14 |
15 | from opentelemetry.instrumentation.flask import FlaskInstrumentor
16 | from opentelemetry.instrumentation.requests import RequestsInstrumentor
17 |
18 |
19 | from opentelemetry import metrics, trace
20 | from opentelemetry.exporter.opencensus.metrics_exporter import (
21 | OpenCensusMetricsExporter,
22 | )
23 | from opentelemetry.sdk.metrics import Counter, MeterProvider
24 | from opentelemetry.sdk.metrics.export.controller import PushController
25 |
26 | from flask import Flask, request
27 | import requests
28 | import os
29 |
30 | resource = Resource({"service.name": "service1"})
31 |
32 | trace.set_tracer_provider(TracerProvider(resource=resource))
33 | tracer = trace.get_tracer(__name__)
34 |
35 | OTEL_AGENT = os.getenv('OTEL_AGENT', "otel-agent")
36 |
37 | otlp_exporter = OTLPSpanExporter(endpoint=OTEL_AGENT + ":4317", insecure=True)
38 | span_processor = BatchExportSpanProcessor(otlp_exporter)
39 | trace.get_tracer_provider().add_span_processor(span_processor)
40 |
41 |
42 | metric_exporter = OpenCensusMetricsExporter(
43 | endpoint=OTEL_AGENT + ":55678",
44 | service_name="service1",
45 | )
46 |
47 |
48 | # Meter is responsible for creating and recording metrics
49 | metrics.set_meter_provider(MeterProvider(resource=resource))
50 | meter = metrics.get_meter(__name__)
51 | # controller collects metrics created from meter and exports it via the
52 | # exporter every interval
53 | controller = PushController(meter, metric_exporter, 5)
54 |
55 | requests_counter = meter.create_counter(
56 | name="requests_count",
57 | description="number of requests",
58 | unit="1",
59 | value_type=int,
60 | )
61 | # Labels are used to identify key-values that are associated with a specific
62 | # metric that you want to record. These are useful for pre-aggregation and can
63 | # be used to store custom dimensions pertaining to a metric
64 | labels = {"service_id": "service1"}
65 |
66 |
67 | app = Flask(__name__)
68 | FlaskInstrumentor().instrument_app(app)
69 | RequestsInstrumentor().instrument()
70 |
71 | def do_stuff():
72 | return requests.get('http://service2:5000')
73 |
74 | @app.route('/')
75 | def index():
76 | requests_counter.add(1, labels)
77 | with tracer.start_as_current_span("service2-request"):
78 | data = do_stuff()
79 | return data.text, 200
80 |
81 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/service1/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask==1.1.2
2 | uwsgi
3 | opentelemetry-instrumentation-flask
4 | opentelemetry-instrumentation-requests
5 | opentelemetry-exporter-otlp
6 | opentelemetry-exporter-opencensus
7 | grpcio
8 | opencensus-proto
9 | opentelemetry-api
10 | opentelemetry-sdk
11 |
12 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/service2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9
2 | RUN apt-get -y update && apt-get -y install libprotobuf17 python-pkg-resources python-protobuf python-six
3 | RUN pip install mysql-connector
4 |
5 | ADD . /application
6 | WORKDIR /application
7 | RUN pip install -r requirements.txt
8 | CMD ["uwsgi", "--http", ":5000", "--mount", "/myapplication=app:app", "--processes", "1"]
9 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/service2/app.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import metrics, trace
2 |
3 | from opentelemetry.exporter.otlp.trace_exporter import OTLPSpanExporter
4 | from opentelemetry.sdk.resources import Resource
5 |
6 | from opentelemetry.sdk.trace import TracerProvider
7 | from opentelemetry.sdk.trace import sampling
8 | from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
9 | from opentelemetry.sdk.trace.export import (
10 | ConsoleSpanExporter,
11 | SimpleExportSpanProcessor,
12 | )
13 |
14 | from opentelemetry.exporter.opencensus.metrics_exporter import (
15 | OpenCensusMetricsExporter,
16 | )
17 | from opentelemetry.sdk.metrics import Counter, MeterProvider
18 | from opentelemetry.sdk.metrics.export.controller import PushController
19 |
20 | from opentelemetry.instrumentation.flask import FlaskInstrumentor
21 | from flask import Flask, request
22 |
23 | import mysql.connector
24 | from opentelemetry.instrumentation.mysql import MySQLInstrumentor
25 |
26 | import os
27 |
28 |
29 | resource = Resource({"service.name": "service2"})
30 |
31 | trace.set_tracer_provider(TracerProvider(resource=resource))
32 | tracer = trace.get_tracer(__name__)
33 |
34 | OTEL_AGENT = os.getenv('OTEL_AGENT', "otel-agent")
35 |
36 | otlp_exporter = OTLPSpanExporter(endpoint=OTEL_AGENT + ":4317", insecure=True)
37 | span_processor = BatchExportSpanProcessor(otlp_exporter)
38 | trace.get_tracer_provider().add_span_processor(span_processor)
39 |
40 |
41 | metric_exporter = OpenCensusMetricsExporter(
42 | endpoint=OTEL_AGENT + ":55678",
43 | service_name="service2",
44 | )
45 |
46 | # Meter is responsible for creating and recording metrics
47 | metrics.set_meter_provider(MeterProvider(resource=resource))
48 | meter = metrics.get_meter(__name__)
49 | # controller collects metrics created from meter and exports it via the
50 | # exporter every interval
51 | controller = PushController(meter, metric_exporter, 5)
52 |
53 | # TODO: We use a different metric name here due to:
54 | # https://github.com/open-telemetry/opentelemetry-python/issues/1510
55 | requests_counter = meter.create_counter(
56 | name="requests_count_service2",
57 | description="number of requests",
58 | unit="1",
59 | value_type=int,
60 | )
61 | # Labels are used to identify key-values that are associated with a specific
62 | # metric that you want to record. These are useful for pre-aggregation and can
63 | # be used to store custom dimensions pertaining to a metric
64 | labels = {"service_id": "service2"}
65 |
66 |
67 | app = Flask(__name__)
68 |
69 | FlaskInstrumentor().instrument_app(app)
70 | MySQLInstrumentor().instrument()
71 |
72 | @app.route('/')
73 | def index():
74 | requests_counter.add(1, labels)
75 | with tracer.start_as_current_span("service2-db"):
76 | # TODO - Move this to app initialization rather than per request
77 | cnx = mysql.connector.connect(user='joe', password='password',
78 | host='db',
79 | database='service2')
80 | data = "Data {0}"
81 | cursor = cnx.cursor()
82 | cursor.execute("SELECT first_name, last_name from users")
83 | rows = ""
84 | for first_name, last_name in cursor:
85 | rows += '{0} {1} '.format(first_name, last_name)
86 | return data.format(rows), 200
87 |
88 | if __name__ == '__main__':
89 | app.run(debug=True)
90 |
--------------------------------------------------------------------------------
/open-telemetry/trace-metrics-otel-pipeline-single-process/service2/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
2 | uwsgi
3 | opentelemetry-api
4 | opentelemetry-sdk
5 | opentelemetry-instrumentation-flask
6 | opentelemetry-instrumentation-requests
7 | opentelemetry-instrumentation-mysql
8 | opentelemetry-exporter-otlp
opentelemetry-exporter-opencensus
grpcio
opencensus-proto
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/Dockerfile-client:
--------------------------------------------------------------------------------
FROM python:3.9
# apache2-utils provides `ab`, which make-requests.sh uses to generate load.
RUN apt-get -y update && apt-get -y install apache2-utils
ADD ./make-requests.sh /make-requests.sh

CMD ["/bin/sh", "/make-requests.sh"]
6 |
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | service1:
3 | build: ./service1
4 | ports:
5 | - "5000:5000"
6 | links:
7 | - service2
8 | - jaeger
9 | depends_on:
10 | - service2
11 | - jaeger
12 | service2:
13 | build: ./service2
14 | links:
15 | - db
16 | - jaeger
17 | depends_on:
18 | - db
19 | - jaeger
20 | db:
21 | image: mysql
22 | command: --default-authentication-plugin=mysql_native_password
23 | restart: always
24 | environment:
25 | MYSQL_ROOT_PASSWORD: rootpassword
26 | MYSQL_DATABASE: service2
27 | MYSQL_USER: joe
28 | MYSQL_PASSWORD: password
29 | volumes:
30 | - ./mysql-init:/docker-entrypoint-initdb.d
31 | jaeger:
32 | image: jaegertracing/all-in-one
33 | ports:
34 | - "16686:16686"
35 | - "6831:6831/udp"
36 | client:
37 | links:
38 | - service1
39 | depends_on:
40 | - service1
41 | build:
42 | context: ./
43 | dockerfile: Dockerfile-client
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/make-requests.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Drive three bursts of 100 requests (concurrency 2) at service1,
# pausing 10s and then 20s between bursts.
for pause in 10 20; do
    ab -n 100 -c 2 http://service1:5000/
    sleep "$pause"
done
ab -n 100 -c 2 http://service1:5000/
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/mysql-init/01-create-table.sql:
--------------------------------------------------------------------------------
use service2;

-- Minimal users table queried by service2's "/" endpoint.
-- The trailing semicolon is required: the mysql client running scripts from
-- /docker-entrypoint-initdb.d needs a statement terminator.
CREATE TABLE users (
    id INT(6) AUTO_INCREMENT PRIMARY KEY,
    first_name VARCHAR(30) NOT NULL,
    last_name VARCHAR(30) NOT NULL
);
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/mysql-init/02-insert-data.sql:
--------------------------------------------------------------------------------
use service2;
-- Seed one row so service2's SELECT has data to return.
-- Terminating semicolon added so the init script executes cleanly.
INSERT INTO users (first_name, last_name) VALUES("joe", "cool");
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/service1/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.9
# Copy the application source and install its Python dependencies.
ADD . /application
WORKDIR /application
RUN pip install -r requirements.txt
# Serve the Flask app via uwsgi on port 5000 with 5 worker processes.
CMD ["uwsgi", "--http", ":5000", "--mount", "/myapplication=app:app", "--enable-threads", "--processes", "5"]
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/service1/app.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import trace
2 |
3 | from opentelemetry.sdk.trace import TracerProvider
4 | from opentelemetry.sdk.trace import sampling
5 | from opentelemetry.sdk.trace.export import (
6 | ConsoleSpanExporter,
7 | SimpleExportSpanProcessor,
8 | )
9 |
10 | from opentelemetry.exporter import jaeger
11 | from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
12 |
13 | from opentelemetry.instrumentation.flask import FlaskInstrumentor
14 | from opentelemetry.instrumentation.requests import RequestsInstrumentor
15 | from flask import Flask, request
16 | import requests
17 |
# Sample every trace (ALWAYS_ON) so the demo shows complete traces in Jaeger.
trace.set_tracer_provider(TracerProvider(sampler=sampling.ALWAYS_ON))

# Export spans to the Jaeger agent; "jaeger" is the docker-compose service
# name and 6831/udp is the port it exposes (see docker-compose.yml).
jaeger_exporter = jaeger.JaegerSpanExporter(
    service_name="service1",
    agent_host_name="jaeger",
    agent_port=6831,
)
# Batch spans before export to avoid one network call per span.
trace.get_tracer_provider().add_span_processor(
    BatchExportSpanProcessor(jaeger_exporter)
)

tracer = trace.get_tracer(__name__)

app = Flask(__name__)
# Auto-create spans for incoming Flask requests and outgoing `requests` calls.
FlaskInstrumentor().instrument_app(app)
RequestsInstrumentor().instrument()
34 |
def do_stuff():
    """Call service2 over HTTP and return the `requests` Response object.

    The RequestsInstrumentor adds a client span for this call automatically.
    """
    response = requests.get('http://service2:5000')
    return response
37 |
@app.route('/')
def index():
    """Proxy a request to service2 inside an explicit tracing span."""
    with tracer.start_as_current_span("service2-request"):
        response = do_stuff()
    return response.text, 200
43 |
if __name__ == '__main__':
    # Dev-only entry point; in the container the app is served by uwsgi
    # (see the Dockerfile CMD). debug=True must not be used in production.
    app.run(debug=True)
46 |
47 |
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/service1/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
2 | uwsgi
3 | opentelemetry-api
4 | opentelemetry-sdk
5 | opentelemetry-instrumentation-flask
6 | opentelemetry-instrumentation-requests
7 | opentelemetry-exporter-jaeger
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/service2/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.9
# NOTE(review): these are Python 2-era Debian package names
# (python-pkg-resources, python-protobuf, python-six); they may no longer
# exist in the python:3.9 base image's apt repos — confirm the image builds.
RUN apt-get -y update && apt-get -y install libprotobuf17 python-pkg-resources python-protobuf python-six
# The MySQL driver is installed separately from requirements.txt.
RUN pip install mysql-connector

ADD . /application
WORKDIR /application
RUN pip install -r requirements.txt
# Serve the Flask app via uwsgi on port 5000 with 5 worker processes.
CMD ["uwsgi", "--http", ":5000", "--mount", "/myapplication=app:app", "--enable-threads", "--processes", "5"]
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/service2/app.py:
--------------------------------------------------------------------------------
1 | from opentelemetry import trace
2 |
3 | from opentelemetry.exporter import jaeger
4 | from opentelemetry.sdk.trace import TracerProvider
5 | from opentelemetry.sdk.trace import sampling
6 | from opentelemetry.sdk.trace.export import BatchExportSpanProcessor
7 | from opentelemetry.sdk.trace.export import (
8 | ConsoleSpanExporter,
9 | SimpleExportSpanProcessor,
10 | )
11 | from opentelemetry.instrumentation.flask import FlaskInstrumentor
12 | from flask import Flask, request
13 |
14 | import mysql.connector
15 | from opentelemetry.instrumentation.mysql import MySQLInstrumentor
16 |
17 |
# Sample every trace (ALWAYS_ON) so the demo shows complete traces in Jaeger.
trace.set_tracer_provider(TracerProvider(sampler=sampling.ALWAYS_ON))
# Left in for debugging: uncomment to also print spans to the console.
#trace.get_tracer_provider().add_span_processor(
#    SimpleExportSpanProcessor(ConsoleSpanExporter())
#)
# Export spans to the Jaeger agent; "jaeger" is the docker-compose service
# name and 6831/udp is the port it exposes (see docker-compose.yml).
jaeger_exporter = jaeger.JaegerSpanExporter(
    service_name="service2",
    agent_host_name="jaeger",
    agent_port=6831,
)

# Batch spans before export to avoid one network call per span.
trace.get_tracer_provider().add_span_processor(
    BatchExportSpanProcessor(jaeger_exporter)
)

tracer = trace.get_tracer(__name__)

app = Flask(__name__)

# Auto-create spans for incoming Flask requests and outgoing MySQL queries.
FlaskInstrumentor().instrument_app(app)
MySQLInstrumentor().instrument()
38 |
@app.route('/')
def index():
    """Query the users table inside a tracing span and return rows as text."""
    with tracer.start_as_current_span("service2-db"):
        # TODO - Move this to app initialization rather than per request
        cnx = mysql.connector.connect(user='joe', password='password',
                                      host='db',
                                      database='service2')
        try:
            cursor = cnx.cursor()
            cursor.execute("SELECT first_name, last_name from users")
            rows = ""
            for first_name, last_name in cursor:
                rows += '{0} {1} '.format(first_name, last_name)
            cursor.close()
        finally:
            # Close the connection so it is not leaked on every request.
            cnx.close()
    # The "{0}" placeholder is required so .format(rows) actually injects the
    # query result (the previous literal had no placeholder, so rows were
    # silently dropped).
    data = "Data: {0}"
    return data.format(rows), 200
53 |
if __name__ == '__main__':
    # Dev-only entry point; in the container the app is served by uwsgi
    # (see the Dockerfile CMD). debug=True must not be used in production.
    app.run(debug=True)
56 |
--------------------------------------------------------------------------------
/open-telemetry/tracing-jaeger/service2/requirements.txt:
--------------------------------------------------------------------------------
1 | Flask
2 | uwsgi
3 | opentelemetry-api
4 | opentelemetry-sdk
5 | opentelemetry-instrumentation-flask
6 | opentelemetry-instrumentation-requests
7 | opentelemetry-instrumentation-mysql
8 | opentelemetry-exporter-jaeger
--------------------------------------------------------------------------------
/opensource-com-article/README.md:
--------------------------------------------------------------------------------
1 | Article published at https://opensource.com/article/18/4/metrics-monitoring-and-python
2 |
--------------------------------------------------------------------------------
/opensource-com-article/counter-graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/opensource-com-article/counter-graph.png
--------------------------------------------------------------------------------
/opensource-com-article/cumulative-histogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/opensource-com-article/cumulative-histogram.png
--------------------------------------------------------------------------------
/opensource-com-article/gauge-graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/opensource-com-article/gauge-graph.png
--------------------------------------------------------------------------------
/opensource-com-article/histogram-graph.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/opensource-com-article/histogram-graph.png
--------------------------------------------------------------------------------
/opensource-com-article/histogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/opensource-com-article/histogram.png
--------------------------------------------------------------------------------
/opensource-com-article/pull_push_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/opensource-com-article/pull_push_model.png
--------------------------------------------------------------------------------
/scripts/Figure_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/Figure_1.png
--------------------------------------------------------------------------------
/scripts/counter_demo.py:
--------------------------------------------------------------------------------
"""Plot a monotonically increasing series to illustrate a Counter metric."""
import matplotlib.pyplot as plt

# A counter only ever increases (or resets to zero); this series only grows.
data = [5, 10, 20, 25]

# xkcd-style rendering for the talk slides.
plt.xkcd()

plt.plot(data)
plt.xlabel('Time')
plt.ylabel('Metric')
plt.title('Counter')

plt.grid()
plt.show()
--------------------------------------------------------------------------------
/scripts/cumulative_histogram.py:
--------------------------------------------------------------------------------
1 | import random
2 | import matplotlib.pyplot as plt
3 |
def generate_random_numbers(N):
    """Return a list of N floats drawn uniformly from [15, 60]."""
    return [random.uniform(15, 60) for _ in range(N)]
9 |
10 |
if __name__ == '__main__':
    # Plot the cumulative distribution of 100 uniform samples in xkcd style.
    data = generate_random_numbers(100)
    plt.xkcd()
    plt.xlabel('Age Group')
    plt.hist(data, cumulative=True)
    plt.title('Cumulative Histogram')

    plt.grid()
    plt.show()
20 |
--------------------------------------------------------------------------------
/scripts/gauge_demo.py:
--------------------------------------------------------------------------------
"""Plot a fluctuating series to illustrate a Gauge metric."""
import matplotlib.pyplot as plt

# A gauge can go up or down; this series fluctuates.
data = [5, 3, 4, 6, 7, 5, 10]

# xkcd-style rendering for the talk slides.
plt.xkcd()

plt.plot(data)
plt.xlabel('Time')
plt.ylabel('Metric')
plt.title('Gauge')

plt.grid()
plt.show()
15 |
--------------------------------------------------------------------------------
/scripts/histogram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/histogram.png
--------------------------------------------------------------------------------
/scripts/histogram.py:
--------------------------------------------------------------------------------
1 | import random
2 | import matplotlib.pyplot as plt
3 |
def generate_random_numbers(N):
    """Return a list of N floats drawn uniformly from [15, 60]."""
    return [random.uniform(15, 60) for _ in range(N)]
9 |
10 |
if __name__ == '__main__':
    # Plot the distribution of 100 uniform samples in xkcd style.
    data = generate_random_numbers(100)

    plt.xkcd()

    plt.hist(data)
    plt.xlabel('Age Group')
    plt.title('Histogram')

    plt.grid()
    plt.show()
22 |
--------------------------------------------------------------------------------
/scripts/marks.txt:
--------------------------------------------------------------------------------
1 | 3
2 | 2
3 | 10
4 |
--------------------------------------------------------------------------------
/scripts/metrics_as_dataframes.py:
--------------------------------------------------------------------------------
"""Build a DataFrame of synthetic web-app metrics and explore it with pandas:
grouped aggregates, a rolling average, and a latency histogram."""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import random
import time

# seed values
app_prefix = ['webapp']
node_ids = ['10.1.1.30', '10.3.2.30']
http_endpoints = ['test1', 'test2']
http_methods = ['POST', 'GET']
http_statuses = ['200', '400', '500']
# FIXME: the following leads to negative timestamps when plotted in
# matplotlib
# timestamps = pd.date_range('25/3/2018', periods=10000, freq='10S')
# timestamps_unix = timestamps.view('int') // pd.Timedelta(1, unit='s')

NUM_OBSERVATIONS = 10000
timestamps_unix = [time.time() for _ in range(NUM_OBSERVATIONS)]

# One synthetic observation per row; each dimension is sampled uniformly.
metrics = pd.DataFrame({
    'app_prefix': [random.choice(app_prefix) for _ in range(NUM_OBSERVATIONS)],
    'node_id': [random.choice(node_ids) for _ in range(NUM_OBSERVATIONS)],
    'http_endpoint': [random.choice(http_endpoints) for _ in range(NUM_OBSERVATIONS)],
    'http_method': [random.choice(http_methods) for _ in range(NUM_OBSERVATIONS)],
    'http_status': [random.choice(http_statuses) for _ in range(NUM_OBSERVATIONS)],
    'latency': np.random.normal(5000, 1000, NUM_OBSERVATIONS),
}, index=timestamps_unix)


print(metrics)

print('\n\nMean Latency grouped by node and HTTP status\n\n')
print(metrics.groupby(['node_id', 'http_status']).latency.aggregate(np.mean))

print('\n\n99.999 percentile Latency grouped by node and HTTP status\n\n')
print(metrics.groupby(['node_id', 'http_status']).latency.aggregate(np.percentile, 99.999))

print('\n\n99.999 percentile Latency grouped by HTTP endpoint\n\n')
print(metrics.groupby(['http_endpoint', 'http_method']).latency.aggregate(np.percentile, 99))

plt.xkcd()
# Rolling average over the most recent 100 observations.
smoothed = metrics['latency'].rolling(window=100, center=False, min_periods=1).mean()
print(smoothed)
smoothed.plot(title='Rolling average over 100 observations', use_index=True)

# Histogram plot of latency
metrics.plot.hist(y='latency')
plt.show()
51 |
--------------------------------------------------------------------------------
/scripts/percentile_score.py:
--------------------------------------------------------------------------------
1 | '''
percentile_score.py
3 |
4 | Calculate the number from a list of numbers which corresponds
5 | to a specific percentile
6 |
7 | This implements the "Microsoft Excel Method":
8 | https://en.wikipedia.org/wiki/Percentile#Microsoft_Excel_method
9 |
10 | '''
11 |
def find_percentile_score(data, percentile):
    """Return the value at `percentile` in `data` (Microsoft Excel method).

    Returns None if `percentile` is outside [0, 100] or `data` is empty.
    The caller's list is not modified.
    """
    if percentile < 0 or percentile > 100:
        return None
    if not data:
        # Previously this crashed with IndexError on empty input.
        return None
    data = sorted(data)  # sort a copy instead of mutating the caller's list
    if percentile == 0:
        return data[0]
    if percentile == 100:
        return data[-1]
    n = len(data)
    # Excel rank: 1-based position with linear interpolation between
    # neighbouring order statistics.
    rank = (percentile/100)*(n-1) + 1
    k = int(rank)
    d = rank - k

    real_idx_1 = k-1
    real_idx_2 = k
    if real_idx_2 >= n:
        # Single-element input (or rank landing exactly on the last element):
        # there is no right neighbour to interpolate with. Previously this
        # raised IndexError for len(data) == 1.
        return data[real_idx_1]

    return data[real_idx_1] + d*(data[real_idx_2]-data[real_idx_1])
29 |
def read_data(filename):
    """Read one number per line from `filename`; return them as floats."""
    with open(filename) as f:
        return [float(line) for line in f]
36 |
if __name__ == '__main__':
    percentile = float(input('Enter the percentile score you want to calculate: '))
    data = read_data('marks.txt')
    percentile_score = find_percentile_score(data, percentile)
    # Compare against None explicitly: a legitimate score of 0 is falsy and
    # would previously have been reported as "not found".
    if percentile_score is not None:
        print('The score at {0} percentile: {1}'.format(percentile, percentile_score))
    else:
        print('Could not find the score corresponding to {0} percentile'.format(percentile))
45 |
--------------------------------------------------------------------------------
/scripts/pull_model.monopic:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/pull_model.monopic
--------------------------------------------------------------------------------
/scripts/pull_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/pull_model.png
--------------------------------------------------------------------------------
/scripts/pull_model_workers.monopic:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/pull_model_workers.monopic
--------------------------------------------------------------------------------
/scripts/pull_model_workers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/pull_model_workers.png
--------------------------------------------------------------------------------
/scripts/pull_push_model.monopic:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/pull_push_model.monopic
--------------------------------------------------------------------------------
/scripts/pull_push_model.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/pull_push_model.png
--------------------------------------------------------------------------------
/scripts/push_model.monopic:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/push_model.monopic
--------------------------------------------------------------------------------
/scripts/statsd_prometheus copy.monopic:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/statsd_prometheus copy.monopic
--------------------------------------------------------------------------------
/scripts/statsd_prometheus.monopic:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/statsd_prometheus.monopic
--------------------------------------------------------------------------------
/scripts/statsd_prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/scripts/statsd_prometheus.png
--------------------------------------------------------------------------------
/slides/Django-monitoring-with-prometheus.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/slides/Django-monitoring-with-prometheus.pdf
--------------------------------------------------------------------------------
/slides/pycon-2018.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/slides/pycon-2018.pdf
--------------------------------------------------------------------------------
/slides/sypy.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/amitsaha/python-monitoring-talk/b8d6ae8760aac6aa24157e2a2ee7b772aa65e7fe/slides/sypy.pdf
--------------------------------------------------------------------------------