├── .gitignore ├── LICENSE ├── README.md ├── aiohttp_app_prometheus ├── Dockerfile.py3 ├── README.md ├── config │ └── prometheus │ │ └── prometheus.yml ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── app.py │ ├── helpers │ └── middleware.py │ └── requirements.txt ├── django_app_gunicorn_statsd_prometheus ├── Dockerfile.py3 ├── README.md ├── config │ ├── prometheus │ │ └── prometheus.yml │ └── statsd-mapping.conf ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── db.sqlite3 │ ├── demo │ ├── __init__.py │ ├── settings.py │ ├── urls.py │ └── wsgi.py │ ├── manage.py │ └── requirements.txt ├── django_app_statsd_prometheus ├── Dockerfile.py3 ├── README.md ├── config │ └── prometheus │ │ └── prometheus.yml ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── db.sqlite3 │ ├── demo │ ├── __init__.py │ ├── metrics_middleware.py │ ├── settings.py │ ├── urls.py │ └── wsgi.py │ ├── manage.py │ └── requirements.txt ├── flask_app_prometheus ├── Dockerfile.py2 ├── Dockerfile.py3 ├── README.md ├── config │ └── prometheus │ │ └── prometheus.yml ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── flask_app.py │ ├── helpers │ └── middleware.py │ └── requirements.txt ├── flask_app_prometheus_multiprocessing ├── Dockerfile.py2 ├── Dockerfile.py3 ├── README.md ├── config │ └── prometheus │ │ └── prometheus.yml ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── flask_app.py │ ├── helpers │ ├── __init__.py │ └── middleware.py │ └── requirements.txt ├── flask_app_prometheus_worker_id ├── Dockerfile.py2 ├── Dockerfile.py3 ├── README.md ├── config │ └── prometheus │ │ └── prometheus.yml ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── flask_app.py │ ├── helpers │ └── middleware.py │ └── requirements.txt ├── flask_app_statsd_prometheus ├── Dockerfile.py2 ├── Dockerfile.py3 ├── README.md ├── config │ └── prometheus │ │ └── prometheus.yml ├── docker-compose-infra.yml ├── docker-compose.yml └── src │ ├── 
flask_app.py │ ├── helpers │ └── middleware.py │ └── requirements.txt └── flask_app_statsd_prometheus_kubernetes ├── Dockerfile.prom ├── Dockerfile.py3 ├── README.md ├── k8s_application.yaml ├── k8s_infra.yaml ├── prometheus.yml └── src ├── flask_app.py ├── helpers └── middleware.py └── requirements.txt /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | local_settings.py 55 | 56 | # Flask stuff: 57 | instance/ 58 | .webassets-cache 59 | 60 | # Scrapy stuff: 61 | .scrapy 62 | 63 | # Sphinx documentation 64 | docs/_build/ 65 | 66 | # PyBuilder 67 | target/ 68 | 69 | # IPython Notebook 70 | .ipynb_checkpoints 71 | 72 | # pyenv 73 | .python-version 74 | 75 | # celery beat schedule file 76 | celerybeat-schedule 77 | 78 | # dotenv 79 | .env 80 | 81 | # virtualenv 82 | venv/ 83 | ENV/ 84 | 85 | # Spyder project settings 86 | .spyderproject 87 | 88 | # Rope project settings 89 | .ropeproject 90 | 91 | # Misc 92 | .DS_Store 93 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Amit Saha 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Python + Prometheus Demo 2 | 3 | ## Demos based on codeship blog posts 4 | 5 | The following two applications are discussed in the [first blog post](https://t.co/7mUox6RZas): 6 | 7 | ### [flask_app_prometheus](https://github.com/amitsaha/python-prometheus-demo/tree/master/flask_app_prometheus) 8 | 9 | A Flask application using the native Prometheus Python client to expose metrics via the `/metrics` endpoint 10 | 11 | ### [flask_app_statsd_prometheus](https://github.com/amitsaha/python-prometheus-demo/tree/master/flask_app_statsd_prometheus) 12 | 13 | A Flask application which pushes the metrics to a `statsd` bridge which converts `DogStatsd` metrics to `Prometheus` compatible metrics. 14 | 15 | The [second blog post](https://t.co/AmQn2rxetI) refers to the next application: 16 | 17 | ### [aiohttp_app_prometheus](https://github.com/amitsaha/python-prometheus-demo/tree/master/aiohttp_app_prometheus) 18 | 19 | An aiohttp application with Prometheus integration. 20 | 21 | ## Django web application + statsd -> Prometheus 22 | 23 | ### [django_app_statsd_prometheus](./django_app_statsd_prometheus) 24 | 25 | This demo demonstrates how we can push HTTP metrics from a Django application into the statsd exporter, 26 | which is then scraped by Prometheus. 27 | 28 | ### [django_app_gunicorn_statsd_prometheus](./django_app_gunicorn_statsd_prometheus) 29 | 30 | This demo demonstrates how we can push statsd metrics from gunicorn running a Django application. 31 | I learned about this approach from this [blog post](https://medium.com/@damianmyerscough/monitoring-gunicorn-with-prometheus-789954150069).
32 | 33 | 34 | ## Attempts to get native prometheus export working 35 | 36 | See [blog post](http://echorand.me/your-options-for-monitoring-multi-process-python-applications-with-prometheus.html) 37 | 38 | ### [flask_app_prometheus_worker_id](https://github.com/amitsaha/python-prometheus-demo/tree/master/flask_app_prometheus_worker_id) 39 | 40 | ### [flask_app_prometheus_multiprocessing](https://github.com/amitsaha/python-prometheus-demo/tree/master/flask_app_prometheus_multiprocessing) 41 | 42 | 43 | -------------------------------------------------------------------------------- /aiohttp_app_prometheus/Dockerfile.py3: -------------------------------------------------------------------------------- 1 | FROM python:3.6.1-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 8080 13 | VOLUME /application 14 | CMD python app.py 15 | -------------------------------------------------------------------------------- /aiohttp_app_prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Example `aiohttp` application 2 | 3 | See ``src`` for the application code. 4 | 5 | ## Building Docker image 6 | 7 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 8 | and expects the application source code to be volume mounted at `/application` 9 | when run: 10 | 11 | ``` 12 | FROM python:3.6.1-alpine 13 | ADD . 
/application 14 | WORKDIR /application 15 | RUN set -e; \ 16 | apk add --no-cache --virtual .build-deps \ 17 | gcc \ 18 | libc-dev \ 19 | linux-headers \ 20 | ; \ 21 | pip install -r src/requirements.txt; \ 22 | apk del .build-deps; 23 | EXPOSE 5000 24 | VOLUME /application 25 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 26 | ``` 27 | 28 | The last statement shows how we are running the application via `uwsgi` with 5 29 | worker processes. 30 | 31 | To build the image: 32 | 33 | ``` 34 | $ docker build -t amitsaha/aiohttp_app1 -f Dockerfile.py3 . 35 | ``` 36 | 37 | ## Running the application 38 | 39 | We can just run the web application as follows: 40 | 41 | ``` 42 | $ docker run -ti -p 8080:8080 -v `pwd`/src:/application amitsaha/aiohttp_app1 43 | ``` 44 | 45 | ## Bringing up the web application, along with prometheus 46 | 47 | The [docker-compse.yml](docker-compose.yml) brings up the `webapp` service which is our web application 48 | using the image `amitsaha/flask_app` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 49 | file brings up the `prometheus` service and also starts the `grafana` service which 50 | is available on port 3000. The config directory contains a `prometheus.yml` file 51 | which sets up the targets for prometheus to scrape. The scrape configuration 52 | looks as follows: 53 | 54 | ``` 55 | # A scrape configuration containing exactly one endpoint to scrape: 56 | # Here it's Prometheus itself. 57 | scrape_configs: 58 | # The job name is added as a label `job=` to any timeseries scraped from this config. 59 | - job_name: 'prometheus' 60 | 61 | # Override the global default and scrape targets from this job every 5 seconds. 62 | scrape_interval: 5s 63 | 64 | # metrics_path defaults to '/metrics' 65 | # scheme defaults to 'http'. 
66 | 67 | static_configs: 68 | - targets: ['localhost:9090'] 69 | - job_name: 'webapp' 70 | 71 | # Override the global default and scrape targets from this job every 5 seconds. 72 | scrape_interval: 5s 73 | 74 | # metrics_path defaults to '/metrics' 75 | # scheme defaults to 'http'. 76 | static_configs: 77 | - targets: ['webapp:8080'] 78 | ``` 79 | 80 | Prometheus scrapes itself, which is the first target above. The second target 81 | is the our web application on port 5000. 82 | Since these services are running via `docker-compose`, `webapp` automatically resolves to the IP of the webapp container. 83 | 84 | To bring up all the services: 85 | 86 | ``` 87 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 88 | ``` 89 | 90 | -------------------------------------------------------------------------------- /aiohttp_app_prometheus/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 
# aiohttp_app_prometheus/src/app.py
from aiohttp import web
from helpers.middleware import error_middleware, setup_metrics


async def test(request):
    """Healthy demo endpoint: always answers 200 with plain text."""
    return web.Response(text='test')


async def test1(request):
    """Deliberately failing endpoint used to exercise the error middleware."""
    1/0


if __name__ == '__main__':
    app = web.Application(middlewares=[error_middleware])
    setup_metrics(app, "webapp_1")
    app.router.add_get('/test', test)
    app.router.add_get('/test1', test1)
    web.run_app(app, port=8080)


# aiohttp_app_prometheus/src/helpers/middleware.py
from prometheus_client import Counter, Gauge, Histogram, CONTENT_TYPE_LATEST
import prometheus_client
import time

from aiohttp import web


def prom_middleware(app_name):
    """Old-style aiohttp middleware factory recording per-request metrics.

    Rewritten with native ``async def``/``await``: the original used
    ``@asyncio.coroutine`` + ``yield from``, which was removed in
    Python 3.11 and would crash at import time there.
    """
    async def factory(app, handler):
        async def middleware_handler(request):
            start_time = time.time()
            request.app['REQUEST_IN_PROGRESS'].labels(
                app_name, request.path, request.method).inc()
            try:
                response = await handler(request)
                request.app['REQUEST_COUNT'].labels(
                    app_name, request.method, request.path, response.status).inc()
                return response
            except web.HTTPException as ex:
                # aiohttp reports 404s/redirects as exceptions; count their
                # status before re-raising so they are not invisible.
                request.app['REQUEST_COUNT'].labels(
                    app_name, request.method, request.path, ex.status).inc()
                raise
            finally:
                # Always observe latency and decrement the in-progress gauge,
                # even when the handler raised. The original only did this on
                # the success path, so any exception leaked the gauge upward
                # permanently and dropped the latency sample.
                resp_time = time.time() - start_time
                request.app['REQUEST_LATENCY'].labels(
                    app_name, request.path).observe(resp_time)
                request.app['REQUEST_IN_PROGRESS'].labels(
                    app_name, request.path, request.method).dec()
        return middleware_handler
    return factory


async def metrics(request):
    """Serve the process metrics in the Prometheus text exposition format."""
    resp = web.Response(body=prometheus_client.generate_latest())
    resp.content_type = CONTENT_TYPE_LATEST
    return resp


def setup_metrics(app, app_name):
    """Create the request metrics, install the middleware and /metrics route.

    Metric names and label sets are unchanged from the original so existing
    dashboards and the prometheus.yml scrape config keep working.
    """
    app['REQUEST_COUNT'] = Counter(
        'requests_total', 'Total Request Count',
        ['app_name', 'method', 'endpoint', 'http_status']
    )
    app['REQUEST_LATENCY'] = Histogram(
        'request_latency_seconds', 'Request latency',
        ['app_name', 'endpoint']
    )

    app['REQUEST_IN_PROGRESS'] = Gauge(
        'requests_in_progress_total', 'Requests in progress',
        ['app_name', 'endpoint', 'method']
    )

    # Insert at index 0 so the metrics middleware is outermost and measures
    # every request, including ones answered by error_middleware.
    app.middlewares.insert(0, prom_middleware(app_name))
    app.router.add_get("/metrics", metrics)


async def error_middleware(app, handler):
    """Old-style middleware that converts exceptions into plain responses.

    ``web.HTTPException`` keeps its own status code; anything else becomes
    a 500. (Demo only: echoing ``str(ex)`` to the client would leak
    internals in production.)
    """
    async def middleware_handler(request):
        try:
            return await handler(request)
        except web.HTTPException as ex:
            return web.Response(body=str(ex), status=ex.status)
        except Exception as ex:
            return web.Response(body=str(ex), status=500)

    return middleware_handler
/application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 8000 13 | WORKDIR /application 14 | VOLUME /application 15 | CMD gunicorn --statsd-host=statsd:9125 --statsd-prefix=django-demo --bind 0.0.0.0:8000 demo.wsgi 16 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Example Django application 2 | 3 | See `src` for the application code and top level README for the description of this repo from a functionality 4 | point of view. 5 | 6 | 7 | ## Building Docker image 8 | 9 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 10 | and expects the application source code to be volume mounted at `/application` 11 | when run: 12 | 13 | ``` 14 | FROM python:3.7-alpine 15 | ADD . /application 16 | WORKDIR /application 17 | RUN set -e; \ 18 | apk add --no-cache --virtual .build-deps \ 19 | gcc \ 20 | libc-dev \ 21 | linux-headers \ 22 | ; \ 23 | pip install -r src/requirements.txt; \ 24 | apk del .build-deps; 25 | EXPOSE 8000 26 | VOLUME /application 27 | 28 | CMD gunicorn --bind 0.0.0.0:8000 demo.wsgi 29 | ``` 30 | 31 | The last statement shows how we are running the application via `gunicorn` with 5 32 | worker processes. 33 | 34 | To build the image: 35 | 36 | ``` 37 | $ docker build -t amitsaha/django_app_1 -f Dockerfile.py3 . 
38 | ``` 39 | 40 | ## Running the application 41 | 42 | We can just run the web application as follows: 43 | 44 | ``` 45 | $ docker run -ti -p 8000:8000 -v `pwd`/src:/application amitsaha/django_app_1 46 | ``` 47 | 48 | ## Bringing up the web application, along with prometheus 49 | 50 | The [docker-compse.yml](docker-compose.yml) brings up the `webapp` service which is our web application 51 | using the image `amitsaha/flask_app_1` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 52 | file brings up the `statsd` service which is the statsd exporter, `prometheus` service and also starts the `grafana` service which 53 | is available on port 3000. The config directory contains a `prometheus.yml` file 54 | which sets up the targets for prometheus to scrape. The scrape configuration 55 | looks as follows: 56 | 57 | ``` 58 | # A scrape configuration containing exactly one endpoint to scrape: 59 | # Here it's Prometheus itself. 60 | scrape_configs: 61 | # The job name is added as a label `job=` to any timeseries scraped from this config. 62 | - job_name: 'prometheus' 63 | 64 | # Override the global default and scrape targets from this job every 5 seconds. 65 | scrape_interval: 5s 66 | 67 | # metrics_path defaults to '/metrics' 68 | # scheme defaults to 'http'. 69 | 70 | static_configs: 71 | - targets: ['localhost:9090'] 72 | - job_name: 'webapp' 73 | 74 | # Override the global default and scrape targets from this job every 5 seconds. 75 | scrape_interval: 5s 76 | 77 | # metrics_path defaults to '/metrics' 78 | # scheme defaults to 'http'. 79 | static_configs: 80 | - targets: ['statsd:9102'] 81 | 82 | ``` 83 | 84 | Prometheus scrapes itself, which is the first target above. The second target 85 | is the statsd exporter on port 9102. 86 | 87 | Since these services are running via `docker-compose`, `statsd` automatically resolves to the IP of the statsd exporter container. 
88 | 89 | To bring up all the services: 90 | 91 | ``` 92 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 93 | ``` 94 | 95 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 
33 | static_configs: 34 | - targets: ['statsd:9102'] 35 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/config/statsd-mapping.conf: -------------------------------------------------------------------------------- 1 | mappings: 2 | - match: (.*)\.gunicorn\.request\.status\.(.*) 3 | match_type: regex 4 | help: "http response code" 5 | name: "http_response_code" 6 | labels: 7 | status: "$2" 8 | application: "$1" 9 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/docker-compose-infra.yml: -------------------------------------------------------------------------------- 1 | # Based off https://github.com/vegasbrianc/prometheus 2 | version: '2' 3 | 4 | volumes: 5 | prometheus_data: {} 6 | grafana_data: {} 7 | 8 | services: 9 | stastd: 10 | image: prom/statsd-exporter 11 | container_name: statsd 12 | volumes: 13 | - "./config/statsd-mapping.conf:/statsd/statsd-mapping.conf:Z" 14 | command: "--statsd.mapping-config=/statsd/statsd-mapping.conf" 15 | expose: 16 | - 9125 17 | - 9102 18 | prometheus: 19 | image: prom/prometheus 20 | container_name: prometheus 21 | volumes: 22 | - "./config/prometheus/:/etc/prometheus/:Z" 23 | - prometheus_data:/prometheus 24 | command: 25 | - '--config.file=/etc/prometheus/prometheus.yml' 26 | expose: 27 | - 9090 28 | ports: 29 | - 9090:9090 30 | grafana: 31 | image: grafana/grafana 32 | depends_on: 33 | - prometheus 34 | ports: 35 | - 3000:3000 36 | volumes: 37 | - grafana_data:/var/lib/grafana 38 | environment: 39 | - GF_SECURITY_ADMIN_PASSWORD=foobar 40 | - GF_USERS_ALLOW_SIGN_UP=false 41 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | webapp: 5 | image: amitsaha/django_app_1 6 | 
container_name: webapp 7 | expose: 8 | - 8000 9 | ports: 10 | - 8000:8000 11 | volumes: 12 | - ./src:/application:z 13 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/db.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitsaha/python-prometheus-demo/f758c5807568b6c5383b9b86f3997d34f53e14bd/django_app_gunicorn_statsd_prometheus/src/db.sqlite3 -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/demo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitsaha/python-prometheus-demo/f758c5807568b6c5383b9b86f3997d34f53e14bd/django_app_gunicorn_statsd_prometheus/src/demo/__init__.py -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/demo/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for demo project. 3 | 4 | Generated by 'django-admin startproject' using Django 2.2.1. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.2/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/2.2/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 
23 | SECRET_KEY = 'yff70et1z#a(m1j2mul#bsft2r)#m6p26*wj0j4wedptwu7)k_' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = [] 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | 'django.contrib.admin', 35 | 'django.contrib.auth', 36 | 'django.contrib.contenttypes', 37 | 'django.contrib.sessions', 38 | 'django.contrib.messages', 39 | 'django.contrib.staticfiles', 40 | ] 41 | 42 | MIDDLEWARE = [ 43 | 'django.middleware.security.SecurityMiddleware', 44 | 'django.contrib.sessions.middleware.SessionMiddleware', 45 | 'django.middleware.common.CommonMiddleware', 46 | 'django.middleware.csrf.CsrfViewMiddleware', 47 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 48 | 'django.contrib.messages.middleware.MessageMiddleware', 49 | 'django.middleware.clickjacking.XFrameOptionsMiddleware', 50 | ] 51 | 52 | ROOT_URLCONF = 'demo.urls' 53 | 54 | TEMPLATES = [ 55 | { 56 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 57 | 'DIRS': [], 58 | 'APP_DIRS': True, 59 | 'OPTIONS': { 60 | 'context_processors': [ 61 | 'django.template.context_processors.debug', 62 | 'django.template.context_processors.request', 63 | 'django.contrib.auth.context_processors.auth', 64 | 'django.contrib.messages.context_processors.messages', 65 | ], 66 | }, 67 | }, 68 | ] 69 | 70 | WSGI_APPLICATION = 'demo.wsgi.application' 71 | 72 | 73 | # Database 74 | # https://docs.djangoproject.com/en/2.2/ref/settings/#databases 75 | 76 | DATABASES = { 77 | 'default': { 78 | 'ENGINE': 'django.db.backends.sqlite3', 79 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 80 | } 81 | } 82 | 83 | 84 | # Password validation 85 | # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators 86 | 87 | AUTH_PASSWORD_VALIDATORS = [ 88 | { 89 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 90 | }, 91 | { 92 | 'NAME': 
'django.contrib.auth.password_validation.MinimumLengthValidator', 93 | }, 94 | { 95 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 96 | }, 97 | { 98 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 99 | }, 100 | ] 101 | 102 | 103 | # Internationalization 104 | # https://docs.djangoproject.com/en/2.2/topics/i18n/ 105 | 106 | LANGUAGE_CODE = 'en-us' 107 | 108 | TIME_ZONE = 'UTC' 109 | 110 | USE_I18N = True 111 | 112 | USE_L10N = True 113 | 114 | USE_TZ = True 115 | 116 | 117 | # Static files (CSS, JavaScript, Images) 118 | # https://docs.djangoproject.com/en/2.2/howto/static-files/ 119 | 120 | STATIC_URL = '/static/' 121 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/demo/urls.py: -------------------------------------------------------------------------------- 1 | """demo URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/2.2/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.urls import path 18 | 19 | urlpatterns = [ 20 | path('admin/', admin.site.urls), 21 | ] 22 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/demo/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for demo project. 
3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 16 | ) from exc 17 | execute_from_command_line(sys.argv) 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /django_app_gunicorn_statsd_prometheus/src/requirements.txt: -------------------------------------------------------------------------------- 1 | django 2 | gunicorn 3 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/Dockerfile.py3: -------------------------------------------------------------------------------- 1 | FROM python:3.7-alpine 2 | ADD . 
/application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 8000 13 | VOLUME /application 14 | 15 | CMD gunicorn --workers 5 --bind 0.0.0.0:8000 demo.wsgi 16 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Example Django application 2 | 3 | See `src` for the application code and top level README for the description of this repo from a functionality 4 | point of view. 5 | 6 | 7 | ## Building Docker image 8 | 9 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 10 | and expects the application source code to be volume mounted at `/application` 11 | when run: 12 | 13 | ``` 14 | FROM python:3.7-alpine 15 | ADD . /application 16 | WORKDIR /application 17 | RUN set -e; \ 18 | apk add --no-cache --virtual .build-deps \ 19 | gcc \ 20 | libc-dev \ 21 | linux-headers \ 22 | ; \ 23 | pip install -r src/requirements.txt; \ 24 | apk del .build-deps; 25 | EXPOSE 8000 26 | VOLUME /application 27 | 28 | CMD gunicorn --bind 0.0.0.0:8000 demo.wsgi 29 | ``` 30 | 31 | The last statement shows how we are running the application via `gunicorn` with 5 32 | worker processes. 33 | 34 | To build the image: 35 | 36 | ``` 37 | $ docker build -t amitsaha/django_app_2 -f Dockerfile.py3 . 
38 | ``` 39 | 40 | ## Running the application 41 | 42 | We can just run the web application as follows: 43 | 44 | ``` 45 | $ docker run -ti -p 8000:8000 -v `pwd`/src:/application amitsaha/django_app_2 46 | ``` 47 | 48 | ## Bringing up the web application, along with prometheus 49 | 50 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp` service which is our web application 51 | using the image `amitsaha/django_app_2` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 52 | file brings up the `statsd` service which is the statsd exporter, `prometheus` service and also starts the `grafana` service which 53 | is available on port 3000. The config directory contains a `prometheus.yml` file 54 | which sets up the targets for prometheus to scrape. The scrape configuration 55 | looks as follows: 56 | 57 | ``` 58 | # A scrape configuration containing exactly one endpoint to scrape: 59 | # Here it's Prometheus itself. 60 | scrape_configs: 61 | # The job name is added as a label `job=` to any timeseries scraped from this config. 62 | - job_name: 'prometheus' 63 | 64 | # Override the global default and scrape targets from this job every 5 seconds. 65 | scrape_interval: 5s 66 | 67 | # metrics_path defaults to '/metrics' 68 | # scheme defaults to 'http'. 69 | 70 | static_configs: 71 | - targets: ['localhost:9090'] 72 | - job_name: 'webapp' 73 | 74 | # Override the global default and scrape targets from this job every 5 seconds. 75 | scrape_interval: 5s 76 | 77 | # metrics_path defaults to '/metrics' 78 | # scheme defaults to 'http'. 79 | static_configs: 80 | - targets: ['statsd:9102'] 81 | 82 | ``` 83 | 84 | Prometheus scrapes itself, which is the first target above. The second target 85 | is the statsd exporter on port 9102. 86 | 87 | Since these services are running via `docker-compose`, `statsd` automatically resolves to the IP of the statsd exporter container.
88 | 89 | To bring up all the services: 90 | 91 | ``` 92 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 93 | ``` 94 | 95 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 
33 | static_configs: 34 | - targets: ['statsd:9102'] 35 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/docker-compose-infra.yml: -------------------------------------------------------------------------------- 1 | # Based off https://github.com/vegasbrianc/prometheus 2 | version: '2' 3 | 4 | volumes: 5 | prometheus_data: {} 6 | grafana_data: {} 7 | 8 | services: 9 | statsd: 10 | image: prom/statsd-exporter 11 | container_name: statsd 12 | expose: 13 | - 9125 14 | - 9102 15 | prometheus: 16 | image: prom/prometheus 17 | container_name: prometheus 18 | volumes: 19 | - "./config/prometheus/:/etc/prometheus/:Z" 20 | - prometheus_data:/prometheus 21 | command: 22 | - '--config.file=/etc/prometheus/prometheus.yml' 23 | expose: 24 | - 9090 25 | ports: 26 | - 9090:9090 27 | grafana: 28 | image: grafana/grafana 29 | depends_on: 30 | - prometheus 31 | ports: 32 | - 3000:3000 33 | volumes: 34 | - grafana_data:/var/lib/grafana 35 | environment: 36 | - GF_SECURITY_ADMIN_PASSWORD=foobar 37 | - GF_USERS_ALLOW_SIGN_UP=false 38 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | webapp: 5 | image: amitsaha/django_app_2 6 | container_name: webapp 7 | expose: 8 | - 8000 9 | ports: 10 | - 8000:8000 11 | volumes: 12 | - ./src:/application:z 13 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/db.sqlite3: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitsaha/python-prometheus-demo/f758c5807568b6c5383b9b86f3997d34f53e14bd/django_app_statsd_prometheus/src/db.sqlite3 --------------------------------------------------------------------------------
/django_app_statsd_prometheus/src/demo/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/amitsaha/python-prometheus-demo/f758c5807568b6c5383b9b86f3997d34f53e14bd/django_app_statsd_prometheus/src/demo/__init__.py -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/demo/metrics_middleware.py: -------------------------------------------------------------------------------- 1 | import time 2 | from datadog import DogStatsd 3 | import time 4 | import sys 5 | 6 | 7 | statsd = DogStatsd(host="statsd", port=9125) 8 | REQUEST_LATENCY_METRIC_NAME = 'request_latency_seconds' 9 | REQUEST_COUNT_METRIC_NAME = 'request_count' 10 | 11 | class StatsdReporter(): 12 | 13 | def __init__(self, get_response): 14 | self.get_response = get_response 15 | 16 | def __call__(self, request): 17 | request.start_time = time.time() 18 | response = self.get_response(request) 19 | #FIXME: https://docs.djangoproject.com/en/2.2/ref/request-response/ 20 | print("Statsd middleware: request {0} {1}".format(request.path_info, request.method)) 21 | if response: 22 | resp_time = time.time() - request.start_time 23 | statsd.histogram(REQUEST_LATENCY_METRIC_NAME, 24 | resp_time, 25 | tags=[ 26 | 'service:webapp', 27 | 'endpoint: %s' % request.path_info, 28 | ] 29 | ) 30 | statsd.increment(REQUEST_COUNT_METRIC_NAME, 31 | tags=[ 32 | 'service: webapp', 33 | 'method: %s' % request.method, 34 | 'endpoint: %s' % request.path_info, 35 | 'status: %s' % str(response.status_code) 36 | ] 37 | ) 38 | return response 39 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/demo/settings.py: -------------------------------------------------------------------------------- 1 | """ 2 | Django settings for demo project. 3 | 4 | Generated by 'django-admin startproject' using Django 2.2.1. 
5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.2/topics/settings/ 8 | 9 | For the full list of settings and their values, see 10 | https://docs.djangoproject.com/en/2.2/ref/settings/ 11 | """ 12 | 13 | import os 14 | 15 | # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 16 | BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 17 | 18 | 19 | # Quick-start development settings - unsuitable for production 20 | # See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/ 21 | 22 | # SECURITY WARNING: keep the secret key used in production secret! 23 | SECRET_KEY = 'yff70et1z#a(m1j2mul#bsft2r)#m6p26*wj0j4wedptwu7)k_' 24 | 25 | # SECURITY WARNING: don't run with debug turned on in production! 26 | DEBUG = True 27 | 28 | ALLOWED_HOSTS = [] 29 | 30 | 31 | # Application definition 32 | 33 | INSTALLED_APPS = [ 34 | 'django.contrib.admin', 35 | 'django.contrib.auth', 36 | 'django.contrib.contenttypes', 37 | 'django.contrib.sessions', 38 | 'django.contrib.messages', 39 | 'django.contrib.staticfiles', 40 | ] 41 | 42 | MIDDLEWARE = [ 43 | #'django.middleware.security.SecurityMiddleware', 44 | #'django.contrib.sessions.middleware.SessionMiddleware', 45 | #'django.middleware.common.CommonMiddleware', 46 | #'django.middleware.csrf.CsrfViewMiddleware', 47 | #'django.contrib.auth.middleware.AuthenticationMiddleware', 48 | #'django.contrib.messages.middleware.MessageMiddleware', 49 | #'django.middleware.clickjacking.XFrameOptionsMiddleware', 50 | 'demo.metrics_middleware.StatsdReporter', 51 | ] 52 | 53 | ROOT_URLCONF = 'demo.urls' 54 | 55 | TEMPLATES = [ 56 | { 57 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 58 | 'DIRS': [], 59 | 'APP_DIRS': True, 60 | 'OPTIONS': { 61 | 'context_processors': [ 62 | 'django.template.context_processors.debug', 63 | 'django.template.context_processors.request', 64 | 'django.contrib.auth.context_processors.auth', 65 | 
'django.contrib.messages.context_processors.messages', 66 | ], 67 | }, 68 | }, 69 | ] 70 | 71 | WSGI_APPLICATION = 'demo.wsgi.application' 72 | 73 | 74 | # Database 75 | # https://docs.djangoproject.com/en/2.2/ref/settings/#databases 76 | 77 | DATABASES = { 78 | 'default': { 79 | 'ENGINE': 'django.db.backends.sqlite3', 80 | 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), 81 | } 82 | } 83 | 84 | 85 | # Password validation 86 | # https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators 87 | 88 | AUTH_PASSWORD_VALIDATORS = [ 89 | { 90 | 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', 91 | }, 92 | { 93 | 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 94 | }, 95 | { 96 | 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', 97 | }, 98 | { 99 | 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', 100 | }, 101 | ] 102 | 103 | 104 | # Internationalization 105 | # https://docs.djangoproject.com/en/2.2/topics/i18n/ 106 | 107 | LANGUAGE_CODE = 'en-us' 108 | 109 | TIME_ZONE = 'UTC' 110 | 111 | USE_I18N = True 112 | 113 | USE_L10N = True 114 | 115 | USE_TZ = True 116 | 117 | 118 | # Static files (CSS, JavaScript, Images) 119 | # https://docs.djangoproject.com/en/2.2/howto/static-files/ 120 | 121 | STATIC_URL = '/static/' 122 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/demo/urls.py: -------------------------------------------------------------------------------- 1 | """demo URL Configuration 2 | 3 | The `urlpatterns` list routes URLs to views. For more information please see: 4 | https://docs.djangoproject.com/en/2.2/topics/http/urls/ 5 | Examples: 6 | Function views 7 | 1. Add an import: from my_app import views 8 | 2. Add a URL to urlpatterns: path('', views.home, name='home') 9 | Class-based views 10 | 1. Add an import: from other_app.views import Home 11 | 2. 
Add a URL to urlpatterns: path('', Home.as_view(), name='home') 12 | Including another URLconf 13 | 1. Import the include() function: from django.urls import include, path 14 | 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) 15 | """ 16 | from django.contrib import admin 17 | from django.urls import path 18 | 19 | urlpatterns = [ 20 | path('admin/', admin.site.urls), 21 | ] 22 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/demo/wsgi.py: -------------------------------------------------------------------------------- 1 | """ 2 | WSGI config for demo project. 3 | 4 | It exposes the WSGI callable as a module-level variable named ``application``. 5 | 6 | For more information on this file, see 7 | https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ 8 | """ 9 | 10 | import os 11 | 12 | from django.core.wsgi import get_wsgi_application 13 | 14 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 15 | 16 | application = get_wsgi_application() 17 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Django's command-line utility for administrative tasks.""" 3 | import os 4 | import sys 5 | 6 | 7 | def main(): 8 | os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'demo.settings') 9 | try: 10 | from django.core.management import execute_from_command_line 11 | except ImportError as exc: 12 | raise ImportError( 13 | "Couldn't import Django. Are you sure it's installed and " 14 | "available on your PYTHONPATH environment variable? Did you " 15 | "forget to activate a virtual environment?" 
16 | ) from exc 17 | execute_from_command_line(sys.argv) 18 | 19 | 20 | if __name__ == '__main__': 21 | main() 22 | -------------------------------------------------------------------------------- /django_app_statsd_prometheus/src/requirements.txt: -------------------------------------------------------------------------------- 1 | django 2 | gunicorn 3 | datadog 4 | -------------------------------------------------------------------------------- /flask_app_prometheus/Dockerfile.py2: -------------------------------------------------------------------------------- 1 | FROM python:2.7-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 15 | -------------------------------------------------------------------------------- /flask_app_prometheus/Dockerfile.py3: -------------------------------------------------------------------------------- 1 | FROM python:3.6.1-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 15 | -------------------------------------------------------------------------------- /flask_app_prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Example Flask application 2 | 3 | See ``src`` for the application code. 
4 | 5 | ## Building Docker image 6 | 7 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 8 | and expects the application source code to be volume mounted at `/application` 9 | when run: 10 | 11 | ``` 12 | FROM python:3.6.1-alpine 13 | ADD . /application 14 | WORKDIR /application 15 | RUN set -e; \ 16 | apk add --no-cache --virtual .build-deps \ 17 | gcc \ 18 | libc-dev \ 19 | linux-headers \ 20 | ; \ 21 | pip install -r src/requirements.txt; \ 22 | apk del .build-deps; 23 | EXPOSE 5000 24 | VOLUME /application 25 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 26 | ``` 27 | 28 | The last statement shows how we are running the application via `uwsgi` with 5 29 | worker processes. 30 | 31 | To build the image: 32 | 33 | ``` 34 | $ docker build -t amitsaha/flask_app -f Dockerfile.py3 . 35 | ``` 36 | 37 | ## Running the application 38 | 39 | We can just run the web application as follows: 40 | 41 | ``` 42 | $ docker run -ti -p 5000:5000 -v `pwd`/src:/application amitsaha/flask_app 43 | ``` 44 | 45 | ## Bringing up the web application, along with prometheus 46 | 47 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp` service which is our web application 48 | using the image `amitsaha/flask_app` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 49 | file brings up the `prometheus` service and also starts the `grafana` service which 50 | is available on port 3000. The config directory contains a `prometheus.yml` file 51 | which sets up the targets for prometheus to scrape. The scrape configuration 52 | looks as follows: 53 | 54 | ``` 55 | # A scrape configuration containing exactly one endpoint to scrape: 56 | # Here it's Prometheus itself. 57 | scrape_configs: 58 | # The job name is added as a label `job=` to any timeseries scraped from this config.
59 | - job_name: 'prometheus' 60 | 61 | # Override the global default and scrape targets from this job every 5 seconds. 62 | scrape_interval: 5s 63 | 64 | # metrics_path defaults to '/metrics' 65 | # scheme defaults to 'http'. 66 | 67 | static_configs: 68 | - targets: ['localhost:9090'] 69 | - job_name: 'webapp' 70 | 71 | # Override the global default and scrape targets from this job every 5 seconds. 72 | scrape_interval: 5s 73 | 74 | # metrics_path defaults to '/metrics' 75 | # scheme defaults to 'http'. 76 | static_configs: 77 | - targets: ['webapp:5000'] 78 | ``` 79 | 80 | Prometheus scrapes itself, which is the first target above. The second target 81 | is our web application on port 5000. 82 | Since these services are running via `docker-compose`, `webapp` automatically resolves to the IP of the webapp container. 83 | 84 | To bring up all the services: 85 | 86 | ``` 87 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 88 | ``` 89 | 90 | -------------------------------------------------------------------------------- /flask_app_prometheus/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds.
19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 33 | static_configs: 34 | - targets: ['webapp:5000'] 35 | -------------------------------------------------------------------------------- /flask_app_prometheus/docker-compose-infra.yml: -------------------------------------------------------------------------------- 1 | # Based off https://github.com/vegasbrianc/prometheus 2 | version: '2' 3 | 4 | volumes: 5 | prometheus_data: {} 6 | grafana_data: {} 7 | 8 | services: 9 | prometheus: 10 | image: prom/prometheus 11 | container_name: prometheus 12 | volumes: 13 | - ./config/prometheus/:/etc/prometheus/ 14 | - prometheus_data:/prometheus 15 | command: 16 | - '--config.file=/etc/prometheus/prometheus.yml' 17 | expose: 18 | - 9090 19 | ports: 20 | - 9090:9090 21 | grafana: 22 | image: grafana/grafana 23 | depends_on: 24 | - prometheus 25 | ports: 26 | - 3000:3000 27 | volumes: 28 | - grafana_data:/var/lib/grafana 29 | environment: 30 | - GF_SECURITY_ADMIN_PASSWORD=foobar 31 | - GF_USERS_ALLOW_SIGN_UP=false 32 | -------------------------------------------------------------------------------- /flask_app_prometheus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | webapp: 5 | image: amitsaha/flask_app 6 | container_name: webapp 7 | expose: 8 | - 5000 9 | ports: 10 | - 5000:5000 11 | volumes: 12 | - ./src:/application 13 | -------------------------------------------------------------------------------- /flask_app_prometheus/src/flask_app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, 
Response 2 | from helpers.middleware import setup_metrics 3 | import prometheus_client 4 | 5 | CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8') 6 | 7 | 8 | app = Flask(__name__) 9 | setup_metrics(app) 10 | 11 | @app.route('/test/') 12 | def test(): 13 | return 'rest' 14 | 15 | @app.route('/test1/') 16 | def test1(): 17 | 1/0 18 | return 'rest' 19 | 20 | @app.errorhandler(500) 21 | def handle_500(error): 22 | return str(error), 500 23 | 24 | @app.route('/metrics') 25 | def metrics(): 26 | return Response(prometheus_client.generate_latest(), mimetype=CONTENT_TYPE_LATEST) 27 | 28 | if __name__ == '__main__': 29 | app.run() 30 | -------------------------------------------------------------------------------- /flask_app_prometheus/src/helpers/middleware.py: -------------------------------------------------------------------------------- 1 | from flask import request 2 | from prometheus_client import Counter, Histogram 3 | import time 4 | import sys 5 | 6 | REQUEST_COUNT = Counter( 7 | 'request_count', 'App Request Count', 8 | ['app_name', 'method', 'endpoint', 'http_status'] 9 | ) 10 | REQUEST_LATENCY = Histogram('request_latency_seconds', 'Request latency', 11 | ['app_name', 'endpoint'] 12 | ) 13 | 14 | def start_timer(): 15 | request.start_time = time.time() 16 | 17 | def stop_timer(response): 18 | resp_time = time.time() - request.start_time 19 | REQUEST_LATENCY.labels('webapp', request.path).observe(resp_time) 20 | return response 21 | 22 | def record_request_data(response): 23 | REQUEST_COUNT.labels('webapp', request.method, request.path, 24 | response.status_code).inc() 25 | return response 26 | 27 | def setup_metrics(app): 28 | app.before_request(start_timer) 29 | # The order here matters since we want stop_timer 30 | # to be executed first 31 | app.after_request(record_request_data) 32 | app.after_request(stop_timer) 33 | -------------------------------------------------------------------------------- 
/flask_app_prometheus/src/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.0 2 | prometheus_client==0.0.19 3 | uwsgi==2.0.15 4 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/Dockerfile.py2: -------------------------------------------------------------------------------- 1 | FROM python:2.7-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | RUN mkdir /tmp/prom_data 15 | ENV prometheus_multiproc_dir /tmp/prom_data 16 | CMD uwsgi --lazy --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 17 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/Dockerfile.py3: -------------------------------------------------------------------------------- 1 | FROM python:3.6.1-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | RUN mkdir /tmp/prom_data 15 | ENV prometheus_multiproc_dir /tmp/prom_data 16 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 17 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/README.md: -------------------------------------------------------------------------------- 1 | # Example Flask application 2 | 3 | See ``src`` for the application code. 
The difference from [flask_app_prometheus](https://github.com/amitsaha/python-prometheus-demo/tree/master/flask_app_prometheus) is that we are using the multiprocessing mode. Since we are using `uwsgi`, we don't have the following snippet: 4 | 5 | ``` 6 | from prometheus_client import multiprocess 7 | 8 | def child_exit(server, worker): 9 | multiprocess.mark_process_dead(worker.pid) 10 | ``` 11 | 12 | However, I still see metrics being inconsistent with Python 2 and 13 | Python 3. So, doesn't work yet! 14 | 15 | And then, I came across [this issue](https://github.com/korfuri/django-prometheus/issues/12) and adding `--lazy` to `uwsgi` seems to have 16 | fixed it! 17 | 18 | 19 | ## Building Docker image 20 | 21 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 22 | and expects the application source code to be volume mounted at `/application` 23 | when run: 24 | 25 | ``` 26 | FROM python:3.6.1-alpine 27 | ADD . /application 28 | WORKDIR /application 29 | RUN set -e; \ 30 | apk add --no-cache --virtual .build-deps \ 31 | gcc \ 32 | libc-dev \ 33 | linux-headers \ 34 | ; \ 35 | pip install -r src/requirements.txt; \ 36 | apk del .build-deps; 37 | EXPOSE 5000 38 | VOLUME /application 39 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 40 | ``` 41 | 42 | The last statement shows how we are running the application via `uwsgi` with 5 43 | worker processes. 44 | 45 | To build the image: 46 | 47 | ``` 48 | $ docker build -t amitsaha/flask_app -f Dockerfile.py3 .
49 | ``` 50 | 51 | ## Running the application 52 | 53 | We can just run the web application as follows: 54 | 55 | ``` 56 | $ docker run -ti -p 5000:5000 -v `pwd`/src:/application amitsaha/flask_app 57 | ``` 58 | 59 | ## Bringing up the web application, along with prometheus 60 | 61 | The [docker-compose.yml](docker-compose.yml) brings up the `webapp` service which is our web application 62 | using the image `amitsaha/flask_app` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 63 | file brings up the `prometheus` service and also starts the `grafana` service which 64 | is available on port 3000. The config directory contains a `prometheus.yml` file 65 | which sets up the targets for prometheus to scrape. The scrape configuration 66 | looks as follows: 67 | 68 | ``` 69 | # A scrape configuration containing exactly one endpoint to scrape: 70 | # Here it's Prometheus itself. 71 | scrape_configs: 72 | # The job name is added as a label `job=` to any timeseries scraped from this config. 73 | - job_name: 'prometheus' 74 | 75 | # Override the global default and scrape targets from this job every 5 seconds. 76 | scrape_interval: 5s 77 | 78 | # metrics_path defaults to '/metrics' 79 | # scheme defaults to 'http'. 80 | 81 | static_configs: 82 | - targets: ['localhost:9090'] 83 | - job_name: 'webapp' 84 | 85 | # Override the global default and scrape targets from this job every 5 seconds. 86 | scrape_interval: 5s 87 | 88 | # metrics_path defaults to '/metrics' 89 | # scheme defaults to 'http'. 90 | static_configs: 91 | - targets: ['webapp:5000'] 92 | ``` 93 | 94 | Prometheus scrapes itself, which is the first target above. The second target 95 | is our web application on port 5000. 96 | Since these services are running via `docker-compose`, `webapp` automatically resolves to the IP of the webapp container.
97 | 98 | To bring up all the services: 99 | 100 | ``` 101 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 102 | ``` 103 | 104 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 
33 | static_configs: 34 | - targets: ['webapp:5000'] 35 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/docker-compose-infra.yml: -------------------------------------------------------------------------------- 1 | # Based off https://github.com/vegasbrianc/prometheus 2 | version: '2' 3 | 4 | volumes: 5 | prometheus_data: {} 6 | grafana_data: {} 7 | 8 | services: 9 | prometheus: 10 | image: prom/prometheus 11 | container_name: prometheus 12 | volumes: 13 | - ./config/prometheus/:/etc/prometheus/ 14 | - prometheus_data:/prometheus 15 | command: 16 | - '--config.file=/etc/prometheus/prometheus.yml' 17 | expose: 18 | - 9090 19 | ports: 20 | - 9090:9090 21 | grafana: 22 | image: grafana/grafana 23 | depends_on: 24 | - prometheus 25 | ports: 26 | - 3000:3000 27 | volumes: 28 | - grafana_data:/var/lib/grafana 29 | environment: 30 | - GF_SECURITY_ADMIN_PASSWORD=foobar 31 | - GF_USERS_ALLOW_SIGN_UP=false 32 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | webapp: 5 | image: amitsaha/flask_app 6 | container_name: webapp 7 | expose: 8 | - 5000 9 | ports: 10 | - 5000:5000 11 | volumes: 12 | - ./src:/application 13 | -------------------------------------------------------------------------------- /flask_app_prometheus_multiprocessing/src/flask_app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, Response 2 | from helpers.middleware import setup_metrics 3 | 4 | app = Flask(__name__) 5 | 6 | setup_metrics(app) 7 | 8 | @app.route('/test/') 9 | def test(): 10 | return 'rest' 11 | 12 | @app.route('/test1/') 13 | def test1(): 14 | 1/0 15 | return 'rest' 16 | 17 | @app.errorhandler(500) 18 | def handle_500(error): 19 | return str(error), 
"""Prometheus middleware for a Flask app running with multiple worker processes.

Metrics are registered against a dedicated CollectorRegistry wrapped by a
MultiProcessCollector, so samples written by every uwsgi/gunicorn worker are
aggregated when /metrics is scraped.
"""
from flask import request, Response
from prometheus_client import Counter, Histogram
import time
import sys
import prometheus_client
from prometheus_client import multiprocess, CollectorRegistry

# Prometheus text exposition format content type.
CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8')


# Dedicated registry: the MultiProcessCollector merges per-worker sample
# files on scrape (NOTE(review): multiprocess mode presumably needs the
# prometheus_multiproc_dir env var set -- confirm the deployment sets it).
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)

REQUEST_COUNT = Counter(
    'request_count', 'App Request Count',
    ['app_name', 'method', 'endpoint', 'http_status'],
    registry=registry,
)
REQUEST_LATENCY = Histogram('request_latency_seconds', 'Request latency',
    ['app_name', 'endpoint'],
    registry=registry,
)

def start_timer():
    # Stash the wall-clock start time on the request object for stop_timer.
    request.start_time = time.time()

def stop_timer(response):
    # Observe how long this request took, labelled by its path.
    resp_time = time.time() - request.start_time
    REQUEST_LATENCY.labels('webapp', request.path).observe(resp_time)
    return response

def record_request_data(response):
    # Count the request, labelled by method, path and response status.
    REQUEST_COUNT.labels('webapp', request.method, request.path,
        response.status_code).inc()
    return response

def setup_metrics(app):
    """Attach the timing/counting hooks and the /metrics endpoint to *app*."""
    app.before_request(start_timer)
    # The order here matters since we want stop_timer
    # to be executed first
    # (Flask runs after_request callbacks in reverse registration order.)
    app.after_request(record_request_data)
    app.after_request(stop_timer)

    @app.route('/metrics')
    def metrics():
        # Serve the aggregated multiprocess metrics.
        return Response(prometheus_client.generate_latest(registry), mimetype=CONTENT_TYPE_LATEST)
/application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 15 | -------------------------------------------------------------------------------- /flask_app_prometheus_worker_id/README.md: -------------------------------------------------------------------------------- 1 | # Example Flask application with Prometheus monitoring 2 | 3 | See ``src`` for the application code. The difference from [flask_app_prometheus](https://github.com/amitsaha/python-prometheus-demo/tree/master/flask_app_prometheus) is that the metrics add an additional label: 4 | `worker_id` to the metrics. This basically means that when running 5 | under `uwsgi` or `gunicorn`, the label will make each scrape that 6 | is answered by a different worker an entirely different metric. 7 | 8 | 9 | ## Building Docker image 10 | 11 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 12 | and expects the application source code to be volume mounted at `/application` 13 | when run: 14 | 15 | ``` 16 | FROM python:3.6.1-alpine 17 | ADD . /application 18 | WORKDIR /application 19 | RUN set -e; \ 20 | apk add --no-cache --virtual .build-deps \ 21 | gcc \ 22 | libc-dev \ 23 | linux-headers \ 24 | ; \ 25 | pip install -r src/requirements.txt; \ 26 | apk del .build-deps; 27 | EXPOSE 5000 28 | VOLUME /application 29 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 30 | ``` 31 | 32 | The last statement shows how we are running the application via `uwsgi` with 5 33 | worker processes. 34 | 35 | To build the image: 36 | 37 | ``` 38 | $ docker build -t amitsaha/flask_app -f Dockerfile.py3 . 
39 | ``` 40 | 41 | ## Running the application 42 | 43 | We can just run the web application as follows: 44 | 45 | ``` 46 | $ docker run -ti -p 5000:5000 -v `pwd`/src:/application amitsaha/flask_app 47 | ``` 48 | 49 | ## Bringing up the web application, along with prometheus 50 | 51 | The [docker-compse.yml](docker-compose.yml) brings up the `webapp` service which is our web application 52 | using the image `amitsaha/flask_app` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 53 | file brings up the `prometheus` service and also starts the `grafana` service which 54 | is available on port 3000. The config directory contains a `prometheus.yml` file 55 | which sets up the targets for prometheus to scrape. The scrape configuration 56 | looks as follows: 57 | 58 | ``` 59 | # A scrape configuration containing exactly one endpoint to scrape: 60 | # Here it's Prometheus itself. 61 | scrape_configs: 62 | # The job name is added as a label `job=` to any timeseries scraped from this config. 63 | - job_name: 'prometheus' 64 | 65 | # Override the global default and scrape targets from this job every 5 seconds. 66 | scrape_interval: 5s 67 | 68 | # metrics_path defaults to '/metrics' 69 | # scheme defaults to 'http'. 70 | 71 | static_configs: 72 | - targets: ['localhost:9090'] 73 | - job_name: 'webapp' 74 | 75 | # Override the global default and scrape targets from this job every 5 seconds. 76 | scrape_interval: 5s 77 | 78 | # metrics_path defaults to '/metrics' 79 | # scheme defaults to 'http'. 80 | static_configs: 81 | - targets: ['webapp:5000'] 82 | ``` 83 | 84 | Prometheus scrapes itself, which is the first target above. The second target 85 | is the our web application on port 5000. 86 | Since these services are running via `docker-compose`, `webapp` automatically resolves to the IP of the webapp container. 
87 | 88 | To bring up all the services: 89 | 90 | ``` 91 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 92 | ``` 93 | 94 | -------------------------------------------------------------------------------- /flask_app_prometheus_worker_id/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 
33 | static_configs: 34 | - targets: ['webapp:5000'] 35 | -------------------------------------------------------------------------------- /flask_app_prometheus_worker_id/docker-compose-infra.yml: -------------------------------------------------------------------------------- 1 | # Based off https://github.com/vegasbrianc/prometheus 2 | version: '2' 3 | 4 | volumes: 5 | prometheus_data: {} 6 | grafana_data: {} 7 | 8 | services: 9 | prometheus: 10 | image: prom/prometheus 11 | container_name: prometheus 12 | volumes: 13 | - ./config/prometheus/:/etc/prometheus/ 14 | - prometheus_data:/prometheus 15 | command: 16 | - '--config.file=/etc/prometheus/prometheus.yml' 17 | expose: 18 | - 9090 19 | ports: 20 | - 9090:9090 21 | grafana: 22 | image: grafana/grafana 23 | depends_on: 24 | - prometheus 25 | ports: 26 | - 3000:3000 27 | volumes: 28 | - grafana_data:/var/lib/grafana 29 | environment: 30 | - GF_SECURITY_ADMIN_PASSWORD=foobar 31 | - GF_USERS_ALLOW_SIGN_UP=false 32 | -------------------------------------------------------------------------------- /flask_app_prometheus_worker_id/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | webapp: 5 | image: amitsaha/flask_app 6 | container_name: webapp 7 | expose: 8 | - 5000 9 | ports: 10 | - 5000:5000 11 | volumes: 12 | - ./src:/application 13 | -------------------------------------------------------------------------------- /flask_app_prometheus_worker_id/src/flask_app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, Response 2 | from helpers.middleware import setup_metrics 3 | import prometheus_client 4 | 5 | CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8') 6 | 7 | 8 | app = Flask(__name__) 9 | setup_metrics(app) 10 | 11 | @app.route('/test/') 12 | def test(): 13 | return 'rest' 14 | 15 | @app.route('/test1/') 16 | def test1(): 17 | 1/0 18 | return 
'rest' 19 | 20 | @app.errorhandler(500) 21 | def handle_500(error): 22 | return str(error), 500 23 | 24 | @app.route('/metrics') 25 | def metrics(): 26 | return Response(prometheus_client.generate_latest(), mimetype=CONTENT_TYPE_LATEST) 27 | 28 | if __name__ == '__main__': 29 | app.run() 30 | -------------------------------------------------------------------------------- /flask_app_prometheus_worker_id/src/helpers/middleware.py: -------------------------------------------------------------------------------- 1 | from flask import request 2 | from prometheus_client import Counter, Histogram 3 | import time 4 | import sys 5 | try: 6 | import uwsgi 7 | use_uwsgi_worker_id = True 8 | except ImportError: 9 | import os 10 | use_uwsgi_worker_id = False 11 | 12 | 13 | REQUEST_COUNT = Counter( 14 | 'request_count', 'App Request Count', 15 | ['app_name', 'method', 'endpoint', 'http_status', 'worker_id'] 16 | ) 17 | REQUEST_LATENCY = Histogram('request_latency_seconds', 'Request latency', 18 | ['app_name', 'endpoint', 'worker_id'] 19 | ) 20 | 21 | def _get_worker_id(): 22 | if use_uwsgi_worker_id: 23 | worker_id = uwsgi.worker_id() 24 | else: 25 | worker_id = os.getpid() 26 | return worker_id 27 | 28 | def start_timer(): 29 | request.start_time = time.time() 30 | 31 | def stop_timer(response): 32 | resp_time = time.time() - request.start_time 33 | REQUEST_LATENCY.labels('webapp', request.path, _get_worker_id()).observe(resp_time) 34 | return response 35 | 36 | def record_request_data(response): 37 | REQUEST_COUNT.labels('webapp', request.method, request.path, 38 | response.status_code, _get_worker_id()).inc() 39 | return response 40 | 41 | def setup_metrics(app): 42 | app.before_request(start_timer) 43 | # The order here matters since we want stop_timer 44 | # to be executed first 45 | app.after_request(record_request_data) 46 | app.after_request(stop_timer) 47 | -------------------------------------------------------------------------------- 
/flask_app_prometheus_worker_id/src/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.0 2 | prometheus_client==0.0.19 3 | uwsgi==2.0.15 4 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus/Dockerfile.py2: -------------------------------------------------------------------------------- 1 | FROM python:2.7-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 15 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus/Dockerfile.py3: -------------------------------------------------------------------------------- 1 | FROM python:3.6.1-alpine 2 | ADD . /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r src/requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | VOLUME /application 14 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 15 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus/README.md: -------------------------------------------------------------------------------- 1 | # Example Flask application 2 | 3 | See ``src`` for the application code. 
4 | 5 | ## Building Docker image 6 | 7 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 8 | and expects the application source code to be volume mounted at `/application` 9 | when run: 10 | 11 | ``` 12 | FROM python:3.6.1-alpine 13 | ADD . /application 14 | WORKDIR /application 15 | RUN set -e; \ 16 | apk add --no-cache --virtual .build-deps \ 17 | gcc \ 18 | libc-dev \ 19 | linux-headers \ 20 | ; \ 21 | pip install -r src/requirements.txt; \ 22 | apk del .build-deps; 23 | EXPOSE 5000 24 | VOLUME /application 25 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app_1:app --enable-threads --processes 5 26 | ``` 27 | 28 | The last statement shows how we are running the application via `uwsgi` with 5 29 | worker processes. 30 | 31 | To build the image: 32 | 33 | ``` 34 | $ docker build -t amitsaha/flask_app_1 -f Dockerfile.py3 . 35 | ``` 36 | 37 | ## Running the application 38 | 39 | We can just run the web application as follows: 40 | 41 | ``` 42 | $ docker run -ti -p 5000:5000 -v `pwd`/src:/application amitsaha/flask_app_1 43 | ``` 44 | 45 | ## Bringing up the web application, along with prometheus 46 | 47 | The [docker-compse.yml](docker-compose.yml) brings up the `webapp` service which is our web application 48 | using the image `amitsaha/flask_app_1` we built above. The [docker-compose-infra.yml](docker-compose-infra.yml) 49 | file brings up the `statsd` service which is the statsd exporter, `prometheus` service and also starts the `grafana` service which 50 | is available on port 3000. The config directory contains a `prometheus.yml` file 51 | which sets up the targets for prometheus to scrape. The scrape configuration 52 | looks as follows: 53 | 54 | ``` 55 | # A scrape configuration containing exactly one endpoint to scrape: 56 | # Here it's Prometheus itself. 57 | scrape_configs: 58 | # The job name is added as a label `job=` to any timeseries scraped from this config. 
59 | - job_name: 'prometheus' 60 | 61 | # Override the global default and scrape targets from this job every 5 seconds. 62 | scrape_interval: 5s 63 | 64 | # metrics_path defaults to '/metrics' 65 | # scheme defaults to 'http'. 66 | 67 | static_configs: 68 | - targets: ['localhost:9090'] 69 | - job_name: 'webapp' 70 | 71 | # Override the global default and scrape targets from this job every 5 seconds. 72 | scrape_interval: 5s 73 | 74 | # metrics_path defaults to '/metrics' 75 | # scheme defaults to 'http'. 76 | static_configs: 77 | - targets: ['statsd:9102'] 78 | 79 | ``` 80 | 81 | Prometheus scrapes itself, which is the first target above. The second target 82 | is the statsd exporter on port 9102. 83 | 84 | Since these services are running via `docker-compose`, `statsd` automatically resolves to the IP of the statsd exporter container. 85 | 86 | To bring up all the services: 87 | 88 | ``` 89 | $ docker-compose -f docker-compose.yml -f docker-compose-infra.yml up 90 | ``` 91 | 92 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus/config/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 
16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 33 | static_configs: 34 | - targets: ['statsd:9102'] 35 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus/docker-compose-infra.yml: -------------------------------------------------------------------------------- 1 | # Based off https://github.com/vegasbrianc/prometheus 2 | version: '2' 3 | 4 | volumes: 5 | prometheus_data: {} 6 | grafana_data: {} 7 | 8 | services: 9 | stastd: 10 | image: prom/statsd-exporter 11 | container_name: statsd 12 | expose: 13 | - 9125 14 | - 9102 15 | prometheus: 16 | image: prom/prometheus 17 | container_name: prometheus 18 | volumes: 19 | - ./config/prometheus/:/etc/prometheus/ 20 | - prometheus_data:/prometheus 21 | command: 22 | - '--config.file=/etc/prometheus/prometheus.yml' 23 | expose: 24 | - 9090 25 | ports: 26 | - 9090:9090 27 | grafana: 28 | image: grafana/grafana 29 | depends_on: 30 | - prometheus 31 | ports: 32 | - 3000:3000 33 | volumes: 34 | - grafana_data:/var/lib/grafana 35 | environment: 36 | - GF_SECURITY_ADMIN_PASSWORD=foobar 37 | - GF_USERS_ALLOW_SIGN_UP=false 38 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | webapp: 5 | image: amitsaha/flask_app_1 6 | container_name: webapp 7 | expose: 8 | - 5000 9 | ports: 10 | - 5000:5000 11 | volumes: 12 | - 
"""Statsd-based request metrics middleware for the Flask demo app."""
from flask import request
from datadog import DogStatsd
import time
import sys


# "statsd" resolves to the statsd exporter container under docker-compose.
statsd = DogStatsd(host="statsd", port=9125)
REQUEST_LATENCY_METRIC_NAME = 'request_latency_seconds'
REQUEST_COUNT_METRIC_NAME = 'request_count'

def start_timer():
    """Record the wall-clock start time of the current request."""
    request.start_time = time.time()

def stop_timer(response):
    """Push the elapsed request time as a tagged histogram sample."""
    resp_time = time.time() - request.start_time
    statsd.histogram(REQUEST_LATENCY_METRIC_NAME,
                     resp_time,
                     tags=[
                         'service:webapp',
                         # "tag:value" with no space -- a space after the colon
                         # becomes part of the tag value.
                         'endpoint:%s' % request.path,
                     ]
                     )
    return response

def record_request_data(response):
    """Increment the request counter, tagged by method, endpoint and status."""
    # Fixed: tags were 'service: webapp', 'method: %s', etc.  The stray space
    # after the colon polluted the tag values, was inconsistent with
    # stop_timer's 'service:webapp' (splitting series), and disagreed with the
    # kubernetes variant of this file which already uses 'tag:value'.
    statsd.increment(REQUEST_COUNT_METRIC_NAME,
                     tags=[
                         'service:webapp',
                         'method:%s' % request.method,
                         'endpoint:%s' % request.path,
                         'status:%s' % str(response.status_code)
                     ]
                     )
    return response

def setup_metrics(app):
    """Wire the metrics hooks into *app*."""
    app.before_request(start_timer)
    # The order here matters since we want stop_timer
    # to be executed first (after_request callbacks run in reverse
    # registration order).
    app.after_request(record_request_data)
    app.after_request(stop_timer)
-------------------------------------------------------------------------------- /flask_app_statsd_prometheus/src/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==1.0 2 | datadog==0.16.0 3 | uwsgi==2.0.15 4 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus_kubernetes/Dockerfile.prom: -------------------------------------------------------------------------------- 1 | FROM prom/prometheus 2 | ADD prometheus.yml /etc/prometheus/ 3 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus_kubernetes/Dockerfile.py3: -------------------------------------------------------------------------------- 1 | FROM python:3.6.1-alpine 2 | ADD src/ /application 3 | WORKDIR /application 4 | RUN set -e; \ 5 | apk add --no-cache --virtual .build-deps \ 6 | gcc \ 7 | libc-dev \ 8 | linux-headers \ 9 | ; \ 10 | pip install -r requirements.txt; \ 11 | apk del .build-deps; 12 | EXPOSE 5000 13 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app:app --enable-threads --processes 5 14 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus_kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Python application + Statsd exporter + Prometheus + Kubernetes 2 | 3 | First, we need to setup a local kubernetes cluster. [minikube](https://github.com/kubernetes/minikube) 4 | is a good option. 5 | 6 | See `src` for the application code. 
7 | 8 | ## Building Docker images 9 | 10 | We want to use the docker engine running inside the `minikube` VM: 11 | 12 | ``` 13 | $ eval $(minikube docker-env) 14 | ``` 15 | 16 | 17 | The Python 3 based [Dockerfile](Dockerfile.py3) uses an Alpine Linux base image 18 | and expects the application source code to be volume mounted at `/application` 19 | when run: 20 | 21 | ``` 22 | FROM python:3.6.1-alpine 23 | ADD . /application 24 | WORKDIR /application 25 | RUN set -e; \ 26 | apk add --no-cache --virtual .build-deps \ 27 | gcc \ 28 | libc-dev \ 29 | linux-headers \ 30 | ; \ 31 | pip install -r src/requirements.txt; \ 32 | apk del .build-deps; 33 | EXPOSE 5000 34 | VOLUME /application 35 | CMD uwsgi --http :5000 --manage-script-name --mount /myapplication=flask_app_1:app --enable-threads --processes 5 36 | ``` 37 | 38 | The last statement shows how we are running the application via `uwsgi` with 5 39 | worker processes. 40 | 41 | To build the image: 42 | 43 | ``` 44 | $ docker build -t amitsaha/flask_app_1 -f Dockerfile.py3 . 45 | ``` 46 | 47 | The repository also has a dockerfile to build a custom prometheus image to make 48 | it easy to have our own target configuration: 49 | 50 | ``` 51 | $ docker build -t amitsaha/prometheus -f Dockerfile.prom . 
52 | ``` 53 | 54 | ## Bringing up the web application, along with prometheus, grafana and statsd 55 | 56 | We will now run `kubectl`: 57 | 58 | ``` 59 | $ kubectl apply -f k8s_application.yaml -f k8s_infra.yaml 60 | ``` 61 | 62 | Next, we will expose the different services: 63 | 64 | ``` 65 | $ kubectl expose deployment prometheus --type=NodePort 66 | $ kubectl expose deployment flaskapp --type=NodePort 67 | $ kubectl expose deployment grafana --type=NodePort 68 | ``` 69 | 70 | We can obtain the exposed services' URLs via: 71 | 72 | ``` 73 | $ minikube service list 74 | |----------------------|----------------------|-----------------------------| 75 | | NAMESPACE | NAME | URL | 76 | |----------------------|----------------------|-----------------------------| 77 | | default | flaskapp | http://192.168.39.211:30630 | 78 | | default | flaskappsvc | No node port | 79 | | default | grafana | http://192.168.39.211:30649 | 80 | | default | grafanasvc | No node port | 81 | | default | kubernetes | No node port | 82 | | default | prometheus | http://192.168.39.211:32143 | 83 | | default | prometheussvc | No node port | 84 | ``` 85 | 86 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus_kubernetes/k8s_application.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: flaskapp 5 | spec: 6 | replicas: 2 7 | template: 8 | metadata: 9 | labels: 10 | app: flaskapp 11 | spec: 12 | containers: 13 | - name: flaskapp 14 | image: amitsaha/flask_app_1 15 | env: 16 | - name: AUTHOR 17 | value: app1 18 | ports: 19 | - containerPort: 5000 20 | imagePullPolicy: Never 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: flaskappsvc 26 | spec: 27 | ports: 28 | - port: 80 29 | protocol: TCP 30 | targetPort: 5000 31 | selector: 32 | app: flaskapp 33 | 
-------------------------------------------------------------------------------- /flask_app_statsd_prometheus_kubernetes/k8s_infra.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: statsd 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: statsd 11 | spec: 12 | containers: 13 | - name: statsd 14 | image: prom/statsd-exporter 15 | env: 16 | - name: AUTHOR 17 | value: app1 18 | ports: 19 | - containerPort: 9125 20 | - containerPort: 9102 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: statsd 26 | spec: 27 | ports: 28 | - port: 9125 29 | protocol: UDP 30 | targetPort: 9125 31 | name: ingest 32 | - port: 9102 33 | protocol: TCP 34 | targetPort: 9102 35 | name: exporter 36 | selector: 37 | app: statsd 38 | --- 39 | apiVersion: extensions/v1beta1 40 | kind: Deployment 41 | metadata: 42 | name: prometheus 43 | spec: 44 | replicas: 1 45 | template: 46 | metadata: 47 | labels: 48 | app: prometheus 49 | spec: 50 | containers: 51 | - name: prometheus 52 | image: amitsaha/prometheus 53 | env: 54 | - name: AUTHOR 55 | value: app1 56 | ports: 57 | - containerPort: 9090 58 | imagePullPolicy: Never 59 | --- 60 | apiVersion: v1 61 | kind: Service 62 | metadata: 63 | name: prometheussvc 64 | spec: 65 | ports: 66 | - port: 9090 67 | protocol: TCP 68 | targetPort: 9090 69 | selector: 70 | app: prometheus 71 | --- 72 | apiVersion: extensions/v1beta1 73 | kind: Deployment 74 | metadata: 75 | name: grafana 76 | spec: 77 | replicas: 1 78 | template: 79 | metadata: 80 | labels: 81 | app: grafana 82 | spec: 83 | containers: 84 | - name: grafana 85 | image: grafana/grafana:5.4.2 86 | env: 87 | - name: GF_SECURITY_ADMIN_PASSWORD 88 | value: admin 89 | ports: 90 | - containerPort: 3000 91 | --- 92 | apiVersion: v1 93 | kind: Service 94 | metadata: 95 | name: grafanasvc 96 | spec: 97 | ports: 98 | - port: 3000 99 | protocol: TCP 
100 | targetPort: 3000 101 | selector: 102 | app: grafana 103 | -------------------------------------------------------------------------------- /flask_app_statsd_prometheus_kubernetes/prometheus.yml: -------------------------------------------------------------------------------- 1 | # my global config 2 | global: 3 | scrape_interval: 15s # By default, scrape targets every 15 seconds. 4 | evaluation_interval: 15s # By default, scrape targets every 15 seconds. 5 | # scrape_timeout is set to the global default (10s). 6 | 7 | # Attach these labels to any time series or alerts when communicating with 8 | # external systems (federation, remote storage, Alertmanager). 9 | external_labels: 10 | monitor: 'my-project' 11 | 12 | # A scrape configuration containing exactly one endpoint to scrape: 13 | # Here it's Prometheus itself. 14 | scrape_configs: 15 | # The job name is added as a label `job=` to any timeseries scraped from this config. 16 | - job_name: 'prometheus' 17 | 18 | # Override the global default and scrape targets from this job every 5 seconds. 19 | scrape_interval: 5s 20 | 21 | # metrics_path defaults to '/metrics' 22 | # scheme defaults to 'http'. 23 | 24 | static_configs: 25 | - targets: ['localhost:9090'] 26 | - job_name: 'webapp' 27 | 28 | # Override the global default and scrape targets from this job every 5 seconds. 29 | scrape_interval: 5s 30 | 31 | # metrics_path defaults to '/metrics' 32 | # scheme defaults to 'http'. 
"""Statsd-based request metrics middleware for the Flask demo app.

Latency and request counts are pushed to a statsd exporter in DogStatsd
tagged format; Prometheus then scrapes the exporter.
"""
from flask import request
from datadog import DogStatsd
import time
import sys


statsd = DogStatsd(host="statsd", port=9125)
REQUEST_LATENCY_METRIC_NAME = 'request_latency_seconds'
REQUEST_COUNT_METRIC_NAME = 'request_count'

def start_timer():
    """Record the wall-clock start time of the current request."""
    request.start_time = time.time()

def stop_timer(response):
    """Push the elapsed request time as a tagged histogram sample."""
    elapsed = time.time() - request.start_time
    latency_tags = [
        'service:webapp',
        'endpoint:%s' % request.path,
    ]
    statsd.histogram(REQUEST_LATENCY_METRIC_NAME, elapsed, tags=latency_tags)
    return response

def record_request_data(response):
    """Increment the request counter, tagged by method, endpoint and status."""
    request_tags = [
        'service:webapp',
        'method:%s' % request.method,
        'endpoint:%s' % request.path,
        'status:%s' % str(response.status_code),
    ]
    statsd.increment(REQUEST_COUNT_METRIC_NAME, tags=request_tags)
    return response

def setup_metrics(app):
    """Wire the timing/counting hooks into *app*.

    record_request_data is registered before stop_timer because Flask runs
    after_request callbacks in reverse registration order, so stop_timer
    executes first.
    """
    app.before_request(start_timer)
    app.after_request(record_request_data)
    app.after_request(stop_timer)