├── .deepsource.toml ├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ ├── feature_request.md │ └── general-support-question.md ├── dependabot.yml ├── stale.yml └── workflows │ ├── master-publish.yml │ └── release-publish.yml ├── .gitignore ├── .hadolint.yaml ├── Dockerfile ├── LICENSE.txt ├── README.md ├── build.sh ├── conf ├── entrypoint ├── etc │ ├── collectd │ │ └── collectd.conf │ ├── logrotate.d │ │ └── graphite-statsd │ ├── nginx │ │ ├── nginx.conf │ │ └── sites-enabled │ │ │ └── graphite-statsd.conf │ ├── redis │ │ └── redis.conf │ ├── run_once │ │ └── carbon-cache │ └── service │ │ ├── brubeck │ │ └── run │ │ ├── carbon-aggregator │ │ ├── log │ │ │ └── run │ │ └── run │ │ ├── carbon-relay │ │ ├── log │ │ │ └── run │ │ └── run │ │ ├── carbon │ │ ├── log │ │ │ └── run │ │ └── run │ │ ├── collectd │ │ └── run │ │ ├── cron │ │ └── run │ │ ├── go-carbon │ │ └── run │ │ ├── graphite │ │ └── run │ │ ├── nginx │ │ └── run │ │ ├── redis │ │ └── run │ │ └── statsd │ │ ├── log │ │ └── run │ │ └── run ├── opt │ ├── graphite │ │ ├── bin │ │ │ └── django_admin_init.sh │ │ ├── conf │ │ │ ├── aggregation-rules.conf │ │ │ ├── blacklist.conf │ │ │ ├── brubeck.json │ │ │ ├── carbon.amqp.conf │ │ │ ├── carbon.conf │ │ │ ├── dashboard.conf │ │ │ ├── go-carbon.conf │ │ │ ├── graphTemplates.conf │ │ │ ├── relay-rules.conf │ │ │ ├── rewrite-rules.conf │ │ │ ├── storage-aggregation.conf │ │ │ ├── storage-schemas.conf │ │ │ └── whitelist.conf │ │ └── webapp │ │ │ └── graphite │ │ │ ├── app_settings.py │ │ │ └── local_settings.py │ └── statsd │ │ └── config │ │ ├── tcp.js │ │ └── udp.js └── usr │ └── local │ └── bin │ └── folder_empty └── docker-compose.yml /.deepsource.toml: -------------------------------------------------------------------------------- 1 | version = 1 2 | 3 | [[analyzers]] 4 | name = "docker" 5 | enabled = true 6 | -------------------------------------------------------------------------------- /.dockerignore: 
-------------------------------------------------------------------------------- 1 | .github 2 | .idea 3 | .hadolint* 4 | .gitignore 5 | build* 6 | __build* 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "[BUG]" 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Please open bug only for Docker component here, if you want to do so for Graphite, please use [graphite-web repo](https://github.com/graphite-project/graphite-web/issues/new/choose)** 11 | 12 | **Describe the bug** 13 | A clear and concise description of what the bug is. 14 | 15 | **To Reproduce** 16 | Steps to reproduce the behavior: 17 | 18 | **Expected behavior** 19 | A clear and concise description of what you expected to happen. 20 | 21 | **Environment (please complete the following information):** 22 | - OS flavor: [e.g. Ubuntu, CentOS, Debian, Windows 10, MacOS etc] 23 | - Image version [e.g. 1.1.5-7, 1.1.6-1, etc] 24 | - Custom volumes mounted [e.g. None, /opt/graphite/conf, /opt/graphite/storage etc] 25 | 26 | **Additional context** 27 | Add any other context about the problem here. 28 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Please open feature request only for Docker component here, if you want to do so for Graphite, please use [graphite-web repo](https://github.com/graphite-project/graphite-web/issues/new/choose)** 11 | 12 | **Is your feature request related to a problem? 
Please describe.** 13 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 14 | 15 | **Describe the solution you'd like** 16 | A clear and concise description of what you want to happen. 17 | 18 | **Describe alternatives you've considered** 19 | A clear and concise description of any alternative solutions or features you've considered. 20 | 21 | **Additional context** 22 | Add any other context or screenshots about the feature request here. 23 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/general-support-question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: General support question 3 | about: Ask anything about this project 4 | title: "[Q]" 5 | labels: question 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Please ask questions only for Docker component here, if you want to do so about Graphite, please use [graphite-web repo](https://github.com/graphite-project/graphite-web/issues/new/choose)** 11 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | - package-ecosystem: "github-actions" 4 | directory: "/" 5 | schedule: 6 | interval: monthly 7 | - package-ecosystem: docker 8 | directory: "/" 9 | schedule: 10 | interval: weekly 11 | time: '10:00' 12 | open-pull-requests-limit: 10 -------------------------------------------------------------------------------- /.github/stale.yml: -------------------------------------------------------------------------------- 1 | # Number of days of inactivity before an issue becomes stale 2 | daysUntilStale: 60 3 | # Number of days of inactivity before a stale issue is closed 4 | daysUntilClose: 7 5 | # Issues with these labels will never be considered stale 6 | exemptLabels: 7 | - pinned 8 | - security 9 | # Label 
to use when marking an issue as stale 10 | staleLabel: stale 11 | # Comment to post when marking an issue as stale. Set to `false` to disable 12 | markComment: > 13 | This issue has been automatically marked as stale because it has not had 14 | recent activity. It will be closed if no further activity occurs. Thank you 15 | for your contributions. 16 | # Comment to post when closing a stale issue. Set to `false` to disable 17 | closeComment: false 18 | -------------------------------------------------------------------------------- /.github/workflows/master-publish.yml: -------------------------------------------------------------------------------- 1 | name: Build master 2 | 3 | on: 4 | push: 5 | branches: [ master ] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | # QEMU is used to emulate the ARM architecture, allowing us 13 | # to build not-x86 images 14 | - uses: docker/setup-qemu-action@master 15 | with: 16 | platforms: all 17 | # Buildx provides an easier way of building Docker images for other architectures 18 | - uses: docker/setup-buildx-action@master 19 | - name: Build amd64 python3 image 20 | run: | 21 | docker buildx build --platform=linux/amd64 --load -t graphiteapp/graphite-statsd:master . 22 | - name: Run Snyk to check amd64 python3 image for vulnerabilities 23 | uses: snyk/actions/docker@master 24 | env: 25 | SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} 26 | with: 27 | command: monitor 28 | image: graphiteapp/graphite-statsd:master 29 | args: --file=Dockerfile --platform=linux/amd64 --project-name=graphite-project/docker-graphite-statsd 30 | - name: Login to DockerHub 31 | uses: docker/login-action@v3 32 | with: 33 | username: ${{ secrets.DOCKER_USERNAME }} 34 | password: ${{ secrets.DOCKER_PASSWORD }} 35 | - name: Build and push amd64 python3 image to Docker Hub 36 | run: | 37 | docker buildx build --platform=linux/amd64 --push -t graphiteapp/graphite-statsd:master . 
38 | - name: Login to ghcr.io 39 | run: | 40 | echo "${{ secrets.GHCR_TOKEN }}" | docker login ghcr.io -u deniszh --password-stdin 41 | - name: Build and push amd64 image to ghcr.io 42 | run: | 43 | docker buildx build --platform=linux/amd64 --push -t ghcr.io/deniszh/graphite-statsd:master . 44 | -------------------------------------------------------------------------------- /.github/workflows/release-publish.yml: -------------------------------------------------------------------------------- 1 | name: Build release 2 | 3 | on: 4 | release: 5 | types: [ published ] 6 | 7 | jobs: 8 | build: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: actions/checkout@v4 12 | # QEMU is used to emulate the ARM architecture, allowing us 13 | # to build not-x86 images 14 | - uses: docker/setup-qemu-action@master 15 | with: 16 | platforms: all 17 | # Buildx provides an easier way of building Docker images for other architectures 18 | - uses: docker/setup-buildx-action@master 19 | - name: Build test amd64 python3 image 20 | run: | 21 | docker buildx build --platform=linux/amd64 --load -t graphiteapp/graphite-statsd:test . 
22 | - name: Run Snyk to check test amd64 python3 image for vulnerabilities 23 | uses: snyk/actions/docker@master 24 | env: 25 | SNYK_TOKEN: ${{ secrets.SNYK_TOKEN }} 26 | with: 27 | command: monitor 28 | image: graphiteapp/graphite-statsd:test 29 | args: --file=Dockerfile --platform=linux/amd64 --project-name=graphite-project/docker-graphite-statsd 30 | - name: Login to DockerHub 31 | uses: docker/login-action@v3 32 | with: 33 | username: ${{ secrets.DOCKER_USERNAME }} 34 | password: ${{ secrets.DOCKER_PASSWORD }} 35 | - name: Build and push amd64 python3 image to Docker Hub 36 | run: | 37 | # Strip git ref prefix from version 38 | VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 39 | # Strip "v" prefix from tag name 40 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo $VERSION | sed -e 's/^v//') 41 | docker buildx build --platform=linux/amd64 --push -t graphiteapp/graphite-statsd:${VERSION} . 42 | docker buildx build --platform=linux/amd64 --push -t graphiteapp/graphite-statsd:latest . 43 | - name: Build and push multi-platform images to Docker Hub 44 | run: | 45 | # Strip git ref prefix from version 46 | VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 47 | # Strip "v" prefix from tag name 48 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo ${VERSION} | sed -e 's/^v//') 49 | docker buildx build --platform=linux/arm/v7,linux/arm64/v8,linux/s390x,linux/amd64 --push -t graphiteapp/graphite-statsd:${VERSION} . 50 | docker buildx build --platform=linux/arm/v7,linux/arm64/v8,linux/s390x,linux/amd64 --push -t graphiteapp/graphite-statsd:latest . 
51 | - name: Login to ghcr.io 52 | run: | 53 | echo "${{ secrets.GHCR_TOKEN }}" | docker login ghcr.io -u deniszh --password-stdin 54 | - name: Build and push multi-platform images to ghcr.io 55 | run: | 56 | # Strip git ref prefix from version 57 | VERSION=$(echo "${{ github.ref }}" | sed -e 's,.*/\(.*\),\1,') 58 | # Strip "v" prefix from tag name 59 | [[ "${{ github.ref }}" == "refs/tags/"* ]] && VERSION=$(echo ${VERSION} | sed -e 's/^v//') 60 | docker buildx build --platform=linux/arm/v7,linux/arm64/v8,linux/s390x,linux/amd64 --push -t ghcr.io/deniszh/graphite-statsd:${VERSION} . 61 | docker buildx build --platform=linux/arm/v7,linux/arm64/v8,linux/s390x,linux/amd64 --push -t ghcr.io/deniszh/graphite-statsd:latest . 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | .idea 3 | 4 | # Vim 5 | [._]*.s[a-w][a-z] 6 | [._]s[a-w][a-z] 7 | *.un~ 8 | Session.vim 9 | .netrwhist 10 | *~ 11 | .project 12 | .build -------------------------------------------------------------------------------- /.hadolint.yaml: -------------------------------------------------------------------------------- 1 | ignored: 2 | - DL3003 3 | - DL3013 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASEIMAGE=alpine:3 2 | FROM $BASEIMAGE as base 3 | LABEL maintainer="Denys Zhdanov " 4 | LABEL org.opencontainers.image.source https://github.com/graphite-project/docker-graphite-statsd 5 | 6 | RUN true \ 7 | && apk add --update --no-cache \ 8 | cairo \ 9 | cairo-dev \ 10 | findutils \ 11 | librrd \ 12 | logrotate \ 13 | memcached \ 14 | nginx \ 15 | nodejs \ 16 | npm \ 17 | openldap \ 18 | redis \ 19 | runit \ 20 | sqlite \ 21 | expect \ 22 | dcron \ 23 | python3-dev \ 24 | mysql-client \ 25 | mysql-dev \ 26 | 
postgresql-client \ 27 | postgresql-dev \ 28 | librdkafka \ 29 | jansson \ 30 | bash \ 31 | && rm -rf \ 32 | /etc/nginx/conf.d/default.conf \ 33 | && mkdir -p \ 34 | /var/log/carbon \ 35 | /var/log/graphite \ 36 | && mv /etc/periodic/daily/logrotate /etc/periodic/hourly/logrotate \ 37 | && touch /var/log/messages 38 | 39 | # optional packages (e.g. not exist on S390 in alpine 3.13 yet) 40 | RUN apk add --update \ 41 | collectd collectd-disk collectd-nginx collectd-battery\ 42 | || true 43 | 44 | FROM base as build 45 | LABEL maintainer="Denys Zhdanov " 46 | 47 | ARG python_extra_flags="--single-version-externally-managed --root=/" 48 | ENV PYTHONDONTWRITEBYTECODE=1 49 | 50 | RUN true \ 51 | && apk add --update \ 52 | alpine-sdk \ 53 | curl \ 54 | git \ 55 | pkgconfig \ 56 | wget \ 57 | go \ 58 | cairo-dev \ 59 | libffi-dev \ 60 | openldap-dev \ 61 | python3-dev \ 62 | rrdtool-dev \ 63 | jansson-dev \ 64 | librdkafka-dev \ 65 | mysql-dev \ 66 | postgresql-dev \ 67 | py3-pip py3-setuptools py3-wheel py3-virtualenv \ 68 | && virtualenv -p python3 /opt/graphite \ 69 | && . /opt/graphite/bin/activate \ 70 | && echo 'INPUT ( libldap.so )' > /usr/lib/libldap_r.so \ 71 | && pip install --no-cache-dir \ 72 | cairocffi==1.1.0 \ 73 | django==4.2.15 \ 74 | django-tagging==0.5.0 \ 75 | django-statsd-mozilla \ 76 | gunicorn==20.1.0 \ 77 | eventlet>=0.24.1 \ 78 | gevent>=1.4 \ 79 | msgpack==0.6.2 \ 80 | redis \ 81 | rrdtool-bindings \ 82 | python-ldap \ 83 | mysqlclient \ 84 | psycopg2==2.8.6 \ 85 | django-cockroachdb==4.2.* 86 | 87 | ARG version=master 88 | 89 | # install whisper 90 | ARG whisper_version=${version} 91 | ARG whisper_repo=https://github.com/graphite-project/whisper.git 92 | RUN git clone -b ${whisper_version} --depth 1 ${whisper_repo} /usr/local/src/whisper \ 93 | && cd /usr/local/src/whisper \ 94 | && . 
/opt/graphite/bin/activate \ 95 | && pip3 install --no-cache-dir -r requirements.txt \ 96 | && python3 ./setup.py install $python_extra_flags 97 | 98 | # install carbon 99 | ARG carbon_version=${version} 100 | ARG carbon_repo=https://github.com/graphite-project/carbon.git 101 | RUN . /opt/graphite/bin/activate \ 102 | && git clone -b ${carbon_version} --depth 1 ${carbon_repo} /usr/local/src/carbon \ 103 | && cd /usr/local/src/carbon \ 104 | && pip3 install --no-cache-dir -r requirements.txt \ 105 | && python3 ./setup.py install $python_extra_flags 106 | 107 | # install graphite 108 | ARG graphite_version=${version} 109 | ARG graphite_repo=https://github.com/graphite-project/graphite-web.git 110 | RUN . /opt/graphite/bin/activate \ 111 | && git clone -b ${graphite_version} --depth 1 ${graphite_repo} /usr/local/src/graphite-web \ 112 | && cd /usr/local/src/graphite-web \ 113 | && pip3 install --no-cache-dir -r requirements.txt \ 114 | && python3 ./setup.py install $python_extra_flags 115 | 116 | # install statsd 117 | ARG statsd_version=0.10.2 118 | ARG statsd_repo=https://github.com/statsd/statsd.git 119 | WORKDIR /opt 120 | RUN git clone "${statsd_repo}" \ 121 | && cd /opt/statsd \ 122 | && git checkout tags/v"${statsd_version}" \ 123 | && npm install 124 | 125 | # build go-carbon (optional) 126 | # https://github.com/go-graphite/go-carbon/pull/340 127 | ARG gocarbon_version=0.17.3 128 | ARG gocarbon_repo=https://github.com/go-graphite/go-carbon.git 129 | RUN git clone "${gocarbon_repo}" /usr/local/src/go-carbon \ 130 | && cd /usr/local/src/go-carbon \ 131 | && git checkout tags/v"${gocarbon_version}" \ 132 | && make go-carbon \ 133 | && chmod +x go-carbon && mkdir -p /opt/graphite/bin/ \ 134 | && cp -fv go-carbon /opt/graphite/bin/go-carbon \ 135 | || true 136 | 137 | # install brubeck (experimental) 138 | ARG brubeck_version=f306c25df51181be05a58dcc108bfaefc39f7f4a 139 | ARG brubeck_repo=https://github.com/lukepalmer/brubeck.git 140 | ENV BRUBECK_NO_HTTP=1 141 | 
RUN git clone "${brubeck_repo}" /usr/local/src/brubeck \ 142 | && cd /usr/local/src/brubeck && git checkout "${brubeck_version}" \ 143 | && ./script/bootstrap \ 144 | && chmod +x brubeck && mkdir -p /opt/graphite/bin/ \ 145 | && cp -fv brubeck /opt/graphite/bin/brubeck 146 | 147 | COPY conf/opt/graphite/conf/ /opt/defaultconf/graphite/ 148 | COPY conf/opt/graphite/webapp/graphite/local_settings.py /opt/defaultconf/graphite/local_settings.py 149 | 150 | # config graphite 151 | COPY conf/opt/graphite/conf/* /opt/graphite/conf/ 152 | COPY conf/opt/graphite/webapp/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py 153 | WORKDIR /opt/graphite/webapp 154 | RUN mkdir -p /var/log/graphite/ \ 155 | && PYTHONPATH=/opt/graphite/webapp /opt/graphite/bin/django-admin collectstatic --noinput --settings=graphite.settings 156 | 157 | # config statsd 158 | COPY conf/opt/statsd/config/ /opt/defaultconf/statsd/config/ 159 | 160 | FROM base as production 161 | LABEL maintainer="Denys Zhdanov " 162 | 163 | ENV STATSD_INTERFACE udp 164 | 165 | # copy config BEFORE build 166 | COPY conf / 167 | 168 | # copy from build image 169 | COPY --from=build /opt /opt 170 | 171 | # defaults 172 | EXPOSE 80 2003-2004 2013-2014 2023-2024 8080 8125 8125/udp 8126 173 | VOLUME ["/opt/graphite/conf", "/opt/graphite/storage", "/opt/graphite/webapp/graphite/functions/custom", "/etc/nginx", "/opt/statsd/config", "/etc/logrotate.d", "/var/log", "/var/lib/redis"] 174 | 175 | STOPSIGNAL SIGHUP 176 | 177 | ENTRYPOINT ["/entrypoint"] 178 | 179 | HEALTHCHECK --interval=60s --start-interval=20s --timeout=3s \ 180 | CMD curl -f http://localhost/ || exit 1 181 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013-2016 Nathan Hopkins 2 | 3 | MIT License 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining 6 | a copy of this 
software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be 14 | included in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 17 | EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 19 | NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 20 | LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 21 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 22 | WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Docker Pulls](https://img.shields.io/docker/pulls/graphiteapp/graphite-statsd.svg?style=flat)](https://hub.docker.com/r/graphiteapp/graphite-statsd/) [![Docker Size](https://img.shields.io/docker/image-size/graphiteapp/graphite-statsd.svg?style=flat&?sort=date)](https://hub.docker.com/r/graphiteapp/graphite-statsd/) 2 | 3 | 4 | This is official Graphite docker image repo. 5 | 6 | This repo was based on [@hopsoft's](https://github.com/hopsoft/) [docker-graphite-statsd](https://github.com/hopsoft/docker-graphite-statsd) docker image and was used as base for "official" Graphite docker image with his permission. Also, it contains parts of famous [@obfuscurity's](https://github.com/obfuscurity/) [synthesize](https://github.com/obfuscurity/synthesize) Graphite installer. 
Thanks a lot, Nathan and Jason! 7 | 8 | Any suggestions / patches etc. are welcome! 9 | 10 | #### Tags / architectures history 11 | - Autobuild repo https://hub.docker.com/r/graphiteapp/docker-graphite-statsd (development repo, with automatic builds, unstable) is deprecated and was removed from Docker Hub. If you want to use unstable builds please use `master` tag in stable repo (https://hub.docker.com/r/graphiteapp/graphite-statsd). 12 | - Starting from `1.1.7-1` we're building arm/arm64 versions too. 13 | - Starting from `1.1.7-11` we're building linux/s390x versions too. 14 | - Starting from `1.1.7-6` and up to `1.1.8-5` we were building '-pypy' version of x64 image, but now pypy building is disabled because lacking recent pypy docker images based on Alpine. 15 | - Starting from `1.1.8-8` we're building linux/arm/v7 and linux/arm64/v8 by default. 16 | - Starting from `1.1.8-8` we're uploading docker images also to ghcr.io/deniszh repository. 17 | 18 | # Docker Image for Graphite & Statsd 19 | 20 | ## Get Graphite & Statsd running instantly 21 | 22 | Graphite & Statsd can be complex to setup. 23 | This image will have you running & collecting stats in just a few minutes. 24 | 25 | ## Quick Start 26 | 27 | ```sh 28 | docker run -d\ 29 | --name graphite\ 30 | --restart=always\ 31 | -p 80:80\ 32 | -p 2003-2004:2003-2004\ 33 | -p 2023-2024:2023-2024\ 34 | -p 8125:8125/udp\ 35 | -p 8126:8126\ 36 | graphiteapp/graphite-statsd 37 | ``` 38 | or 39 | ```sh 40 | docker run -d\ 41 | --name graphite\ 42 | --restart=always\ 43 | -p 80:80\ 44 | -p 2003-2004:2003-2004\ 45 | -p 2023-2024:2023-2024\ 46 | -p 8125:8125/udp\ 47 | -p 8126:8126\ 48 | ghcr.io/deniszh/graphite-statsd 49 | ``` 50 | This starts a Docker container named: **graphite** 51 | 52 | Please also note that you can freely remap container port to any host port in case of corresponding port is already occupied on host. It's also not mandatory to map all ports, map only required ports - please see table below. 
53 | 54 | That's it, you're done ... almost. 55 | 56 | 57 | 58 | ### Includes the following components 59 | 60 | * [Nginx](http://nginx.org/) - reverse proxies the graphite dashboard 61 | * [Graphite](http://graphite.readthedocs.org/en/latest/) - front-end dashboard 62 | * [Carbon](http://graphite.readthedocs.org/en/latest/carbon-daemons.html) - back-end 63 | * [Statsd](https://github.com/etsy/statsd/wiki) - UDP based back-end proxy 64 | 65 | ### Mapped Ports 66 | 67 | Host | Container | Service 68 | ---- | --------- | ------------------------------------------------------------------------------------------------------------------- 69 | 80 | 80 | [nginx](https://www.nginx.com/resources/admin-guide/) 70 | 2003 | 2003 | [carbon receiver - plaintext](http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-plaintext-protocol) 71 | 2004 | 2004 | [carbon receiver - pickle](http://graphite.readthedocs.io/en/latest/feeding-carbon.html#the-pickle-protocol) 72 | 2023 | 2023 | [carbon aggregator - plaintext](http://graphite.readthedocs.io/en/latest/carbon-daemons.html#carbon-aggregator-py) 73 | 2024 | 2024 | [carbon aggregator - pickle](http://graphite.readthedocs.io/en/latest/carbon-daemons.html#carbon-aggregator-py) 74 | 8080 | 8080 | Graphite internal gunicorn port (without Nginx proxying). 75 | 8125 | 8125 | [statsd](https://github.com/etsy/statsd/blob/master/docs/server.md) 76 | 8126 | 8126 | [statsd admin](https://github.com/etsy/statsd/blob/master/docs/admin_interface.md) 77 | 78 | By default, statsd listens on the UDP port 8125. If you want it to listen on the TCP port 8125 instead, you can set the environment variable `STATSD_INTERFACE` to `tcp` when running the container. 79 | 80 | Please also note that you can freely remap container port to any host port in case of corresponding port is already occupied on host. 
81 | 82 | ### Mounted Volumes 83 | 84 | Host | Container | Notes 85 | ----------------- | -------------------------- | ------------------------------- 86 | DOCKER ASSIGNED | /opt/graphite/conf | graphite config 87 | DOCKER ASSIGNED | /opt/graphite/storage | graphite stats storage 88 | DOCKER ASSIGNED | /opt/graphite/webapp/graphite/functions/custom | graphite custom functions dir 89 | DOCKER ASSIGNED | /etc/nginx | nginx config 90 | DOCKER ASSIGNED | /opt/statsd/config | statsd config 91 | DOCKER ASSIGNED | /etc/logrotate.d | logrotate config 92 | DOCKER ASSIGNED | /var/log | log files 93 | DOCKER ASSIGNED | /var/lib/redis | Redis TagDB data (optional) 94 | 95 | ### Base Image 96 | 97 | Version before (and including) 1.1.4-9 were built using [Phusion's base image](https://github.com/phusion/baseimage-docker). Current version is based on [Alpine Linux](https://alpinelinux.org/) because of image size (please see [PR#66](https://github.com/graphite-project/docker-graphite-statsd/pull/66) for details). 98 | 99 | * All Graphite related processes are run as daemons & monitored with [runit](http://smarden.org/runit/). 100 | * Includes additional services such as logrotate, nginx, optional Redis for TagDB and optional collectd instance. 101 | 102 | ## Start Using Graphite & Statsd 103 | 104 | ### Send Some Stats 105 | 106 | Let's fake some stats with a random counter to prove things are working. 107 | 108 | ```sh 109 | while true; do echo -n "example:$((RANDOM % 100))|c" | nc -w 1 -u 127.0.0.1 8125; done 110 | ``` 111 | 112 | ### Visualize the Data 113 | 114 | Open Graphite in a browser. 115 | 116 | * http://localhost/dashboard 117 | * http://localhost/render?from=-10mins&until=now&target=stats.example 118 | 119 | ## Secure the Django Admin 120 | 121 | Update the default Django admin user account. 
_The default is insecure._ 122 | 123 | * username: root 124 | * password: root 125 | * email: root.graphite@mailinator.com 126 | 127 | First login at: [http://localhost/account/login](http://localhost/account/login) 128 | Then update the root user's profile at: [http://localhost/admin/password_change/](http://localhost/admin/password_change/) 129 | 130 | ## Tunables 131 | Additional environment variables can be set to adjust performance. 132 | 133 | * GRAPHITE_WSGI_PROCESSES: (4) the number of WSGI daemon processes that should be started 134 | * GRAPHITE_WSGI_THREADS: (1) the number of threads to be created to handle requests in each daemon process. See [gunicorn docs](https://docs.gunicorn.org/en/stable/settings.html#threads). 135 | * GRAPHITE_WSGI_REQUEST_TIMEOUT: (65) maximum number of seconds that a request is allowed to run before the daemon process is restarted 136 | * GRAPHITE_WSGI_MAX_REQUESTS: (1000) limit on the number of requests a daemon process should process before it is shutdown and restarted. 137 | * GRAPHITE_WSGI_REQUEST_LINE: (0) The maximum size of HTTP request line in bytes. 138 | * GRAPHITE_WSGI_WORKER_CLASS ("sync"): The type of workers to use. The default class (sync) should handle most “normal” types of workloads. See [gunicorn docs](https://docs.gunicorn.org/en/stable/settings.html#worker-class). 139 | * GRAPHITE_WSGI_WORKER_CONNECTIONS (1000): The maximum number of simultaneous clients (for Eventlet and Gevent worker types only). See [gunicorn docs](https://docs.gunicorn.org/en/stable/settings.html#worker-connections). 140 | 141 | ### Graphite-web 142 | * GRAPHITE_ALLOWED_HOSTS: (*) In Django 1.5+ set this to the list of hosts your graphite instance is accessible as. 
See: [https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS](https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS) 143 | * GRAPHITE_TIME_ZONE: (Etc/UTC) Set your local timezone 144 | * GRAPHITE_DATE_FORMAT: (%m/%d) Set your local date format 145 | * GRAPHITE_UTF8_METRICS: (false) Allow UTF-8 metrics names (can cause performance issues) 146 | * GRAPHITE_LOG_ROTATION: (false) rotate logs using internal log rotation, otherwise use [logrotate](https://github.com/graphite-project/docker-graphite-statsd/blob/master/conf/etc/logrotate.d/graphite-statsd) instead 147 | * GRAPHITE_LOG_ROTATION_COUNT: (1) number of logs to keep (if GRAPHITE_LOG_ROTATION is true) 148 | * GRAPHITE_LOG_RENDERING_PERFORMANCE: (true) log performance information 149 | * GRAPHITE_LOG_CACHE_PERFORMANCE: (true) log cache performance information 150 | * GRAPHITE_LOG_INFO_PERFORMANCE: (false) log info performance information 151 | * GRAPHITE_LOG_FILE_INFO: (info.log), set to "-" for stdout/stderr 152 | * GRAPHITE_LOG_FILE_EXCEPTION: (exception.log), set to "-" for stdout/stderr 153 | * GRAPHITE_LOG_FILE_CACHE: (cache.log), set to "-" for stdout/stderr 154 | * GRAPHITE_LOG_FILE_RENDERING: (rendering.log), set to "-" for stdout/stderr 155 | * GRAPHITE_DEBUG: (false) Enable full debug page display on exceptions (Internal Server Error pages) 156 | * GRAPHITE_DEFAULT_CACHE_DURATION: (60) Duration to cache metric data and graphs 157 | * GRAPHITE_CARBONLINK_HOSTS: ('127.0.0.1:7002') List of carbonlink hosts 158 | * GRAPHITE_CARBONLINK_TIMEOUT: (1.0) Carbonlink request timeout 159 | * GRAPHITE_CARBONLINK_HASHING_TYPE: ('carbon_ch') Type of metric hashing function. 160 | * GRAPHITE_REPLICATION_FACTOR: (1) # The replication factor to use with consistent hashing. This should usually match the value configured in Carbon. 161 | * GRAPHITE_CLUSTER_SERVERS: ('') This should list of remote servers in the cluster. These servers must each have local access to metric data. 
Note that the first server to return a match for a query will be used. See [docs](https://graphite.readthedocs.io/en/latest/config-local-settings.html#cluster-configuration) for details. 162 | * GRAPHITE_USE_WORKER_POOL: (true) Creates a pool of worker threads to which tasks can be dispatched. This makes sense if there are multiple CLUSTER_SERVERS and/or STORAGE_FINDERS because then the communication with them can be parallelized. 163 | * GRAPHITE_REMOTE_FIND_TIMEOUT: (30) Timeout for metric find requests 164 | * GRAPHITE_REMOTE_FETCH_TIMEOUT: (60) Timeout to fetch series data 165 | * GRAPHITE_REMOTE_RETRY_DELAY: (0) Time before retrying a failed remote webapp. 166 | * GRAPHITE_MAX_FETCH_RETRIES: (2) Number of retries for a specific remote data fetch 167 | * GRAPHITE_FIND_CACHE_DURATION: (0) Time to cache remote metric find results 168 | * GRAPHITE_STATSD_HOST: ("127.0.0.1") If set, django_statsd.middleware.GraphiteRequestTimingMiddleware and django_statsd.middleware.GraphiteMiddleware will be enabled. 169 | * GRAPHITE_URL_ROOT: ('') Sets a url prefix if deploying graphite-web to a non-root location. 170 | 171 | ## TagDB 172 | Graphite stores tag information in a separate tag database (TagDB). Please check [tags documentation](https://graphite.readthedocs.io/en/latest/tags.html) for details. 173 | 174 | * CARBON_DISABLE_TAGS: (false) if set to 1 or true will disable TagDB on carbon-cache. 175 | * GRAPHITE_TAGDB: ('graphite.tags.localdatabase.LocalDatabaseTagDB') TagDB is a pluggable store, by default it uses the local SQLite database. 176 | * REDIS_TAGDB: (false) if set to 1 or true will use local Redis instance to store tags. 177 | * GRAPHITE_TAGDB_CACHE_DURATION: (60) Time to cache seriesByTag results. 178 | * GRAPHITE_TAGDB_AUTOCOMPLETE_LIMIT: (100) Autocomplete default result limit. 
179 | * GRAPHITE_TAGDB_REDIS_HOST: ('localhost') Redis TagDB host 180 | * GRAPHITE_TAGDB_REDIS_PORT: (6379) Redis TagDB port 181 | * GRAPHITE_TAGDB_REDIS_DB: (0) Redis TagDB database number 182 | * GRAPHITE_TAGDB_HTTP_URL: ('') URL for HTTP TagDB 183 | * GRAPHITE_TAGDB_HTTP_USER: ('') Username for HTTP TagDB 184 | * GRAPHITE_TAGDB_HTTP_PASSWORD: ('') Password for HTTP TagDB 185 | * GRAPHITE_TAGDB_HTTP_AUTOCOMPLETE: (false) Does the remote TagDB support autocomplete? 186 | 187 | ## Collectd 188 | Use `COLLECTD=1` environment variable to enable local collectd instance 189 | 190 | ## AMQP 191 | * CARBON_ENABLE_AMQP: (false) if set to 1 or true will enable AMQP ingestion in Carbon. 192 | * CARBON_AMQP_VERBOSE: (false) if set to 1 or true will enable verbose AMQP output 193 | * CARBON_AMQP_HOST: (localhost) 194 | * CARBON_AMQP_PORT: (5672) 195 | * CARBON_AMQP_VHOST: (/) 196 | * CARBON_AMQP_USER: : (guest) 197 | * CARBON_AMQP_PASSWORD: (guest) 198 | * CARBON_AMQP_EXCHANGE: (graphite) 199 | * CARBON_AMQP_METRIC_NAME_IN_BODY: (false) 200 | 201 | ## Carbon-cache 202 | If custom environment `GRAPHITE_CARBONLINK_HOSTS` variable is setup `carbon-cache` instances as daemons/services are [managed](./conf/etc/run_once/carbon-cache) based on the that, otherwise default instance (`127.0.0.1:7002`) is used. 203 | 204 | **Note**: if default port `7002` is used among the hosts, need to setup `CARBON_DISABLED=1` in the environment. 205 | 206 | ## Carbon-relay 207 | Use `RELAY=1` environment variable to enable carbon relay instance. Use `[relay]` section of carbon.conf to configure it. 208 | 209 | **Note**: in order to use `carbon-relay` daemon correctly, it must accept & distribute incoming traffic within DESTINATIONS 210 | endpoints which by default isn't (`carbon-cache` is). As one of solutions is to adjust `graphitePort` value to 211 | carbon-relay LINE_RECEIVER_PORT in [`statsd`](https://github.com/statsd/statsd/blob/master/exampleConfig.js) config. 
212 | 213 | ## Logrotate 214 | By default logs are rotated daily, based on configuration in `/etc/logrotate.d/graphite-statsd`. You can vary rotation by adding `size 10M` or changing time up to `hourly` (instead of `daily`), because logrotate is evaluated hourly by `/etc/periodic/hourly/logrotate`. Please note that, according to Docker [logging best practices](https://success.docker.com/article/logging-best-practices), "Ideally, applications log to stdout/stderr, and Docker sends those logs to the configured logging destination.". You can use `-` as log file name for such behaviour. 215 | 216 | ## Runit 217 | Each service started and controlled by runit will be gracefully shut down when stopping the container: it waits up to 7 seconds for the service to become down, then it will be killed. The runit environment variable `$SVWAIT` overrides this default timeout. Additionally, a global timeout can also be specified with the docker-run option `--stop-timeout`. 218 | Each service started by default can be disabled by setting an environment variable named `<SERVICE_NAME>_DISABLED`. For instance: `CARBON_AGGREGATOR_DISABLED=1`, `STATSD_DISABLED=1`, etc. Please note that any service in the image can be disabled, so some functionality can be broken in this case. 219 | 220 | ## Startup custom scripts 221 | At startup, entrypoint will run all scripts found in the directory /etc/run_once. It can be mounted with a docker-run option like this: `--mount type=bind,source=/path/to/run_once,destination=/etc/run_once`. 222 | 223 | ## Change the Configuration 224 | 225 | Read up on Graphite's [post-install tasks](https://graphite.readthedocs.org/en/latest/install.html#post-install-tasks). 226 | Focus on the [storage-schemas.conf](https://graphite.readthedocs.org/en/latest/config-carbon.html#storage-schemas-conf). 227 | 228 | 1. Stop the container `docker stop graphite`. 229 | 1. Find the configuration files on the host by inspecting the container `docker inspect graphite`. 230 | 1.
Update the desired config files. 231 | 1. Restart the container `docker start graphite`. 232 | 233 | **Note**: If you change settings in `/opt/graphite/conf/storage-schemas.conf` 234 | be sure to delete the old whisper files under `/opt/graphite/storage/whisper/`. 235 | 236 | --- 237 | 238 | **Important:** Ensure your Statsd flush interval is at least as long as the highest-resolution retention. 239 | For example, if `/opt/statsd/config/udp.js` looks like this. 240 | 241 | ``` 242 | flushInterval: 10000 243 | ``` 244 | 245 | Ensure that `storage-schemas.conf` retentions are no finer grained than 10 seconds. 246 | 247 | ``` 248 | [all] 249 | pattern = .* 250 | retentions = 5s:12h # WRONG 251 | retentions = 10s:12h # OK 252 | retentions = 60s:12h # OK 253 | ``` 254 | 255 | ## Statsd Admin Management Interface 256 | 257 | A management interface (default on port 8126) allows you to manage statsd & retrieve stats. 258 | 259 | ```sh 260 | # show all current counters 261 | echo counters | nc localhost 8126 262 | ``` 263 | 264 | [More info & additional commands.](https://github.com/etsy/statsd/blob/master/docs/admin_interface.md) 265 | 266 | ## A Note on Volumes 267 | 268 | You may find it useful to mount explicit volumes so configs & data can be managed from a known location on the host. 269 | 270 | Simply specify the desired volumes when starting the container. 271 | 272 | ``` 273 | docker run -d\ 274 | --name graphite\ 275 | --restart=always\ 276 | -v /path/to/graphite/configs:/opt/graphite/conf\ 277 | -v /path/to/graphite/data:/opt/graphite/storage\ 278 | -v /path/to/statsd_config:/opt/statsd/config\ 279 | graphiteapp/graphite-statsd 280 | ``` 281 | 282 | **Note**: The container will initialize properly if you mount empty volumes at 283 | `/opt/graphite/conf`, `/opt/graphite/storage`, or `/opt/statsd/config`. 
284 | 285 | ## Memcached config 286 | 287 | If you have a Memcached server running, and want Graphite to use it, you can do it using environment variables, like this: 288 | 289 | ``` 290 | docker run -d\ 291 | --name graphite\ 292 | --restart=always\ 293 | -p 80:80\ 294 | -p 2003-2004:2003-2004\ 295 | -p 2023-2024:2023-2024\ 296 | -p 8125:8125/udp\ 297 | -p 8126:8126\ 298 | -e "MEMCACHE_HOST=127.0.0.1:11211"\ # Memcached host. Separate multiple servers with commas. 299 | -e "GRAPHITE_DEFAULT_CACHE_DURATION=60"\ # in seconds 300 | graphiteapp/graphite-statsd 301 | ``` 302 | 303 | Also, you can specify more than one memcached server, using commas: 304 | 305 | ``` 306 | -e "MEMCACHE_HOST=127.0.0.1:11211,10.0.0.1:11211" 307 | ``` 308 | ## Running through docker-compose 309 | The following command will start the graphite statsd container through docker-compose 310 | ``` 311 | docker-compose up 312 | ``` 313 | 314 | ## Running through Kubernetes 315 | You can use this third-party repo with a Graphite Helm chart - https://github.com/kiwigrid/helm-charts/tree/master/charts/graphite 316 | 317 | ## About `root` process 318 | 319 | This image uses `runit` as its init system to run multiple processes in a single container. It's not against Docker guidelines, but a bit against Docker philosophy. Also, `runit` requires root privileges to run, so it's not possible to stop using root privileges without completely rewriting this image. This is possible, of course, but then it's better to use separate images per component, and to have a separate repository for this new project. Or you may use this image as a base and create a set of images to start Graphite components separately, without root access. The result will probably be quite specific, but maybe it's possible to make it generic enough to merge into this repo - in that case we would like to accept such a PR.
320 | 321 | ## Experimental Features 322 | ### go-carbon 323 | 324 | Use `GOCARBON=1` environment variable to enable a [go-carbon](https://github.com/lomik/go-carbon) instance instead of normal Carbon. Use `GRAPHITE_CLUSTER_SERVERS="127.0.0.1:8000"` if you also want to use the [carbonserver](https://github.com/grobian/carbonserver) feature. 325 | 326 | ### brubeck 327 | 328 | Use `BRUBECK=1` environment variable to enable a [brubeck](https://github.com/lukepalmer/brubeck) instance instead of normal Statsd. Please note that brubeck has a different config format and is not fully compatible with original statsd. 329 | 330 | 331 | ## Additional Reading 332 | 333 | * [Introduction to Docker](http://docs.docker.io/#introduction) 334 | * [Official Statsd Documentation](https://github.com/etsy/statsd/) 335 | * [Practical Guide to StatsD/Graphite Monitoring](http://matt.aimonetti.net/posts/2013/06/26/practical-guide-to-graphite-monitoring/) 336 | * [Configuring Graphite for StatsD](https://github.com/etsy/statsd/blob/master/docs/graphite.md) 337 | 338 | ## Contributors 339 | 340 | Build the image yourself. 341 | 342 | 1. `git clone https://github.com/graphite-project/docker-graphite-statsd.git` 343 | 2. `docker build --build-arg python_binary=python3 -t graphiteapp/graphite-statsd .` 344 | #### For using pypy instead of python3 345 | 2. `docker build --build-arg BASEIMAGE=jamiehewland/alpine-pypy:3.6-7.3-alpine3.11 --build-arg python_binary=/usr/local/bin/pypy3 -t graphiteapp/graphite-statsd .` 346 | 347 | 348 | Alternate versions can be specified via `--build-arg`: 349 | 350 | * `version` will set the version/branch used for graphite-web, carbon & whisper 351 | * `graphite_version`, `carbon_version` & `whisper_version` set the version/branch used for individual components 352 | * `statsd_version` sets the version/branch used for statsd (note statsd version is prefixed with v) 353 | * `python_binary` sets path to python binary and `BASEIMAGE` sets path to base image.
354 | 355 | Alternate repositories can also be specified with the build args `graphite_repo`, `carbon_repo`, `whisper_repo` & `statsd_repo`. 356 | 357 | To build an image from latest graphite, whisper & carbon master, run: 358 | 359 | `docker build -t graphiteapp/graphite-statsd . --build-arg version=master --build-arg python_binary=python3` 360 | 361 | To build an image using a fork of graphite-web, run: 362 | 363 | `docker build -t forked/graphite-statsd . --build-arg version=master --build-arg graphite_repo=https://github.com/forked/graphite-web.git --build-arg python_binary=python3` 364 | -------------------------------------------------------------------------------- /build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | VERSION=1.1.11-rc0 3 | docker build . \ 4 | --build-arg python_extra_flags="--single-version-externally-managed --root=/" \ 5 | --no-cache --tag graphiteapp/graphite-statsd:$VERSION --progress tty -------------------------------------------------------------------------------- /conf/entrypoint: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ## Inspired from the script found at 4 | ## https://sanjeevan.co.uk/blog/running-services-inside-a-container-using-runit-and-alpine-linux/ 5 | 6 | shutdown() { 7 | echo "shutting down container" 8 | 9 | # first shutdown any service started by runit 10 | for _srv in $(ls -1 /etc/service); do 11 | sv force-stop $_srv 12 | done 13 | 14 | echo "shutting down runsvdir" 15 | 16 | # shutdown runsvdir command 17 | kill -HUP $RUNSVDIR 18 | wait $RUNSVDIR 19 | 20 | # give processes time to stop 21 | sleep 0.5 22 | 23 | echo "killing rest processes" 24 | # kill any other processes still running in the container 25 | for _pid in $(ps -eo pid | grep -v PID | tr -d ' ' | grep -v '^1$' | head -n -6); do 26 | timeout 5 /bin/sh -c "kill $_pid && wait $_pid || kill -9 $_pid" 27 | done 28 | exit 29 | } 30 | 
31 | . /opt/graphite/bin/activate 32 | 33 | PATH="${PATH}:/usr/local/bin" 34 | 35 | # run all scripts in the run_once folder 36 | [ -d /etc/run_once ] && /bin/run-parts /etc/run_once 37 | 38 | ## check services to disable 39 | for _srv in $(ls -1 /etc/service); do 40 | eval X=$`echo -n $_srv | tr [:lower:]- [:upper:]_`_DISABLED 41 | [ -n "$X" ] && touch /etc/service/$_srv/down 42 | done 43 | 44 | # remove stale pids 45 | find /opt/graphite/storage -maxdepth 1 -name '*.pid' -delete 46 | 47 | # chmod logrotate fle (#111) 48 | chmod 0644 /etc/logrotate.d/* 49 | 50 | exec runsvdir -P /etc/service & 51 | RUNSVDIR=$! 52 | echo "Started runsvdir, PID is $RUNSVDIR" 53 | echo "wait for processes to start...." 54 | 55 | sleep 5 56 | for _srv in $(ls -1 /etc/service); do 57 | sv status $_srv 58 | done 59 | 60 | # catch shutdown signals 61 | trap shutdown SIGTERM SIGHUP SIGQUIT SIGINT 62 | wait $RUNSVDIR 63 | 64 | shutdown 65 | -------------------------------------------------------------------------------- /conf/etc/collectd/collectd.conf: -------------------------------------------------------------------------------- 1 | # Config file for collectd(1). 2 | # 3 | # Some plugins need additional configuration and are disabled by default. 4 | # Please read collectd.conf(5) for details. 5 | # 6 | # You should also read /usr/share/doc/collectd-core/README.Debian.plugins 7 | # before enabling any more plugins. 
8 | 9 | #Hostname "localhost" 10 | Hostname "graphite" 11 | FQDNLookup true 12 | #BaseDir "/var/lib/collectd" 13 | #PluginDir "/usr/lib/collectd" 14 | #TypesDB "/usr/share/collectd/types.db" "/etc/collectd/my_types.db" 15 | Interval 10 16 | #Interval 60 17 | #Timeout 2 18 | #ReadThreads 5 19 | 20 | #LoadPlugin logfile 21 | LoadPlugin syslog 22 | 23 | # 24 | # LogLevel "info" 25 | # File STDOUT 26 | # Timestamp true 27 | # PrintSeverity false 28 | # 29 | 30 | 31 | LogLevel info 32 | 33 | 34 | #LoadPlugin amqp 35 | #LoadPlugin apache 36 | #LoadPlugin apcups 37 | #LoadPlugin ascent 38 | LoadPlugin battery 39 | #LoadPlugin bind 40 | #LoadPlugin conntrack 41 | #LoadPlugin contextswitch 42 | LoadPlugin cpu 43 | #LoadPlugin cpufreq 44 | #LoadPlugin csv 45 | #LoadPlugin curl 46 | #LoadPlugin curl_json 47 | #LoadPlugin curl_xml 48 | #LoadPlugin dbi 49 | LoadPlugin df 50 | LoadPlugin disk 51 | #LoadPlugin dns 52 | #LoadPlugin email 53 | LoadPlugin entropy 54 | #LoadPlugin ethstat 55 | #LoadPlugin exec 56 | LoadPlugin filecount 57 | #LoadPlugin fscache 58 | #LoadPlugin gmond 59 | #LoadPlugin hddtemp 60 | LoadPlugin interface 61 | #LoadPlugin ipmi 62 | #LoadPlugin iptables 63 | #LoadPlugin ipvs 64 | #LoadPlugin irq 65 | #LoadPlugin java 66 | #LoadPlugin libvirt 67 | LoadPlugin load 68 | #LoadPlugin madwifi 69 | #LoadPlugin mbmon 70 | #LoadPlugin md 71 | #LoadPlugin memcachec 72 | #LoadPlugin memcached 73 | LoadPlugin memory 74 | #LoadPlugin multimeter 75 | #LoadPlugin mysql 76 | #LoadPlugin netlink 77 | #LoadPlugin network 78 | #LoadPlugin nfs 79 | LoadPlugin nginx 80 | #LoadPlugin notify_desktop 81 | #LoadPlugin notify_email 82 | #LoadPlugin ntpd 83 | #LoadPlugin numa 84 | #LoadPlugin nut 85 | #LoadPlugin olsrd 86 | #LoadPlugin openvpn 87 | # 88 | # Globals true 89 | # 90 | #LoadPlugin pinba 91 | #LoadPlugin ping 92 | #LoadPlugin postgresql 93 | #LoadPlugin powerdns 94 | LoadPlugin processes 95 | #LoadPlugin protocols 96 | # 97 | # Globals true 98 | # 99 | #LoadPlugin 
rrdcached 100 | #LoadPlugin rrdtool 101 | #LoadPlugin sensors 102 | #LoadPlugin serial 103 | #LoadPlugin snmp 104 | LoadPlugin swap 105 | #LoadPlugin table 106 | LoadPlugin tail 107 | #LoadPlugin tcpconns 108 | #LoadPlugin teamspeak2 109 | #LoadPlugin ted 110 | #LoadPlugin thermal 111 | #LoadPlugin tokyotyrant 112 | #LoadPlugin unixsock 113 | #LoadPlugin uptime 114 | #LoadPlugin users 115 | #LoadPlugin uuid 116 | #LoadPlugin varnish 117 | #LoadPlugin vmem 118 | #LoadPlugin vserver 119 | #LoadPlugin wireless 120 | LoadPlugin write_graphite 121 | #LoadPlugin write_http 122 | #LoadPlugin write_mongodb 123 | 124 | # 125 | # 126 | # Host "localhost" 127 | # Port "5672" 128 | # VHost "/" 129 | # User "guest" 130 | # Password "guest" 131 | # Exchange "amq.fanout" 132 | # RoutingKey "collectd" 133 | # Persistent false 134 | # StoreRates false 135 | # 136 | # 137 | 138 | # 139 | # 140 | # URL "https://127.0.0.1/server-status?auto" 141 | # VerifyPeer false 142 | # VerifyHost false 143 | # 144 | # 145 | # URL "http://localhost/server-status?auto" 146 | # User "www-user" 147 | # Password "secret" 148 | # VerifyPeer false 149 | # VerifyHost false 150 | # CACert "/etc/ssl/ca.crt" 151 | # Server "apache" 152 | # 153 | # 154 | # 155 | # URL "http://some.domain.tld/status?auto" 156 | # Host "some.domain.tld" 157 | # Server "lighttpd" 158 | # 159 | # 160 | 161 | # 162 | # Host "localhost" 163 | # Port "3551" 164 | # 165 | 166 | # 167 | # URL "http://localhost/ascent/status/" 168 | # User "www-user" 169 | # Password "secret" 170 | # VerifyPeer false 171 | # VerifyHost false 172 | # CACert "/etc/ssl/ca.crt" 173 | # 174 | 175 | # 176 | # URL "http://localhost:8053/" 177 | # 178 | # ParseTime false 179 | # 180 | # OpCodes true 181 | # QTypes true 182 | # ServerStats true 183 | # ZoneMaintStats true 184 | # ResolverStats false 185 | # MemoryStats true 186 | # 187 | # 188 | # QTypes true 189 | # ResolverStats true 190 | # CacheRRSets true 191 | # 192 | # Zone "127.in-addr.arpa/IN" 193 | # 
194 | # 195 | 196 | # 197 | # DataDir "/var/lib/collectd/csv" 198 | # StoreRates false 199 | # 200 | 201 | # 202 | # 203 | # URL "http://finance.google.com/finance?q=NYSE%3AAMD" 204 | # User "foo" 205 | # Password "bar" 206 | # VerifyPeer false 207 | # VerifyHost false 208 | # CACert "/etc/ssl/ca.crt" 209 | # MeasureResponseTime false 210 | # 211 | # Regex "]*> *([0-9]*\\.[0-9]+) *" 212 | # DSType "GaugeAverage" 213 | # Type "stock_value" 214 | # Instance "AMD" 215 | # 216 | # 217 | # 218 | 219 | # 220 | ## See: http://wiki.apache.org/couchdb/Runtime_Statistics 221 | # 222 | # Instance "httpd" 223 | # 224 | # Type "http_requests" 225 | # 226 | # 227 | # 228 | # Type "http_request_methods" 229 | # 230 | # 231 | # 232 | # Type "http_response_codes" 233 | # 234 | # 235 | ## Database status metrics: 236 | # 237 | # Instance "dbs" 238 | # 239 | # Type "gauge" 240 | # 241 | # 242 | # Type "counter" 243 | # 244 | # 245 | # Type "bytes" 246 | # 247 | # 248 | # 249 | 250 | # 251 | # 252 | # Host "my_host" 253 | # Instance "some_instance" 254 | # User "collectd" 255 | # Password "thaiNg0I" 256 | # VerifyPeer true 257 | # VerifyHost true 258 | # CACert "/path/to/ca.crt" 259 | # 260 | # 261 | # Type "magic_level" 262 | # InstancePrefix "prefix-" 263 | # InstanceFrom "td[1]" 264 | # ValuesFrom "td[2]/span[@class=\"level\"]" 265 | # 266 | # 267 | # 268 | 269 | # 270 | # 271 | # Statement "SELECT 'customers' AS c_key, COUNT(*) AS c_value \ 272 | # FROM customers_tbl" 273 | # MinVersion 40102 274 | # MaxVersion 50042 275 | # 276 | # Type "gauge" 277 | # InstancePrefix "customer" 278 | # InstancesFrom "c_key" 279 | # ValuesFrom "c_value" 280 | # 281 | # 282 | # 283 | # 284 | # Driver "mysql" 285 | # DriverOption "host" "localhost" 286 | # DriverOption "username" "collectd" 287 | # DriverOption "password" "secret" 288 | # DriverOption "dbname" "custdb0" 289 | # SelectDB "custdb0" 290 | # Query "num_of_customers" 291 | # Query "..." 
292 | # 293 | # 294 | 295 | 296 | MountPoint "/" 297 | ReportInodes true 298 | ValuesAbsolute true 299 | IgnoreSelected false 300 | # Device "/dev/sda1" 301 | # Device "192.168.0.2:/mnt/nfs" 302 | # MountPoint "/home" 303 | # FSType "ext3" 304 | # IgnoreSelected false 305 | # ReportByDevice false 306 | # ReportReserved false 307 | # ReportInodes false 308 | 309 | 310 | # 311 | # Disk "hda" 312 | # Disk "/sda[23]/" 313 | # IgnoreSelected false 314 | # 315 | 316 | # 317 | # Interface "eth0" 318 | # IgnoreSource "192.168.0.1" 319 | # SelectNumericQueryTypes false 320 | # 321 | 322 | # 323 | # SocketFile "/var/run/collectd-email" 324 | # SocketGroup "collectd" 325 | # SocketPerms "0770" 326 | # MaxConns 5 327 | # 328 | 329 | # 330 | # Interface "eth0" 331 | # Map "rx_csum_offload_errors" "if_rx_errors" "checksum_offload" 332 | # Map "multicast" "if_multicast" 333 | # MappedOnly false 334 | # 335 | 336 | # 337 | # Exec user "/path/to/exec" 338 | # Exec "user:group" "/path/to/exec" 339 | # NotificationExec user "/path/to/exec" 340 | # 341 | 342 | 343 | 344 | Instance "whisper" 345 | Name "*.wsp" 346 | # 347 | # Instance "foodir" 348 | # Name "*.conf" 349 | # MTime "-5m" 350 | # Size "+10k" 351 | # Recursive true 352 | # IncludeHidden false 353 | 354 | 355 | 356 | # 357 | # MCReceiveFrom "239.2.11.71" "8649" 358 | # 359 | # 360 | # Type "swap" 361 | # TypeInstance "total" 362 | # DataSource "value" 363 | # 364 | # 365 | # 366 | # Type "swap" 367 | # TypeInstance "free" 368 | # DataSource "value" 369 | # 370 | # 371 | 372 | # 373 | # Host "127.0.0.1" 374 | # Port 7634 375 | # 376 | 377 | # 378 | # Interface "eth0" 379 | # IgnoreSelected false 380 | # 381 | 382 | # 383 | # Sensor "some_sensor" 384 | # Sensor "another_one" 385 | # IgnoreSelected false 386 | # NotifySensorAdd false 387 | # NotifySensorRemove true 388 | # NotifySensorNotPresent false 389 | # 390 | 391 | # 392 | # Chain "table" "chain" 393 | # 394 | 395 | # 396 | # Irq 7 397 | # Irq 8 398 | # Irq 9 399 | # 
IgnoreSelected true 400 | # 401 | 402 | # 403 | # JVMArg "-verbose:jni" 404 | # JVMArg "-Djava.class.path=/usr/share/collectd/java/collectd-api.jar" 405 | # 406 | # LoadPlugin "org.collectd.java.GenericJMX" 407 | # 408 | # # See /usr/share/doc/collectd/examples/GenericJMX.conf 409 | # # for an example config. 410 | # 411 | # 412 | 413 | # 414 | # Connection "xen:///" 415 | # RefreshInterval 60 416 | # Domain "name" 417 | # BlockDevice "name:device" 418 | # InterfaceDevice "name:device" 419 | # IgnoreSelected false 420 | # HostnameFormat name 421 | # InterfaceFormat name 422 | # 423 | 424 | # 425 | # Interface "wlan0" 426 | # IgnoreSelected false 427 | # Source "SysFS" 428 | # WatchSet "None" 429 | # WatchAdd "node_octets" 430 | # WatchAdd "node_rssi" 431 | # WatchAdd "is_rx_acl" 432 | # WatchAdd "is_scan_active" 433 | # 434 | 435 | # 436 | # Host "127.0.0.1" 437 | # Port 411 438 | # 439 | 440 | # 441 | # Device "/dev/md0" 442 | # IgnoreSelected false 443 | # 444 | 445 | # 446 | # 447 | # Server "localhost" 448 | # Key "page_key" 449 | # 450 | # Regex "(\\d+) bytes sent" 451 | # ExcludeRegex "" 452 | # DSType CounterAdd 453 | # Type "ipt_octets" 454 | # Instance "type_instance" 455 | # 456 | # 457 | # 458 | 459 | # 460 | # Socket "/var/run/memcached.sock" 461 | # or: 462 | # Host "127.0.0.1" 463 | # Port "11211" 464 | # 465 | 466 | # 467 | # 468 | # Host "database.serv.er" 469 | # Port "3306" 470 | # User "db_user" 471 | # Password "secret" 472 | # Database "db_name" 473 | # MasterStats true 474 | # 475 | # 476 | # 477 | # Host "localhost" 478 | # Socket "/var/run/mysql/mysqld.sock" 479 | # SlaveStats true 480 | # SlaveNotifications true 481 | # 482 | # 483 | 484 | # 485 | # Interface "All" 486 | # VerboseInterface "All" 487 | # QDisc "eth0" "pfifo_fast-1:0" 488 | # Class "ppp0" "htb-1:10" 489 | # Filter "ppp0" "u32-1:0" 490 | # IgnoreSelected false 491 | # 492 | 493 | # 494 | # # client setup: 495 | # Server "ff18::efc0:4a42" "25826" 496 | # 497 | # SecurityLevel 
Encrypt 498 | # Username "user" 499 | # Password "secret" 500 | # Interface "eth0" 501 | # 502 | # TimeToLive "128" 503 | # 504 | # # server setup: 505 | # Listen "ff18::efc0:4a42" "25826" 506 | # 507 | # SecurityLevel Sign 508 | # AuthFile "/etc/collectd/passwd" 509 | # Interface "eth0" 510 | # 511 | # MaxPacketSize 1024 512 | # 513 | # # proxy setup (client and server as above): 514 | # Forward true 515 | # 516 | # # statistics about the network plugin itself 517 | # ReportStats false 518 | # 519 | # # "garbage collection" 520 | # CacheFlush 1800 521 | # 522 | 523 | 524 | URL "http://localhost/nginx_status?auto" 525 | VerifyPeer false 526 | VerifyHost false 527 | 528 | 529 | # 530 | # OkayTimeout 1000 531 | # WarningTimeout 5000 532 | # FailureTimeout 0 533 | # 534 | 535 | # 536 | # SMTPServer "localhost" 537 | # SMTPPort 25 538 | # SMTPUser "my-username" 539 | # SMTPPassword "my-password" 540 | # From "collectd@main0server.com" 541 | # # on . 542 | # # Beware! Do not use not more than two placeholders (%)! 543 | # Subject "[collectd] %s on %s!" 
544 | # Recipient "email1@domain1.net" 545 | # Recipient "email2@domain2.com" 546 | # 547 | 548 | # 549 | # Host "localhost" 550 | # Port 123 551 | # ReverseLookups false 552 | # 553 | 554 | # 555 | # UPS "upsname@hostname:port" 556 | # 557 | 558 | # 559 | # Host "127.0.0.1" 560 | # Port "2006" 561 | # CollectLinks "Summary" 562 | # CollectRoutes "Summary" 563 | # CollectTopology "Summary" 564 | # 565 | 566 | # 567 | # StatusFile "/etc/openvpn/openvpn-status.log" 568 | # ImprovedNamingSchema false 569 | # CollectCompression true 570 | # CollectIndividualUsers true 571 | # CollectUserCount false 572 | # 573 | 574 | # 575 | # IncludeDir "/my/include/path" 576 | # BaseName "Collectd::Plugins" 577 | # EnableDebugger "" 578 | # LoadPlugin Monitorus 579 | # LoadPlugin OpenVZ 580 | # 581 | # 582 | # Foo "Bar" 583 | # Qux "Baz" 584 | # 585 | # 586 | 587 | # 588 | # Address "::0" 589 | # Port "30002" 590 | # 591 | # Host "host name" 592 | # Server "server name" 593 | # Script "script name" 594 | # 595 | # 596 | 597 | # 598 | # Host "host.foo.bar" 599 | # Host "host.baz.qux" 600 | # Interval 1.0 601 | # Timeout 0.9 602 | # TTL 255 603 | # SourceAddress "1.2.3.4" 604 | # Device "eth0" 605 | # MaxMissed -1 606 | # 607 | 608 | # 609 | # 610 | # Statement "SELECT magic FROM wizard WHERE host = $1;" 611 | # Param hostname 612 | # 613 | # 614 | # Type gauge 615 | # InstancePrefix "magic" 616 | # ValuesFrom "magic" 617 | # 618 | # 619 | # 620 | # 621 | # Statement "SELECT COUNT(type) AS count, type \ 622 | # FROM (SELECT CASE \ 623 | # WHEN resolved = 'epoch' THEN 'open' \ 624 | # ELSE 'resolved' END AS type \ 625 | # FROM tickets) type \ 626 | # GROUP BY type;" 627 | # 628 | # 629 | # Type counter 630 | # InstancePrefix "rt36_tickets" 631 | # InstancesFrom "type" 632 | # ValuesFrom "count" 633 | # 634 | # 635 | # 636 | # 637 | # Host "hostname" 638 | # Port 5432 639 | # User "username" 640 | # Password "secret" 641 | # 642 | # SSLMode "prefer" 643 | # KRBSrvName 
"kerberos_service_name" 644 | # 645 | # Query magic 646 | # 647 | # 648 | # 649 | # Interval 60 650 | # Service "service_name" 651 | # 652 | # Query backend # predefined 653 | # Query rt36_tickets 654 | # 655 | # 656 | 657 | # 658 | # 659 | # Collect "latency" 660 | # Collect "udp-answers" "udp-queries" 661 | # Socket "/var/run/pdns.controlsocket" 662 | # 663 | # 664 | # Collect "questions" 665 | # Collect "cache-hits" "cache-misses" 666 | # Socket "/var/run/pdns_recursor.controlsocket" 667 | # 668 | # LocalSocket "/opt/collectd/var/run/collectd-powerdns" 669 | # 670 | 671 | # 672 | # Process "name" 673 | # ProcessMatch "foobar" "/usr/bin/perl foobar\\.pl.*" 674 | # 675 | 676 | # 677 | # Value "/^Tcp:/" 678 | # IgnoreSelected false 679 | # 680 | 681 | # 682 | # ModulePath "/path/to/your/python/modules" 683 | # LogTraces true 684 | # Interactive true 685 | # Import "spam" 686 | # 687 | # 688 | # spam "wonderful" "lovely" 689 | # 690 | # 691 | 692 | # 693 | # DaemonAddress "unix:/var/run/rrdcached.sock" 694 | # DataDir "/var/lib/rrdcached/db/collectd" 695 | # CreateFiles true 696 | # CollectStatistics true 697 | # 698 | 699 | # 700 | # DataDir "/var/lib/collectd/rrd" 701 | # CacheTimeout 120 702 | # CacheFlush 900 703 | # WritesPerSecond 30 704 | # RandomTimeout 0 705 | # 706 | # The following settings are rather advanced 707 | # and should usually not be touched: 708 | # StepSize 10 709 | # HeartBeat 20 710 | # RRARows 1200 711 | # RRATimespan 158112000 712 | # XFF 0.1 713 | # 714 | 715 | # 716 | # SensorConfigFile "/etc/sensors3.conf" 717 | # Sensor "it8712-isa-0290/temperature-temp1" 718 | # Sensor "it8712-isa-0290/fanspeed-fan3" 719 | # Sensor "it8712-isa-0290/voltage-in8" 720 | # IgnoreSelected false 721 | # 722 | 723 | # See /usr/share/doc/collectd/examples/snmp-data.conf.gz for a 724 | # comprehensive sample configuration. 
725 | # 726 | # 727 | # Type "voltage" 728 | # Table false 729 | # Instance "input_line1" 730 | # Scale 0.1 731 | # Values "SNMPv2-SMI::enterprises.6050.5.4.1.1.2.1" 732 | # 733 | # 734 | # Type "users" 735 | # Table false 736 | # Instance "" 737 | # Shift -1 738 | # Values "HOST-RESOURCES-MIB::hrSystemNumUsers.0" 739 | # 740 | # 741 | # Type "if_octets" 742 | # Table true 743 | # InstancePrefix "traffic" 744 | # Instance "IF-MIB::ifDescr" 745 | # Values "IF-MIB::ifInOctets" "IF-MIB::ifOutOctets" 746 | # 747 | # 748 | # 749 | # Address "192.168.0.2" 750 | # Version 1 751 | # Community "community_string" 752 | # Collect "std_traffic" 753 | # Inverval 120 754 | # 755 | # 756 | # Address "192.168.0.42" 757 | # Version 2 758 | # Community "another_string" 759 | # Collect "std_traffic" "hr_users" 760 | # 761 | # 762 | # Address "192.168.0.3" 763 | # Version 1 764 | # Community "more_communities" 765 | # Collect "powerplus_voltge_input" 766 | # Interval 300 767 | # 768 | # 769 | 770 | # 771 | # ReportByDevice false 772 | # 773 | 774 | # 775 | # 776 | # Instance "slabinfo" 777 | # Separator " " 778 | # 779 | # Type gauge 780 | # InstancePrefix "active_objs" 781 | # InstancesFrom 0 782 | # ValuesFrom 1 783 | # 784 | # 785 | # Type gauge 786 | # InstancePrefix "objperslab" 787 | # InstancesFrom 0 788 | # ValuesFrom 4 789 | # 790 | #
791 | #
792 | 793 | 794 | 795 | # cache performance 796 | 797 | Instance "graphite_web" 798 | 799 | Regex "Request-Cache hit " 800 | DSType "CounterInc" 801 | Type "counter" 802 | Instance "request_cache_hit" 803 | 804 | 805 | Regex "Request-Cache miss " 806 | DSType "CounterInc" 807 | Type "counter" 808 | Instance "request_cache_miss" 809 | 810 | 811 | Regex "CarbonLink creating a new socket " 812 | DSType "CounterInc" 813 | Type "counter" 814 | Instance "socket_create_count" 815 | 816 | 817 | Regex "CarbonLink cache-query request " 818 | DSType "CounterInc" 819 | Type "counter" 820 | Instance "query_count" 821 | 822 | 823 | Regex "CarbonLink set-metadata request " 824 | DSType "CounterInc" 825 | Type "counter" 826 | Instance "set-metadata_count" 827 | 828 | 829 | Regex "Data-Cache hit " 830 | DSType "CounterInc" 831 | Type "counter" 832 | Instance "data_cache_hit" 833 | 834 | 835 | Regex "Data-Cache miss " 836 | DSType "CounterInc" 837 | Type "counter" 838 | Instance "data_cache_miss" 839 | 840 | 841 | 842 | # rendering performance 843 | 844 | Instance "graphite_web" 845 | 846 | # PNG's 847 | 848 | Regex "Rendered PNG in ([0-9\.]+) seconds" 849 | DSType "CounterInc" 850 | Type "requests" 851 | Instance "render_png_count" 852 | 853 | 854 | Regex "Rendered PNG in ([0-9\.]+) seconds" 855 | DSType "GaugeMin" 856 | Type "response_time" 857 | Instance "render_png_time_min" 858 | 859 | 860 | Regex "Rendered PNG in ([0-9\.]+) seconds" 861 | DSType "GaugeMax" 862 | Type "response_time" 863 | Instance "render_png_time_max" 864 | 865 | 866 | Regex "Rendered PNG in ([0-9\.]+) seconds" 867 | DSType "GaugeAverage" 868 | Type "response_time" 869 | Instance "render_png_time_avg" 870 | 871 | 872 | # pickle (carbonlink) 873 | 874 | Regex "Total pickle rendering time ([0-9\.]+)" 875 | DSType "CounterInc" 876 | Type "counter" 877 | Instance "render_pickle_count" 878 | 879 | 880 | Regex "Total pickle rendering time ([0-9\.]+)" 881 | DSType "GaugeMin" 882 | Type "response_time" 883 | Instance 
"render_pickle_time_min" 884 | 885 | 886 | Regex "Total pickle rendering time ([0-9\.]+)" 887 | DSType "GaugeMax" 888 | Type "response_time" 889 | Instance "render_pickle_time_max" 890 | 891 | 892 | Regex "Total pickle rendering time ([0-9\.]+)" 893 | DSType "GaugeAverage" 894 | Type "response_time" 895 | Instance "render_pickle_time_avg" 896 | 897 | 898 | # rawData (json, csv, etc) 899 | 900 | Regex "Total rawData rendering time ([0-9\.]+)" 901 | DSType "CounterInc" 902 | Type "counter" 903 | Instance "render_rawdata_count" 904 | 905 | 906 | Regex "Total rawData rendering time ([0-9\.]+)" 907 | DSType "GaugeMin" 908 | Type "response_time" 909 | Instance "render_rawdata_time_min" 910 | 911 | 912 | Regex "Total rawData rendering time ([0-9\.]+)" 913 | DSType "GaugeMax" 914 | Type "response_time" 915 | Instance "render_rawdata_time_max" 916 | 917 | 918 | Regex "Total rawData rendering time ([0-9\.]+)" 919 | DSType "GaugeAverage" 920 | Type "response_time" 921 | Instance "render_rawdata_time_avg" 922 | 923 | 924 | # total render time 925 | 926 | Regex "Total rendering time ([0-9\.]+) seconds" 927 | DSType "CounterInc" 928 | Type "counter" 929 | Instance "total_render_count" 930 | 931 | 932 | Regex "Total rendering time ([0-9\.]+) seconds" 933 | DSType "GaugeMin" 934 | Type "response_time" 935 | Instance "total_render_time_min" 936 | 937 | 938 | Regex "Total rendering time ([0-9\.]+) seconds" 939 | DSType "GaugeMax" 940 | Type "response_time" 941 | Instance "total_render_time_max" 942 | 943 | 944 | Regex "Total rendering time ([0-9\.]+) seconds" 945 | DSType "GaugeAverage" 946 | Type "response_time" 947 | Instance "total_render_time_avg" 948 | 949 | 950 | # cached response time 951 | 952 | Regex "Returned cached response in ([0-9\.]+) seconds" 953 | DSType "CounterInc" 954 | Type "counter" 955 | Instance "cached_response_time_count" 956 | 957 | 958 | Regex "Returned cached response in ([0-9\.]+) seconds" 959 | DSType "GaugeMin" 960 | Type "response_time" 961 | Instance 
"cached_response_time_min" 962 | 963 | 964 | Regex "Returned cached response in ([0-9\.]+) seconds" 965 | DSType "GaugeMax" 966 | Type "response_time" 967 | Instance "cached_response_time_max" 968 | 969 | 970 | Regex "Returned cached response in ([0-9\.]+) seconds" 971 | DSType "GaugeAverage" 972 | Type "response_time" 973 | Instance "cached_response_time_avg" 974 | 975 | 976 | # data retrieval time 977 | 978 | Regex "Retrieval of [^ ]+ took ([0-9\.]+)" 979 | DSType "CounterInc" 980 | Type "counter" 981 | Instance "retrieval_count" 982 | 983 | 984 | Regex "Retrieval of [^ ]+ took ([0-9\.]+)" 985 | DSType "GaugeMin" 986 | Type "response_time" 987 | Instance "retrieval_time_min" 988 | 989 | 990 | Regex "Retrieval of [^ ]+ took ([0-9\.]+)" 991 | DSType "GaugeMax" 992 | Type "response_time" 993 | Instance "retrieval_time_max" 994 | 995 | 996 | Regex "Retrieval of [^ ]+ took ([0-9\.]+)" 997 | DSType "GaugeAverage" 998 | Type "response_time" 999 | Instance "retrieval_time_avg" 1000 | 1001 | 1002 | # 1003 | # Instance "exim" 1004 | # 1005 | # Regex "S=([1-9][0-9]*)" 1006 | # DSType "CounterAdd" 1007 | # Type "ipt_bytes" 1008 | # Instance "total" 1009 | # 1010 | # 1011 | # Regex "\\" 1012 | # ExcludeRegex "\\.*mail_spool defer" 1013 | # DSType "CounterInc" 1014 | # Type "counter" 1015 | # Instance "local_user" 1016 | # 1017 | # 1018 | 1019 | 1020 | # 1021 | # ListeningPorts false 1022 | # LocalPort "25" 1023 | # RemotePort "25" 1024 | # 1025 | 1026 | # 1027 | # Host "127.0.0.1" 1028 | # Port "51234" 1029 | # Server "8767" 1030 | # 1031 | 1032 | # 1033 | # Device "/dev/ttyUSB0" 1034 | # Retries 0 1035 | # 1036 | 1037 | # 1038 | # ForceUseProcfs false 1039 | # Device "THRM" 1040 | # IgnoreSelected false 1041 | # 1042 | 1043 | # 1044 | # Host "localhost" 1045 | # Port "1978" 1046 | # 1047 | 1048 | # 1049 | # SocketFile "/var/run/collectd-unixsock" 1050 | # SocketGroup "collectd" 1051 | # SocketPerms "0660" 1052 | # DeleteSocket false 1053 | # 1054 | 1055 | # 1056 | # UUIDFile 
"/etc/uuid" 1057 | # 1058 | 1059 | # 1060 | # 1061 | # CollectCache true 1062 | # CollectBackend true 1063 | # CollectConnections true 1064 | # CollectSHM true 1065 | # CollectESI false 1066 | # CollectFetch false 1067 | # CollectHCB false 1068 | # CollectSMA false 1069 | # CollectSMS false 1070 | # CollectSM false 1071 | # CollectTotals false 1072 | # CollectWorkers false 1073 | # 1074 | # 1075 | # 1076 | # CollectCache true 1077 | # 1078 | # 1079 | 1080 | # 1081 | # Verbose false 1082 | # 1083 | 1084 | 1085 | 1086 | Host "localhost" 1087 | # Port "2003" 1088 | Prefix "collectd." 1089 | Protocol "tcp" 1090 | # Postfix "collectd" 1091 | # StoreRates false 1092 | # AlwaysAppendDS false 1093 | # EscapeCharacter "_" 1094 | 1095 | 1096 | 1097 | # 1098 | # 1099 | # User "collectd" 1100 | # Password "secret" 1101 | # VerifyPeer true 1102 | # VerifyHost true 1103 | # CACert "/etc/ssl/ca.crt" 1104 | # Format "Command" 1105 | # StoreRates false 1106 | # 1107 | # 1108 | 1109 | # 1110 | # 1111 | # Host "localhost" 1112 | # Port "27017" 1113 | # Timeout 1000 1114 | # StoreRates false 1115 | # 1116 | # 1117 | 1118 | Include "/etc/collectd/collectd.conf.d/*.conf" 1119 | -------------------------------------------------------------------------------- /conf/etc/logrotate.d/graphite-statsd: -------------------------------------------------------------------------------- 1 | /var/log/*.log /var/log/carbon/*.log /var/log/graphite/*.log /var/log/nginx/*.log { 2 | daily 3 | missingok 4 | rotate 14 5 | compress 6 | notifempty 7 | copytruncate 8 | } 9 | -------------------------------------------------------------------------------- /conf/etc/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 4; 3 | pid /run/nginx.pid; 4 | daemon off; 5 | 6 | events { 7 | worker_connections 768; 8 | # multi_accept on; 9 | } 10 | 11 | http { 12 | 13 | ## 14 | # Basic Settings 15 | ## 16 | 17 | sendfile on; 18 | tcp_nopush 
on; 19 | tcp_nodelay on; 20 | keepalive_timeout 65; 21 | types_hash_max_size 2048; 22 | # server_tokens off; 23 | 24 | # server_names_hash_bucket_size 64; 25 | # server_name_in_redirect off; 26 | 27 | include /etc/nginx/mime.types; 28 | default_type application/octet-stream; 29 | 30 | ## 31 | # Logging Settings 32 | ## 33 | 34 | access_log /var/log/nginx/access.log; 35 | error_log /var/log/nginx/error.log; 36 | 37 | ## 38 | # Gzip Settings 39 | ## 40 | 41 | gzip on; 42 | gzip_disable "msie6"; 43 | 44 | # gzip_vary on; 45 | # gzip_proxied any; 46 | # gzip_comp_level 6; 47 | # gzip_buffers 16 8k; 48 | # gzip_http_version 1.1; 49 | # gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; 50 | 51 | ## 52 | # nginx-naxsi config 53 | ## 54 | # Uncomment it if you installed nginx-naxsi 55 | ## 56 | 57 | #include /etc/nginx/naxsi_core.rules; 58 | 59 | ## 60 | # nginx-passenger config 61 | ## 62 | # Uncomment it if you installed nginx-passenger 63 | ## 64 | 65 | #passenger_root /usr; 66 | #passenger_ruby /usr/bin/ruby; 67 | 68 | ## 69 | # Virtual Host Configs 70 | ## 71 | 72 | include /etc/nginx/conf.d/*.conf; 73 | include /etc/nginx/sites-enabled/*; 74 | } 75 | 76 | 77 | #mail { 78 | # # See sample authentication script at: 79 | # # http://wiki.nginx.org/ImapAuthenticateWithApachePhpScript 80 | # 81 | # # auth_http localhost/auth.php; 82 | # # pop3_capabilities "TOP" "USER"; 83 | # # imap_capabilities "IMAP4rev1" "UIDPLUS"; 84 | # 85 | # server { 86 | # listen localhost:110; 87 | # protocol pop3; 88 | # proxy on; 89 | # } 90 | # 91 | # server { 92 | # listen localhost:143; 93 | # protocol imap; 94 | # proxy on; 95 | # } 96 | #} 97 | -------------------------------------------------------------------------------- /conf/etc/nginx/sites-enabled/graphite-statsd.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | root 
/opt/graphite/static; 4 | index index.html; 5 | 6 | location /nginx_status { 7 | stub_status on; 8 | access_log off; 9 | allow 127.0.0.1; 10 | deny all; 11 | } 12 | 13 | location /media { 14 | # django admin static files 15 | alias /usr/local/lib/python3.6/dist-packages/django/contrib/admin/media/; 16 | } 17 | 18 | location /admin/auth/admin { 19 | alias /usr/local/lib/python3.6/dist-packages/django/contrib/admin/static/admin; 20 | } 21 | 22 | location /admin/auth/user/admin { 23 | alias /usr/local/lib/python3.6/dist-packages/django/contrib/admin/static/admin; 24 | } 25 | 26 | location / { 27 | proxy_pass http://127.0.0.1:8080; 28 | proxy_set_header Host $http_host; 29 | proxy_set_header X-Real-IP $remote_addr; 30 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 31 | 32 | add_header 'Access-Control-Allow-Origin' '*'; 33 | add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS'; 34 | add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type'; 35 | add_header 'Access-Control-Allow-Credentials' 'true'; 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /conf/etc/redis/redis.conf: -------------------------------------------------------------------------------- 1 | daemonize no 2 | pidfile /var/run/redis/redis-server.pid 3 | port 6379 4 | bind 127.0.0.1 5 | timeout 0 6 | tcp-keepalive 0 7 | loglevel notice 8 | logfile /var/log/redis/redis-server.log 9 | databases 16 10 | save 900 1 11 | save 300 10 12 | save 60 10000 13 | stop-writes-on-bgsave-error yes 14 | rdbcompression yes 15 | rdbchecksum yes 16 | dbfilename dump.rdb 17 | dir /var/lib/redis 18 | slave-serve-stale-data yes 19 | slave-read-only yes 20 | repl-disable-tcp-nodelay no 21 | slave-priority 100 22 | appendonly yes 23 | appendfilename "appendonly.aof" 24 | appendfsync everysec 25 | no-appendfsync-on-rewrite no 26 | auto-aof-rewrite-percentage 100 27 | auto-aof-rewrite-min-size 64mb 28 | lua-time-limit 5000 29 | 
slowlog-log-slower-than 10000 30 | slowlog-max-len 128 31 | notify-keyspace-events "" 32 | hash-max-ziplist-entries 512 33 | hash-max-ziplist-value 64 34 | list-max-ziplist-entries 512 35 | list-max-ziplist-value 64 36 | set-max-intset-entries 512 37 | zset-max-ziplist-entries 128 38 | zset-max-ziplist-value 64 39 | activerehashing yes 40 | client-output-buffer-limit normal 0 0 0 41 | client-output-buffer-limit slave 256mb 64mb 60 42 | client-output-buffer-limit pubsub 32mb 8mb 60 43 | hz 10 44 | aof-rewrite-incremental-fsync yes -------------------------------------------------------------------------------- /conf/etc/run_once/carbon-cache: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ -z "$GRAPHITE_CARBONLINK_HOSTS" ]; then 4 | exit 0 5 | fi 6 | 7 | IFS="," 8 | for i in $GRAPHITE_CARBONLINK_HOSTS; do 9 | INSTANCE_ID=$(echo $i | tr -d '[:space:]' | awk -F ':' '{print $3}') 10 | 11 | [ -z $INSTANCE_ID ] && continue 12 | S=/etc/service/carbon-$INSTANCE_ID 13 | [ -d $S ] && continue 14 | mkdir -p $S/log 15 | sed "s/start/--instance=$INSTANCE_ID start/" /etc/service/carbon/run > $S/run 16 | sed "s/carbon.log/carbon-$INSTANCE_ID.log/" /etc/service/carbon/log/run > $S/log/run 17 | chmod +x $S/run $S/log/run 18 | done 19 | -------------------------------------------------------------------------------- /conf/etc/service/brubeck/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -n "${BRUBECK}" ]] || exit 1 4 | 5 | [[ -f "./down" ]] && exit 1 6 | 7 | if [ ! 
-f /opt/graphite/conf/brubeck.json ]; then 8 | cp /opt/defaultconf/graphite/brubeck.json /opt/graphite/conf/brubeck.json 9 | fi 10 | 11 | exec /opt/graphite/bin/brubeck --config=/opt/graphite/conf/brubeck.json 2>&1 12 | -------------------------------------------------------------------------------- /conf/etc/service/carbon-aggregator/log/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | exec tee -a /var/log/carbon-aggregator.log -------------------------------------------------------------------------------- /conf/etc/service/carbon-aggregator/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -f "./down" ]] && exit 1 4 | 5 | BIN=python3 6 | [[ -f "/opt/graphite/bin/pypy3" ]] && BIN=/opt/graphite/bin/pypy3 7 | exec ${BIN} /opt/graphite/bin/carbon-aggregator.py start --debug 2>&1 8 | -------------------------------------------------------------------------------- /conf/etc/service/carbon-relay/log/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | exec tee -a /var/log/carbon-relay.log -------------------------------------------------------------------------------- /conf/etc/service/carbon-relay/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -n "${RELAY}" ]] || exit 1 4 | 5 | [[ -f "./down" ]] && exit 1 6 | 7 | BIN=/opt/graphite/bin/python3 8 | [[ -f "/opt/graphite/bin/pypy3" ]] && BIN=/opt/graphite/bin/pypy3 9 | exec ${BIN} /opt/graphite/bin/carbon-relay.py start --debug 2>&1 10 | -------------------------------------------------------------------------------- /conf/etc/service/carbon/log/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | exec tee -a /var/log/carbon.log -------------------------------------------------------------------------------- 
/conf/etc/service/carbon/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # disable if go-carbon enabled 4 | [[ -n "${GOCARBON}" ]] && exit 1 5 | 6 | [[ -f "./down" ]] && exit 1 7 | 8 | if [ -n "${CARBON_DISABLE_TAGS}" ]; then 9 | # trying to disable tags in carbon.conf file 10 | sed -i 's/ENABLE_TAGS = True/ENABLE_TAGS = False/g' /opt/graphite/conf/carbon.conf 11 | fi 12 | 13 | # AMQP params 14 | if [ -n "${CARBON_ENABLE_AMQP}" ]; then 15 | sed -i 's/# ENABLE_AMQP = False/ENABLE_AMQP = True/g' /opt/graphite/conf/carbon.conf 16 | if [ -n "${CARBON_AMQP_VERBOSE}" ]; then 17 | sed -i 's/# AMQP_VERBOSE = False/AMQP_VERBOSE = True/g' /opt/graphite/conf/carbon.conf 18 | fi 19 | if [ -n "${CARBON_AMQP_HOST}" ]; then 20 | sed -i "s/AMQP_HOST = localhost/AMQP_HOST = ${CARBON_AMQP_HOST}/g" /opt/graphite/conf/carbon.conf 21 | fi 22 | if [ -n "${CARBON_AMQP_PORT}" ]; then 23 | sed -i "s/AMQP_PORT = 5672/AMQP_PORT = ${CARBON_AMQP_PORT}/g" /opt/graphite/conf/carbon.conf 24 | fi 25 | if [ -n "${CARBON_AMQP_VHOST}" ]; then 26 | sed -i "s/AMQP_VHOST = \//AMQP_VHOST = ${CARBON_AMQP_VHOST}/g" /opt/graphite/conf/carbon.conf 27 | fi 28 | if [ -n "${CARBON_AMQP_USER}" ]; then 29 | sed -i "s/AMQP_USER = graphite/AMQP_USER = ${CARBON_AMQP_USER}/g" /opt/graphite/conf/carbon.conf 30 | fi 31 | if [ -n "${CARBON_AMQP_PASSWORD}" ]; then 32 | sed -i "s/AMQP_PASSWORD = guest/AMQP_PASSWORD = ${CARBON_AMQP_PASSWORD}/g" /opt/graphite/conf/carbon.conf 33 | fi 34 | if [ -n "${CARBON_AMQP_EXCHANGE}" ]; then 35 | sed -i "s/AMQP_EXCHANGE = graphite/AMQP_EXCHANGE = ${CARBON_AMQP_EXCHANGE}/g" /opt/graphite/conf/carbon.conf 36 | fi 37 | if [ -n "${CARBON_AMQP_METRIC_NAME_IN_BODY}" ]; then 38 | sed -i 's/AMQP_METRIC_NAME_IN_BODY = False/AMQP_METRIC_NAME_IN_BODY = True/g' /opt/graphite/conf/carbon.conf 39 | fi 40 | fi 41 | 42 | BIN=/opt/graphite/bin/python3 43 | [[ -f "/opt/graphite/bin/pypy3" ]] && BIN=/opt/graphite/bin/pypy3 44 | exec ${BIN} 
/opt/graphite/bin/carbon-cache.py start --debug 2>&1 45 | -------------------------------------------------------------------------------- /conf/etc/service/collectd/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [ -n "${COLLECTD}" ] || exit 0 4 | 5 | [[ -f "./down" ]] && exit 1 6 | 7 | [ -d /etc/collectd/collectd.conf.d ] || mkdir -p /etc/collectd/collectd.conf.d 8 | if folder_empty /etc/collectd/collectd.conf.d; then 9 | touch /etc/collectd/collectd.conf.d/do_not_spill_warning_about_folder_is_empty.conf 10 | fi 11 | 12 | exec /usr/sbin/collectd -f -C /etc/collectd/collectd.conf 13 | -------------------------------------------------------------------------------- /conf/etc/service/cron/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -f "./down" ]] && exit 1 4 | 5 | exec /usr/sbin/crond -f 6 | -------------------------------------------------------------------------------- /conf/etc/service/go-carbon/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -n "${GOCARBON}" ]] || exit 1 4 | 5 | [[ -f "./down" ]] && exit 1 6 | 7 | if [ ! 
-f /opt/graphite/conf/go-carbon.conf ]; then 8 | cp /opt/defaultconf/graphite/go-carbon.conf /opt/graphite/conf/go-carbon.conf 9 | fi 10 | 11 | exec /opt/graphite/bin/go-carbon -config="/opt/graphite/conf/go-carbon.conf" -daemon=false 2>&1 12 | -------------------------------------------------------------------------------- /conf/etc/service/graphite/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -f "./down" ]] && exit 1 4 | 5 | if folder_empty /var/log/graphite; then 6 | mkdir -p /var/log/graphite 7 | touch /var/log/syslog 8 | fi 9 | 10 | if folder_empty /opt/graphite/conf; then 11 | cp /opt/defaultconf/graphite/*.conf /opt/graphite/conf/ 12 | fi 13 | 14 | if folder_empty /opt/graphite/webapp/graphite; then 15 | cp /opt/defaultconf/graphite/local_settings.py /opt/graphite/webapp/graphite/local_settings.py 16 | fi 17 | 18 | if folder_empty /opt/graphite/storage; then 19 | mkdir -p /opt/graphite/storage/whisper 20 | fi 21 | 22 | BIN=/opt/graphite/bin/python3 23 | [[ -f "/opt/graphite/bin/pypy3" ]] && BIN=/opt/graphite/bin/pypy3 24 | 25 | export PYTHONPATH=/opt/graphite/webapp 26 | export DJANGO_SETTINGS_MODULE=graphite.settings 27 | ${BIN} /opt/graphite/bin/django-admin makemigrations 28 | ${BIN} /opt/graphite/bin/django-admin migrate auth 29 | ${BIN} /opt/graphite/bin/django-admin migrate --run-syncdb 30 | /opt/graphite/bin/django_admin_init.sh || true 31 | 32 | if folder_empty /opt/graphite/webapp/graphite/functions/custom; then 33 | touch /opt/graphite/webapp/graphite/functions/custom/__init__.py 34 | fi 35 | 36 | export GRAPHITE_WSGI_PROCESSES=${GRAPHITE_WSGI_PROCESSES:-4} 37 | export GRAPHITE_WSGI_THREADS=${GRAPHITE_WSGI_THREADS:-1} 38 | export GRAPHITE_WSGI_REQUEST_TIMEOUT=${GRAPHITE_WSGI_REQUEST_TIMEOUT:-65} 39 | export GRAPHITE_WSGI_REQUEST_LINE=${GRAPHITE_WSGI_REQUEST_LINE:-0} 40 | export GRAPHITE_WSGI_MAX_REQUESTS=${GRAPHITE_WSGI_MAX_REQUESTS:-1000} 41 | export 
GRAPHITE_WSGI_WORKER_CLASS=${GRAPHITE_WSGI_WORKER_CLASS:-"sync"} 42 | export GRAPHITE_WSGI_WORKER_CONNECTIONS=${GRAPHITE_WSGI_WORKER_CONNECTIONS:-1000} 43 | if [ "${GRAPHITE_WSGI_WORKER_CLASS}" == "gevent" ]; then 44 | export GUNICORN_EXTRA_PARAMS="--worker-connections=${GRAPHITE_WSGI_WORKER_CONNECTIONS} " 45 | else 46 | export GUNICORN_EXTRA_PARAMS="--preload --threads=${GRAPHITE_WSGI_THREADS} " 47 | fi 48 | 49 | # start nginx if not disabled 50 | [[ -f "/etc/service/nginx/down" ]] || sv start nginx || exit 1 51 | 52 | exec /opt/graphite/bin/gunicorn wsgi --pythonpath=/opt/graphite/webapp/graphite \ 53 | ${GUNICORN_EXTRA_PARAMS} \ 54 | --worker-class=${GRAPHITE_WSGI_WORKER_CLASS} \ 55 | --workers=${GRAPHITE_WSGI_PROCESSES} \ 56 | --limit-request-line=${GRAPHITE_WSGI_REQUEST_LINE} \ 57 | --max-requests=${GRAPHITE_WSGI_MAX_REQUESTS} \ 58 | --timeout=${GRAPHITE_WSGI_REQUEST_TIMEOUT} \ 59 | --bind=0.0.0.0:8080 \ 60 | --log-file=/var/log/gunicorn.log 61 | -------------------------------------------------------------------------------- /conf/etc/service/nginx/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -f "./down" ]] && exit 1 4 | 5 | mkdir -p /var/log/nginx 6 | exec /usr/sbin/nginx -c /etc/nginx/nginx.conf 7 | -------------------------------------------------------------------------------- /conf/etc/service/redis/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [ -n "${REDIS_TAGDB}" ] || exit 0 4 | 5 | [[ -f "./down" ]] && exit 1 6 | 7 | mkdir -p /var/log/redis/ 8 | exec /usr/bin/redis-server /etc/redis/redis.conf 9 | -------------------------------------------------------------------------------- /conf/etc/service/statsd/log/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | exec tee -a /var/log/statsd.log -------------------------------------------------------------------------------- 
/conf/etc/service/statsd/run: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [[ -n "${BRUBECK}" ]] && exit 1 4 | 5 | if folder_empty /opt/statsd/config/; then 6 | cp /opt/defaultconf/statsd/config/*.js /opt/statsd/config/ 7 | fi 8 | 9 | exec node /opt/statsd/stats.js /opt/statsd/config/$STATSD_INTERFACE.js 2>&1 10 | -------------------------------------------------------------------------------- /conf/opt/graphite/bin/django_admin_init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | BIN=/opt/graphite/bin/python3 4 | [[ -f "/opt/graphite/bin/pypy3" ]] && BIN=/opt/graphite/bin/pypy3 5 | 6 | cat <.applications... 14 | # 15 | # You could configure some aggregations like so: 16 | # 17 | # .applications..all.requests (60) = sum .applications..*.requests 18 | # .applications..all.latency (60) = avg .applications..*.latency 19 | # 20 | # As an example, if the following metrics are received: 21 | # 22 | # prod.applications.apache.www01.requests 23 | # prod.applications.apache.www01.requests 24 | # 25 | # They would all go into the same aggregation buffer and after 60 seconds the 26 | # aggregate metric 'prod.applications.apache.all.requests' would be calculated 27 | # by summing their values. 28 | # 29 | # Template components such as will match everything up to the next dot. 30 | # To match metric multiple components including the dots, use <> in the 31 | # input template: 32 | # 33 | # .applications..all. (60) = sum .applications..*.<> 34 | # 35 | # Note that any time this file is modified, it will be re-read automatically. 
36 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/blacklist.conf: -------------------------------------------------------------------------------- 1 | # This file takes a single regular expression per line 2 | # If USE_WHITELIST is set to True in carbon.conf, any metrics received which 3 | # match one of these expressions will be dropped 4 | # This file is reloaded automatically when changes are made 5 | ^some\.noisy\.metric\.prefix\..* 6 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/brubeck.json: -------------------------------------------------------------------------------- 1 | { 2 | "sharding" : false, 3 | "server_name" : "brubeck", 4 | "dumpfile" : "/tmp/brubeck.dump", 5 | "capacity" : 15, 6 | "backends" : [ 7 | { 8 | "type" : "carbon", 9 | "address" : "localhost", 10 | "port" : 2003, 11 | "frequency" : 10 12 | } 13 | ], 14 | "samplers" : [ 15 | { 16 | "type" : "statsd", 17 | "address" : "0.0.0.0", 18 | "port" : 8125, 19 | "workers" : 4, 20 | "multisock" : true, 21 | "multimsg" : 8 22 | } 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/carbon.amqp.conf: -------------------------------------------------------------------------------- 1 | # This is a configuration file with AMQP enabled 2 | 3 | [cache] 4 | LOCAL_DATA_DIR = 5 | 6 | # Specify the user to drop privileges to 7 | # If this is blank carbon runs as the user that invokes it 8 | # This user must have write access to the local data directory 9 | USER = 10 | 11 | # Limit the size of the cache to avoid swapping or becoming CPU bound. 12 | # Sorts and serving cache queries gets more expensive as the cache grows. 13 | # Use the value "inf" (infinity) for an unlimited cache size. 
14 | MAX_CACHE_SIZE = inf 15 | 16 | # Limits the number of whisper update_many() calls per second, which effectively 17 | # means the number of write requests sent to the disk. This is intended to 18 | # prevent over-utilizing the disk and thus starving the rest of the system. 19 | # When the rate of required updates exceeds this, then carbon's caching will 20 | # take effect and increase the overall throughput accordingly. 21 | MAX_UPDATES_PER_SECOND = 1000 22 | 23 | # Softly limits the number of whisper files that get created each minute. 24 | # Setting this value low (like at 50) is a good way to ensure your graphite 25 | # system will not be adversely impacted when a bunch of new metrics are 26 | # sent to it. The trade off is that it will take much longer for those metrics' 27 | # database files to all get created and thus longer until the data becomes usable. 28 | # Setting this value high (like "inf" for infinity) will cause graphite to create 29 | # the files quickly but at the risk of slowing I/O down considerably for a while. 
30 | MAX_CREATES_PER_MINUTE = inf 31 | 32 | LINE_RECEIVER_INTERFACE = 0.0.0.0 33 | LINE_RECEIVER_PORT = 2003 34 | 35 | UDP_RECEIVER_INTERFACE = 0.0.0.0 36 | UDP_RECEIVER_PORT = 2003 37 | 38 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 39 | PICKLE_RECEIVER_PORT = 2004 40 | 41 | CACHE_QUERY_INTERFACE = 0.0.0.0 42 | CACHE_QUERY_PORT = 7002 43 | 44 | # Enable AMQP if you want to receve metrics using you amqp broker 45 | ENABLE_AMQP = True 46 | 47 | # Verbose means a line will be logged for every metric received 48 | # useful for testing 49 | AMQP_VERBOSE = True 50 | 51 | # your credentials for the amqp server 52 | # AMQP_USER = guest 53 | # AMQP_PASSWORD = guest 54 | 55 | # the network settings for the amqp server 56 | # AMQP_HOST = localhost 57 | # AMQP_PORT = 5672 58 | 59 | # if you want to include the metric name as part of the message body 60 | # instead of as the routing key, set this to True 61 | # AMQP_METRIC_NAME_IN_BODY = False 62 | 63 | # NOTE: you cannot run both a cache and a relay on the same server 64 | # with the default configuration, you have to specify a distinict 65 | # interfaces and ports for the listeners. 66 | 67 | [relay] 68 | LINE_RECEIVER_INTERFACE = 0.0.0.0 69 | LINE_RECEIVER_PORT = 2003 70 | 71 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 72 | PICKLE_RECEIVER_PORT = 2004 73 | 74 | CACHE_SERVERS = server1, server2, server3 75 | MAX_QUEUE_SIZE = 10000 76 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/carbon.conf: -------------------------------------------------------------------------------- 1 | [cache] 2 | # Configure carbon directories. 3 | # 4 | # OS environment variables can be used to tell carbon where graphite is 5 | # installed, where to read configuration from and where to write data. 6 | # 7 | # GRAPHITE_ROOT - Root directory of the graphite installation. 8 | # Defaults to ../ 9 | # GRAPHITE_CONF_DIR - Configuration directory (where this file lives). 
10 | # Defaults to $GRAPHITE_ROOT/conf/ 11 | # GRAPHITE_STORAGE_DIR - Storage directory for whisper/rrd/log/pid files. 12 | # Defaults to $GRAPHITE_ROOT/storage/ 13 | # 14 | # To change other directory paths, add settings to this file. The following 15 | # configuration variables are available with these default values: 16 | # 17 | # STORAGE_DIR = $GRAPHITE_STORAGE_DIR 18 | # LOCAL_DATA_DIR = %(STORAGE_DIR)s/whisper/ 19 | # WHITELISTS_DIR = %(STORAGE_DIR)s/lists/ 20 | # CONF_DIR = %(STORAGE_DIR)s/conf/ 21 | # LOG_DIR = %(STORAGE_DIR)s/log/ 22 | # PID_DIR = %(STORAGE_DIR)s/ 23 | # 24 | # For FHS style directory structures, use: 25 | # 26 | # STORAGE_DIR = /var/lib/carbon/ 27 | # CONF_DIR = /etc/carbon/ 28 | # LOG_DIR = /var/log/carbon/ 29 | # PID_DIR = /var/run/ 30 | # 31 | #LOCAL_DATA_DIR = /opt/graphite/storage/whisper/ 32 | 33 | # Specify the database library used to store metric data on disk. Each database 34 | # may have configurable options to change the behaviour of how it writes to 35 | # persistent storage. 36 | # 37 | # whisper - Fixed-size database, similar in design and purpose to RRD. This is 38 | # the default storage backend for carbon and the most rigorously tested. 39 | # 40 | # ceres - Experimental alternative database that supports storing data in sparse 41 | # files of arbitrary fixed-size resolutions. 42 | DATABASE = whisper 43 | 44 | # Enable daily log rotation. If disabled, a new file will be opened whenever the log file path no 45 | # longer exists (i.e. it is removed or renamed) 46 | ENABLE_LOGROTATION = True 47 | 48 | # Specify the user to drop privileges to 49 | # If this is blank carbon-cache runs as the user that invokes it 50 | # This user must have write access to the local data directory 51 | USER = 52 | 53 | # Limit the size of the cache to avoid swapping or becoming CPU bound. 54 | # Sorts and serving cache queries gets more expensive as the cache grows. 55 | # Use the value "inf" (infinity) for an unlimited cache size. 
56 | # value should be an integer number of metric datapoints. 57 | MAX_CACHE_SIZE = inf 58 | 59 | # Limits the number of whisper update_many() calls per second, which effectively 60 | # means the number of write requests sent to the disk. This is intended to 61 | # prevent over-utilizing the disk and thus starving the rest of the system. 62 | # When the rate of required updates exceeds this, then carbon's caching will 63 | # take effect and increase the overall throughput accordingly. 64 | MAX_UPDATES_PER_SECOND = 500 65 | 66 | # If defined, this changes the MAX_UPDATES_PER_SECOND in Carbon when a 67 | # stop/shutdown is initiated. This helps when MAX_UPDATES_PER_SECOND is 68 | # relatively low and carbon has cached a lot of updates; it enables the carbon 69 | # daemon to shutdown more quickly. 70 | # MAX_UPDATES_PER_SECOND_ON_SHUTDOWN = 1000 71 | 72 | # Softly limits the number of whisper files that get created each minute. 73 | # Setting this value low (e.g. 50) is a good way to ensure that your carbon 74 | # system will not be adversely impacted when a bunch of new metrics are 75 | # sent to it. The trade off is that any metrics received in excess of this 76 | # value will be silently dropped, and the whisper file will not be created 77 | # until such point as a subsequent metric is received and fits within the 78 | # defined rate limit. Setting this value high (like "inf" for infinity) will 79 | # cause carbon to create the files quickly but at the risk of increased I/O. 80 | MAX_CREATES_PER_MINUTE = 50 81 | 82 | # Set the minimum timestamp resolution supported by this instance. This allows 83 | # internal optimisations by overwriting points with equal truncated timestamps 84 | # in order to limit the number of updates to the database. It defaults to one 85 | # second. 86 | MIN_TIMESTAMP_RESOLUTION = 1 87 | 88 | # Set the minimum lag in seconds for a point to be written to the database 89 | # in order to optimize batching. 
This means that each point will wait at least 90 | # the duration of this lag before being written. Setting this to 0 disable the feature. 91 | # This currently only works when using the timesorted write strategy. 92 | # MIN_TIMESTAMP_LAG = 0 93 | 94 | # Set the interface and port for the line (plain text) listener. Setting the 95 | # interface to 0.0.0.0 listens on all interfaces. Port can be set to 0 to 96 | # disable this listener if it is not required. 97 | LINE_RECEIVER_INTERFACE = 0.0.0.0 98 | LINE_RECEIVER_PORT = 2003 99 | 100 | # Set this to True to enable the UDP listener. By default this is off 101 | # because it is very common to run multiple carbon daemons and managing 102 | # another (rarely used) port for every carbon instance is not fun. 103 | ENABLE_UDP_LISTENER = False 104 | UDP_RECEIVER_INTERFACE = 0.0.0.0 105 | UDP_RECEIVER_PORT = 2003 106 | 107 | # Set the interface and port for the pickle listener. Setting the interface to 108 | # 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this 109 | # listener if it is not required. 110 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 111 | PICKLE_RECEIVER_PORT = 2004 112 | 113 | # Set the interface and port for the protobuf listener. Setting the interface to 114 | # 0.0.0.0 listens on all interfaces. Port can be set to 0 to disable this 115 | # listener if it is not required. 116 | # PROTOBUF_RECEIVER_INTERFACE = 0.0.0.0 117 | # PROTOBUF_RECEIVER_PORT = 2005 118 | 119 | # Limit the number of open connections the receiver can handle as any time. 120 | # Default is no limit. Setting up a limit for sites handling high volume 121 | # traffic may be recommended to avoid running out of TCP memory or having 122 | # thousands of TCP connections reduce the throughput of the service. 123 | #MAX_RECEIVER_CONNECTIONS = inf 124 | 125 | # Per security concerns outlined in Bug #817247 the pickle receiver 126 | # will use a more secure and slightly less efficient unpickler. 
127 | # Set this to True to revert to the old-fashioned insecure unpickler. 128 | USE_INSECURE_UNPICKLER = False 129 | 130 | CACHE_QUERY_INTERFACE = 0.0.0.0 131 | CACHE_QUERY_PORT = 7002 132 | 133 | # Set this to False to drop datapoints received after the cache 134 | # reaches MAX_CACHE_SIZE. If this is True (the default) then sockets 135 | # over which metrics are received will temporarily stop accepting 136 | # data until the cache size falls below 95% MAX_CACHE_SIZE. 137 | USE_FLOW_CONTROL = True 138 | 139 | # If enabled this setting is used to timeout metric client connection if no 140 | # metrics have been sent in specified time in seconds 141 | #METRIC_CLIENT_IDLE_TIMEOUT = None 142 | 143 | # By default, carbon-cache will log every whisper update and cache hit. 144 | # This can be excessive and degrade performance if logging on the same 145 | # volume as the whisper data is stored. 146 | LOG_UPDATES = False 147 | LOG_CREATES = False 148 | LOG_CACHE_HITS = False 149 | LOG_CACHE_QUEUE_SORTS = False 150 | 151 | # The thread that writes metrics to disk can use one of the following strategies 152 | # determining the order in which metrics are removed from cache and flushed to 153 | # disk. The default option preserves the same behavior as has been historically 154 | # available in version 0.9.10. 155 | # 156 | # sorted - All metrics in the cache will be counted and an ordered list of 157 | # them will be sorted according to the number of datapoints in the cache at the 158 | # moment of the list's creation. Metrics will then be flushed from the cache to 159 | # disk in that order. 160 | # 161 | # timesorted - All metrics in the list will be looked at and sorted according 162 | # to the timestamp of there datapoints. The metric that were the least recently 163 | # written will be written first. This is an hybrid strategy between max and 164 | # sorted which is particularly adapted to sets of metrics with non-uniform 165 | # resolutions. 
166 | # 167 | # max - The writer thread will always pop and flush the metric from cache 168 | # that has the most datapoints. This will give a strong flush preference to 169 | # frequently updated metrics and will also reduce random file-io. Infrequently 170 | # updated metrics may only ever be persisted to disk at daemon shutdown if 171 | # there are a large number of metrics which receive very frequent updates OR if 172 | # disk i/o is very slow. 173 | # 174 | # naive - Metrics will be flushed from the cache to disk in an unordered 175 | # fashion. This strategy may be desirable in situations where the storage for 176 | # whisper files is solid state, CPU resources are very limited or deference to 177 | # the OS's i/o scheduler is expected to compensate for the random write 178 | # pattern. 179 | # 180 | CACHE_WRITE_STRATEGY = sorted 181 | 182 | # On some systems it is desirable for whisper to write synchronously. 183 | # Set this option to True if you'd like to try this. Basically it will 184 | # shift the onus of buffering writes from the kernel into carbon's cache. 185 | WHISPER_AUTOFLUSH = False 186 | 187 | # By default new Whisper files are created pre-allocated with the data region 188 | # filled with zeros to prevent fragmentation and speed up contiguous reads and 189 | # writes (which are common). Enabling this option will cause Whisper to create 190 | # the file sparsely instead. Enabling this option may allow a large increase of 191 | # MAX_CREATES_PER_MINUTE but may have longer term performance implications 192 | # depending on the underlying storage configuration. 193 | # WHISPER_SPARSE_CREATE = False 194 | 195 | # Only beneficial on linux filesystems that support the fallocate system call. 196 | # It maintains the benefits of contiguous reads/writes, but with a potentially 197 | # much faster creation speed, by allowing the kernel to handle the block 198 | # allocation and zero-ing. 
Enabling this option may allow a large increase of 199 | # MAX_CREATES_PER_MINUTE. If enabled on an OS or filesystem that is unsupported 200 | # this option will gracefully fallback to standard POSIX file access methods. 201 | # If enabled, disables WHISPER_SPARSE_CREATE regardless of the value. 202 | WHISPER_FALLOCATE_CREATE = False 203 | 204 | # Enabling this option will cause Whisper to lock each Whisper file it writes 205 | # to with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when 206 | # multiple carbon-cache daemons are writing to the same files. 207 | # WHISPER_LOCK_WRITES = False 208 | 209 | # On systems which has a large number of metrics, an amount of Whisper write(2)'s 210 | # pageback sometimes cause disk thrashing due to memory shortage, so that abnormal 211 | # disk reads occur. Enabling this option makes it possible to decrease useless 212 | # page cache memory by posix_fadvise(2) with POSIX_FADVISE_RANDOM option. 213 | # WHISPER_FADVISE_RANDOM = False 214 | 215 | # By default all nodes stored in Ceres are cached in memory to improve the 216 | # throughput of reads and writes to underlying slices. Turning this off will 217 | # greatly reduce memory consumption for databases with millions of metrics, at 218 | # the cost of a steep increase in disk i/o, approximately an extra two os.stat 219 | # calls for every read and write. Reasons to do this are if the underlying 220 | # storage can handle stat() with practically zero cost (SSD, NVMe, zRAM). 221 | # Valid values are: 222 | # all - all nodes are cached 223 | # none - node caching is disabled 224 | # CERES_NODE_CACHING_BEHAVIOR = all 225 | 226 | # Ceres nodes can have many slices and caching the right ones can improve 227 | # performance dramatically. Note that there are many trade-offs to tinkering 228 | # with this, and unless you are a ceres developer you *really* should not 229 | # mess with this. 
Valid values are: 230 | # latest - only the most recent slice is cached 231 | # all - all slices are cached 232 | # none - slice caching is disabled 233 | # CERES_SLICE_CACHING_BEHAVIOR = latest 234 | 235 | # If a Ceres node accumulates too many slices, performance can suffer. 236 | # This can be caused by intermittently reported data. To mitigate 237 | # slice fragmentation there is a tolerance for how much space can be 238 | # wasted within a slice file to avoid creating a new one. That tolerance 239 | # level is determined by MAX_SLICE_GAP, which is the number of consecutive 240 | # null datapoints allowed in a slice file. 241 | # If you set this very low, you will waste less of the *tiny* bit of disk space 242 | # that this feature wastes, and you will be prone to performance problems 243 | # caused by slice fragmentation, which can be pretty severe. 244 | # If you set this really high, you will waste a bit more disk space (each 245 | # null datapoint wastes 8 bytes, but keep in mind your filesystem's block 246 | # size). If you suffer slice fragmentation issues, you should increase this or 247 | # run the ceres-maintenance defrag plugin more often. However you should not 248 | # set it to be huge because then if a large but allowed gap occurs it has to 249 | # get filled in, which means instead of a simple 8-byte write to a new file we 250 | # could end up doing an (8 * MAX_SLICE_GAP)-byte write to the latest slice. 251 | # CERES_MAX_SLICE_GAP = 80 252 | 253 | # Enabling this option will cause Ceres to lock each Ceres file it writes to 254 | # with an exclusive lock (LOCK_EX, see: man 2 flock). This is useful when 255 | # multiple carbon-cache daemons are writing to the same files. 256 | # CERES_LOCK_WRITES = False 257 | 258 | # Set this to True to enable whitelisting and blacklisting of metrics in 259 | # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. 
If the whitelist is 260 | # missing or empty, all metrics will pass through 261 | # USE_WHITELIST = False 262 | 263 | # By default, carbon itself will log statistics (such as a count, 264 | # metricsReceived) with the top level prefix of 'carbon' at an interval of 60 265 | # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation 266 | # CARBON_METRIC_PREFIX = carbon 267 | CARBON_METRIC_INTERVAL = 10 268 | 269 | # Enable AMQP if you want to receive metrics using an amqp broker 270 | ENABLE_AMQP = False 271 | 272 | # Verbose means a line will be logged for every metric received 273 | # useful for testing 274 | AMQP_VERBOSE = False 275 | 276 | AMQP_HOST = localhost 277 | AMQP_PORT = 5672 278 | AMQP_VHOST = / 279 | AMQP_USER = guest 280 | AMQP_PASSWORD = guest 281 | AMQP_EXCHANGE = graphite 282 | AMQP_METRIC_NAME_IN_BODY = False 283 | 284 | # The manhole interface allows you to SSH into the carbon daemon 285 | # and get a python interpreter. BE CAREFUL WITH THIS! If you do 286 | # something like time.sleep() in the interpreter, the whole process 287 | # will sleep! This is *extremely* helpful in debugging, assuming 288 | # you are familiar with the code. If you are not, please don't 289 | # mess with this, you are asking for trouble :) 290 | # 291 | # ENABLE_MANHOLE = False 292 | # MANHOLE_INTERFACE = 127.0.0.1 293 | # MANHOLE_PORT = 7222 294 | # MANHOLE_USER = admin 295 | # MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE= 296 | 297 | # Patterns for all of the metrics this machine will store. 
Read more at 298 | # http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings 299 | # 300 | # Example: store all sales, linux servers, and utilization metrics 301 | # BIND_PATTERNS = sales.#, servers.linux.#, #.utilization 302 | # 303 | # Example: store everything 304 | # BIND_PATTERNS = # 305 | 306 | # URL of graphite-web instance, this is used to add incoming series to the tag database 307 | GRAPHITE_URL = http://127.0.0.1:8080 308 | 309 | # Tag support, when enabled carbon will make HTTP calls to graphite-web to update the tag index 310 | ENABLE_TAGS = True 311 | 312 | # Tag update interval, this specifies how frequently updates to existing series will trigger 313 | # an update to the tag index, the default setting is once every 100 updates 314 | # TAG_UPDATE_INTERVAL = 100 315 | 316 | # Tag hash filenames, this specifies whether tagged metric filenames should use the hash of the metric name 317 | # or a human-readable name, using hashed names avoids issues with path length when using a large number of tags 318 | # TAG_HASH_FILENAMES = True 319 | 320 | # Tag batch size, this specifies the maximum number of series to be sent to graphite-web in a single batch 321 | # TAG_BATCH_SIZE = 100 322 | 323 | # Tag queue size, this specifies the maximum number of series to be queued for sending to graphite-web 324 | # There are separate queues for new series and for updates to existing series 325 | # TAG_QUEUE_SIZE = 10000 326 | 327 | # Set to enable Sentry.io exception monitoring. 328 | # RAVEN_DSN='YOUR_DSN_HERE'. 329 | 330 | # To configure special settings for the carbon-cache instance 'b', uncomment this: 331 | #[cache:b] 332 | #LINE_RECEIVER_PORT = 2103 333 | #PICKLE_RECEIVER_PORT = 2104 334 | #CACHE_QUERY_PORT = 7102 335 | # and any other settings you want to customize, defaults are inherited 336 | # from the [cache] section. 
337 | # You can then specify the --instance=b option to manage this instance 338 | # 339 | # In order to turn on logging of successful connections for the line 340 | # receiver, set this to True 341 | LOG_LISTENER_CONN_SUCCESS = False 342 | 343 | [relay] 344 | LINE_RECEIVER_INTERFACE = 0.0.0.0 345 | LINE_RECEIVER_PORT = 2013 346 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 347 | PICKLE_RECEIVER_PORT = 2014 348 | 349 | # Carbon-relay has several options for metric routing controlled by RELAY_METHOD 350 | # 351 | # Use relay-rules.conf to route metrics to destinations based on pattern rules 352 | #RELAY_METHOD = rules 353 | # 354 | # Use consistent-hashing for even distribution of metrics between destinations 355 | #RELAY_METHOD = consistent-hashing 356 | # 357 | # Use consistent-hashing but take into account an aggregation-rules.conf shared 358 | # by downstream carbon-aggregator daemons. This will ensure that all metrics 359 | # that map to a given aggregation rule are sent to the same carbon-aggregator 360 | # instance. 361 | # Enable this for carbon-relays that send to a group of carbon-aggregators 362 | #RELAY_METHOD = aggregated-consistent-hashing 363 | # 364 | # You can also use fast-hashing and fast-aggregated-hashing which are in O(1) 365 | # and will always redirect the metrics to the same destination but do not try 366 | # to minimize rebalancing when the list of destinations is changing. 367 | RELAY_METHOD = rules 368 | 369 | # If you use consistent-hashing you can add redundancy by replicating every 370 | # datapoint to more than one machine. 371 | REPLICATION_FACTOR = 1 372 | 373 | # For REPLICATION_FACTOR >=2, set DIVERSE_REPLICAS to True to guarantee replicas 374 | # across distributed hosts. With this setting disabled, it's possible that replicas 375 | # may be sent to different caches on the same host. This has been the default 376 | # behavior since introduction of 'consistent-hashing' relay method. 
377 | # Note that enabling this on an existing pre-0.9.14 cluster will require rebalancing 378 | # your metrics across the cluster nodes using a tool like Carbonate. 379 | #DIVERSE_REPLICAS = True 380 | 381 | # This is a list of carbon daemons we will send any relayed or 382 | # generated metrics to. The default provided would send to a single 383 | # carbon-cache instance on the default port. However if you 384 | # use multiple carbon-cache instances then it would look like this: 385 | # 386 | # DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b 387 | # 388 | # The general form is IP:PORT:INSTANCE where the :INSTANCE part is 389 | # optional and refers to the "None" instance if omitted. 390 | # 391 | # Note that if the destinations are all carbon-caches then this should 392 | # exactly match the webapp's CARBONLINK_HOSTS setting in terms of 393 | # instances listed (order matters!). 394 | # 395 | # If using RELAY_METHOD = rules, all destinations used in relay-rules.conf 396 | # must be defined in this list 397 | DESTINATIONS = 127.0.0.1:2004 398 | 399 | # This define the protocol to use to contact the destination. It can be 400 | # set to one of "line", "pickle", "udp" and "protobuf". This list can be 401 | # extended with CarbonClientFactory plugins and defaults to "pickle". 402 | # DESTINATION_PROTOCOL = pickle 403 | 404 | # This defines the wire transport, either none or ssl. 405 | # If SSL is used any TCP connection will be upgraded to TLS1. The system's 406 | # trust authority will be used unless DESTINATION_SSL_CA is specified in 407 | # which case an alternative certificate authority chain will be used for 408 | # verifying the remote certificate. 
409 | # To use SSL you'll need the cryptography, service_identity, and twisted >= 14 410 | # DESTINATION_TRANSPORT = none 411 | # DESTINATION_SSL_CA=/path/to/private-ca.crt 412 | 413 | # This allows to have multiple connections per destinations, this will 414 | # pool all the replicas of a single host in the same queue and distribute 415 | # points across these replicas instead of replicating them. 416 | # The following example will balance the load between :0 and :1. 417 | ## DESTINATIONS = foo:2001:0, foo:2001:1 418 | ## RELAY_METHOD = rules 419 | # Note: this is currently incompatible with USE_RATIO_RESET which gets 420 | # disabled if this option is enabled. 421 | # DESTINATIONS_POOL_REPLICAS = False 422 | 423 | # When using consistent hashing it sometimes makes sense to make 424 | # the ring dynamic when you don't want to lose points when a 425 | # single destination is down. Replication is an answer to that 426 | # but it can be quite expensive. 427 | # DYNAMIC_ROUTER = False 428 | 429 | # Controls the number of connection attempts before marking a 430 | # destination as down. We usually do one connection attempt per 431 | # second. 432 | # DYNAMIC_ROUTER_MAX_RETRIES = 5 433 | 434 | # This is the maximum number of datapoints that can be queued up 435 | # for a single destination. Once this limit is hit, we will 436 | # stop accepting new data if USE_FLOW_CONTROL is True, otherwise 437 | # we will drop any subsequently received datapoints. 438 | MAX_QUEUE_SIZE = 10000 439 | 440 | # This defines the maximum "message size" between carbon daemons. If 441 | # your queue is large, setting this to a lower number will cause the 442 | # relay to forward smaller discrete chunks of stats, which may prevent 443 | # overloading on the receiving side after a disconnect. 444 | MAX_DATAPOINTS_PER_MESSAGE = 500 445 | 446 | # Limit the number of open connections the receiver can handle at any time. 447 | # Default is no limit. 
Setting up a limit for sites handling high volume 448 | # traffic may be recommended to avoid running out of TCP memory or having 449 | # thousands of TCP connections reduce the throughput of the service. 450 | #MAX_RECEIVER_CONNECTIONS = inf 451 | 452 | # Specify the user to drop privileges to 453 | # If this is blank carbon-relay runs as the user that invokes it 454 | # USER = 455 | 456 | # This is the percentage that the queue must be empty before it will accept 457 | # more messages. For a larger site, if the queue is very large it makes sense 458 | # to tune this to allow for incoming stats. So if you have an average 459 | # flow of 100k stats/minute, and a MAX_QUEUE_SIZE of 3,000,000, it makes sense 460 | # to allow stats to start flowing when you've cleared the queue to 95% since 461 | # you should have space to accommodate the next minute's worth of stats 462 | # even before the relay incrementally clears more of the queue 463 | QUEUE_LOW_WATERMARK_PCT = 0.8 464 | 465 | # To allow for batch efficiency from the pickle protocol and to benefit from 466 | # other batching advantages, all writes are deferred by putting them into a queue, 467 | # and then the queue is flushed and sent a small fraction of a second later. 468 | TIME_TO_DEFER_SENDING = 0.0001 469 | 470 | # Set this to False to drop datapoints when any send queue (sending datapoints 471 | # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the 472 | # default) then sockets over which metrics are received will temporarily stop accepting 473 | # data until the send queues fall below QUEUE_LOW_WATERMARK_PCT * MAX_QUEUE_SIZE. 474 | USE_FLOW_CONTROL = True 475 | 476 | # If enabled this setting is used to timeout metric client connection if no 477 | # metrics have been sent in specified time in seconds 478 | #METRIC_CLIENT_IDLE_TIMEOUT = None 479 | 480 | # Set this to True to enable whitelisting and blacklisting of metrics in 481 | # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. 
If the whitelist is 482 | # missing or empty, all metrics will pass through 483 | # USE_WHITELIST = False 484 | 485 | # By default, carbon itself will log statistics (such as a count, 486 | # metricsReceived) with the top level prefix of 'carbon' at an interval of 60 487 | # seconds. Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation 488 | # CARBON_METRIC_PREFIX = carbon 489 | CARBON_METRIC_INTERVAL = 10 490 | # 491 | # In order to turn on logging of successful connections for the line 492 | # receiver, set this to True 493 | LOG_LISTENER_CONN_SUCCESS = False 494 | 495 | # If you're connecting from the relay to a destination that's over the 496 | # internet or similarly iffy connection, a backlog can develop because 497 | # of internet weather conditions, e.g. acks getting lost or similar issues. 498 | # To deal with that, you can enable USE_RATIO_RESET which will let you 499 | # re-set the connection to an individual destination. Defaults to being off. 500 | USE_RATIO_RESET=False 501 | 502 | # When there is a small number of stats flowing, it's not desirable to 503 | # perform any actions based on percentages - it's just too "twitchy". 504 | MIN_RESET_STAT_FLOW=1000 505 | 506 | # When the ratio of stats being sent in a reporting interval is far 507 | # enough from 1.0, we will disconnect the socket and reconnect to 508 | # clear out queued stats. The default ratio of 0.9 indicates that 10% 509 | # of stats aren't being delivered within one CARBON_METRIC_INTERVAL 510 | # (default of 60 seconds), which can lead to a queue backup. Under 511 | # some circumstances re-setting the connection can fix this, so 512 | # set this according to your tolerance, and look in the logs for 513 | # "resetConnectionForQualityReasons" to observe whether this is kicking 514 | # in when your sent queue is building up. 515 | MIN_RESET_RATIO=0.9 516 | 517 | # The minimum time between resets. When a connection is re-set, we 518 | # need to wait before another reset is performed. 
519 | # (2*CARBON_METRIC_INTERVAL) + 1 second is the minimum time needed 520 | # before stats for the new connection will be available. Setting this 521 | # below (2*CARBON_METRIC_INTERVAL) + 1 second will result in a lot of 522 | # reset connections for no good reason. 523 | MIN_RESET_INTERVAL=121 524 | 525 | # Enable TCP Keep Alive (http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html). 526 | # Default settings will send a probe every 30s. Default is False. 527 | # TCP_KEEPALIVE=True 528 | # The interval between the last data packet sent (simple ACKs are not 529 | # considered data) and the first keepalive probe; after the connection is marked 530 | # to need keepalive, this counter is not used any further. 531 | # TCP_KEEPIDLE=10 532 | # The interval between subsequential keepalive probes, regardless of what 533 | # the connection has exchanged in the meantime. 534 | # TCP_KEEPINTVL=30 535 | # The number of unacknowledged probes to send before considering the connection 536 | # dead and notifying the application layer. 537 | # TCP_KEEPCNT=2 538 | 539 | 540 | [aggregator] 541 | LINE_RECEIVER_INTERFACE = 0.0.0.0 542 | LINE_RECEIVER_PORT = 2023 543 | 544 | PICKLE_RECEIVER_INTERFACE = 0.0.0.0 545 | PICKLE_RECEIVER_PORT = 2024 546 | 547 | # If set true, metric received will be forwarded to DESTINATIONS in addition to 548 | # the output of the aggregation rules. If set false the carbon-aggregator will 549 | # only ever send the output of aggregation. 550 | FORWARD_ALL = True 551 | 552 | # Filenames of the configuration files to use for this instance of aggregator. 553 | # Filenames are relative to CONF_DIR. 554 | # 555 | # AGGREGATION_RULES = aggregation-rules.conf 556 | # REWRITE_RULES = rewrite-rules.conf 557 | 558 | # This is a list of carbon daemons we will send any relayed or 559 | # generated metrics to. The default provided would send to a single 560 | # carbon-cache instance on the default port. 
However if you 561 | # use multiple carbon-cache instances then it would look like this: 562 | # 563 | # DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b 564 | # 565 | # The format is comma-delimited IP:PORT:INSTANCE where the :INSTANCE part is 566 | # optional and refers to the "None" instance if omitted. 567 | # 568 | # Note that if the destinations are all carbon-caches then this should 569 | # exactly match the webapp's CARBONLINK_HOSTS setting in terms of 570 | # instances listed (order matters!). 571 | DESTINATIONS = 127.0.0.1:2004 572 | 573 | # If you want to add redundancy to your data by replicating every 574 | # datapoint to more than one machine, increase this. 575 | REPLICATION_FACTOR = 1 576 | 577 | # This is the maximum number of datapoints that can be queued up 578 | # for a single destination. Once this limit is hit, we will 579 | # stop accepting new data if USE_FLOW_CONTROL is True, otherwise 580 | # we will drop any subsequently received datapoints. 581 | MAX_QUEUE_SIZE = 10000 582 | 583 | # Set this to False to drop datapoints when any send queue (sending datapoints 584 | # to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the 585 | # default) then sockets over which metrics are received will temporarily stop accepting 586 | # data until the send queues fall below 80% MAX_QUEUE_SIZE. 587 | USE_FLOW_CONTROL = True 588 | 589 | # If enabled this setting is used to timeout metric client connection if no 590 | # metrics have been sent in specified time in seconds 591 | #METRIC_CLIENT_IDLE_TIMEOUT = None 592 | 593 | # This defines the maximum "message size" between carbon daemons. 594 | # You shouldn't need to tune this unless you really know what you're doing. 595 | MAX_DATAPOINTS_PER_MESSAGE = 500 596 | 597 | # This defines how many datapoints the aggregator remembers for 598 | # each metric. Aggregation only happens for datapoints that fall in 599 | # the past MAX_AGGREGATION_INTERVALS * intervalSize seconds. 
600 | MAX_AGGREGATION_INTERVALS = 5 601 | 602 | # Limit the number of open connections the receiver can handle as any time. 603 | # Default is no limit. Setting up a limit for sites handling high volume 604 | # traffic may be recommended to avoid running out of TCP memory or having 605 | # thousands of TCP connections reduce the throughput of the service. 606 | #MAX_RECEIVER_CONNECTIONS = inf 607 | 608 | # By default (WRITE_BACK_FREQUENCY = 0), carbon-aggregator will write back 609 | # aggregated data points once every rule.frequency seconds, on a per-rule basis. 610 | # Set this (WRITE_BACK_FREQUENCY = N) to write back all aggregated data points 611 | # every N seconds, independent of rule frequency. This is useful, for example, 612 | # to be able to query partially aggregated metrics from carbon-cache without 613 | # having to first wait rule.frequency seconds. 614 | # WRITE_BACK_FREQUENCY = 0 615 | 616 | # Set this to True to enable whitelisting and blacklisting of metrics in 617 | # CONF_DIR/whitelist.conf and CONF_DIR/blacklist.conf. If the whitelist is 618 | # missing or empty, all metrics will pass through 619 | # USE_WHITELIST = False 620 | 621 | # By default, carbon itself will log statistics (such as a count, 622 | # metricsReceived) with the top level prefix of 'carbon' at an interval of 60 623 | # seconds. 
Set CARBON_METRIC_INTERVAL to 0 to disable instrumentation 624 | # CARBON_METRIC_PREFIX = carbon 625 | CARBON_METRIC_INTERVAL = 10 626 | 627 | # In order to turn on logging of successful connections for the line 628 | # receiver, set this to True 629 | LOG_LISTENER_CONN_SUCCESS = False 630 | 631 | # In order to turn off logging of metrics with no corresponding 632 | # aggregation rules receiver, set this to False 633 | # LOG_AGGREGATOR_MISSES = False 634 | 635 | # Specify the user to drop privileges to 636 | # If this is blank carbon-aggregator runs as the user that invokes it 637 | # USER = 638 | 639 | # Part of the code, and particularly aggregator rules, needs 640 | # to cache metric names. To avoid leaking too much memory you 641 | # can tweak the size of this cache. The default allows for 1M 642 | # different metrics per rule (~200MiB). 643 | # CACHE_METRIC_NAMES_MAX=1000000 644 | 645 | # You can optionally set a ttl to this cache. 646 | # CACHE_METRIC_NAMES_TTL=600 -------------------------------------------------------------------------------- /conf/opt/graphite/conf/dashboard.conf: -------------------------------------------------------------------------------- 1 | # This configuration file controls the behavior of the Dashboard UI, available 2 | # at http://my-graphite-server/dashboard/. 3 | # 4 | # This file must contain a [ui] section that defines values for all of the 5 | # following settings. 6 | [ui] 7 | default_graph_width = 400 8 | default_graph_height = 250 9 | automatic_variants = true 10 | refresh_interval = 60 11 | autocomplete_delay = 375 12 | merge_hover_delay = 750 13 | 14 | # You can set this 'default', 'white', or a custom theme name. 15 | # To create a custom theme, copy the dashboard-default.css file 16 | # to dashboard-myThemeName.css in the content/css directory and 17 | # modify it to your liking. 
18 | theme = default 19 | 20 | [keyboard-shortcuts] 21 | toggle_toolbar = ctrl-z 22 | toggle_metrics_panel = ctrl-space 23 | erase_all_graphs = alt-x 24 | save_dashboard = alt-s 25 | completer_add_metrics = alt-enter 26 | completer_del_metrics = alt-backspace 27 | give_completer_focus = shift-space 28 | 29 | # These settings apply to the UI as a whole, all other sections in this file 30 | # pertain only to specific metric types. 31 | # 32 | # The dashboard presents only metrics that fall into specified naming schemes 33 | # defined in this file. This creates a simpler, more targeted view of the 34 | # data. The general form for defining a naming scheme is as follows: 35 | # 36 | #[Metric Type] 37 | #scheme = basis.path... 38 | #field1.label = Foo 39 | #field2.label = Bar 40 | # 41 | # 42 | # Where each will be displayed as a dropdown box 43 | # in the UI and the remaining portion of the namespace 44 | # shown in the Metric Selector panel. The .label options set the labels 45 | # displayed for each dropdown. 46 | # 47 | # For example: 48 | # 49 | #[Sales] 50 | #scheme = sales... 51 | #channel.label = Channel 52 | #type.label = Product Type 53 | #brand.label = Brand 54 | # 55 | # This defines a 'Sales' metric type that uses 3 dropdowns in the Context Selector 56 | # (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc) 57 | # will be available in the Metric Selector (upper-right panel). 58 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/go-carbon.conf: -------------------------------------------------------------------------------- 1 | [common] 2 | # Run as user. Works only in daemon mode 3 | user = "root" 4 | # Prefix for storing all internal go-carbon graphs. Supported macros: {host} 5 | graph-prefix = "carbon.agents.{host}" 6 | # Endpoint for storing internal carbon metrics. 
Valid values: "" or "local", "tcp://host:port", "udp://host:port" 7 | metric-endpoint = "local" 8 | # Interval of storing internal metrics. Like CARBON_METRIC_INTERVAL 9 | metric-interval = "1m0s" 10 | # Increase for configuration with multi persister workers 11 | max-cpu = 4 12 | 13 | [whisper] 14 | data-dir = "/opt/graphite/storage/whisper" 15 | # http://graphite.readthedocs.org/en/latest/config-carbon.html#storage-schemas-conf. Required 16 | schemas-file = "/opt/graphite/conf/storage-schemas.conf" 17 | # http://graphite.readthedocs.org/en/latest/config-carbon.html#storage-aggregation-conf. Optional 18 | aggregation-file = "/opt/graphite/conf/storage-aggregation.conf" 19 | # Worker threads count. Metrics sharded by "crc32(metricName) % workers" 20 | workers = 8 21 | # Limits the number of whisper update_many() calls per second. 0 - no limit 22 | max-updates-per-second = 500 23 | # Softly limits the number of whisper files that get created each second. 0 - no limit 24 | max-creates-per-second = 500 25 | # Make max-creates-per-second a hard limit. Extra new metrics are dropped. A hard throttle of 0 drops all new metrics. 26 | hard-max-creates-per-second = false 27 | # Sparse file creation 28 | sparse-create = false 29 | # use flock on every file call (ensures consistency if there are concurrent read/writes to the same file) 30 | flock = true 31 | enabled = true 32 | # Use hashed filenames for tagged metrics instead of human readable 33 | # https://github.com/lomik/go-carbon/pull/225 34 | hash-filenames = true 35 | # specify to enable/disable compressed format. IMPORTANT: Only one process/thread could write to compressed whisper files at a time, especially when you are rebalancing graphite clusters (with buckytools, for example), flock needs to be enabled both in go-carbon and your tooling. 
36 | compressed = false 37 | # automatically delete empty whisper file caused by edge cases like server reboot 38 | remove-empty-file = true 39 | 40 | [cache] 41 | # Limit of in-memory stored points (not metrics) 42 | max-size = 100000000 43 | # Capacity of queue between receivers and cache 44 | # Strategy to persist metrics. Values: "max","sorted","noop" 45 | # "max" - write metrics with most unwritten datapoints first 46 | # "sorted" - sort by timestamp of first unwritten datapoint. 47 | # "noop" - pick metrics to write in unspecified order, 48 | # requires least CPU and improves cache responsiveness 49 | write-strategy = "max" 50 | 51 | [udp] 52 | listen = ":2003" 53 | enabled = true 54 | # Optional internal queue between receiver and cache 55 | buffer-size = 0 56 | 57 | [tcp] 58 | listen = ":2003" 59 | enabled = true 60 | # Optional internal queue between receiver and cache 61 | buffer-size = 0 62 | 63 | [pickle] 64 | listen = ":2004" 65 | # Limit message size for prevent memory overflow 66 | max-message-size = 67108864 67 | enabled = true 68 | # Optional internal queue between receiver and cache 69 | buffer-size = 0 70 | 71 | # You can define unlimited count of additional receivers 72 | # Common definition scheme: 73 | # [receiver.] 
74 | # protocol = "" 75 | # 76 | # 77 | # All available protocols: 78 | # 79 | # [receiver.udp2] 80 | # protocol = "udp" 81 | # listen = ":2003" 82 | # # Enable optional logging of incomplete messages (chunked by max UDP packet size) 83 | # log-incomplete = false 84 | # 85 | # [receiver.tcp2] 86 | # protocol = "tcp" 87 | # listen = ":2003" 88 | # 89 | # [receiver.pickle2] 90 | # protocol = "pickle" 91 | # listen = ":2004" 92 | # # Limit message size to prevent memory overflow 93 | # max-message-size = 67108864 94 | # 95 | # [receiver.protobuf] 96 | # protocol = "protobuf" 97 | # # Same framing protocol as pickle, but message encoded in protobuf format 98 | # # See https://github.com/lomik/go-carbon/blob/master/helper/carbonpb/carbon.proto 99 | # listen = ":2005" 100 | # # Limit message size to prevent memory overflow 101 | # max-message-size = 67108864 102 | # 103 | # [receiver.http] 104 | # protocol = "http" 105 | # # This receiver receives data from POST requests body. 106 | # # Data can be encoded in plain text format (default), 107 | # # protobuf (with Content-Type: application/protobuf header) or 108 | # # pickle (with Content-Type: application/python-pickle header). 109 | # listen = ":2007" 110 | # max-message-size = 67108864 111 | # 112 | # [receiver.kafka] 113 | # protocol = "kafka" 114 | # # This receiver receives data from kafka 115 | # # You can use Partitions and Topics to do sharding 116 | # # State is saved in local file to avoid problems with multiple consumers 117 | # 118 | # # Encoding of messages 119 | # # Available options: "plain" (default), "protobuf", "pickle" 120 | # # Please note that for "plain" you must pass metrics with leading "\n". 121 | # # e.x. 
122 | # # echo "test.metric $(date +%s) $(date +%s)" | kafkacat -D $'\0' -z snappy -T -b localhost:9092 -t graphite 123 | # parse-protocol = "protobuf" 124 | # # Kafka connection parameters 125 | # brokers = [ "host1:9092", "host2:9092" ] 126 | # topic = "graphite" 127 | # partition = 0 128 | # 129 | # # Specify how often receiver will try to connect to kafka in case of network problems 130 | # reconnect-interval = "5m" 131 | # # How often receiver will ask Kafka for new data (in case there was no messages available to read) 132 | # fetch-interval = "200ms" 133 | # 134 | # # Path to saved kafka state. Used for restarts 135 | # state-file = "/var/lib/graphite/kafka.state" 136 | # # Initial offset, if there is no saved state. Can be relative time or "newest" or "oldest". 137 | # # In case offset is unavailable (in future, etc) fallback is "oldest" 138 | # initial-offset = "-30m" 139 | # 140 | # # Specify kafka feature level (default: 0.11.0.0). 141 | # # Please note that some features (consuming lz4 compressed streams) requires kafka >0.11 142 | # # You must specify version in full. E.x. '0.11.0.0' - ok, but '0.11' is not. 143 | # # Supported version (as of 22 Jan 2018): 144 | # # 0.8.2.0 145 | # # 0.8.2.1 146 | # # 0.8.2.2 147 | # # 0.9.0.0 148 | # # 0.9.0.1 149 | # # 0.10.0.0 150 | # # 0.10.0.1 151 | # # 0.10.1.0 152 | # # 0.10.2.0 153 | # # 0.11.0.0 154 | # # 1.0.0 155 | # kafka-version = "0.11.0.0" 156 | # 157 | # [receiver.pubsub] 158 | # # This receiver receives data from Google PubSub 159 | # # - Authentication is managed through APPLICATION_DEFAULT_CREDENTIALS: 160 | # # - https://cloud.google.com/docs/authentication/production#providing_credentials_to_your_application 161 | # # - Currently the subscription must exist before running go-carbon. 
162 | # # - The "receiver_*" settings are optional and directly map to the google pubsub 163 | # # libraries ReceiveSettings (https://godoc.org/cloud.google.com/go/pubsub#ReceiveSettings) 164 | # # - How to think about the "receiver_*" settings: In an attempt to maximize throughput the 165 | # # pubsub library will spawn 'receiver_go_routines' to fetch messages from the server. 166 | # # These goroutines simply buffer them into memory until 'receiver_max_messages' or 'receiver_max_bytes' 167 | # # have been read. This does not affect the actual handling of these messages which are processed by other goroutines. 168 | # protocol = "pubsub" 169 | # project = "project-name" 170 | # subscription = "subscription-name" 171 | # receiver_go_routines = 4 172 | # receiver_max_messages = 1000 173 | # receiver_max_bytes = 500000000 # default 500MB 174 | 175 | [carbonlink] 176 | listen = "127.0.0.1:7002" 177 | enabled = true 178 | # Close inactive connections after "read-timeout" 179 | read-timeout = "30s" 180 | 181 | # grpc api 182 | # protocol: https://github.com/lomik/go-carbon/blob/master/helper/carbonpb/carbon.proto 183 | # samples: https://github.com/lomik/go-carbon/tree/master/api/sample 184 | [grpc] 185 | listen = "127.0.0.1:7003" 186 | enabled = false 187 | 188 | # http://graphite.readthedocs.io/en/latest/tags.html 189 | [tags] 190 | enabled = true 191 | # TagDB url. 
It should support /tags/tagMultiSeries endpoint 192 | tagdb-url = "http://127.0.0.1:8080" 193 | tagdb-chunk-size = 32 194 | tagdb-update-interval = 100 195 | # Directory for send queue (based on leveldb) 196 | local-dir = "/tmp" 197 | # POST timeout 198 | tagdb-timeout = "1s" 199 | 200 | [carbonserver] 201 | # Please NOTE: carbonserver is not intended to fully replace graphite-web 202 | # It acts as a "REMOTE_STORAGE" for graphite-web or carbonzipper/carbonapi 203 | listen = "127.0.0.1:8000" 204 | # Carbonserver support is still experimental and may contain bugs 205 | # Or be incompatible with github.com/grobian/carbonserver 206 | enabled = true 207 | # Buckets to track response times 208 | buckets = 10 209 | # carbonserver-specific metrics will be sent as counters 210 | # For compatibility with grobian/carbonserver 211 | metrics-as-counters = true 212 | # Read and Write timeouts for HTTP server 213 | read-timeout = "60s" 214 | write-timeout = "60s" 215 | # Enable /render cache, it will cache the result for 1 minute 216 | query-cache-enabled = false 217 | # 0 for unlimited 218 | query-cache-size-mb = 0 219 | # Enable /metrics/find cache, it will cache the result for 5 minutes 220 | find-cache-enabled = true 221 | # Control trigram index 222 | # This index is used to speed-up /find requests 223 | # However, it will lead to increased memory consumption 224 | # Estimated memory consumption is approx. 500 bytes per each metric on disk 225 | # Another drawback is that it will recreate index every scan-frequency interval 226 | # All new/deleted metrics will still be searchable until index is recreated 227 | trigram-index = true 228 | # carbonserver keeps track of all available whisper files 229 | # in memory. This determines how often it will check FS 230 | # for new or deleted metrics. 
231 | scan-frequency = "5m0s" 232 | # Control trie index 233 | # This index is built as an alternative to trigram index, with shorter indexing 234 | # time and less memory usage (around 2 - 5 times). For most of the queries, 235 | # trie is faster than trigram. For queries with a keyword wrapped by wildcards 236 | # (like ns1.ns2.*keyword*.metric), trigram index performs better. Trie index 237 | # can be sped up by also adding trigrams to the trie, at some cost of 238 | # memory usage (by setting both trie-index and trigram-index to true). 239 | trie-index = false 240 | 241 | # Maximum number of globs in a single metric in index 242 | # This value is used to speed-up /find requests with 243 | # a lot of globs, but will lead to increased memory consumption 244 | max-globs = 100 245 | # Fail if the number of globs is more than max-globs 246 | fail-on-max-globs = false 247 | 248 | # Maximum number of metrics that can be returned by a glob/wildcard in a find 249 | # request (currently works only for trie index) 250 | max-metrics-globbed = 30000 251 | # Maximum number of metrics that can be returned in a render request (works with 252 | # all types of indexes) 253 | max-metrics-rendered = 1000 254 | 255 | # graphite-web-10-mode 256 | # Use Graphite-web 1.0 native structs for pickle response 257 | # This mode will break compatibility with graphite-web 0.9.x 258 | # If false, carbonserver won't send graphite-web 1.0 specific structs 259 | # That might degrade performance of the cluster 260 | # But will be compatible with both graphite-web 1.0 and 0.9.x 261 | graphite-web-10-strict-mode = true 262 | # Allows keeping track of "last time read" between restarts; leave empty to disable 263 | internal-stats-dir = "" 264 | # Calculate /render request time percentiles for the bucket, '95' means calculate 95th Percentile.
To disable this feature, leave the list blank 265 | stats-percentiles = [99, 98, 95, 75, 50] 266 | 267 | [dump] 268 | # Enable dump/restore function on USR2 signal 269 | enabled = false 270 | # Directory for store dump data. Should be writeable for carbon 271 | path = "/tmp" 272 | # Restore speed. 0 - unlimited 273 | restore-per-second = 0 274 | 275 | [pprof] 276 | listen = "localhost:7007" 277 | enabled = false 278 | 279 | #[prometheus] 280 | #enabled = true 281 | 282 | #[prometheus.labels] 283 | #foo = "test" 284 | #bar = "baz" 285 | 286 | # Default logger 287 | [[logging]] 288 | # logger name 289 | # available loggers: 290 | # * "" - default logger for all messages without configured special logger 291 | # @TODO 292 | logger = "" 293 | # Log output: filename, "stderr", "stdout", "none", "" (same as "stderr") 294 | file = "/var/log/go-carbon.log" 295 | # Log level: "debug", "info", "warn", "error", "dpanic", "panic", and "fatal" 296 | level = "info" 297 | # Log format: "json", "console", "mixed" 298 | encoding = "mixed" 299 | # Log time format: "millis", "nanos", "epoch", "iso8601" 300 | encoding-time = "iso8601" 301 | # Log duration format: "seconds", "nanos", "string" 302 | encoding-duration = "seconds" 303 | 304 | # You can define multiply loggers: 305 | 306 | [[logging]] 307 | logger = "" 308 | file = "stderr" 309 | level = "info" 310 | encoding = "mixed" 311 | encoding-time = "iso8601" 312 | encoding-duration = "seconds" -------------------------------------------------------------------------------- /conf/opt/graphite/conf/graphTemplates.conf: -------------------------------------------------------------------------------- 1 | [default] 2 | background = black 3 | foreground = white 4 | majorLine = white 5 | minorLine = grey 6 | lineColors = blue,green,red,purple,brown,yellow,aqua,grey,magenta,pink,gold,rose 7 | fontName = Sans 8 | fontSize = 10 9 | fontBold = False 10 | fontItalic = False 11 | 12 | [noc] 13 | background = black 14 | foreground = white 15 | 
majorLine = white 16 | minorLine = grey 17 | lineColors = blue,green,red,yellow,purple,brown,aqua,grey,magenta,pink,gold,rose 18 | fontName = Sans 19 | fontSize = 10 20 | fontBold = False 21 | fontItalic = False 22 | 23 | [plain] 24 | background = white 25 | foreground = black 26 | minorLine = grey 27 | majorLine = rose 28 | 29 | [summary] 30 | background = black 31 | lineColors = #6666ff, #66ff66, #ff6666 32 | 33 | [alphas] 34 | background = white 35 | foreground = black 36 | majorLine = grey 37 | minorLine = rose 38 | lineColors = 00ff00aa,ff000077,00337799 39 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/relay-rules.conf: -------------------------------------------------------------------------------- 1 | # Relay destination rules for carbon-relay. Entries are scanned in order, 2 | # and the first pattern a metric matches will cause processing to cease after sending 3 | # unless `continue` is set to true 4 | # 5 | # [name] 6 | # pattern = 7 | # destinations = 8 | # continue = # default: False 9 | # 10 | # name: Arbitrary unique name to identify the rule 11 | # pattern: Regex pattern to match against the metric name 12 | # destinations: Comma-separated list of destinations. 
13 | # ex: 127.0.0.1, 10.1.2.3:2004, 10.1.2.4:2004:a, myserver.mydomain.com 14 | # continue: Continue processing rules if this rule matches (default: False) 15 | 16 | # You must have exactly one section with 'default = true' 17 | # Note that all destinations listed must also exist in carbon.conf 18 | # in the DESTINATIONS setting in the [relay] section 19 | [default] 20 | default = true 21 | destinations = 127.0.0.1:2004 22 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/rewrite-rules.conf: -------------------------------------------------------------------------------- 1 | # This file defines regular expression patterns that can be used to 2 | # rewrite metric names in a search & replace fashion. It consists of two 3 | # sections, [pre] and [post]. The rules in the pre section are applied to 4 | # metric names as soon as they are received. The post rules are applied 5 | # after aggregation has taken place. 6 | # 7 | # The general form of each rule is as follows: 8 | # 9 | # regex-pattern = replacement-text 10 | # 11 | # For example: 12 | # 13 | # [post] 14 | # _sum$ = 15 | # _avg$ = 16 | # 17 | # These rules would strip off a suffix of _sum or _avg from any metric names 18 | # after aggregation. 19 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/storage-aggregation.conf: -------------------------------------------------------------------------------- 1 | # Aggregation methods for whisper files. Entries are scanned in order, 2 | # and first match wins. 
This file is scanned for changes every 60 seconds 3 | # 4 | # [name] 5 | # pattern = 6 | # xFilesFactor = 7 | # aggregationMethod = 8 | # 9 | # name: Arbitrary unique name for the rule 10 | # pattern: Regex pattern to match against the metric name 11 | # xFilesFactor: Ratio of valid data points required for aggregation to the next retention to occur 12 | # aggregationMethod: function to apply to data points for aggregation 13 | # 14 | [min] 15 | pattern = \.lower$ 16 | xFilesFactor = 0.1 17 | aggregationMethod = min 18 | 19 | [max] 20 | pattern = \.upper(_\d+)?$ 21 | xFilesFactor = 0.1 22 | aggregationMethod = max 23 | 24 | [sum] 25 | pattern = \.sum$ 26 | xFilesFactor = 0 27 | aggregationMethod = sum 28 | 29 | [count] 30 | pattern = \.count$ 31 | xFilesFactor = 0 32 | aggregationMethod = sum 33 | 34 | [count_legacy] 35 | pattern = ^stats_counts.* 36 | xFilesFactor = 0 37 | aggregationMethod = sum 38 | 39 | [default_average] 40 | pattern = .* 41 | xFilesFactor = 0.3 42 | aggregationMethod = average 43 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/storage-schemas.conf: -------------------------------------------------------------------------------- 1 | # Schema definitions for Whisper files. Entries are scanned in order, 2 | # and first match wins. This file is scanned for changes every 60 seconds. 3 | # 4 | # Definition Syntax: 5 | # 6 | # [name] 7 | # pattern = regex 8 | # retentions = timePerPoint:timeToStore, timePerPoint:timeToStore, ... 9 | # 10 | # Remember: To support accurate aggregation from higher to lower resolution 11 | # archives, the precision of a longer retention archive must be 12 | # cleanly divisible by precision of next lower retention archive. 13 | # 14 | # Valid: 60s:7d,300s:30d (300/60 = 5) 15 | # Invalid: 180s:7d,300s:30d (300/180 = 3.333) 16 | # 17 | 18 | # Carbon's internal metrics. 
This entry should match what is specified in 19 | # CARBON_METRIC_PREFIX and CARBON_METRIC_INTERVAL settings 20 | [carbon] 21 | pattern = ^carbon\. 22 | retentions = 10s:6h,1m:90d 23 | 24 | [default_1min_for_1day] 25 | pattern = .* 26 | retentions = 10s:6h,1m:6d,10m:1800d 27 | -------------------------------------------------------------------------------- /conf/opt/graphite/conf/whitelist.conf: -------------------------------------------------------------------------------- 1 | # This file takes a single regular expression per line 2 | # If USE_WHITELIST is set to True in carbon.conf, only metrics received which 3 | # match one of these expressions will be persisted. If this file is empty or 4 | # missing, all metrics will pass through. 5 | # This file is reloaded automatically when changes are made 6 | .* 7 | -------------------------------------------------------------------------------- /conf/opt/graphite/webapp/graphite/app_settings.py: -------------------------------------------------------------------------------- 1 | """Copyright 2008 Orbitz WorldWide 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License.""" 14 | 15 | # Django settings for graphite project. 16 | # DO NOT MODIFY THIS FILE DIRECTLY - use local_settings.py instead 17 | from os.path import dirname, join, abspath 18 | 19 | 20 | #Django settings below, do not touch! 
21 | APPEND_SLASH = False 22 | TEMPLATE_DEBUG = False 23 | 24 | TEMPLATES = [ 25 | { 26 | 'BACKEND': 'django.template.backends.django.DjangoTemplates', 27 | 'DIRS': [ 28 | join(dirname( abspath(__file__) ), 'templates') 29 | ], 30 | 'APP_DIRS': True, 31 | 'OPTIONS': { 32 | 'context_processors': [ 33 | # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this 34 | # list if you haven't customized them: 35 | 'django.contrib.auth.context_processors.auth', 36 | 'django.template.context_processors.debug', 37 | 'django.template.context_processors.i18n', 38 | 'django.template.context_processors.media', 39 | 'django.template.context_processors.static', 40 | 'django.template.context_processors.tz', 41 | 'django.contrib.messages.context_processors.messages', 42 | ], 43 | }, 44 | }, 45 | ] 46 | 47 | # Language code for this installation. All choices can be found here: 48 | # http://www.w3.org/TR/REC-html40/struct/dirlang.html#langcodes 49 | # http://blogs.law.harvard.edu/tech/stories/storyReader$15 50 | LANGUAGE_CODE = 'en-us' 51 | 52 | # Absolute path to the directory that holds media. 53 | MEDIA_ROOT = '' 54 | 55 | # URL that handles the media served from MEDIA_ROOT. 
56 | # Example: "http://media.lawrence.com" 57 | MEDIA_URL = '' 58 | 59 | MIDDLEWARE_CLASSES = ( 60 | 'graphite.middleware.LogExceptionsMiddleware', 61 | 'django.middleware.common.CommonMiddleware', 62 | 'django.middleware.gzip.GZipMiddleware', 63 | 'django.contrib.sessions.middleware.SessionMiddleware', 64 | 'django.contrib.auth.middleware.AuthenticationMiddleware', 65 | 'django.contrib.messages.middleware.MessageMiddleware', 66 | ) 67 | 68 | ROOT_URLCONF = 'graphite.urls' 69 | 70 | INSTALLED_APPS = ( 71 | 'graphite.metrics', 72 | 'graphite.render', 73 | 'graphite.browser', 74 | 'graphite.composer', 75 | 'graphite.account', 76 | 'graphite.dashboard', 77 | 'graphite.whitelist', 78 | 'graphite.events', 79 | 'graphite.url_shortener', 80 | 'django.contrib.auth', 81 | 'django.contrib.sessions', 82 | 'django.contrib.admin', 83 | 'django.contrib.contenttypes', 84 | 'django.contrib.staticfiles', 85 | 'tagging', 86 | ) 87 | 88 | AUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.ModelBackend'] 89 | 90 | GRAPHITE_WEB_APP_SETTINGS_LOADED = True 91 | 92 | STATIC_URL = '/static/' 93 | 94 | STATIC_ROOT = '/opt/graphite/static/' 95 | -------------------------------------------------------------------------------- /conf/opt/graphite/webapp/graphite/local_settings.py: -------------------------------------------------------------------------------- 1 | ## Graphite local_settings.py 2 | # Edit this file to customize the default Graphite webapp settings 3 | # 4 | # Additional customizations to Django settings can be added to this file as well 5 | import os 6 | ##################################### 7 | # General Configuration # 8 | ##################################### 9 | # 10 | # Set this to a long, random unique string to use as a secret key for this 11 | # install. This key is used for salting of hashes used in auth tokens, 12 | # CRSF middleware, cookie storage, etc. This should be set identically among 13 | # instances if used behind a load balancer. 
14 | #SECRET_KEY = 'UNSAFE_DEFAULT' 15 | 16 | # In Django 1.5+ set this to the list of hosts your graphite instances is 17 | # accessible as. See: 18 | # https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS 19 | ALLOWED_HOSTS = [host.strip() for host in os.environ.get('GRAPHITE_ALLOWED_HOSTS', "*").split(",")] 20 | 21 | # Set your local timezone (Django's default is America/Chicago) 22 | # If your graphs appear to be offset by a couple hours then this probably 23 | # needs to be explicitly set to your local timezone. 24 | TIME_ZONE = os.environ.get('GRAPHITE_TIME_ZONE', 'Etc/UTC') 25 | 26 | # Set the default short date format. See strftime(3) for supported sequences. 27 | DATE_FORMAT = os.environ.get('GRAPHITE_DATE_FORMAT', '%m/%d') 28 | 29 | # Override this to provide documentation specific to your Graphite deployment 30 | #DOCUMENTATION_URL = "http://graphite.readthedocs.io/" 31 | 32 | # Logging 33 | # These can also be configured using Django's LOGGING: 34 | # https://docs.djangoproject.com/en/1.11/topics/logging/ 35 | LOG_ROTATION = os.environ.get("GRAPHITE_LOG_ROTATION", "false").lower() in ['1', 'true', 'yes'] 36 | LOG_ROTATION_COUNT = int(os.environ.get('GRAPHITE_LOG_ROTATION_COUNT', '1')) 37 | LOG_RENDERING_PERFORMANCE = os.environ.get("GRAPHITE_LOG_RENDERING_PERFORMANCE", "true").lower() in ['1', 'true', 'yes'] 38 | LOG_CACHE_PERFORMANCE = os.environ.get("GRAPHITE_LOG_CACHE_PERFORMANCE", "true").lower() in ['1', 'true', 'yes'] 39 | LOG_INFO_PERFORMANCE = os.environ.get("GRAPHITE_LOG_INFO_PERFORMANCE", "false").lower() in ['1', 'true', 'yes'] 40 | 41 | # Filenames for log output, set to '-' to log to stderr 42 | LOG_FILE_INFO = os.environ.get("GRAPHITE_LOG_FILE_INFO", 'info.log') 43 | LOG_FILE_EXCEPTION = os.environ.get("GRAPHITE_LOG_FILE_EXCEPTION", 'exception.log') 44 | LOG_FILE_CACHE = os.environ.get("GRAPHITE_LOG_FILE_CACHE", 'cache.log') 45 | LOG_FILE_RENDERING = os.environ.get("GRAPHITE_LOG_FILE_RENDERING", 'rendering.log') 46 
| 47 | # Enable full debug page display on exceptions (Internal Server Error pages) 48 | DEBUG = os.environ.get("GRAPHITE_DEBUG", "false").lower() in ['1', 'true', 'yes'] 49 | 50 | # If using RRD files and rrdcached, set to the address or socket of the daemon 51 | #FLUSHRRDCACHED = 'unix:/var/run/rrdcached.sock' 52 | 53 | # This lists the memcached servers that will be used by this webapp. 54 | # If you have a cluster of webapps you should ensure all of them 55 | # have the *exact* same value for this setting. That will maximize cache 56 | # efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of 57 | # memcached entirely. 58 | # 59 | # You should not use the loopback address (127.0.0.1) here if using clustering 60 | # as every webapp in the cluster should use the exact same values to prevent 61 | # unneeded cache misses. Set to [] to disable caching of images and fetched data 62 | #MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211'] 63 | 64 | # Metric data and graphs are cached for one minute by default. If defined, 65 | # DEFAULT_CACHE_POLICY is a list of tuples of minimum query time ranges mapped 66 | # to the cache duration for the results. This allows for larger queries to be 67 | # cached for longer periods of times. All times are in seconds. If the policy is 68 | # empty or undefined, all results will be cached for DEFAULT_CACHE_DURATION. 69 | if (os.getenv('CACHE_DURATION') is not None): 70 | DEFAULT_CACHE_DURATION = int(os.environ.get('CACHE_DURATION', 60)) 71 | else: 72 | DEFAULT_CACHE_DURATION = int(os.environ.get('GRAPHITE_DEFAULT_CACHE_DURATION', '60')) 73 | #DEFAULT_CACHE_POLICY = [(0, 60), # default is 60 seconds 74 | # (7200, 120), # >= 2 hour queries are cached 2 minutes 75 | # (21600, 180)] # >= 6 hour queries are cached 3 minutes 76 | #MEMCACHE_KEY_PREFIX = 'graphite' 77 | 78 | # This lists the memcached options. Default is an empty dict. 
79 | # Accepted options depend on the Memcached implementation and the Django version. 80 | # Until Django 1.10, options are used only for pylibmc. 81 | # Starting from 1.11, options are used for both python-memcached and pylibmc. 82 | #MEMCACHE_OPTIONS = { 'socket_timeout': 0.5 } 83 | 84 | # This setting controls the default xFilesFactor used for query-time aggregation 85 | DEFAULT_XFILES_FACTOR = 0 86 | 87 | # Set URL_PREFIX when deploying graphite-web to a non-root location 88 | URL_PREFIX = str(os.environ.get('GRAPHITE_URL_ROOT', '')) 89 | 90 | # Graphite uses Django Tagging to support tags in Events. By default each 91 | # tag is limited to 50 characters in length. 92 | #MAX_TAG_LENGTH = 50 93 | 94 | # Interval for the Auto-Refresh feature in the Composer, measured in seconds. 95 | #AUTO_REFRESH_INTERVAL = 60 96 | 97 | # Timeouts for find and render requests 98 | #FIND_TIMEOUT = 3.0 # Timeout for metric find requests 99 | #FETCH_TIMEOUT = 3.0 # Timeout to fetch series data 100 | 101 | # Allow UTF-8 metric names (can cause performance issues) 102 | UTF8_METRICS = os.environ.get('GRAPHITE_UTF8_METRICS', 'false').lower() in ['1', 'true', 'yes'] 103 | 104 | ##################################### 105 | # Filesystem Paths # 106 | ##################################### 107 | # 108 | # Change only GRAPHITE_ROOT if your install is merely shifted from /opt/graphite 109 | # to somewhere else 110 | #GRAPHITE_ROOT = '/opt/graphite' 111 | 112 | # Most installs done outside of a separate tree such as /opt/graphite will 113 | # need to change these settings. Note that the default settings for each 114 | # of these are relative to GRAPHITE_ROOT. 115 | #CONF_DIR = '/opt/graphite/conf' 116 | #STORAGE_DIR = '/opt/graphite/storage' 117 | #STATIC_ROOT = '/opt/graphite/static' 118 | #LOG_DIR = '/opt/graphite/storage/log/webapp' 119 | #INDEX_FILE = '/opt/graphite/storage/index' # Search index file 120 | 121 | # To further or fully customize the paths, modify the following.
Note that the 122 | # default settings for each of these are relative to CONF_DIR and STORAGE_DIR 123 | # 124 | ## Webapp config files 125 | #DASHBOARD_CONF = '/opt/graphite/conf/dashboard.conf' 126 | #GRAPHTEMPLATES_CONF = '/opt/graphite/conf/graphTemplates.conf' 127 | 128 | ## Data directories 129 | # 130 | # NOTE: If any directory is unreadable in STANDARD_DIRS it will break metric browsing 131 | # 132 | #CERES_DIR = '/opt/graphite/storage/ceres' 133 | #WHISPER_DIR = '/opt/graphite/storage/whisper' 134 | #RRD_DIR = '/opt/graphite/storage/rrd' 135 | # 136 | # Data directories using the "Standard" metrics finder (i.e. not Ceres) 137 | #STANDARD_DIRS = [WHISPER_DIR, RRD_DIR] # Default: set from the above variables 138 | 139 | ## Data finders 140 | # It is possible to use an alternate storage layer than the default, Whisper, 141 | # in order to accommodate specific needs. 142 | # See: http://graphite.readthedocs.io/en/latest/storage-backends.html 143 | # 144 | # STORAGE_FINDERS = ( 145 | # 'graphite.finders.remote.RemoteFinder', 146 | # 'graphite.finders.standard.StandardFinder', 147 | # 'graphite.finders.ceres.CeresFinder', 148 | # ) 149 | 150 | ##################################### 151 | # Email Configuration # 152 | ##################################### 153 | # 154 | # This is used for emailing rendered graphs. The default backend is SMTP. 155 | #EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' 156 | # 157 | # To drop emails on the floor, enable the Dummy backend instead. 
158 | #EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend' 159 | 160 | #EMAIL_HOST = 'localhost' 161 | #EMAIL_PORT = 25 162 | #EMAIL_HOST_USER = '' 163 | #EMAIL_HOST_PASSWORD = '' 164 | #EMAIL_USE_TLS = False 165 | 166 | 167 | ##################################### 168 | # Authentication Configuration # 169 | ##################################### 170 | # 171 | ## LDAP / ActiveDirectory authentication setup 172 | #USE_LDAP_AUTH = True 173 | #LDAP_SERVER = "ldap.mycompany.com" 174 | #LDAP_PORT = 389 175 | #LDAP_USE_TLS = False 176 | 177 | ## Manual URI / query setup 178 | #LDAP_URI = "ldaps://ldap.mycompany.com:636" 179 | #LDAP_SEARCH_BASE = "OU=users,DC=mycompany,DC=com" 180 | #LDAP_BASE_USER = "CN=some_readonly_account,DC=mycompany,DC=com" 181 | #LDAP_BASE_PASS = "readonly_account_password" 182 | #LDAP_USER_QUERY = "(username=%s)" #For Active Directory use "(sAMAccountName=%s)" 183 | 184 | # User DN template to use for binding (and authentication) against the 185 | # LDAP server. %(username) is replaced with the username supplied at 186 | # graphite login. 187 | #LDAP_USER_DN_TEMPLATE = "CN=%(username)s,OU=users,DC=mycompany,DC=com" 188 | 189 | # If you want to further customize the ldap connection options you should 190 | # directly use ldap.set_option to set the ldap module's global options. 191 | # For example: 192 | # 193 | #import ldap 194 | #ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW) # Use ldap.OPT_X_TLS_DEMAND to force TLS 195 | #ldap.set_option(ldap.OPT_REFERRALS, 0) # Enable for Active Directory 196 | #ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, "/etc/ssl/ca") 197 | #ldap.set_option(ldap.OPT_X_TLS_CERTFILE, "/etc/ssl/mycert.pem") 198 | #ldap.set_option(ldap.OPT_X_TLS_KEYFILE, "/etc/ssl/mykey.pem") 199 | #ldap.set_option(ldap.OPT_DEBUG_LEVEL, 65535) # To enable verbose debugging 200 | # See http://www.python-ldap.org/ for further details on these options. 201 | 202 | ## REMOTE_USER authentication. 
See: https://docs.djangoproject.com/en/dev/howto/auth-remote-user/ 203 | #USE_REMOTE_USER_AUTHENTICATION = True 204 | 205 | # Override the URL for the login link (e.g. for django_openid_auth) 206 | #LOGIN_URL = '/account/login' 207 | 208 | 209 | ############################### 210 | # Authorization for Dashboard # 211 | ############################### 212 | # By default, there is no security on dashboards - any user can add, change or delete them. 213 | # This section provides 3 different authorization models, of varying strictness. 214 | 215 | # If set to True, users must be logged in to save or delete dashboards. Defaults to False 216 | #DASHBOARD_REQUIRE_AUTHENTICATION = True 217 | 218 | # If set to the name of a user group, dashboards can be saved and deleted by any user in this 219 | # group. Groups can be set in the Django Admin app, or in LDAP. Defaults to None. 220 | # NOTE: Ignored if DASHBOARD_REQUIRE_AUTHENTICATION is not set 221 | #DASHBOARD_REQUIRE_EDIT_GROUP = 'dashboard-editors-group' 222 | 223 | # If set to True, dashboards can be saved or deleted by any user having the appropriate 224 | # (change or delete) permission (as set in the Django Admin app). Defaults to False 225 | # NOTE: Ignored if DASHBOARD_REQUIRE_AUTHENTICATION is not set 226 | #DASHBOARD_REQUIRE_PERMISSIONS = True 227 | 228 | 229 | ########################## 230 | # Database Configuration # 231 | ########################## 232 | # 233 | # By default sqlite is used. If you cluster multiple webapps you will need 234 | # to setup an external database (such as MySQL) and configure all of the webapp 235 | # instances to use the same database. Note that this database is only used to store 236 | # Django models such as saved graphs, dashboards, user preferences, etc. 237 | # Metric data is not stored here. 
238 | # 239 | # DO NOT FORGET TO RUN MIGRATIONS AFTER SETTING UP A NEW DATABASE 240 | # http://graphite.readthedocs.io/en/latest/config-database-setup.html 241 | # 242 | # 243 | # The following built-in database engines are available: 244 | # django.db.backends.postgresql_psycopg2 245 | # django.db.backends.mysql 246 | # django.db.backends.sqlite3 247 | # django.db.backends.oracle 248 | # 249 | # The default is 'django.db.backends.sqlite3' with file 'graphite.db' 250 | # located in STORAGE_DIR 251 | # 252 | #DATABASES = { 253 | # 'default': { 254 | # 'NAME': '/opt/graphite/storage/graphite.db', 255 | # 'ENGINE': 'django.db.backends.sqlite3', 256 | # 'USER': '', 257 | # 'PASSWORD': '', 258 | # 'HOST': '', 259 | # 'PORT': '' 260 | # } 261 | #} 262 | # 263 | 264 | 265 | ######################### 266 | # Cluster Configuration # 267 | ######################### 268 | # 269 | # To avoid excessive DNS lookups you want to stick to using IP addresses only 270 | # in this entire section. 271 | # 272 | 273 | # This should list the IP address (and optionally port) of the webapp on each 274 | # remote server in the cluster. These servers must each have local access to 275 | # metric data. Note that the first server to return a match for a query will be 276 | # used. 277 | #CLUSTER_SERVERS = ["10.0.2.2:80", "10.0.2.3:80"] 278 | CLUSTER_SERVERS = [x for x in [host.strip() for host in os.environ.get('GRAPHITE_CLUSTER_SERVERS', '').split(",")] if x] 279 | 280 | # Creates a pool of worker threads to which tasks can be dispatched. 
This makes 281 | # sense if there are multiple CLUSTER_SERVERS because then the communication 282 | # with them can be parallelized 283 | # The number of threads is equal to: 284 | # POOL_WORKERS_PER_BACKEND * len(CLUSTER_SERVERS) + POOL_WORKERS 285 | # Be careful when increasing the number of threads, in particular if your start 286 | # multiple graphite-web processes (with uwsgi or similar) as this will increase 287 | # memory consumption (and number of connections to memcached). 288 | USE_WORKER_POOL = os.environ.get("GRAPHITE_USE_WORKER_POOL", "true").lower() in ['1', 'true', 'yes'] 289 | 290 | # Maximum number of worker threads for concurrent storage operations 291 | #POOL_MAX_WORKERS = 10 292 | 293 | # This setting controls whether https is used to communicate between cluster members 294 | #INTRACLUSTER_HTTPS = False 295 | 296 | # These are timeout values (in seconds) for requests to remote webapps 297 | REMOTE_FIND_TIMEOUT = float(os.environ.get('GRAPHITE_REMOTE_FIND_TIMEOUT', '30')) # Timeout for metric find requests 298 | REMOTE_FETCH_TIMEOUT = float(os.environ.get('GRAPHITE_REMOTE_FETCH_TIMEOUT', '60')) # Timeout to fetch series data 299 | REMOTE_RETRY_DELAY = float(os.environ.get('GRAPHITE_REMOTE_RETRY_DELAY', '0')) # Time before retrying a failed remote webapp 300 | 301 | # Fail all requests if any remote webapp call fails 302 | #STORE_FAIL_ON_ERROR = False 303 | 304 | # Try to detect when a cluster server is localhost and don't forward queries 305 | REMOTE_EXCLUDE_LOCAL = False 306 | 307 | # Number of retries for a specific remote data fetch. 308 | MAX_FETCH_RETRIES = int(os.environ.get('GRAPHITE_MAX_FETCH_RETRIES', '2')) 309 | 310 | FIND_CACHE_DURATION = int(os.environ.get('GRAPHITE_FIND_CACHE_DURATION', '1')) # Time to cache remote metric find results 311 | # If the query doesn't fall entirely within the FIND_TOLERANCE window 312 | # we disregard the window. 
This prevents unnecessary remote fetches 313 | # caused when carbon's cache skews node.intervals, giving the appearance that 314 | # remote systems have data we don't have locally, which we probably do. 315 | #FIND_TOLERANCE = 2 * FIND_CACHE_DURATION 316 | 317 | #REMOTE_STORE_USE_POST = False # Use POST instead of GET for remote requests 318 | 319 | # Size of the buffer used for streaming remote cluster responses. Set to 0 to avoid streaming deserialization. 320 | REMOTE_BUFFER_SIZE = int(os.environ.get('GRAPHITE_REMOTE_BUFFER_SIZE', '1048576')) 321 | 322 | # During a rebalance of a consistent hash cluster, after a partition event on a replication > 1 cluster, 323 | # or in other cases we might receive multiple TimeSeries data for a metric key. Merge them together rather 324 | # than choosing the "most complete" one (pre-0.9.14 behaviour). 325 | #REMOTE_STORE_MERGE_RESULTS = True 326 | 327 | # Provide a list of HTTP headers that you want forwarded on from this host 328 | # when making a request to a remote webapp server in CLUSTER_SERVERS 329 | #REMOTE_STORE_FORWARD_HEADERS = [] # An iterable of HTTP header names 330 | 331 | ## Remote rendering settings 332 | # Set to True to enable rendering of Graphs on a remote webapp 333 | #REMOTE_RENDERING = True 334 | # List of IP (and optionally port) of the webapp on each remote server that 335 | # will be used for rendering. Note that each rendering host should have local 336 | # access to metric data or should have CLUSTER_SERVERS configured 337 | #RENDERING_HOSTS = [] 338 | #REMOTE_RENDER_CONNECT_TIMEOUT = 1.0 339 | 340 | # If you are running multiple carbon-caches on this machine (typically behind 341 | # a relay using consistent hashing), you'll need to list the IP address, cache 342 | # query port, and instance name of each carbon-cache instance on the local 343 | # machine (NOT every carbon-cache in the entire cluster).
The default cache 344 | # query port is 7002 and a common scheme is to use 7102 for instance b, 7202 345 | # for instance c, etc. 346 | # If you're using consistent hashing, please keep an order of hosts the same as 347 | # order of DESTINATIONS in your relay - otherways you'll get cache misses. 348 | # 349 | # You *should* use 127.0.0.1 here in most cases. 350 | # 351 | #CARBONLINK_HOSTS = ["127.0.0.1:7002:a", "127.0.0.1:7102:b", "127.0.0.1:7202:c"] 352 | CARBONLINK_HOSTS = [host.strip() for host in os.environ.get('GRAPHITE_CARBONLINK_HOSTS', "127.0.0.1:7002").split(",")] 353 | 354 | CARBONLINK_TIMEOUT = float(os.environ.get('GRAPHITE_CARBONLINK_TIMEOUT', '1')) 355 | #CARBONLINK_RETRY_DELAY = 15 # Seconds to blacklist a failed remote server 356 | # 357 | 358 | # Type of metric hashing function. 359 | # The default `carbon_ch` is Graphite's traditional consistent-hashing implementation. 360 | # Alternatively, you can use `fnv1a_ch`, which supports the Fowler-Noll-Vo hash 361 | # function (FNV-1a) hash implementation offered by the carbon-c-relay project 362 | # https://github.com/grobian/carbon-c-relay 363 | # 364 | # Supported values: carbon_ch, fnv1a_ch 365 | # 366 | CARBONLINK_HASHING_TYPE = os.environ.get("GRAPHITE_CARBONLINK_HASHING_TYPE", 'carbon_ch') 367 | 368 | # A "keyfunc" is a user-defined python function that is given a metric name 369 | # and returns a string that should be used when hashing the metric name. 370 | # This is important when your hashing has to respect certain metric groupings. 371 | #CARBONLINK_HASHING_KEYFUNC = "/opt/graphite/bin/keyfuncs.py:my_keyfunc" 372 | 373 | # Prefix for internal carbon statistics. 374 | #CARBON_METRIC_PREFIX='carbon' 375 | 376 | # The replication factor to use with consistent hashing. 377 | # This should usually match the value configured in Carbon. 
378 | REPLICATION_FACTOR = int(os.environ.get('GRAPHITE_REPLICATION_FACTOR', '1')) 379 | 380 | ##################################### 381 | # TagDB Settings # 382 | ##################################### 383 | # Tag Database 384 | 385 | # set TAGDB to Redis if REDIS_TAGDB env var is set 386 | _REDIS_TAGDB = os.environ.get('REDIS_TAGDB', 'false').lower() in ['1', 'true', 'yes'] 387 | 388 | # default TAGDB is local database. Set to '' to disable 389 | TAGDB = 'graphite.tags.redis.RedisTagDB' if _REDIS_TAGDB else \ 390 | os.environ.get('GRAPHITE_TAGDB', 'graphite.tags.localdatabase.LocalDatabaseTagDB') 391 | 392 | # Time to cache seriesByTag results 393 | TAGDB_CACHE_DURATION = int(os.environ.get('GRAPHITE_TAGDB_CACHE_DURATION') or 60) 394 | 395 | # Autocomplete default result limit 396 | TAGDB_AUTOCOMPLETE_LIMIT = int(os.environ.get('GRAPHITE_TAGDB_AUTOCOMPLETE_LIMIT') or 100) 397 | 398 | # Settings for Redis TagDB 399 | TAGDB_REDIS_HOST = os.environ.get('GRAPHITE_TAGDB_REDIS_HOST', 'localhost') 400 | TAGDB_REDIS_PORT = int(os.environ.get('GRAPHITE_TAGDB_REDIS_PORT') or 6379) 401 | TAGDB_REDIS_DB = int(os.environ.get('GRAPHITE_TAGDB_REDIS_DB') or 0) 402 | 403 | # Settings for HTTP TagDB 404 | TAGDB_HTTP_URL = os.environ.get('GRAPHITE_TAGDB_HTTP_URL', '') 405 | TAGDB_HTTP_USER = os.environ.get('GRAPHITE_TAGDB_HTTP_USER', '') 406 | TAGDB_HTTP_PASSWORD = os.environ.get('GRAPHITE_TAGDB_HTTP_PASSWORD', '') 407 | # Does the remote TagDB support autocomplete? 
408 | TAGDB_HTTP_AUTOCOMPLETE = os.environ.get('GRAPHITE_TAGDB_HTTP_AUTOCOMPLETE', 'false').lower() in ['1', 'true', 'yes'] 409 | 410 | ##################################### 411 | # Function plugins # 412 | ##################################### 413 | # List of custom function plugin modules 414 | # See: http://graphite.readthedocs.io/en/latest/functions.html#function-plugins 415 | FUNCTION_PLUGINS = [] 416 | 417 | ##################################### 418 | # Additional Django Settings # 419 | ##################################### 420 | 421 | LOG_DIR = '/var/log/graphite' 422 | _SECRET_KEY = '$(date +%s | sha256sum | base64 | head -c 64)' 423 | SECRET_KEY = os.environ.get('GRAPHITE_SECRET_KEY', _SECRET_KEY) 424 | 425 | if (os.getenv("MEMCACHE_HOST") is not None): 426 | MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOST").split(",") 427 | 428 | STATSD_HOST = os.environ.get('GRAPHITE_STATSD_HOST', '127.0.0.1') 429 | if STATSD_HOST != '': 430 | from graphite.app_settings import * 431 | MIDDLEWARE = ( 432 | 'django_statsd.middleware.GraphiteRequestTimingMiddleware', 433 | 'django_statsd.middleware.GraphiteMiddleware', 434 | ) + MIDDLEWARE 435 | try: 436 | MIDDLEWARE_CLASSES 437 | except NameError: 438 | pass 439 | else: 440 | MIDDLEWARE_CLASSES = MIDDLEWARE 441 | -------------------------------------------------------------------------------- /conf/opt/statsd/config/tcp.js: -------------------------------------------------------------------------------- 1 | { 2 | "graphiteHost": "127.0.0.1", 3 | "graphitePort": 2003, 4 | "port": 8125, 5 | "flushInterval": 10000, 6 | "servers": [ 7 | { server: "./servers/tcp", address: "0.0.0.0", port: 8125 } 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /conf/opt/statsd/config/udp.js: -------------------------------------------------------------------------------- 1 | { 2 | "graphiteHost": "127.0.0.1", 3 | "graphitePort": 2003, 4 | "port": 8125, 5 | "flushInterval": 10000, 6 | "servers": [ 7 | 
{ server: "./servers/udp", address: "0.0.0.0", port: 8125 } 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /conf/usr/local/bin/folder_empty: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | [ -z "$(find "${1}" -mindepth 1 -not -name "lost+found" -print -quit)" ] 4 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | 3 | services: 4 | statsd: 5 | build: 6 | context: . 7 | args: 8 | python_binary: python3 9 | volumes: 10 | - ./conf/opt/graphite/conf:/opt/graphite/conf 11 | ports: 12 | - "80:80" 13 | - "2003-2004:2003-2004" 14 | - "2023-2024:2023-2024" 15 | - "8125:8125/udp" 16 | - "8126:8126" 17 | --------------------------------------------------------------------------------