├── .dockerignore ├── debian ├── compat ├── beamium.dirs ├── beamium.install ├── beamium.logrotate ├── beamium.service ├── control ├── beamium.postrm ├── changelog ├── beamium.preinst ├── rules ├── copyright ├── beamium.postinst └── beamium.init ├── entrypoint.sh ├── config.debian.yaml ├── .gitignore ├── Dockerfile ├── Dockerfile.build ├── deploy └── kubernetes │ └── beamium.yaml ├── src ├── constants.rs ├── lib │ ├── asynch │ │ ├── mod.rs │ │ ├── fs.rs │ │ └── http.rs │ ├── mod.rs │ └── transcompiler.rs ├── log.rs ├── main.rs ├── cmd.rs ├── sink.rs ├── router.rs ├── scraper.rs └── conf.rs ├── .travis.yml ├── Cargo.toml ├── LICENSE ├── CONTRIBUTING.md ├── CODE_OF_CONDUCT.md ├── config.sample.yaml └── README.md /.dockerignore: -------------------------------------------------------------------------------- 1 | target -------------------------------------------------------------------------------- /debian/compat: -------------------------------------------------------------------------------- 1 | 7 2 | -------------------------------------------------------------------------------- /debian/beamium.dirs: -------------------------------------------------------------------------------- 1 | /opt/beamium 2 | /var/log/beamium 3 | -------------------------------------------------------------------------------- /debian/beamium.install: -------------------------------------------------------------------------------- 1 | release/beamium usr/bin 2 | etc/beamium/config.yaml 3 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z "${TEMPLATE_CONFIG}" ]]; then 4 | echo "No template provided" 5 | else 6 | envsubst < $TEMPLATE_CONFIG > /etc/beamium/config.yaml 7 | fi 8 | 9 | exec "$@" -------------------------------------------------------------------------------- /debian/beamium.logrotate: 
-------------------------------------------------------------------------------- 1 | /var/log/beamium/beamium.log /var/log/beamium/beamium.out /var/log/beamium/beamium.err { 2 | daily 3 | missingok 4 | copytruncate 5 | rotate 7 6 | compress 7 | delaycompress 8 | } 9 | -------------------------------------------------------------------------------- /debian/beamium.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=beamium 3 | After=network.target 4 | 5 | [Service] 6 | EnvironmentFile=-/etc/default/beamium 7 | ExecStart=/usr/bin/beamium --config=/etc/beamium/config.yaml 8 | Restart=on-failure 9 | User=beamium 10 | Group=beamium 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /config.debian.yaml: -------------------------------------------------------------------------------- 1 | sources: 2 | source1: 3 | url: http://127.0.0.1:9100/metrics 4 | period: 10000 5 | 6 | sinks: 7 | sink1: 8 | url: WARP10_ENDPOINT 9 | token: WARP10_TOKEN 10 | 11 | labels: 12 | host: tmp 13 | 14 | parameters: 15 | log-file: /var/log/beamium/beamium.log 16 | source-dir: /opt/beamium/sources 17 | sink-dir: /opt/beamium/sinks 18 | -------------------------------------------------------------------------------- /debian/control: -------------------------------------------------------------------------------- 1 | Source: beamium 2 | Section: default 3 | Priority: extra 4 | Maintainer: Kevin GEORGES 5 | Build-Depends: debhelper (>= 8.0.0), dh-systemd (>= 1.5), git, pkg-config, libssl-dev, lsb-release 6 | Standards-Version: 3.9.6 7 | Homepage: https://github.com/ovh/beamium 8 | 9 | Package: beamium 10 | Architecture: i386 amd64 11 | Depends: logrotate 12 | Description: Prometheus to Warp10 metrics forwarder 13 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | ### Rust ### 2 | # Generated by Cargo 3 | # will have compiled files and executables 4 | /target/ 5 | 6 | # Remove Cargo.lock from gitignore if creating an executable, leave it for libraries 7 | # More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html 8 | # Cargo.lock 9 | 10 | # These are backup files generated by rustfmt 11 | **/*.rs.bk 12 | 13 | ### Beamium ### 14 | # Beamium files and folders 15 | *.log 16 | config.yaml 17 | /sources/ 18 | /sinks/ 19 | /src/version.rs 20 | 21 | -------------------------------------------------------------------------------- /debian/beamium.postrm: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | SERVICE=beamium 6 | 7 | case "$1" in 8 | 9 | remove) 10 | if [ -x "/usr/bin/deb-systemd-helper" ] ; then 11 | deb-systemd-helper mask $SERVICE || true 12 | else 13 | update-rc.d $SERVICE remove || true 14 | fi 15 | ;; 16 | 17 | purge) 18 | rm -rf /var/log/$SERVICE 19 | rm -rf /opt/$SERVICE 20 | deluser beamium 21 | ;; 22 | 23 | upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) 24 | # nothing to do 25 | ;; 26 | 27 | *) 28 | echo "$0 called with unknown argument \`$1'" >&2 29 | exit 1 30 | ;; 31 | esac 32 | -------------------------------------------------------------------------------- /debian/changelog: -------------------------------------------------------------------------------- 1 | beamium (1.7.0-1) master; urgency=medium 2 | 3 | * Update to 1.7.0 4 | 5 | -- Aurélien Nephtali Sun, 11 Feb 2018 08:23:52 +0100 6 | 7 | beamium (1.6.3-1) master; urgency=medium 8 | 9 | * Update to 1.6.3 10 | 11 | -- Aurélien Nephtali Thu, 22 Jun 2017 11:45:28 +0200 12 | 13 | beamium (1.6.2-1) master; urgency=medium 14 | 15 | * Update to 1.6.2 16 | 17 | -- Aurélien Nephtali Wed, 21 Jun 2017 10:29:57 +0200 18 | 19 | beamium (1.5.1-1) master; urgency=medium 20 
| 21 | * Debian package 22 | 23 | -- Aurélien Nephtali Mon, 12 Jun 2017 12:08:19 +0200 24 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stretch 2 | 3 | ENV DEBIAN_FRONTEND=noninteractive 4 | ARG METRICS_APT_URL=http://last.public.ovh.metrics.snap.mirrors.ovh.net 5 | RUN useradd -u 1000 beamium 6 | RUN apt-get update \ 7 | && apt-get install -y apt-transport-https curl gnupg gettext-base ca-certificates \ 8 | && echo "deb $METRICS_APT_URL/debian stretch main" >> /etc/apt/sources.list.d/beamium.list \ 9 | && curl https://last-public-ovh-metrics.snap.mirrors.ovh.net/pub.key | apt-key add - \ 10 | && apt-get update \ 11 | && apt-get install -y beamium \ 12 | && rm -rf /var/lib/apt/lists/* \ 13 | && chown -R beamium:beamium /etc/beamium/ 14 | USER 1000 15 | ADD entrypoint.sh / 16 | 17 | ENTRYPOINT ["/entrypoint.sh"] 18 | CMD ["beamium"] 19 | -------------------------------------------------------------------------------- /Dockerfile.build: -------------------------------------------------------------------------------- 1 | FROM rust:buster AS builder 2 | 3 | ARG COMMIT=HEAD 4 | ARG REMOTE=https://github.com/ovh/beamium.git 5 | 6 | RUN git clone ${REMOTE} /tmp/beamium 7 | 8 | WORKDIR /tmp/beamium 9 | RUN git checkout -f ${COMMIT} 10 | RUN cargo build --release 11 | 12 | FROM debian:buster-slim 13 | 14 | ENV HOME /opt/beamium 15 | 16 | WORKDIR /opt/beamium 17 | RUN groupadd -r beamium && \ 18 | useradd -r -g beamium -d ${HOME} beamium && \ 19 | chown -R beamium:beamium ${HOME} 20 | 21 | # curl is useful to be able to curl Beamium's metrics 22 | RUN apt update -y && \ 23 | apt install -y ca-certificates curl && \ 24 | rm -rf /var/lib/apt/lists/* 25 | 26 | COPY config.yml /etc/beamium/config.yml 27 | COPY --from=builder /tmp/beamium/target/release/beamium /usr/local/bin/beamium 28 | 29 | USER beamium:beamium 30 | CMD [ 
"/usr/local/bin/beamium", "-c", "/etc/beamium/config.yml" ] -------------------------------------------------------------------------------- /deploy/kubernetes/beamium.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: beamium-cm 5 | data: 6 | config.yaml: | 7 | scrapers: 8 | # scraping itself for metrics 9 | self: 10 | url: http://127.0.0.1:9110/metrics 11 | period: 60s 12 | format: prometheus 13 | 14 | # Do not forget to configure your sink here 15 | sinks: 16 | 17 | parameters: 18 | metrics: 127.0.0.1:9110 19 | log-file: /var/log/beamium/beamium.log 20 | source-dir: /opt/beamium/sources 21 | sink-dir: /opt/beamium/sinks 22 | --- 23 | apiVersion: v1 24 | kind: Pod 25 | metadata: 26 | name: beamium 27 | spec: 28 | containers: 29 | - name: beamium 30 | image: ovhcom/beamium 31 | imagePullPolicy: Always 32 | volumeMounts: 33 | - name: config-volume 34 | mountPath: /etc/beamium 35 | volumes: 36 | - name: config-volume 37 | configMap: 38 | name: beamium-cm 39 | ... -------------------------------------------------------------------------------- /src/constants.rs: -------------------------------------------------------------------------------- 1 | //! # Constants module. 2 | //! 3 | //! The Constants module provides beamium's constants. 
4 | use std::time::Duration; 5 | 6 | /// Keep only files that have the following extension 7 | pub(crate) const EXTENSION: &str = "metrics"; 8 | 9 | /// Time to sleep for thread in waiting to achieve an action 10 | pub(crate) const THREAD_SLEEP: Duration = Duration::from_millis(100); 11 | 12 | /// Size of a chunk to send 13 | pub(crate) const CHUNK_SIZE: u64 = 1024 * 1024; 14 | 15 | /// Number of threads used by hyper to resolve dns request 16 | pub(crate) const NUMBER_DNS_WORKER_THREADS: usize = 4; 17 | 18 | /// Number of handlers per tokio reactor 19 | pub(crate) const MAX_HANDLERS_PER_REACTOR: usize = 20; 20 | 21 | /// Warn if backoff is greater than this (1 second) 22 | pub(crate) const BACKOFF_WARN: Duration = Duration::from_millis(1_000); 23 | 24 | /// Keep alive duration of threads in tokio runtime 25 | pub(crate) const KEEP_ALIVE_TOKIO_RUNTIME: Duration = Duration::from_millis(5_000); 26 | -------------------------------------------------------------------------------- /debian/beamium.preinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | SERVICE=beamium 6 | PID_FILE=/var/run/$SERVICE.pid 7 | 8 | case "$1" in 9 | install) 10 | # create a beamium group and user 11 | if !
getent passwd beamium >/dev/null 2>&1; then 12 | adduser --system --group --no-create-home --home /opt/beamium beamium 13 | fi 14 | ;; 15 | 16 | upgrade) 17 | # remove old sysV if present 18 | if systemctl status >/dev/null 2>&1; then 19 | if [ -x "/etc/init.d/$SERVICE" ]; then 20 | update-rc.d $SERVICE remove || true 21 | fi 22 | # stop old process 23 | if [ -f "$PID_FILE" ]; then 24 | pkill -9 --pidfile "$PID_FILE" || true 25 | rm -f $PID_FILE 26 | deb-systemd-invoke stop $SERVICE.service || true 27 | fi 28 | fi 29 | ;; 30 | 31 | *) 32 | echo "preinst called with unknown argument \`$1'" >&2 33 | exit 1 34 | ;; 35 | esac 36 | 37 | exit 0 38 | -------------------------------------------------------------------------------- /debian/rules: -------------------------------------------------------------------------------- 1 | #!/usr/bin/make -f 2 | # -*- makefile -*- 3 | 4 | # Uncomment this to turn on verbose mode. 5 | #export DH_VERBOSE=1 6 | 7 | BUILD_DIR=$(CURDIR)/debian/tmp 8 | DISTRIBUTION = $(shell lsb_release -cs) 9 | VERSION = $(shell cat Cargo.toml | grep version | head -1 | awk -F "\"" '{print $$2}') 10 | PACKAGEVERSION = $(VERSION)-$(DISTRIBUTION) 11 | export CARGO_TARGET_DIR=$(BUILD_DIR) 12 | 13 | DEBIAN_VERSION = $(shell lsb_release -rs | cut -d . -f 1) 14 | DH_ARGS = "--with systemd" 15 | # do we use systemd? 
16 | ifeq "${DEBIAN_VERSION}" "7" 17 | DH_ARGS = "" 18 | endif 19 | ifeq "${DEBIAN_VERSION}" "6" 20 | DH_ARGS = "" 21 | endif 22 | 23 | %: 24 | dh $@ ${DH_ARGS} 25 | 26 | override_dh_auto_build: 27 | cargo build --release 28 | 29 | override_dh_install: 30 | mkdir -p $(BUILD_DIR)/etc/beamium 31 | cp $(CURDIR)/config.debian.yaml $(BUILD_DIR)/etc/beamium/config.yaml 32 | dh_install 33 | 34 | override_dh_prep: 35 | dh_prep -Xdebian/tmp 36 | 37 | override_dh_gencontrol: 38 | dh_gencontrol -- -v$(PACKAGEVERSION) 39 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: rust 2 | rust: 3 | - stable 4 | - beta 5 | - nightly 6 | matrix: 7 | allow_failures: 8 | - rust: nightly 9 | before_script: 10 | - PATH=$PATH:/home/travis/.cargo/bin 11 | script: 12 | - cargo build --verbose 13 | before_deploy: "cargo build --release" 14 | deploy: 15 | provider: releases 16 | api_key: 17 | secure: cbVPXdowaY7r6K2qLuSenc4GbGB6NpUOxfd3u3kAlqvaOK/cOLb+yFPQiskDK3AOidBGZf+Ywb5q60Rowu0nCWxM68ficS/mOOdR9/FvOroQR9mZDUHGyhGAUQ5V95eqqZsgJ6YDy023RIZzOe1nRSuaaDtmFIvMw2ba0gZnWLArE8Qp4XnM/zRWB+2SGg5i4aXsBxnKwFNFZ2f9spjq95XHtOwvDnesRmtEA0Uqh1A8amrWO226Obgd1K4AawhsEdQs5TGuSo5NdP6dmnvmkXmAHQuYOH+srZlWv/9eYkQNhNGeIaWyGZrU4Obf9tazrEPuVixBh5CpVZ73NaD4mpGRJqlogHkmdln4/3mMofIss0ZT+nHaygd2/OYg3S90S1wXURnUHoWR2gqqhLn8C3cye6ubE72zBZMIuU1qTcoLuVM72HoFF9YIWSB9Fq4j+eq7entSsLss2rI5qhnLOJdGp3bqyqkUHIrhSBk03U56RX3Twn9BziF30h6IQ/Khsm8UsuPWwkKBFKZUlPuWFKba51r2eU+77tcFtDNk5v+sFyMj47chhrfn57XRzRuJdQtNvij0GFhcMH/8GufWyS5H2BTmYldrj9V5+hP5krN7UT7ZPOxMpdTfkrVtTy2qtDx6Hzg2sl4EieZjsXx7jqrDmxr1YkPbghJ8fNCYOZs= 18 | file: target/release/beamium 19 | skip_cleanup: true 20 | on: 21 | repo: ovh/beamium 22 | branch: master 23 | tags: true 24 | -------------------------------------------------------------------------------- /Cargo.toml: 
-------------------------------------------------------------------------------- 1 | [package] 2 | name = "beamium" 3 | version = "2.0.8" 4 | edition = "2018" 5 | build = "build.rs" 6 | authors = [ 7 | "Kevin Georges ", 8 | "Florentin DUBOIS " 9 | ] 10 | 11 | [dependencies] 12 | bytes = "0.5.4" 13 | config = "0.9.3" 14 | failure = "0.1.8" 15 | futures = "=0.1.29" 16 | humanize-rs = "0.1.5" 17 | glob = "0.3.0" 18 | structopt = { version = "0.3.14", features = ["default", "paw"] } 19 | slog-async = "2.5.0" 20 | slog-term = "2.6.0" 21 | slog-scope = "4.3.0" 22 | slog-syslog = "0.12.0" 23 | serde = "1.0.111" 24 | serde_derive = "1.0.111" 25 | regex = "1.3.9" 26 | hyper = "=0.12.35" 27 | hyper-rustls = "=0.17.1" 28 | tokio = "=0.1.22" 29 | time = "=0.1.42" 30 | crossbeam = "0.7.3" 31 | warp = "0.1.20" 32 | prometheus = "0.9.0" 33 | lazy_static = "1.4.0" 34 | backoff = "0.1.6" 35 | paw = "1.0.0" 36 | uuid = { version = "0.8.1", features = ["v4"] } 37 | slog = { version = "2.5.2", features = ["release_max_level_trace", "max_level_trace"] } 38 | ctrlc = { version = "3.1.4", features = ["termination"] } 39 | notify = "4.0.15" 40 | urlencoding = "1.3.3" 41 | 42 | [build-dependencies] 43 | failure = "0.1.8" 44 | git2 = "0.13.6" 45 | time = "=0.1.42" 46 | 47 | [profile.release] 48 | lto = true 49 | panic = "abort" 50 | 51 | [profile.bench] 52 | lto = true 53 | -------------------------------------------------------------------------------- /src/lib/asynch/mod.rs: -------------------------------------------------------------------------------- 1 | //! # async module 2 | //! 3 | //! The `async` module provide asynchronous helpers. 4 | pub mod fs; 5 | pub mod http; 6 | 7 | /// The `try_future` macro provide an elegant way to manage errors in future. 8 | #[macro_export] 9 | macro_rules! 
try_future { 10 | ($x: expr) => { 11 | match $x { 12 | Ok(item) => item, 13 | Err(err) => { 14 | return futures::future::err(failure::format_err!("{}", err)); 15 | } 16 | } 17 | }; 18 | } 19 | 20 | /// The `arc` macro provide an easy way to wrap standard `Arc` 21 | #[macro_export] 22 | macro_rules! arc { 23 | ($x: expr) => { 24 | std::sync::Arc::new($x) 25 | }; 26 | } 27 | 28 | /// The `mutex` macro provide an easy way to wrap standard `Arc>` 29 | #[macro_export] 30 | macro_rules! mutex { 31 | ($x: expr) => { 32 | std::sync::Arc::new(std::sync::Mutex::new($x)) 33 | }; 34 | } 35 | 36 | /// The `poll_ready` macro provide a shortcut to the `Poll` alias 37 | #[macro_export] 38 | macro_rules! poll_not_ready { 39 | () => { 40 | Ok(futures::Async::NotReady) 41 | }; 42 | } 43 | 44 | /// The `poll_ready` macro provide a shortcut to the `Poll` alias 45 | #[macro_export] 46 | macro_rules! poll_ready { 47 | ($x: expr) => { 48 | Ok(futures::Async::Ready($x)) 49 | }; 50 | } 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (c) 2013-2016, OVH SAS. 2 | All rights reserved. 3 | 4 | Redistribution and use in source and binary forms, with or without 5 | modification, are permitted provided that the following conditions are met: 6 | 7 | * Redistributions of source code must retain the above copyright 8 | notice, this list of conditions and the following disclaimer. 9 | * Redistributions in binary form must reproduce the above copyright 10 | notice, this list of conditions and the following disclaimer in the 11 | documentation and/or other materials provided with the distribution. 12 | * Neither the name of OVH SAS nor the 13 | names of its contributors may be used to endorse or promote products 14 | derived from this software without specific prior written permission. 
15 | 16 | THIS SOFTWARE IS PROVIDED BY OVH SAS AND CONTRIBUTORS ``AS IS'' AND ANY 17 | EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 18 | WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL OVH SAS AND CONTRIBUTORS BE LIABLE FOR ANY 20 | DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 21 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 22 | LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 23 | ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 25 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /debian/copyright: -------------------------------------------------------------------------------- 1 | Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: beamium 3 | Upstream-Contact: Kevin Georges 4 | Source: https://github.com/runabove/beamium 5 | 6 | Files: * 7 | Copyright: (c) 2017, Kevin Georges 8 | License: BSD-3-Clause 9 | Redistribution and use in source and binary forms, with or without 10 | modification, are permitted provided that the following conditions are met: 11 | . 12 | * Redistributions of source code must retain the above copyright notice, this 13 | list of conditions and the following disclaimer. 14 | . 15 | * Redistributions in binary form must reproduce the above copyright notice, 16 | this list of conditions and the following disclaimer in the documentation 17 | and/or other materials provided with the distribution. 18 | . 19 | * Neither the name of the Astropy Team nor the names of its contributors may 20 | be used to endorse or promote products derived from this software without 21 | specific prior written permission. 22 | . 
23 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 24 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 25 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 26 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 27 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 28 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 29 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 30 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 31 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 32 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 33 | POSSIBILITY OF SUCH DAMAGE. 34 | -------------------------------------------------------------------------------- /debian/beamium.postinst: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | SERVICE=beamium 6 | 7 | [ -f /etc/default/$SERVICE ] && . /etc/default/$SERVICE 8 | 9 | registerService() { 10 | if systemctl status >/dev/null 2>&1; then 11 | deb-systemd-helper enable $SERVICE.service 12 | elif [ -x "/etc/init.d/$SERVICE" ]; then 13 | update-rc.d $SERVICE defaults || true 14 | fi 15 | } 16 | 17 | startService() { 18 | if systemctl status >/dev/null 2>&1; then 19 | deb-systemd-invoke start $SERVICE.service 20 | elif [ -x "/etc/init.d/$SERVICE" ]; then 21 | if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then 22 | invoke-rc.d $SERVICE start || true 23 | else 24 | /etc/init.d/$SERVICE start || true 25 | fi 26 | fi 27 | } 28 | 29 | statusService() { 30 | if systemctl status >/dev/null 2>&1; then 31 | deb-systemd-invoke status $SERVICE.service >/dev/null && echo "$SERVICE is running." 
|| true 32 | elif [ -x "/etc/init.d/$SERVICE" ]; then 33 | if [ -x "`which invoke-rc.d 2>/dev/null`" ]; then 34 | invoke-rc.d $SERVICE status 35 | else 36 | /etc/init.d/$SERVICE status 37 | fi 38 | fi 39 | } 40 | 41 | 42 | case "$1" in 43 | configure) 44 | chown -R beamium:beamium /var/log/beamium 45 | chown -R beamium:beamium /opt/beamium 46 | # fix rights on logrotate 47 | if [ -f "/etc/logrotate.d/$SERVICE" ]; then 48 | chown root:root /etc/logrotate.d/$SERVICE 49 | fi 50 | registerService 51 | STATUS=$(statusService) || true 52 | if [ "$STATUS" = "$SERVICE is running." ] ; then 53 | echo "$SERVICE is running: not restarting it." 54 | else 55 | startService 56 | fi 57 | ;; 58 | 59 | abort-upgrade|abort-remove|abort-deconfigure) 60 | ;; 61 | 62 | *) 63 | echo "postinst called with unknown argument \`$1'" >&2 64 | exit 1 65 | ;; 66 | esac 67 | 68 | exit 0 69 | -------------------------------------------------------------------------------- /debian/beamium.init: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Start/stop beamium 3 | 4 | ### BEGIN INIT INFO 5 | # Provides: beamium 6 | # Required-Start: $network 7 | # Should-Start: 8 | # Required-Stop: 9 | # Default-Start: 2 3 4 5 10 | # Default-Stop: 0 1 6 11 | # Short-Description: 12 | # Description: 13 | ### END INIT INFO 14 | 15 | # Do NOT "set -e" 16 | 17 | PATH=/sbin:/usr/sbin:/bin:/usr/bin 18 | DESC="beamium" 19 | NAME=beamium 20 | DAEMON=/usr/bin/$NAME 21 | PID_FILE=/var/run/$NAME.pid 22 | USER="beamium" 23 | GROUP="beamium" 24 | 25 | # Exit if the package is not installed 26 | [ -x $DAEMON ] || exit 0 27 | 28 | # Default values for config variables 29 | CONF_FILE=/etc/beamium/config.yaml 30 | 31 | # Read configuration variable file if it is present 32 | [ -r /etc/default/$NAME ] && . /etc/default/$NAME 33 | 34 | # Define LSB start_daemon and log_* functions. 35 | . 
/lib/lsb/init-functions 36 | 37 | start() 38 | { 39 | echo -n "Starting $DESC: " 40 | start-stop-daemon --chuid ${USER}:${GROUP} \ 41 | --start --quiet --background \ 42 | --make-pidfile --pidfile "$PID_FILE" \ 43 | --exec "$DAEMON" -- --config="$CONF_FILE" 44 | if [ $? -ne 0 ] ; then 45 | echo "failed." 46 | else 47 | echo "done." 48 | fi 49 | } 50 | 51 | stop() 52 | { 53 | echo -n "Stoping $DESC: " 54 | start-stop-daemon --stop --quiet --retry 3 \ 55 | --pidfile "$PID_FILE" 56 | if [ $? -ne 0 ] ; then 57 | echo "failed." 58 | else 59 | echo "done." 60 | fi 61 | rm -f $PID_FILE 62 | } 63 | 64 | status() 65 | { 66 | status_of_proc -p "$PID_FILE" "$DAEMON" "$NAME" && exit 0 || exit $? 67 | } 68 | 69 | ### main() 70 | case "$1" in 71 | start) 72 | start 73 | ;; 74 | stop) 75 | stop 76 | ;; 77 | restart) 78 | stop 79 | start 80 | ;; 81 | status) 82 | status 83 | ;; 84 | # only to please lintian -> restart 85 | force-reload) 86 | stop 87 | start 88 | ;; 89 | *) 90 | echo "Usage: $0 {start|stop|restart|status}" 91 | esac 92 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Beamium 2 | 3 | This project accepts contributions. In order to contribute, you should pay attention to a few things: 4 | 5 | 1. your code must follow the coding style rules 6 | 2. your code must be unit-tested 7 | 3. your code must be documented 8 | 4. your work must be signed (see below) 9 | 5. you may contribute through GitHub Pull Requests 10 | 11 | # Coding and documentation Style 12 | 13 | ## LANGUAGE_GUIDELINES 14 | 15 | # Submitting Modifications 16 | 17 | The contributions should be submitted through Github Pull Requests and follow the DCO which is defined below. 18 | 19 | # Licensing for new files 20 | 21 | Beamium is licensed under a Modified 3-Clause BSD license. Anything contributed to Beamium must be released under this license. 
22 | 23 | When introducing a new file into the project, please make sure it has a copyright header making clear under which license it's being released. 24 | 25 | # Developer Certificate of Origin (DCO) 26 | 27 | To improve tracking of contributions to this project we will use a process modeled on the modified DCO 1.1 and use a "sign-off" procedure on patches that are being emailed around or contributed in any other way. 28 | 29 | The sign-off is a simple line at the end of the explanation for the patch, which certifies that you wrote it or otherwise have the right to pass it on as an open-source patch. The rules are pretty simple: if you can certify the below: 30 | 31 | By making a contribution to this project, I certify that: 32 | 33 | (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or 34 | 35 | (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source License and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or 36 | 37 | (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. 38 | 39 | (d) The contribution is made free of any other party's intellectual property claims or rights. 40 | 41 | (e) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. 
42 | 43 | then you just add a line saying 44 | 45 | ``` 46 | Signed-off-by: Random J Developer 47 | ``` 48 | 49 | using your real name (sorry, no pseudonyms or anonymous contributions.) 50 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 6 | 7 | ## Our Standards 8 | 9 | Examples of behavior that contributes to creating a positive environment include: 10 | 11 | * Using welcoming and inclusive language 12 | * Being respectful of differing viewpoints and experiences 13 | * Gracefully accepting constructive criticism 14 | * Focusing on what is best for the community 15 | * Showing empathy towards other community members 16 | 17 | Examples of unacceptable behavior by participants include: 18 | 19 | * The use of sexualized language or imagery and unwelcome sexual attention or advances 20 | * Trolling, insulting/derogatory comments, and personal or political attacks 21 | * Public or private harassment 22 | * Publishing others' private information, such as a physical or electronic address, without explicit permission 23 | * Other conduct which could reasonably be considered inappropriate in a professional setting 24 | 25 | ## Our Responsibilities 26 | 27 | Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. 
28 | 29 | Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. 30 | 31 | ## Scope 32 | 33 | This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 34 | 35 | ## Enforcement 36 | 37 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at iot@ml.ovh.net. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. 38 | 39 | Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. 40 | 41 | ## Attribution 42 | 43 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] 44 | 45 | [homepage]: http://contributor-covenant.org 46 | [version]: http://contributor-covenant.org/version/1/4/ 47 | -------------------------------------------------------------------------------- /src/log.rs: -------------------------------------------------------------------------------- 1 | //! 
# Log module. 2 | //! 3 | //! This module provides the log facility. 4 | use std::cmp::min; 5 | use std::fs::create_dir_all; 6 | use std::fs::OpenOptions; 7 | use std::os::unix::fs::OpenOptionsExt; 8 | use std::path::Path; 9 | 10 | use failure::{Error, ResultExt}; 11 | use slog::{Drain, Duplicate, Level, LevelFilter, Logger}; 12 | use slog_async::Async; 13 | use slog_scope::{set_global_logger, GlobalLoggerGuard as Guard}; 14 | use slog_syslog::{unix_3164, Facility}; 15 | use slog_term::{FullFormat, PlainSyncDecorator, TermDecorator}; 16 | 17 | use crate::conf::Parameters; 18 | 19 | /// Bare logger that only write to console. 20 | #[must_use = "logger guard must be keep as reference or else all messages will be discarded"] 21 | pub fn bootstrap() -> Guard { 22 | let decorator = TermDecorator::new().build(); 23 | let drain = FullFormat::new(decorator).build().fuse(); 24 | let drain = Async::new(drain).build().fuse(); 25 | 26 | set_global_logger(Logger::root(drain, o!())) 27 | } 28 | 29 | /// Full featured logger. 30 | /// Send log to console and log file, also handle log level. 31 | #[must_use = "logger guard must be keep as reference or else all messages will be discarded"] 32 | pub fn initialize(verbose: usize, parameters: &Parameters) -> Result { 33 | // Ensure log directory is present 34 | if let Some(log_path) = Path::new(¶meters.log_file).parent() { 35 | create_dir_all(log_path).with_context(|err| { 36 | format!( 37 | "could not create directory '{}', {}", 38 | log_path.display(), 39 | err 40 | ) 41 | })? 
42 | } 43 | 44 | // Stdout drain 45 | let term_decorator = TermDecorator::new().build(); 46 | let term_drain = FullFormat::new(term_decorator).build().fuse(); 47 | let term_drain = Async::new(term_drain).build().fuse(); 48 | 49 | // File drain 50 | let log_file = OpenOptions::new() 51 | .create(true) 52 | .append(true) 53 | .mode(0o640) 54 | .open(¶meters.log_file) 55 | .with_context(|err| format!("could not create file '{}', {}", parameters.log_file, err))?; 56 | 57 | let decorator = PlainSyncDecorator::new(log_file); 58 | let file_drain = FullFormat::new(decorator).build().fuse(); 59 | let file_drain = Async::new(file_drain).build().fuse(); 60 | 61 | // increase console log level if needed. Cap to trace 62 | let console_level = parameters.log_level + verbose; 63 | let console_level = min(console_level, Level::Trace.as_usize()); 64 | let console_level = Level::from_usize(console_level).unwrap_or_else(|| Level::Trace); 65 | 66 | if parameters.syslog { 67 | let syslog_drain = unix_3164(Facility::LOG_DAEMON)?; 68 | let drain = Duplicate::new( 69 | LevelFilter::new( 70 | syslog_drain, 71 | Level::from_usize(parameters.log_level).unwrap_or_else(|| Level::Info), 72 | ), 73 | Duplicate::new( 74 | LevelFilter::new(term_drain, console_level), 75 | LevelFilter::new( 76 | file_drain, 77 | Level::from_usize(parameters.log_level).unwrap_or_else(|| Level::Info), 78 | ), 79 | ), 80 | ) 81 | .fuse(); 82 | 83 | return Ok(set_global_logger(Logger::root(drain, o!()))); 84 | } 85 | 86 | let drain = Duplicate::new( 87 | LevelFilter::new(term_drain, console_level), 88 | LevelFilter::new( 89 | file_drain, 90 | Level::from_usize(parameters.log_level).unwrap_or_else(|| Level::Info), 91 | ), 92 | ) 93 | .fuse(); 94 | 95 | Ok(set_global_logger(Logger::root(drain, o!()))) 96 | } 97 | -------------------------------------------------------------------------------- /config.sample.yaml: -------------------------------------------------------------------------------- 1 | scrapers: # Scrapers 
definitions (Optional) 2 | scraper1: # Source name (Required) 3 | url: http://127.0.0.1:9100/metrics # Prometheus endpoint (Required) 4 | period: 60s # Polling interval (Required) 5 | format: prometheus # Polling format (Optional, default: prometheus, value: [prometheus, sensision]) 6 | labels: # Labels definitions (Optional) 7 | label_name: label_value # Label definition (Required) 8 | filtered_labels: # filtered labels (optional) 9 | - jobid # key label which is removed (required) 10 | metrics: # filter fetched metrics (optional) 11 | - node.* # regex used to select metrics (required) 12 | headers: # Add custom header on request (Optional) 13 | X-Toto: tata # list of headers to add (Optional) 14 | Authorization: Basic XXXXXXXX 15 | pool: 1 # Number of threads allocated for the scraper (Optional) 16 | 17 | sinks: # Sinks definitions (Optional) 18 | source1: # Sink name (Required) 19 | url: WARP10_ENDPOINT # Warp10 endpoint (Required) 20 | token: WARP10_TOKEN # Warp10 write token (Required) 21 | token-header: X-Custom-Token # Warp10 token header name (Optional, default: X-Warp10-Token) 22 | selector: metrics.* # Regex used to filter metrics (Optional, default: None) 23 | ttl: 1h # Discard file older than ttl (Optional, default: 3600) 24 | size: 100Gb # Discard old file if sink size is greater (Optional, default: 1073741824) 25 | parallel: 1 # Send parallelism (Optional, default: 1) 26 | keep-alive: 1 # Use keep alive (Optional, default: 1) 27 | 28 | labels: # Labels definitions (Optional) 29 | label_name: label_value # Label definition (Required) 30 | 31 | parameters: # Parameters definitions (Optional) 32 | source-dir: sources # Beamium data source directory (Optional, default: sources) 33 | sink-dir: sinks # Beamium data sink directory (Optional, default: sinks) 34 | scan-period: 1s # Delay(ms) between source/sink scan (Optional, default: 1000) 35 | batch-count: 250 # Maximum number of files to process in a batch (Optional, default: 250) 36 | batch-size: 2Kb #
Maximum batch size (Optional, default: 200000) 37 | log-file: beamium.log # Log file (Optional, default: beamium.log) 38 | log-level: 4 # Log level (Optional, default: info) 39 | timeout: 500 # Http timeout (Optional, default: 500) 40 | router-parallel: 1 # Routing threads (Optional, default: 1) 41 | metrics: 127.0.0.1:9110 # Open a server on the given address and expose a prometheus /metrics endpoint (Optional, default: none) 42 | filesystem-threads: 100 # Set the maximum number of threads used for blocking processing per scraper, sink and router (Optional, default: 100) 43 | backoff: # Backoff configuration - slow down push on errors (Optional) 44 | initial: 500ms # Initial interval (Optional, default: 500ms) 45 | max: 1m # Max interval (Optional, default: 1m) 46 | multiplier: 1.5 # Interval multiplier (Optional, default: 1.5) 47 | randomization: 0.3 # Randomization factor - delay = interval * 0.3 (Optional, default: 0.3) 48 | -------------------------------------------------------------------------------- /src/lib/asynch/fs.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, HashSet}; 2 | use std::convert::From; 3 | use std::ffi::OsStr; 4 | use std::fs::Metadata; 5 | use std::path::PathBuf; 6 | use std::time::{Duration, Instant}; 7 | 8 | use failure::{format_err, Error}; 9 | use futures::future::{err, join_all, ok}; 10 | use futures::{try_ready, Async, Future, Poll, Stream}; 11 | use prometheus::GaugeVec; 12 | use tokio::fs::{metadata, read_dir, remove_file}; 13 | use tokio::timer::Interval; 14 | 15 | use crate::constants::EXTENSION; 16 | 17 | lazy_static!
{ 18 | static ref BEAMIUM_DIRECTORY_FILES: GaugeVec = register_gauge_vec!( 19 | opts!( 20 | "beamium_directory_files", 21 | "Number of files in the directory" 22 | ), 23 | &["directory"] 24 | ) 25 | .expect("create metric: 'beamium_directory_files'"); 26 | } 27 | 28 | #[derive(Debug)] 29 | pub struct Scanner { 30 | interval: Interval, 31 | dir: PathBuf, 32 | } 33 | 34 | impl From<(PathBuf, Duration)> for Scanner { 35 | fn from(tuple: (PathBuf, Duration)) -> Self { 36 | let (dir, period) = tuple; 37 | 38 | Self { 39 | interval: Interval::new(Instant::now(), period), 40 | dir, 41 | } 42 | } 43 | } 44 | 45 | impl Stream for Scanner { 46 | type Item = HashMap; 47 | type Error = Error; 48 | 49 | fn poll(&mut self) -> Poll, Self::Error> { 50 | try_ready!(self.interval.poll().map_err(|err| format_err!("{}", err))); 51 | 52 | let mut scan = Self::scan(self.dir.to_owned()); 53 | 54 | loop { 55 | return match scan.poll()? { 56 | Async::NotReady => continue, 57 | Async::Ready(entries) => { 58 | let dir = self 59 | .dir 60 | .to_str() 61 | .expect("directory name is utf-8 compliant"); 62 | 63 | BEAMIUM_DIRECTORY_FILES 64 | .with_label_values(&[dir]) 65 | .set(entries.len() as f64); 66 | 67 | Ok(Async::Ready(Some(entries))) 68 | } 69 | }; 70 | } 71 | } 72 | } 73 | 74 | impl Scanner { 75 | fn scan(path: PathBuf) -> impl Future, Error = Error> { 76 | read_dir(path) 77 | .map_err(|err| format_err!("{}", err)) 78 | .and_then(move |entries| { 79 | entries 80 | .map_err(|err| format_err!("{}", err)) 81 | .filter_map(move |entry| { 82 | let path = entry.path(); 83 | if path.extension() != Some(OsStr::new(EXTENSION)) { 84 | return None; 85 | } 86 | 87 | Some(path) 88 | }) 89 | .fold(HashSet::new(), |mut acc, path| { 90 | ok::<_, Error>({ 91 | acc.insert(path); 92 | acc 93 | }) 94 | }) 95 | }) 96 | .and_then(|entries| { 97 | let mut bulk = vec![]; 98 | for entry in entries { 99 | let entry = entry.to_owned(); 100 | 101 | bulk.push( 102 | // In some cases, metadata failed to retrieve the 
meta of the file. 103 | // This occurred when a file is deleted by a sink. 104 | metadata(entry.to_owned()) 105 | .and_then(move |meta| ok(Some((entry, meta)))) 106 | .or_else(|_| ok(None)), 107 | ); 108 | } 109 | 110 | join_all(bulk) 111 | }) 112 | .and_then(|tuples| { 113 | let mut bulk = vec![]; 114 | for tuple in tuples { 115 | let (entry, meta) = match tuple.to_owned() { 116 | Some(tuple) => tuple, 117 | None => continue, 118 | }; 119 | 120 | let fut = if meta.len() > 0 { 121 | ok(Some((entry, meta))) 122 | } else { 123 | err((entry, meta)) 124 | }; 125 | 126 | bulk.push(fut.or_else(|(entry, _)| { 127 | trace!("remove empty file"; "path" => entry.to_str()); 128 | remove_file(entry) 129 | .map_err(|err| format_err!("{}", err)) 130 | .and_then(|_| ok::, _>(None)) 131 | })); 132 | } 133 | 134 | join_all(bulk).and_then(|entries| { 135 | ok(entries.iter().filter_map(ToOwned::to_owned).fold( 136 | HashMap::new(), 137 | |mut acc, (path, meta)| { 138 | acc.insert(path, meta); 139 | acc 140 | }, 141 | )) 142 | }) 143 | }) 144 | } 145 | } 146 | -------------------------------------------------------------------------------- /src/lib/mod.rs: -------------------------------------------------------------------------------- 1 | //! # Library module. 2 | //! 3 | //! This module provide traits and standard stuffs. 
4 | use failure::{format_err, Error, ResultExt}; 5 | use tokio::runtime::Runtime; 6 | 7 | #[macro_use] 8 | pub mod asynch; 9 | pub mod transcompiler; 10 | 11 | /// `Runner` trait provide a method to start a job on the given runtime 12 | pub trait Runner { 13 | type Error; 14 | 15 | /// Start the runner 16 | fn start(&self, runtime: &mut Runtime) -> Result<(), Self::Error>; 17 | } 18 | 19 | /// `Named` trait provide a method to retrieve the name of the structure 20 | pub trait Named { 21 | /// Retrieve the name 22 | fn name(&self) -> String; 23 | } 24 | 25 | /// `add_labels` to the time series 26 | pub fn add_labels(line: &str, labels: &str) -> Result { 27 | if labels.is_empty() { 28 | return Ok(String::from(line)); 29 | } 30 | 31 | let mut parts = line.splitn(2, '{'); 32 | 33 | let class = parts 34 | .next() 35 | .ok_or_else(|| format_err!("no_class")) 36 | .with_context(|err| format!("could not parse '{}', {}", line, err))?; 37 | let class = String::from(class); 38 | 39 | let plabels = parts 40 | .next() 41 | .ok_or_else(|| format_err!("no_labels")) 42 | .with_context(|err| format!("could not parse '{}', {}", line, err))?; 43 | let plabels = String::from(plabels); 44 | 45 | let sep = if plabels.trim().starts_with('}') { 46 | "" 47 | } else { 48 | "," 49 | }; 50 | 51 | Ok(format!("{}{{{}{}{}", class, labels, sep, plabels)) 52 | } 53 | 54 | /// `remove_labels` to the time series 55 | pub fn remove_labels(line: &str, labels_to_drop: &[String]) -> Result { 56 | if labels_to_drop.is_empty() { 57 | return Ok(String::from(line)); 58 | } 59 | 60 | let mut parts = line.splitn(2, '{'); 61 | 62 | let class = parts 63 | .next() 64 | .ok_or_else(|| format_err!("no_class")) 65 | .with_context(|err| format!("could not parse '{}', {}", line, err))?; 66 | let class = String::from(class); 67 | 68 | let plabels = parts 69 | .next() 70 | .ok_or_else(|| format_err!("no_labels")) 71 | .with_context(|err| format!("could not parse '{}', {}", line, err))?; 72 | let plabels = 
String::from(plabels); 73 | 74 | let mut end_parts = plabels.splitn(2, "} "); 75 | 76 | let plabels = end_parts 77 | .next() 78 | .ok_or_else(|| format_err!("no_end")) 79 | .with_context(|err| format!("could not parse '{}', {}", line, err))?; 80 | let plabels = String::from(plabels); 81 | 82 | let value = end_parts 83 | .next() 84 | .ok_or_else(|| format_err!("no_value")) 85 | .with_context(|err| format!("could not parse '{}', {}", line, err))?; 86 | let value = String::from(value); 87 | 88 | let labels: Vec = plabels 89 | .split(',') 90 | .filter_map(|l| { 91 | let mut label_splits: Vec = l.split('=').map(String::from).collect(); 92 | let value = label_splits.pop()?; 93 | let key = label_splits.pop()?; 94 | Some((key, value)) 95 | }) 96 | .filter(|(key, _)| !labels_to_drop.contains(&key.to_owned())) 97 | .map(|(key, value)| format!("{}={}", key, value)) 98 | .collect(); 99 | 100 | Ok(format!("{}{{{}}} {}", class, labels.join(","), value)) 101 | } 102 | 103 | #[cfg(test)] 104 | mod tests { 105 | use failure::Error; 106 | 107 | #[test] 108 | fn no_labels_at_all() { 109 | let line = "1484828198557102// f{} 10"; 110 | let expected: Result = Ok(String::from("1484828198557102// f{} 10")); 111 | let labels = vec![String::from("job_name"), String::from("another_id")]; 112 | let result = super::remove_labels(line, &labels); 113 | assert_eq!(expected.is_ok(), result.is_ok()); 114 | assert_eq!(expected.unwrap(), result.unwrap()); 115 | } 116 | 117 | #[test] 118 | fn remove_no_labels() { 119 | let line = "1484828198557102// f{job_id=123,job_name=job1,another_id=456} 10"; 120 | let expected: Result = Ok(String::from(line)); 121 | let labels = vec![]; 122 | let result = super::remove_labels(line, &labels); 123 | assert_eq!(expected.is_ok(), result.is_ok()); 124 | assert_eq!(expected.unwrap(), result.unwrap()); 125 | } 126 | 127 | #[test] 128 | fn remove_one_labels() { 129 | let line = "1484828198557102// f{job_id=123,job_name=job1,another_id=456} 10"; 130 | let expected: Result 
= Ok(String::from( 131 | "1484828198557102// f{job_id=123,another_id=456} 10", 132 | )); 133 | let labels = vec![String::from("job_name")]; 134 | let result = super::remove_labels(line, &labels); 135 | assert_eq!(expected.is_ok(), result.is_ok()); 136 | assert_eq!(expected.unwrap(), result.unwrap()); 137 | } 138 | 139 | #[test] 140 | fn remove_multiple_labels() { 141 | let line = "1484828198557102// f{job_id=123,job_name=job1,another_id=456} 10"; 142 | let expected: Result = 143 | Ok(String::from("1484828198557102// f{job_id=123} 10")); 144 | let labels = vec![String::from("job_name"), String::from("another_id")]; 145 | let result = super::remove_labels(line, &labels); 146 | assert_eq!(expected.is_ok(), result.is_ok()); 147 | assert_eq!(expected.unwrap(), result.unwrap()); 148 | } 149 | 150 | #[test] 151 | fn add_one_label() { 152 | let line = "1562656816000000// f{type=count} 1486"; 153 | let label = "host=foo"; 154 | let expected: Result = Ok(String::from( 155 | "1562656816000000// f{host=foo,type=count} 1486", 156 | )); 157 | let result = super::add_labels(line, label); 158 | assert_eq!(expected.is_ok(), result.is_ok()); 159 | assert_eq!(expected.unwrap(), result.unwrap()); 160 | } 161 | 162 | #[test] 163 | fn add_multiple_labels() { 164 | let line = "1562656816000000// f{type=count} 1486"; 165 | let label = "host=foo,rack=toto"; 166 | let expected: Result = Ok(String::from( 167 | "1562656816000000// f{host=foo,rack=toto,type=count} 1486", 168 | )); 169 | let result = super::add_labels(line, label); 170 | assert_eq!(expected.is_ok(), result.is_ok()); 171 | assert_eq!(expected.unwrap(), result.unwrap()); 172 | } 173 | } 174 | -------------------------------------------------------------------------------- /src/lib/transcompiler.rs: -------------------------------------------------------------------------------- 1 | use std::error::Error; 2 | 3 | use time::now_utc; 4 | 5 | use urlencoding::encode; 6 | 7 | use crate::conf::ScraperFormat; 8 | 9 | #[derive(Clone, 
Debug)] 10 | pub struct Transcompiler { 11 | format: ScraperFormat, 12 | now: i64, 13 | } 14 | 15 | impl Transcompiler { 16 | pub fn new(format: ScraperFormat) -> Self { 17 | let start = now_utc(); 18 | let now = start.to_timespec().sec * 1_000_000 19 | + (i64::from(start.to_timespec().nsec) as i64 / 1_000); 20 | 21 | Self { format, now } 22 | } 23 | 24 | pub fn format(&self, line: &str) -> Result> { 25 | match self.format { 26 | ScraperFormat::Sensision => format_warp10(line), 27 | ScraperFormat::Prometheus => format_prometheus(line, self.now), 28 | } 29 | } 30 | } 31 | 32 | /// Format Warp10 metrics from Prometheus one. 33 | fn format_warp10(line: &str) -> Result> { 34 | Ok(String::from(line.trim())) 35 | } 36 | 37 | /// Format Warp10 metrics from Prometheus one. 38 | fn format_prometheus(line: &str, now: i64) -> Result> { 39 | let line = line.trim(); 40 | 41 | // Skip comments or empty line 42 | if line.starts_with('#') || line.is_empty() { 43 | return Ok(String::new()); 44 | } 45 | 46 | // Extract Prometheus metric 47 | let index = if line.contains('{') { 48 | line.rfind('}').ok_or_else(|| "bad class")? 49 | } else { 50 | line.find(' ').ok_or_else(|| "bad class")? 
51 | }; 52 | let (class, v) = line.split_at(index + 1); 53 | let mut tokens = v.split_whitespace(); 54 | 55 | let value = tokens.next().ok_or_else(|| "no value")?; 56 | 57 | // Prometheus value can be '-Inf', '+Inf', 'nan', 'NaN' skipping if so 58 | if value == "+Inf" || value == "-Inf" || value == "nan" || value == "NaN" { 59 | return Ok(String::new()); 60 | } 61 | 62 | let timestamp = tokens.next().map_or(now, |v| { 63 | i64::from_str_radix(v, 10) 64 | .map(|v| v * 1000) 65 | .unwrap_or_else(|_| now) 66 | }); 67 | 68 | // Format class 69 | let mut parts = class.splitn(2, '{'); 70 | let class = String::from(parts.next().ok_or_else(|| "no_class")?); 71 | let class = encode(class.trim()); 72 | let plabels = parts.next(); 73 | let slabels = match plabels { 74 | None => String::new(), 75 | Some(plabels) => { 76 | let mut labels = String::new(); 77 | let mut in_label = false; 78 | let mut buffer = String::new(); 79 | for c in plabels.chars() { 80 | if c == '"' { 81 | in_label = !in_label; 82 | continue; 83 | } 84 | 85 | if !in_label { 86 | if c == '=' || c == ',' || c == '}' { 87 | labels.push_str(& encode(&buffer)); 88 | buffer = String::new(); 89 | 90 | if c == ',' { 91 | labels.push(','); 92 | } 93 | if c == '=' { 94 | labels.push('='); 95 | } 96 | continue; 97 | } 98 | } 99 | 100 | buffer.push(c); 101 | } 102 | labels 103 | } 104 | }; 105 | 106 | let class = format!("{}{{{}}}", class, slabels); 107 | 108 | Ok(format!("{}// {} {}", timestamp, class, value)) 109 | } 110 | 111 | #[cfg(test)] 112 | mod tests { 113 | use super::*; 114 | 115 | #[test] 116 | fn prometheus_skip_infinity() { 117 | let line = "f{job_id=\"123\"} +Inf"; 118 | let expected: Result> = Ok(String::new()); 119 | let result = super::format_prometheus(line, 1); 120 | assert_eq!(expected.is_ok(), result.is_ok()); 121 | assert_eq!(expected.unwrap(), result.unwrap()); 122 | 123 | let line = "f{job_id=\"123\"} -Inf"; 124 | let expected: Result> = Ok(String::new()); 125 | let result = 
super::format_prometheus(line, 1); 126 | assert_eq!(expected.is_ok(), result.is_ok()); 127 | assert_eq!(expected.unwrap(), result.unwrap()); 128 | } 129 | 130 | #[test] 131 | fn prometheus_skip_empty() { 132 | let line = ""; 133 | let expected: Result> = Ok(String::new()); 134 | let result = super::format_prometheus(line, 1); 135 | assert_eq!(expected.is_ok(), result.is_ok()); 136 | assert_eq!(expected.unwrap(), result.unwrap()); 137 | } 138 | 139 | #[test] 140 | fn prometheus_skip_comment() { 141 | let line = "# HELP ..."; 142 | let expected: Result> = Ok(String::new()); 143 | let result = super::format_prometheus(line, 1); 144 | assert_eq!(expected.is_ok(), result.is_ok()); 145 | assert_eq!(expected.unwrap(), result.unwrap()); 146 | } 147 | 148 | #[test] 149 | fn prometheus_skip_nan() { 150 | let line = "f{job_id=\"123\"} nan"; 151 | let expected: Result> = Ok(String::new()); 152 | let result = super::format_prometheus(line, 1); 153 | assert_eq!(expected.is_ok(), result.is_ok()); 154 | assert_eq!(expected.unwrap(), result.unwrap()); 155 | 156 | let line = "f{job_id=\"123\"} NaN"; 157 | let expected: Result> = Ok(String::new()); 158 | let result = super::format_prometheus(line, 1); 159 | assert_eq!(expected.is_ok(), result.is_ok()); 160 | assert_eq!(expected.unwrap(), result.unwrap()); 161 | } 162 | 163 | #[test] 164 | fn prometheus_urlencoding() { 165 | let line = "f{job_id=\"1%3\"} 1"; 166 | let expected: Result> = Ok(String::from("1// f{job_id=1%253} 1")); 167 | let result = super::format_prometheus(line, 1); 168 | assert_eq!(expected.is_ok(), result.is_ok()); 169 | assert_eq!(expected.unwrap(), result.unwrap()); 170 | 171 | let line = "f{job_id=\"1%3\"} 1"; 172 | let expected: Result> = Ok(String::from("1// f{job_id=1%253} 1")); 173 | let result = super::format_prometheus(line, 1); 174 | assert_eq!(expected.is_ok(), result.is_ok()); 175 | assert_eq!(expected.unwrap(), result.unwrap()); 176 | 177 | let line = "f{job_id=\"1%3\"} 1"; 178 | let expected: Result> = 
Ok(String::from("1// f{job_id=1%253} 1")); 179 | let result = super::format_prometheus(line, 1); 180 | assert_eq!(expected.is_ok(), result.is_ok()); 181 | assert_eq!(expected.unwrap(), result.unwrap()); 182 | 183 | let line = "f{job_id=\"1 3\"} 1"; 184 | let expected: Result> = Ok(String::from("1// f{job_id=1%203} 1")); 185 | let result = super::format_prometheus(line, 1); 186 | assert_eq!(expected.is_ok(), result.is_ok()); 187 | assert_eq!(expected.unwrap(), result.unwrap()); 188 | 189 | let line = "f{job_id=\"1+3\"} 1"; 190 | let expected: Result> = Ok(String::from("1// f{job_id=1%2B3} 1")); 191 | let result = super::format_prometheus(line, 1); 192 | assert_eq!(expected.is_ok(), result.is_ok()); 193 | assert_eq!(expected.unwrap(), result.unwrap()); 194 | } 195 | } 196 | -------------------------------------------------------------------------------- /src/main.rs: -------------------------------------------------------------------------------- 1 | //! # Beamium. 2 | //! 3 | //! Beamium scrap Prometheus endpoint and forward metrics to Warp10. 4 | #[macro_use] 5 | extern crate lazy_static; 6 | #[macro_use] 7 | extern crate prometheus; 8 | #[macro_use] 9 | extern crate slog; 10 | #[macro_use] 11 | extern crate slog_scope; 12 | 13 | use std::convert::TryFrom; 14 | use std::process::abort; 15 | use std::sync::atomic::{AtomicBool, Ordering}; 16 | use std::thread; 17 | use std::time::Duration; 18 | 19 | use failure::{format_err, Error}; 20 | use prometheus::Counter; 21 | 22 | use crate::cmd::{version, Opts}; 23 | use crate::conf::Conf; 24 | use crate::constants::THREAD_SLEEP; 25 | use crate::version::PROFILE; 26 | 27 | lazy_static! 
{ 28 | static ref BEAMIUM_RELOAD_COUNT: Counter = register_counter!(opts!( 29 | "beamium_reload_count", 30 | "Number of time Beamium was reloaded" 31 | )) 32 | .expect("create metric: 'beamium_reload_count'"); 33 | } 34 | 35 | #[macro_use] 36 | pub(crate) mod lib; 37 | pub(crate) mod cmd; 38 | pub(crate) mod conf; 39 | pub(crate) mod constants; 40 | pub(crate) mod log; 41 | pub(crate) mod router; 42 | pub(crate) mod scraper; 43 | pub(crate) mod sink; 44 | pub(crate) mod version; 45 | 46 | #[paw::main] 47 | fn main(opts: Opts) -> Result<(), Error> { 48 | let _guard = log::bootstrap(); 49 | 50 | // ------------------------------------------------------------------------- 51 | // check if beamium is compiled in the 'release' profile 52 | if PROFILE != "release" { 53 | warn!( 54 | "{} is running in '{}' mode", 55 | env!("CARGO_PKG_NAME"), 56 | PROFILE 57 | ); 58 | } 59 | 60 | // ------------------------------------------------------------------------- 61 | // Display version 62 | if opts.version { 63 | return version(); 64 | } 65 | 66 | // ------------------------------------------------------------------------- 67 | // Manage termination signals from the system 68 | let sigint = arc!(AtomicBool::new(false)); 69 | // used to notify the launcher of cmd::main that it is running 70 | let cmd_main_is_ready = arc!(AtomicBool::new(false)); 71 | let tx = sigint.to_owned(); 72 | let result = ctrlc::set_handler(move || { 73 | tx.store(true, Ordering::SeqCst); 74 | }); 75 | 76 | if let Err(err) = result { 77 | crit!("could not set handler on signal int"; "error" => err.to_string()); 78 | return Err(format_err!("{}", err)); 79 | } 80 | 81 | // ------------------------------------------------------------------------- 82 | // Retrieve configuration 83 | let result = match opts.config { 84 | Some(ref path) => Conf::try_from(path), 85 | None => Conf::default(), 86 | }; 87 | 88 | let conf = match result { 89 | Ok(conf) => conf, 90 | Err(err) => { 91 | crit!("configuration is not 
healthy"; "error" => err.to_string()); 92 | return Err(format_err!("{}", err)); 93 | } 94 | }; 95 | 96 | // We need to keep a reference of _tx and _watcher as they implement Drop trait they will stop to watch files 97 | // and closed the communication channel 98 | let (_tx, watcher_rx, _watcher) = match Conf::watch(opts.config.to_owned()) { 99 | Ok(watcher) => watcher, 100 | Err(err) => { 101 | crit!("could not watch configuration"; "error" => err.to_string()); 102 | return Err(format_err!("{}", err)); 103 | } 104 | }; 105 | 106 | // ------------------------------------------------------------------------- 107 | // Quit if it is only for check configuration 108 | if opts.check { 109 | // 0 is info level 110 | // 1 is debug level 111 | // 2+ is trace level 112 | if opts.verbose >= 1 { 113 | debug!("{:#?}", conf); 114 | } 115 | 116 | info!("configuration is healthy"); 117 | return Ok(()); 118 | } 119 | 120 | // ------------------------------------------------------------------------- 121 | // Initialize full featured logger and keep a reference of the guard 122 | let _guard = match log::initialize(opts.verbose, &conf.parameters) { 123 | Ok(guard) => guard, 124 | Err(err) => { 125 | crit!("could not instantiate full featured logger"; "error" => err.to_string()); 126 | return Err(format_err!("{}", err)); 127 | } 128 | }; 129 | 130 | // ------------------------------------------------------------------------- 131 | // Start beamium scraper, sinks and metrics 132 | let signal = arc!(AtomicBool::new(true)); 133 | let rx = signal.to_owned(); 134 | let main_is_ready = cmd_main_is_ready.to_owned(); 135 | let mut handler = thread::spawn(move || { 136 | if let Err(err) = cmd::main(conf, rx, main_is_ready) { 137 | crit!("{}", err); 138 | thread::sleep(Duration::from_millis(100)); // Sleep the time to display the message 139 | abort(); 140 | } 141 | }); 142 | 143 | while !cmd_main_is_ready.load(Ordering::SeqCst) { 144 | thread::sleep(THREAD_SLEEP); 145 | } 146 | 147 | // Wait 
for termination signals 148 | loop { 149 | if sigint.load(Ordering::SeqCst) { 150 | signal.store(false, Ordering::SeqCst); 151 | if handler.join().is_err() { 152 | crit!("could not stop main thread"); 153 | } 154 | 155 | break; 156 | } 157 | 158 | // retrieve all pending events from watch 159 | let watch_event_count = watcher_rx.try_iter().count(); 160 | 161 | if watch_event_count > 0 { 162 | debug!("received a batch of {} watch events", watch_event_count); 163 | info!("reload configuration"); 164 | signal.store(false, Ordering::SeqCst); 165 | if handler.join().is_err() { 166 | crit!("could not stop main thread"); 167 | break; 168 | } 169 | 170 | let path = opts.config.to_owned(); 171 | let tx = signal.to_owned(); 172 | let cmd_ready = cmd_main_is_ready.to_owned(); 173 | 174 | handler = thread::spawn(move || { 175 | let result = match path { 176 | Some(ref path) => Conf::try_from(path), 177 | None => Conf::default(), 178 | }; 179 | 180 | let conf = match result { 181 | Ok(conf) => conf, 182 | Err(err) => { 183 | crit!("configuration is not healthy"; "error" => err.to_string()); 184 | thread::sleep(Duration::from_millis(100)); // Sleep the time to display the message 185 | abort(); 186 | } 187 | }; 188 | 189 | tx.store(true, Ordering::SeqCst); 190 | if let Err(err) = cmd::main(conf, tx, cmd_ready) { 191 | crit!("{}", err); 192 | thread::sleep(Duration::from_millis(100)); // Sleep the time to display the message 193 | abort(); 194 | } 195 | }); 196 | 197 | // waiting for cmd::main to be in a started state 198 | while !cmd_main_is_ready.load(Ordering::SeqCst) { 199 | thread::sleep(THREAD_SLEEP); 200 | } 201 | BEAMIUM_RELOAD_COUNT.inc(); 202 | } 203 | 204 | thread::sleep(THREAD_SLEEP); 205 | } 206 | 207 | info!("Beamium halted!"); 208 | Ok(()) 209 | } 210 | -------------------------------------------------------------------------------- /src/cmd.rs: -------------------------------------------------------------------------------- 1 | //! # Command module 2 | //! 3 | //! 
The command provide useful stuffs to handle the command line interface 4 | use std::fs::create_dir_all; 5 | use std::path::PathBuf; 6 | use std::sync::atomic::{AtomicBool, Ordering}; 7 | use std::sync::Arc; 8 | use std::thread; 9 | 10 | use failure::{format_err, Error, ResultExt}; 11 | use prometheus::{gather, Encoder, TextEncoder}; 12 | use structopt::StructOpt; 13 | use tokio::prelude::*; 14 | use tokio::runtime::Builder; 15 | use warp::{path, serve, Filter}; 16 | 17 | use crate::conf::Conf; 18 | use crate::constants::{KEEP_ALIVE_TOKIO_RUNTIME, MAX_HANDLERS_PER_REACTOR, THREAD_SLEEP}; 19 | use crate::lib::{Named, Runner}; 20 | use crate::router::Router; 21 | use crate::scraper::Scraper; 22 | use crate::sink::Sink; 23 | use crate::version::{BUILD_DATE, GITHASH, PROFILE}; 24 | 25 | #[derive(StructOpt, Clone, Debug)] 26 | pub(crate) struct Opts { 27 | /// Prints version information 28 | #[structopt(short = "V", long = "version")] 29 | pub version: bool, 30 | 31 | /// Increase verbosity level (console only) 32 | #[structopt(short = "v", parse(from_occurrences))] 33 | pub verbose: usize, 34 | 35 | /// Sets a custom config file 36 | #[structopt(short = "c", long = "config", parse(from_os_str))] 37 | pub config: Option, 38 | 39 | /// Test configuration 40 | #[structopt(short = "t", long = "check")] 41 | pub check: bool, 42 | } 43 | 44 | pub(crate) fn version() -> Result<(), Error> { 45 | let mut version = String::new(); 46 | 47 | version += &format!( 48 | "{} version {} {}\n", 49 | env!("CARGO_PKG_NAME"), 50 | env!("CARGO_PKG_VERSION"), 51 | GITHASH 52 | ); 53 | 54 | version += &format!("{} build date {}\n", env!("CARGO_PKG_NAME"), BUILD_DATE); 55 | version += &format!("{} profile {}\n", env!("CARGO_PKG_NAME"), PROFILE); 56 | 57 | print!("{}", version); 58 | Ok(()) 59 | } 60 | 61 | pub(crate) fn main( 62 | conf: Conf, 63 | sigint: Arc, 64 | is_started_notifier: Arc, 65 | ) -> Result<(), Error> { 66 | // 
------------------------------------------------------------------------- 67 | // Ensure that directories are presents 68 | if let Err(err) = create_dir_all(conf.parameters.source_dir.to_owned()) { 69 | crit!("could not create source directory"; "error" => err.to_string()); 70 | return Err(format_err!("{}", err)); 71 | } 72 | 73 | if let Err(err) = create_dir_all(conf.parameters.sink_dir.to_owned()) { 74 | crit!("could not create sink directory"; "error" => err.to_string()); 75 | return Err(format_err!("{}", err)); 76 | } 77 | 78 | // ------------------------------------------------------------------------- 79 | // Create metrics http server 80 | let mut metrics_rt = None; 81 | 82 | if let Some(addr) = conf.parameters.metrics { 83 | let mut rt = Builder::new() 84 | .keep_alive(Some(KEEP_ALIVE_TOKIO_RUNTIME)) 85 | .core_threads(1) 86 | .blocking_threads(1) 87 | .name_prefix("metrics-") 88 | .build() 89 | .with_context(|err| format_err!("could not start metrics runtime, {}", err))?; 90 | 91 | let router = path!("metrics").map(|| { 92 | let encoder = TextEncoder::new(); 93 | let metric_families = gather(); 94 | 95 | let mut buffer = vec![]; 96 | if let Err(err) = encoder.encode(&metric_families, &mut buffer) { 97 | error!("could not encode prometheus metrics"; "error" => err.to_string()); 98 | } 99 | 100 | buffer 101 | }); 102 | 103 | info!("start metrics http server"; "uri" => format!("http://{}/metrics", addr)); 104 | rt.spawn(serve(router).bind(addr)); 105 | metrics_rt = Some(rt); 106 | } 107 | 108 | // Create scrapers and associated runtimes 109 | let mut scrapers = vec![]; 110 | for scraper in conf.scrapers.to_owned() { 111 | debug!("create scraper and associated runtime"; "scraper" => scraper.name.as_str()); 112 | let result = Builder::new() 113 | .keep_alive(Some(KEEP_ALIVE_TOKIO_RUNTIME)) 114 | .core_threads(scraper.pool + 1) 115 | .blocking_threads(conf.parameters.filesystem_threads) 116 | .name_prefix(format!("{}-", scraper.name.as_str())) 117 | .build(); 
118 | 119 | let scraper = Scraper::from((scraper.to_owned(), conf.parameters.to_owned())); 120 | let mut rt = match result { 121 | Ok(rt) => rt, 122 | Err(err) => { 123 | return Err(format_err!( 124 | "could not build the runtime for scraper '{}', {}", 125 | scraper.name(), 126 | err 127 | )); 128 | } 129 | }; 130 | 131 | if let Err(err) = scraper.start(&mut rt) { 132 | return Err(format_err!( 133 | "could not start scraper '{}', {}", 134 | scraper.name(), 135 | err 136 | )); 137 | } 138 | 139 | scrapers.push((scraper, rt)); 140 | } 141 | 142 | // Create router and associated runtime 143 | let result = Builder::new() 144 | .keep_alive(Some(KEEP_ALIVE_TOKIO_RUNTIME)) 145 | .core_threads(conf.parameters.router_parallel) 146 | .blocking_threads(conf.parameters.filesystem_threads) 147 | .name_prefix("router-") 148 | .build(); 149 | 150 | let mut rt = match result { 151 | Ok(rt) => rt, 152 | Err(err) => { 153 | return Err(format_err!( 154 | "could not build the runtime for router, {}", 155 | err 156 | )); 157 | } 158 | }; 159 | 160 | let router = Router::from(( 161 | conf.parameters.to_owned(), 162 | conf.labels, 163 | conf.sinks.to_owned(), 164 | )); 165 | 166 | if let Err(err) = router.start(&mut rt) { 167 | return Err(format_err!("could not start the router, {}", err)); 168 | } 169 | 170 | let router = (router, rt); 171 | 172 | // Create sinks and associated runtimes 173 | let mut sinks = vec![]; 174 | for sink in conf.sinks { 175 | debug!("create sink and associated runtime"; "sink" => sink.name.to_owned()); 176 | let result = Builder::new() 177 | .keep_alive(Some(KEEP_ALIVE_TOKIO_RUNTIME)) 178 | .core_threads( 179 | (sink.parallel as f64 / MAX_HANDLERS_PER_REACTOR as f64).ceil() as usize + 1, 180 | ) 181 | .blocking_threads(conf.parameters.filesystem_threads) 182 | .name_prefix(format!("{}-", sink.name)) 183 | .build(); 184 | 185 | let sink = Sink::from((sink.to_owned(), conf.parameters.to_owned())); 186 | let mut rt = match result { 187 | Ok(rt) => rt, 188 | 
Err(err) => { 189 | return Err(format_err!( 190 | "could not build the runtime for sink '{}', {}", 191 | sink.name(), 192 | err 193 | )); 194 | } 195 | }; 196 | 197 | if let Err(err) = sink.start(&mut rt) { 198 | return Err(format_err!( 199 | "could not start sink '{}', {}", 200 | sink.name(), 201 | err 202 | )); 203 | } 204 | 205 | sinks.push((sink, rt)); 206 | } 207 | 208 | debug!("cmd::main is started"); 209 | is_started_notifier.store(true, Ordering::SeqCst); 210 | 211 | // Wait for termination signals 212 | while sigint.load(Ordering::SeqCst) { 213 | thread::sleep(THREAD_SLEEP); 214 | } 215 | 216 | // Shutdown runtime for each scrapers 217 | for (scraper, rt) in scrapers { 218 | debug!("shutdown scraper's runtime"; "scraper" => scraper.name()); 219 | if rt.shutdown_now().wait().is_err() { 220 | error!("could not shutdown the runtime"; "scraper" => scraper.name()); 221 | } 222 | } 223 | 224 | // Shutdown runtime for each sinks 225 | for (sink, rt) in sinks { 226 | debug!("shutdown sink's runtime"; "sink" => sink.name()); 227 | if rt.shutdown_now().wait().is_err() { 228 | error!("could not shutdown the runtime"; "sink" => sink.name()); 229 | } 230 | } 231 | 232 | // Shutdown router runtime 233 | debug!("shutdown router's runtime"); 234 | if router.1.shutdown_now().wait().is_err() { 235 | error!("could not shutdown the router's runtime"); 236 | } 237 | 238 | // Shutdown the metrics server 239 | if let Some(rt) = metrics_rt { 240 | debug!("shutdown metrics server's runtime"); 241 | if rt.shutdown_now().wait().is_err() { 242 | error!("could not shutdown the metrics server"); 243 | } 244 | } 245 | 246 | debug!("cmd::main is stopped"); 247 | is_started_notifier.store(false, Ordering::SeqCst); 248 | Ok(()) 249 | } 250 | -------------------------------------------------------------------------------- /src/sink.rs: -------------------------------------------------------------------------------- 1 | use std::collections::{HashMap, HashSet, VecDeque}; 2 | use 
std::convert::From;
use std::fs::Metadata;
use std::path::PathBuf;
use std::process::abort;
use std::sync::{Arc, Mutex};
use std::thread::sleep;
use std::time::{Duration, SystemTime};

use failure::{format_err, Error};
use futures::future::{ExecuteErrorKind, Executor};
use futures::Stream;
use prometheus::CounterVec;
use tokio::fs::remove_file;
use tokio::prelude::*;
use tokio::runtime::Runtime;

use crate::conf;
use crate::lib::asynch::fs::Scanner;
use crate::lib::asynch::http::Sender;
use crate::lib::{Named, Runner};

lazy_static! {
    // Counts files dropped because they outlived the sink's `ttl`.
    static ref BEAMIUM_SKIP_TTL: CounterVec = register_counter_vec!(
        opts!(
            "beamium_skip_ttl",
            "Number of files skipped due to a too old ttl"
        ),
        &["sink"]
    )
    .expect("create metric: 'beamium_skip_ttl'");
    // Counts files dropped because the sink directory exceeded its size budget.
    static ref BEAMIUM_SKIP_MAX_SIZE: CounterVec = register_counter_vec!(
        opts!(
            "beamium_skip_max_size",
            "Number of files skipped due to a max size in sink"
        ),
        &["sink"]
    )
    .expect("create metric: 'beamium_skip_max_size'");
}

/// A sink forwards `.metrics` files from the sink directory to its endpoint.
///
/// `queue` is the shared work queue: the directory scanner (spawned in
/// `start`) pushes new file paths, and the `Sender` tasks consume them.
// NOTE(review): the generic parameters below were reconstructed from the
// `From` impl and from usage (`queue.push_front(PathBuf)`) — the dump
// stripped them; confirm against the upstream source.
#[derive(Debug, Clone)]
pub struct Sink {
    conf: Arc<conf::Sink>,
    params: Arc<conf::Parameters>,
    queue: Arc<Mutex<VecDeque<PathBuf>>>,
}

impl From<(conf::Sink, conf::Parameters)> for Sink {
    /// Build a sink from its configuration and the global parameters.
    fn from(tuple: (conf::Sink, conf::Parameters)) -> Self {
        let (conf, params) = tuple;

        Self {
            conf: arc!(conf),
            params: arc!(params),
            queue: mutex!(VecDeque::new()),
        }
    }
}

impl Named for Sink {
    fn name(&self) -> String {
        self.conf.name.to_owned()
    }
}

impl Runner for Sink {
    type Error = Error;

    /// Spawn the sink's tasks on the given runtime:
    /// - `conf.parallel` `Sender` tasks draining the shared queue, and
    /// - one `Scanner` task feeding the queue from the sink directory,
    ///   evicting files that are too old (`ttl`) or exceed the size budget.
    ///
    /// Any unrecoverable task error logs then calls `abort()` — the whole
    /// process dies rather than running with a dead sink.
    fn start(&self, rt: &mut Runtime) -> Result<(), Self::Error> {
        let name = self.name();
        let dir = self.params.sink_dir.to_owned();

        // One sender task per configured level of parallelism.
        for _ in 0..self.conf.parallel.to_owned() {
            let name = self.name();
            let task = Sender::from((
                self.queue.to_owned(),
                self.conf.to_owned(),
                self.params.to_owned(),
            ))
            .for_each(move |_| future::ok(()))
            .map_err(move |err| {
                crit!("could not send data"; "sink" => name.as_str(), "error" => err.to_string());
                sleep(Duration::from_millis(100)); // Sleep the time to display the message
                abort();
            });

            rt.spawn(task);
        }

        let conf = self.conf.to_owned();
        let mutex = self.queue.to_owned();
        let executor = rt.executor();

        // The fold accumulator is the set of paths already known from the
        // previous scan; new paths are queued, vanished ones forgotten.
        let scanner = Scanner::from((PathBuf::from(dir.to_owned()), self.params.scan_period.to_owned()))
            .fold(HashSet::new(), move |acc, entries| {
                // Owned variables
                let conf = conf.to_owned();
                let mutex = mutex.to_owned();

                // Retrieve this sink's files which have expired (older than ttl).
                // Only files whose name starts with the sink's name belong to it.
                // NOTE(review): files whose age is exactly == ttl fall in neither
                // set this scan; they are picked up as expired on the next scan.
                let expired: HashMap<PathBuf, Metadata> = entries.iter()
                    .filter_map(|(path, meta)| {
                        let file_name = path.file_name()?.to_str()?;
                        if !file_name.starts_with(conf.name.as_str()) {
                            return None;
                        }

                        // Clock errors degrade to "just modified" (age 0),
                        // i.e. the file is kept rather than deleted.
                        let modified = meta.modified().unwrap_or_else(|_| SystemTime::now());
                        let age = modified.elapsed().unwrap_or_else(|_| Duration::new(0, 0));

                        if age > conf.ttl {
                            return Some((path.to_owned(), meta.to_owned()));
                        }

                        None
                    })
                    .collect();

                // Delete expired files asynchronously on the runtime's executor.
                for (path, _) in expired {
                    let path = path.to_owned();
                    let name = conf.name.to_owned();

                    warn!("skip file"; "sink" => name.as_str(), "path" => path.to_str(), "reason" => "file is too old");
                    BEAMIUM_SKIP_TTL
                        .with_label_values(&[name.as_str()])
                        .inc();

                    let result = executor.execute(
                        remove_file(path.to_owned())
                            .and_then(|_| future::ok(()))
                            .map_err(move |err| { error!("could not remove file"; "error" => err.to_string(), "sink" => name.as_str(), "path" => path.to_str()); })
                    );

                    if let Err(err) = result {
                        match err.kind() {
                            // A shutting-down runtime is expected during exit;
                            // log and carry on instead of failing the stream.
                            ExecuteErrorKind::Shutdown => {
                                warn!("could not execute the future, runtime is closed");
                            },
                            _ => {
                                return future::err(format_err!("could not execute future, got runtime error"));
                            }
                        }
                    }
                }

                // Retrieve this sink's files that are not expired.
                let entries: HashMap<PathBuf, Metadata> = entries.iter()
                    .filter_map(|(path, meta)| {
                        let file_name = path.file_name()?.to_str()?;
                        if !file_name.starts_with(conf.name.as_str()) {
                            return None;
                        }

                        let modified = meta.modified().unwrap_or_else(|_| SystemTime::now());
                        let age = modified.elapsed().unwrap_or_else(|_| Duration::new(0, 0));

                        if age < conf.ttl {
                            return Some((path.to_owned(), meta.to_owned()));
                        }

                        None
                    })
                    .collect();

                let paths = entries.iter().fold(HashSet::new(), |mut acc, (path, _)| {
                    acc.insert(path.to_owned());
                    acc
                });

                // Total on-disk size of the sink's live files, and the paths
                // that were not present at the previous scan.
                let mut current_size = entries.iter().fold(0, |acc, (_, meta)| acc + meta.len());
                let new: Vec<PathBuf> = paths.difference(&acc).cloned().collect();

                if !new.is_empty() {
                    info!("found files"; "sink" => conf.name.as_str(), "number" => new.len());
                }

                {
                    let mut queue = try_future!(mutex.lock());
                    // Newest files go to the front; the size-eviction below
                    // pops from the back, so the oldest queued files die first.
                    for path in new {
                        queue.push_front(path);
                    }

                    // Evict from the back of the queue until the sink fits
                    // within its configured size budget.
                    while current_size > conf.size {
                        let path = match queue.pop_back() {
                            Some(path) => path,
                            None => break,
                        };

                        let meta = match entries.get(&path) {
                            Some(meta) => meta,
                            None => continue,
                        };

                        let name = conf.name.to_owned();

                        warn!("skip file"; "sink" => name.as_str(), "path" => path.to_str(), "reason" => "sink is too large");
                        BEAMIUM_SKIP_MAX_SIZE
                            .with_label_values(&[name.as_str()])
                            .inc();

                        let result = executor.execute(
                            remove_file(path.to_owned())
                                .and_then(|_| future::ok(()))
                                .map_err(move |err| { error!("could not remove file"; "error" => err.to_string(), "sink" => name.as_str(), "path" => path.to_str()); })
                        );

                        if let Err(err) = result {
                            match err.kind() {
                                ExecuteErrorKind::Shutdown => {
                                    warn!("could not execute the future, runtime is closed");
                                },
                                _ => {
                                    return future::err(format_err!("could not execute future, got runtime error"));
                                }
                            }
                        }

                        current_size -= meta.len();
                    }
                }

                // The surviving paths become the accumulator for the next scan.
                future::ok(paths)
            })
            .and_then(|_| future::ok(()))
            .map_err(move |err| {
                crit!("could not scan sink directory"; "sink" => name.as_str(), "dir" => dir.as_str(), "error" => err.to_string());
                sleep(Duration::from_millis(100)); // Sleep the time to display the message
                abort();
            });

        rt.spawn(scanner);

        Ok(())
    }
}
//! # Router module.
//!
//! The Router module forwards sources to sinks.
use std::collections::{HashMap, HashSet};
use std::path::PathBuf;
use std::process::abort;
use std::sync::Arc;
use std::thread::sleep;
use std::time::Duration;
use uuid::Uuid;

use failure::{format_err, Error};
use futures::future;
use futures::future::{ExecuteErrorKind, Executor};
use tokio::fs::remove_file;
use tokio::fs::{rename, File};
use tokio::prelude::*;
use tokio::runtime::Runtime;

use crate::conf;
use crate::lib::asynch::fs::Scanner;
use crate::lib::{add_labels, Runner};

/// Routes time-series files from the source directory to per-sink files in
/// the sink directory, applying the global static labels on the way.
// NOTE(review): the generic parameters below were reconstructed from the
// `From` impl — the dump stripped them; confirm against the upstream source.
#[derive(Clone, Debug)]
pub struct Router {
    params: Arc<conf::Parameters>,
    labels: Arc<HashMap<String, String>>,
    sinks: Arc<Vec<conf::Sink>>,
}

impl From<(conf::Parameters, HashMap<String, String>, Vec<conf::Sink>)> for Router {
    /// Build a router from the global parameters, static labels and sinks.
    fn from(tuple: (conf::Parameters, HashMap<String, String>, Vec<conf::Sink>)) -> Self {
        let (params, labels, sinks) = tuple;

        Self {
            params: arc!(params),
            labels: arc!(labels),
            sinks: arc!(sinks),
        }
    }
}

impl Runner for Router {
    type Error = Error;

    /// Spawn the routing loop: scan the source directory every `scan_period`,
    /// and for each newly seen file run load -> process -> write -> remove on
    /// the runtime's executor. An unrecoverable scanner error aborts the
    /// whole process.
    fn start(&self, rt: &mut Runtime) -> Result<(), Self::Error> {
        // Owned variables by creating a new reference using Arc.
        let labels = self.labels.to_owned();
        let sinks = self.sinks.to_owned();
        let params = self.params.to_owned();

        let dir = PathBuf::from(self.params.source_dir.to_owned());
        let executor = rt.executor();

        // The fold accumulator is a shared set of in-flight/known paths; it is
        // a Mutex because the per-file error handler also mutates it.
        let scanner = Scanner::from((dir, self.params.scan_period.to_owned()))
            .fold(mutex!(HashSet::new()), move |acc, entries| {
                let paths: HashSet<PathBuf> =
                    entries.iter().fold(HashSet::new(), |mut acc, (path, _)| {
                        acc.insert(path.to_owned());
                        acc
                    });

                // Diff against the previous scan: remember new files, forget
                // the ones that disappeared.
                let new = {
                    let mut state = try_future!(acc.lock().map_err(|err| format_err!("could not get lock in router, {}", err)));
                    let new: Vec<PathBuf> = paths.difference(&state).cloned().collect();
                    let delete: Vec<PathBuf> = state.difference(&paths).cloned().collect();

                    for path in &new {
                        state.insert(path.to_owned());
                    }

                    for path in delete {
                        state.remove(&path);
                    }

                    new
                };

                for path in new {
                    let labels = labels.to_owned();
                    let sinks = sinks.to_owned();
                    let params = params.to_owned();
                    let epath = path.to_owned();
                    let state = acc.to_owned();

                    // Full per-file pipeline; on failure the path is removed
                    // from the known set so the next scan retries it.
                    let result = executor.execute(
                        Self::load(path.to_owned())
                            .and_then(move |lines| Self::process(&lines, &labels))
                            .and_then(move |lines| Self::write(&lines, &params, &sinks))
                            .and_then(move |_| Self::remove(path))
                            .map_err(move |err| {
                                error!("could not process file in router"; "path" => epath.to_str(), "error" => err.to_string());
                                let mut state = match state.lock() {
                                    Ok(state) => state,
                                    Err(err) => {
                                        // A poisoned lock is unrecoverable: die loudly.
                                        crit!("could not lock state in router for recovery"; "path" => epath.to_str(), "error" => err.to_string());
                                        sleep(Duration::from_millis(100)); // Sleep the time to display the message
                                        abort();
                                    }
                                };

                                state.remove(&epath);
                            }),
                    );

                    if let Err(err) = result {
                        match err.kind() {
                            // Expected while shutting down; skip instead of failing.
                            ExecuteErrorKind::Shutdown => {
                                warn!("could not execute the future, runtime is closed");
                            },
                            _ => {
                                return future::err(format_err!("could not execute future, got runtime error"));
                            }
                        }
                    }
                }

                future::ok(acc)
            })
            .and_then(|_| future::ok(()))
            .map_err(|err| {
                crit!("could not scan source directory"; "error" => err.to_string());
                sleep(Duration::from_millis(100)); // Sleep the time to display the message
                abort();
            });

        // Spawn the ticker on router's runtime
        rt.spawn(scanner);

        Ok(())
    }
}

impl Router {
    /// Read the whole file and split it into lines.
    fn load(path: PathBuf) -> impl Future<Item = Vec<String>, Error = Error> {
        trace!("open file"; "path" => path.to_str());
        File::open(path)
            .map_err(|err| format_err!("could not open file, {}", err))
            .and_then(|mut file| {
                let mut buf = String::new();
                try_future!(file
                    .read_to_string(&mut buf)
                    .map_err(|err| format_err!("could not read file, {}", err)));
                future::ok(buf.split('\n').map(String::from).collect())
            })
    }

    /// Append the global static labels (rendered once as "k=v,k=v") to every
    /// non-empty line.
    fn process(
        lines: &[String],
        labels: &Arc<HashMap<String, String>>,
    ) -> impl Future<Item = Vec<String>, Error = Error> {
        let labels: Vec<String> = labels
            .to_owned()
            .iter()
            .map(|(k, v)| format!("{}={}", k, v))
            .collect();

        let labels = labels.join(",");
        let mut body = vec![];
        for line in lines {
            if !line.is_empty() {
                body.push(try_future!(add_labels(&line, &labels).map_err(
                    |err| format_err!("could not add labels to time series, {}", err)
                )))
            }
        }

        future::ok(body)
    }

    /// Fan the lines out to one file per matching sink: lines are filtered by
    /// the sink's optional `selector` regex (matched against the class, i.e.
    /// the second whitespace-separated token), written to a unique `.tmp`
    /// file, then renamed to `.metrics` so sinks never see partial files.
    fn write(
        lines: &[String],
        params: &conf::Parameters,
        sinks: &[conf::Sink],
    ) -> impl Future<Item = (), Error = Error> {
        let mut bulk = vec![];

        // `idx` is the sink's position; it is part of the file name so two
        // sinks never collide.
        let mut idx = -1;
        for sink in sinks {
            idx += 1;
            let body = match &sink.selector {
                None => lines.to_owned(),
                Some(selector) => {
                    let mut body = vec![];
                    for line in lines.to_owned() {
                        if line
                            .split_whitespace()
                            .nth(1)
                            .map_or(false, |class| selector.is_match(class))
                        {
                            body.push(line);
                        }
                    }

                    body
                }
            };

            if body.is_empty() {
                continue;
            }

            // Unique run id: timestamp + uuid, collision-free across restarts.
            let file_uuid = Uuid::new_v4();
            let start = time::now_utc().to_timespec();
            let run_id = format!("{}#{}#{}", start.sec, start.nsec, file_uuid);
            let name = sink.name.to_owned();
            let dir = PathBuf::from(params.sink_dir.to_owned());
            let temp_file = dir.join(format!("{}-{}-{}.tmp", sink.name, idx, run_id.to_owned()));

            trace!("create tmp sink file"; "path" => temp_file.to_str());
            bulk.push(
                File::create(temp_file.to_owned())
                    .map_err(|err| format_err!("could not create file, {}", err))
                    .and_then(move |mut file| {
                        // NOTE(review): a single poll_write may not flush the
                        // whole buffer for very large bodies — presumably the
                        // batches fit in one write; confirm.
                        file.poll_write((body.join("\n") + "\n").as_bytes())
                            .and_then(|_| file.poll_flush())
                            .map_err(|err| format_err!("could not write into file, {}", err))
                    })
                    .and_then(move |_| {
                        let new = dir.join(format!("{}-{}-{}.metrics", name, idx, run_id));

                        debug!("rotate file"; "old" => temp_file.to_str(), "new" => new.to_str());
                        rename(temp_file, new)
                            .map_err(|err| format_err!("could not rename file, {}", err))
                    })
                    .and_then(|_| Ok(())),
            )
        }

        future::join_all(bulk).and_then(|_| future::ok(()))
    }

    /// Delete a fully-routed source file.
    fn remove(path: PathBuf) -> impl Future<Item = (), Error = Error> {
        trace!("remove file"; "path" => path.to_str());
        remove_file(path)
            .map_err(|err| format_err!("could not remove file, {}", err))
            .and_then(|_| future::ok(()))
    }
}
# Beamium - metrics scraper for Warp10 & Prometheus

[![GitHub
release](https://img.shields.io/github/release/ovh/beamium.svg)]()
[![Build Status](https://travis-ci.org/ovh/beamium.svg?branch=master)](https://travis-ci.org/ovh/beamium)

Beamium collects metrics from HTTP endpoints like http://127.0.0.1/metrics and supports the Prometheus and Warp10/Sensision formats. Once scraped, Beamium can filter and forward data to a Warp10 Time Series platform. While acquiring metrics, Beamium uses DFO (Disk Fail Over) to prevent metric loss due to transient network issues or an unavailable service.

Beamium is written in Rust to ensure efficiency, a very low footprint and deterministic performance.

Beamium key points:
- **Simple**: Beamium is a single binary that does one thing: scraping then pushing.
- **Integration**: Beamium fetches Prometheus metrics and so benefits from a large community.
- **Reliable**: Beamium handles network failures. Never lose data. We guarantee void-proof graphs ;)
- **Versatile**: Beamium can also scrape metrics from a directory.
- **Powerful**: Beamium is able to filter metrics and send them to multiple Warp10 platforms.

## How it works?

Scrapers (optional) will collect metrics from defined endpoints. They will store them into the source_dir.
Beamium will read files inside source_dir, and will fan them out according to the provided selector into sink_dir.
Finally Beamium will push files from the sink_dir to the defined sinks.

The pipeline can be described this way:

    HTTP /metrics endpoint --scrape--> source_dir --route--> sink_dir --forward--> warp10

It also means that, given your needs, you could produce metrics directly into the source/sink directory, for example:

    $ TS=`date +%s` && echo $TS"000000// metrics{} T" >> /opt/beamium/data/sources/prefix-$TS.metrics

## Status

Beamium is no longer under development.
We are moving toward [prometheus in agent mode](https://github.com/prometheus/prometheus)

## Install

### Debian

We provide deb packages for Beamium!
```
sudo apt-get install apt-transport-https
sudo lsb_release -a | grep Codename | awk '{print "deb https://last-public-ovh-metrics.snap.mirrors.ovh.net/debian/ " $2 " main"}' >> /etc/apt/sources.list.d/beamium.list
curl https://last-public-ovh-metrics.snap.mirrors.ovh.net/pub.key | sudo apt-key add -
sudo apt-get update
sudo apt-get install beamium
```

### Kubernetes

We are providing an [example yaml](deploy/kubernetes/beamium.yaml) file to deploy Beamium within Kubernetes.

```bash
kubectl apply -f deploy/kubernetes
```

## Building
Beamium is pretty easy to build.
- Clone the repository
- Setup a minimal working config (see below)
- Install rust compile tools with `curl https://sh.rustup.rs -sSf | sh`
- Then `source ~/.cargo/env`
- Build with `cargo build`
- Finally, run `cargo run`

If you already have rust:
- `cargo install --git https://github.com/ovh/beamium`

## Configuration
Beamium comes with a [sample config file](config.sample.yaml). Simply copy the sample to *config.yaml*, replace `WARP10_ENDPOINT` and `WARP10_TOKEN`, launch Beamium and you are ready to go!

Since the release 2.x, Beamium will look for configuration in the following directories/files:
- /etc/beamium.d/
- /etc/beamium/config.yaml
- $HOME/beamium.d/
- $HOME/beamium/config.yaml

In addition, it will recursively discover configuration files in `beamium.d` directories. Then it will merge all discovered configuration files.

Furthermore, Beamium supports multiple formats for configuration files, which are `hjson`, `json`, `toml`, `yaml`, `yml` or `ini`.
80 | 81 | 82 | Also, Beamium can be started with several labels put as env vars, they must be prefixed by `BEAMIUM_LABEL`. 83 | 84 | Ex: 85 | ```sh 86 | BEAMIUM_LABEL_HOST=myhost ./beamium -v 87 | ``` 88 | 89 | This is also available per scraper 90 | 91 | Ex: 92 | ```sh 93 | BEAMIUM_SCRAPPER1_LABEL_HOST=myhost ./beamium -v 94 | ``` 95 | 96 | ### Hot reload 97 | 98 | Beamium now supports hot reloading of his configuration. There is no specific thing to do to enable this feature. Actually, this support all features excepted those in relation with the logger. 99 | 100 | Besides, beamium debounced file-system event in an interval of two seconds. So, it may appears that the reload of beamium is not released at the same time of the configuration. 101 | 102 | ### Definitions 103 | Config is composed of four parts: 104 | 105 | #### Scrapers 106 | Beamium can have none to many Prometheus or Warp10/Sensision endpoints. A *scraper* is defined as follow: 107 | ``` yaml 108 | scrapers: # Scrapers definitions (Optional) 109 | scraper1: # Source name (Required) 110 | url: http://127.0.0.1:9100/metrics # Prometheus endpoint (Required) 111 | period: 60s # Polling interval (Required) 112 | format: prometheus # Polling format (Optional, default: prometheus, value: [prometheus, sensision]) 113 | labels: # Labels definitions (Optional) 114 | label_name: label_value # Label definition (Required) 115 | another: env:USER # label values can be resolved from env vars 116 | filtered_labels: # filtered labels (optional) 117 | - jobid # key label which is removed (required) 118 | metrics: # filter fetched metrics (optional) 119 | - node.* # regex used to select metrics (required) 120 | headers: # Add custom header on request (Optional) 121 | X-Toto: tata # list of headers to add (Optional) 122 | Authorization: Basic XXXXXXXX 123 | pool: 1 # Number of threads allocated for the scraper (Optionnal) 124 | ``` 125 | 126 | #### Sinks 127 | Beamium can have none to many Warp10 endpoints. 
A *sink* is defined as follow: 128 | ``` yaml 129 | sinks: # Sinks definitions (Optional) 130 | source1: # Sink name (Required) 131 | url: https://warp.io/api/v0/update # Warp10 endpoint (Required) 132 | token: mywarp10token # Warp10 write token (Required) 133 | token-header: X-Custom-Token # Warp10 token header name (Optional, default: X-Warp10-Token) 134 | selector: metrics.* # Regex used to filter metrics (Optional, default: None) 135 | ttl: 1h # Discard file older than ttl (Optional, default: 3600) 136 | size: 100Gb # Discard old file if sink size is greater (Optional, default: 1073741824) 137 | parallel: 1 # Send parallelism (Optional, default: 1) 138 | keep-alive: 1 # Use keep alive (Optional, default: 1) 139 | ``` 140 | 141 | #### Labels 142 | Beamium can add static labels to collected metrics. A *label* is defined as follow: 143 | ``` yaml 144 | labels: # Labels definitions (Optional) 145 | label_name: label_value # Label definition (Required) 146 | another: env:USER # label values can be resolved from env vars 147 | ``` 148 | 149 | #### Parameters 150 | Beamium can be customized through parameters. 
See available parameters bellow: 151 | ``` yaml 152 | parameters: # Parameters definitions (Optional) 153 | source-dir: sources # Beamer data source directory (Optional, default: sources) 154 | sink-dir: sinks # Beamer data sink directory (Optional, default: sinks) 155 | scan-period: 1s # Delay(ms) between source/sink scan (Optional, default: 1000) 156 | batch-count: 250 # Maximum number of files to process in a batch (Optional, default: 250) 157 | batch-size: 2Kb # Maximum batch size (Optional, default: 200000) 158 | log-file: beamium.log # Log file (Optional, default: beamium.log) 159 | log-level: 4 # Log level (Optional, default: info) 160 | timeout: 500 # Http timeout (Optional, default: 500) 161 | router-parallel: 1 # Routing threads (Optional, default: 1) 162 | metrics: 127.0.0.1:9110 # Open a server on the given address and expose a prometheus /metrics endpoint (Optional, default: none) 163 | filesystem-threads: 100 # Set the maximum number of threads use for blocking treatment per scraper, sink and router (Optional, default: 100) 164 | backoff: # Backoff configuration - slow down push on errors (Optional) 165 | initial: 500ms # Initial interval (Optional, default: 500ms) 166 | max: 1m # Max interval (Optional, default: 1m) 167 | multiplier: 1.5 # Interval multiplier (Optional, default: 1.5) 168 | randomization: 0.3 # Randomization factor - delay = interval * 0.3 (Optional, default: 0.3) 169 | ``` 170 | 171 | #### Test 172 | In order to know if the configuration is healthy, you can use the following command: 173 | ```bash 174 | $ beamium -t [--config ] 175 | ``` 176 | 177 | This will output if the configuration is healthy. 178 | 179 | To print the configuration you can use the `-v` flag. 180 | 181 | ```bash 182 | $ beamium -t -vvvvv [--config ] 183 | ``` 184 | 185 | This will output if the configuration is healthy and the configuration loaded. 
186 | 187 | ## Metrics 188 | Beamium can expose metrics about his usage: 189 | 190 | | name | labels | type | description | 191 | | ------------------------ | ------------ | ------- | -------------------------------- | 192 | | beamium_directory_files | directory | gauge | Number of files in the directory | 193 | | beamium_fetch_datapoints | scraper | counter | Number of datapoints fetched | 194 | | beamium_fetch_errors | scraper | counter | Number of fetch errors | 195 | | beamium_push_datapoints | sink | counter | Number of datapoints pushed | 196 | | beamium_push_http_status | sink, status | counter | Push response http status code | 197 | | beamium_push_errors | sink | counter | Number of push error | 198 | | beamium_reload_count | | counter | Number of global reloads | 199 | 200 | ## Contributing 201 | Instructions on how to contribute to Beamium are available on the [Contributing][Contributing] page. 202 | 203 | ## Get in touch 204 | 205 | - Twitter: [@notd33d33](https://twitter.com/notd33d33) 206 | 207 | [Contributing]: CONTRIBUTING.md 208 | -------------------------------------------------------------------------------- /src/scraper.rs: -------------------------------------------------------------------------------- 1 | //! # Scraper module. 2 | //! 3 | //! The Scraper module fetch metrics from an HTTP endpoint. 
4 | use std::convert::From; 5 | use std::path::Path; 6 | use std::process::abort; 7 | use std::sync::Arc; 8 | use std::thread::sleep; 9 | use std::time::{Duration, Instant}; 10 | 11 | use failure::{format_err, Error}; 12 | use futures::future::{ExecuteErrorKind, Executor}; 13 | use futures::{Future, Stream}; 14 | use hyper::client::connect::dns::GaiResolver; 15 | use hyper::client::HttpConnector; 16 | use hyper::{Body, Client, Method, Request}; 17 | use hyper_rustls::HttpsConnector; 18 | use prometheus::CounterVec; 19 | use time::now_utc; 20 | use tokio::fs::{rename, File}; 21 | use tokio::io::AsyncWrite; 22 | use tokio::prelude::*; 23 | use tokio::runtime::Runtime; 24 | use tokio::timer::Interval; 25 | 26 | use crate::conf; 27 | use crate::constants::NUMBER_DNS_WORKER_THREADS; 28 | use crate::lib::transcompiler::Transcompiler; 29 | use crate::lib::{add_labels, remove_labels}; 30 | use crate::lib::{Named, Runner}; 31 | 32 | /// Alias for the hyper's https client 33 | type HttpsClient = Client>, Body>; 34 | 35 | lazy_static! 
{ 36 | static ref BEAMIUM_FETCH_DP: CounterVec = register_counter_vec!( 37 | opts!("beamium_fetch_datapoints", "Number of datapoints fetched"), 38 | &["scraper"] 39 | ) 40 | .expect("create metric: 'beamium_fetch_datapoints'"); 41 | static ref BEAMIUM_FETCH_ERRORS: CounterVec = register_counter_vec!( 42 | opts!("beamium_fetch_errors", "Number of fetch errors"), 43 | &["scraper"] 44 | ) 45 | .expect("create metric: 'beamium_fetch_errors'"); 46 | } 47 | 48 | #[derive(Clone, Debug)] 49 | pub struct Scraper { 50 | conf: Arc, 51 | params: Arc, 52 | client: Arc, 53 | } 54 | 55 | impl From<(conf::Scraper, conf::Parameters)> for Scraper { 56 | fn from(tuple: (conf::Scraper, conf::Parameters)) -> Self { 57 | let (conf, params) = tuple; 58 | let client = Client::builder() 59 | .keep_alive(true) 60 | .keep_alive_timeout(params.timeout) 61 | .build(HttpsConnector::new(NUMBER_DNS_WORKER_THREADS)); 62 | 63 | Self { 64 | conf: arc!(conf), 65 | params: arc!(params), 66 | client: arc!(client), 67 | } 68 | } 69 | } 70 | 71 | impl Named for Scraper { 72 | fn name(&self) -> String { 73 | self.conf.name.to_owned() 74 | } 75 | } 76 | 77 | impl Runner for Scraper { 78 | type Error = Error; 79 | 80 | fn start(&self, rt: &mut Runtime) -> Result<(), Self::Error> { 81 | // Owned variables by creating a new reference using Arc. 82 | let name = self.name(); 83 | let conf = self.conf.to_owned(); 84 | let params = self.params.to_owned(); 85 | let client = self.client.to_owned(); 86 | 87 | let executor = rt.executor(); 88 | 89 | // Create a ticker for the scraper for the configured period 90 | let ticker = Interval::new(Instant::now(), conf.period.to_owned()) 91 | .map_err(|err| format_err!("{}", err)) 92 | .for_each(move |_| { 93 | // Owned variables by creating a new reference using Arc. 
94 | let name = conf.name.to_owned(); 95 | let conf = conf.to_owned(); 96 | let conf2 = conf.to_owned(); 97 | let params = params.to_owned(); 98 | let compiler = Transcompiler::new(conf.format.to_owned()); 99 | 100 | let mut request = Request::builder(); 101 | let request = request.method(Method::GET).uri(conf.url.to_owned()); 102 | 103 | for (header, value) in conf.headers.to_owned() { 104 | request.header(header.as_str(), value.as_str()); 105 | } 106 | 107 | info!("fetch success"; "uri" => conf.url.to_string(), "scraper" => name.as_str()); 108 | let request = try_future!(request.body(Body::empty())); 109 | let process = Self::fetch(&client, request, params.timeout) 110 | .and_then(move |body| Self::process(&compiler, &body, &conf)) 111 | .and_then(move |lines| { 112 | BEAMIUM_FETCH_DP.with_label_values(&[conf2.name.as_str()]).inc_by(lines.len() as f64); 113 | Self::write(lines, &conf2, ¶ms) 114 | }) 115 | .map_err(move |err| { 116 | BEAMIUM_FETCH_ERRORS.with_label_values(&[name.as_str()]).inc(); 117 | error!("fetch failed"; "error" => err.to_string(), "scraper" => name.as_str()) 118 | }); 119 | 120 | // Spawn the request on executor to send it 121 | if let Err(err) = executor.execute(process) { 122 | match err.kind() { 123 | ExecuteErrorKind::Shutdown => { 124 | warn!("could not execute the future, runtime is closed"); 125 | }, 126 | _ => { 127 | return future::err(format_err!("could not execute future, got runtime error")); 128 | } 129 | } 130 | } 131 | 132 | 133 | // return that everything is good 134 | future::ok(()) 135 | }) 136 | .map_err(move |err| { 137 | crit!("could not handle ticker"; "error" => err.to_string(), "scraper" => name); 138 | sleep(Duration::from_millis(100)); // Sleep the time to display the message 139 | abort(); 140 | }); 141 | 142 | // Spawn the ticker on the runtime 143 | rt.spawn(ticker); 144 | 145 | Ok(()) 146 | } 147 | } 148 | 149 | impl Scraper { 150 | /// Fetch the source of the scraper using the http(s) [`Client`], the given 
/// [`Request`] and the timeout [`Duration`].
///
/// Non-2xx responses are turned into errors; the body is buffered whole and
/// lossily decoded as UTF-8.
fn fetch(
    client: &HttpsClient,
    request: Request<Body>,
    timeout: Duration,
) -> impl Future<Item = String, Error = Error> {
    client
        .request(request)
        .map_err(|err| format_err!("{}", err))
        // Abort the request if it exceeds the configured timeout.
        .timeout(timeout)
        .map_err(|err| format_err!("{}", err))
        .and_then(|response| {
            let status = response.status();
            if !status.is_success() {
                return future::err(format_err!(
                    "http request failed, got: {}",
                    status.as_u16()
                ));
            }

            // Concatenate the streamed body into a single chunk.
            future::ok(
                response
                    .into_body()
                    .concat2()
                    .map_err(|err| format_err!("{}", err)),
            )
        })
        .flatten()
        // Bodies are not guaranteed to be valid UTF-8; replace invalid bytes.
        .and_then(|body| future::ok(String::from_utf8_lossy(&body).to_string()))
}

/// Process scraper's data in order to add/remove labels and format time series into sensision
/// format.
///
/// Blank lines and '#' comment lines are dropped; the optional `metrics`
/// regex whitelists lines; labels are added/removed on every line except
/// those starting with '=' (presumably sensision continuation lines — TODO
/// confirm).
fn process(
    transcompiler: &Transcompiler,
    body: &str,
    conf: &conf::Scraper,
) -> impl Future<Item = Vec<String>, Error = Error> {
    let mut lines = vec![];
    // Render the scraper's static labels once as "k=v,k=v".
    let labels: Vec<String> = conf
        .labels
        .to_owned()
        .iter()
        .map(|(k, v)| format!("{}={}", k, v))
        .collect();

    let labels = labels.join(",");
    for line in body.lines() {
        let mut line = try_future!(transcompiler.format(line));
        if line.is_empty() || line.starts_with('#') {
            continue;
        }

        if let Some(ref regex) = &conf.metrics {
            if !regex.is_match(&line) {
                continue;
            }
        }

        if !line.starts_with('=') {
            line = try_future!(add_labels(&line, &labels));
            line = try_future!(remove_labels(&line, &conf.filtered_labels));
        }

        lines.push(line);
    }

    future::ok(lines)
}

/// Write time series into the disk
///
/// Lines are split into batches of at most `params.batch_size` bytes (never
/// splitting on a '=' line). Each batch is written to a `.tmp` file and then
/// renamed to `.metrics`, so the router never observes half-written files.
fn write(
    lines: Vec<String>,
    conf: &conf::Scraper,
    params: &conf::Parameters,
) -> impl Future<Item = (), Error = Error> {
    let start = now_utc();
    // Microsecond timestamp embedded in the generated file names.
    let now =
        start.to_timespec().sec * 1_000_000 + (i64::from(start.to_timespec().nsec) / 1000);

    let dir = Path::new(&params.source_dir);

    let mut batch_size = 0;
    let mut batch_count = -1;
    let mut chunks = vec![];
    let mut chunk = vec![];
    for line in lines {
        batch_size += line.len() as u64;
        // Close the current batch once it exceeds the configured size.
        if batch_size > params.batch_size && !line.starts_with('=') {
            batch_size = 0;
            batch_count += 1;

            let file_name = format!("{}-{}-{}.tmp", conf.name, now, batch_count);
            let dir = dir.to_owned();
            let batch_count = batch_count.to_owned();
            let now = now.to_owned();
            let name = conf.name.to_owned();
            let name2 = conf.name.to_owned();
            let temp_file = dir.join(file_name.to_owned());

            debug!("create file"; "scraper" => name.to_owned(), "file" => temp_file.to_str());
            chunks.push(
                File::create(temp_file.to_owned())
                    .and_then(move |mut file| {
                        trace!("write chunk on file"; "scraper" => name, "file" => temp_file.to_str());
                        // NOTE(review): a single poll_write may not flush the
                        // whole buffer for very large chunks — presumably a
                        // batch fits in one write; confirm.
                        file.poll_write((chunk.join("\n") + "\n").as_bytes())
                            .and_then(|_| file.poll_flush())
                    })
                    .and_then(move |_| {
                        let old = dir.join(file_name);
                        let new = dir.join(format!("{}-{}-{}.metrics", name2, now, batch_count));

                        debug!("rotate source file"; "scraper" => name2, "old" => old.to_str(), "new" => new.to_str());
                        rename(old, new)
                    })
                    .and_then(|_| future::ok(()))
                    .map_err(|err| format_err!("{}", err)),
            );

            chunk = vec![];
        }

        chunk.push(line);
    }

    let bulk = future::join_all(chunks).and_then(|_| Ok(()));

    batch_count += 1;

    // Flush the trailing chunk the same way (tmp file, then rename).
    let name = conf.name.to_owned();
    let name2 = conf.name.to_owned();
    let dir = dir.to_owned();
    let file_name = format!("{}-{}-{}.tmp", conf.name, now, batch_count);
    let temp_file = dir.join(file_name.to_owned());

    trace!("create tmp source file"; "scraper" => name, "file" => temp_file.to_str());
    let chunk = File::create(temp_file)
        .and_then(move |mut file| {
            file.poll_write((chunk.join("\n") + "\n").as_bytes())
                .and_then(|_| file.poll_flush())
        })
        .and_then(move |_| {
            let old = dir.join(file_name);
            let new = dir.join(format!("{}-{}-{}.metrics", name2, now, batch_count));

            debug!("rotate file"; "scraper" => name2, "old" => old.to_str(), "new" => new.to_str());
            rename(old, new)
        })
        .and_then(|_| future::ok(()))
        .map_err(|err| format_err!("{}", err));

    bulk.join(chunk).and_then(|_| future::ok(()))
}
}
use std::collections::VecDeque;
use std::convert::From;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::PathBuf;
use std::sync::{Arc, Mutex};
use std::time::Instant;

use backoff::backoff::Backoff;
use backoff::ExponentialBackoff;
use crossbeam::queue::SegQueue;
use failure::{format_err, Error, ResultExt};
use futures::future::{err, join_all, ok};
use futures::{try_ready, Poll, Stream};
use hyper::body::{Chunk, Payload};
use hyper::client::connect::dns::GaiResolver;
use hyper::client::HttpConnector;
use hyper::{Client, Method, Request};
use hyper_rustls::HttpsConnector;
use prometheus::CounterVec;
use tokio::fs::remove_file;
use tokio::prelude::*;
use tokio::timer::{Delay, Interval};

use crate::conf;
use crate::constants::{BACKOFF_WARN, CHUNK_SIZE, NUMBER_DNS_WORKER_THREADS, THREAD_SLEEP};

/// Alias for the hyper's https client
type HttpsClient = Client<HttpsConnector<HttpConnector<GaiResolver>>, Body>;

lazy_static!
{ 32 | static ref BEAMIUM_PUSH_DP: CounterVec = register_counter_vec!( 33 | opts!("beamium_push_datapoints", "Number of datapoints pushed"), 34 | &["sink"] 35 | ) 36 | .expect("create metric: 'beamium_fetch_datapoints'"); 37 | static ref BEAMIUM_PUSH_ERRORS: CounterVec = register_counter_vec!( 38 | opts!("beamium_push_errors", "Number of push error"), 39 | &["sink"] 40 | ) 41 | .expect("create metric: 'beamium_push_errors'"); 42 | static ref BEAMIUM_PUSH_HTTP_STATUS: CounterVec = register_counter_vec!( 43 | opts!("beamium_push_http_status", "Push response http status code"), 44 | &["sink", "status"] 45 | ) 46 | .expect("create metric: 'beamium_push_http_status'"); 47 | } 48 | 49 | pub enum State { 50 | Idle, 51 | Sending(Box + Send>), 52 | Waiting, 53 | Backoff(Box + Send>), 54 | } 55 | 56 | pub struct Sender { 57 | interval: Interval, 58 | queue: Arc>>, 59 | files: Arc>, 60 | conf: Arc, 61 | params: Arc, 62 | client: Arc, 63 | state: State, 64 | backoff: ExponentialBackoff, 65 | } 66 | 67 | impl 68 | From<( 69 | Arc>>, 70 | Arc, 71 | Arc, 72 | )> for Sender 73 | { 74 | fn from( 75 | tuple: ( 76 | Arc>>, 77 | Arc, 78 | Arc, 79 | ), 80 | ) -> Self { 81 | let (queue, conf, params) = tuple; 82 | let client = Client::builder() 83 | .keep_alive(conf.keep_alive) 84 | .keep_alive_timeout(conf.keep_alive_timeout) 85 | .build(HttpsConnector::new(NUMBER_DNS_WORKER_THREADS)); 86 | 87 | let mut backoff = ExponentialBackoff::default(); 88 | 89 | backoff.initial_interval = params.backoff.initial; 90 | backoff.max_interval = params.backoff.max; 91 | backoff.multiplier = params.backoff.multiplier; 92 | backoff.randomization_factor = params.backoff.randomization; 93 | backoff.max_elapsed_time = None; 94 | 95 | Self { 96 | interval: Interval::new(Instant::now(), THREAD_SLEEP), 97 | queue, 98 | files: Arc::new(SegQueue::new()), 99 | conf, 100 | params, 101 | client: Arc::new(client), 102 | state: State::Idle, 103 | backoff, 104 | } 105 | } 106 | } 107 | 108 | impl Stream for Sender { 
109 | type Item = (); 110 | type Error = Error; 111 | 112 | fn poll(&mut self) -> Poll, Self::Error> { 113 | let conf = self.conf.to_owned(); 114 | match &mut self.state { 115 | State::Idle => { 116 | let is_empty = { 117 | match self.queue.try_lock() { 118 | Ok(queue) => queue.is_empty(), 119 | Err(_) => { 120 | // Wait in order to retrieve the lock later 121 | true 122 | } 123 | } 124 | }; 125 | 126 | if is_empty { 127 | self.state = State::Waiting; 128 | return Ok(Async::Ready(Some(()))); 129 | } 130 | 131 | let body = Body::from(( 132 | self.queue.to_owned(), 133 | self.conf.to_owned(), 134 | self.params.to_owned(), 135 | )); 136 | 137 | self.files = body.get_files(); 138 | let request = Request::builder() 139 | .method(Method::POST) 140 | .uri(self.conf.url.to_owned()) 141 | .header(self.conf.token_header.as_str(), self.conf.token.as_str()) 142 | .body(body) 143 | .with_context(|err| format!("could not create the http request, {}", err))?; 144 | 145 | let name = self.conf.name.to_owned(); 146 | let sink = self.conf.name.to_owned(); 147 | let files = self.files.to_owned(); 148 | 149 | let request = self 150 | .client 151 | .to_owned() 152 | .request(request) 153 | .timeout(self.params.timeout.to_owned()) 154 | .map_err(|err| format_err!("{}", err)) 155 | .and_then(move |res| { 156 | let status = res.status(); 157 | 158 | BEAMIUM_PUSH_HTTP_STATUS 159 | .with_label_values(&[sink.as_str(), status.as_str()]) 160 | .inc(); 161 | if status.is_success() { 162 | info!("post success"; "sink" => sink.as_str()); 163 | return ok(()); 164 | } 165 | 166 | err(format_err!("http request failed, got {}", status.as_u16())) 167 | }) 168 | .and_then(move |_| { 169 | let mut bulk = vec![]; 170 | while let Ok(file) = files.pop() { 171 | trace!("remove file"; "sink" => name.as_str(), "path" => file.to_str()); 172 | bulk.push(Sender::remove(file)); 173 | } 174 | 175 | join_all(bulk).and_then(|_| ok(())) 176 | }); 177 | 178 | self.state = State::Sending(Box::new(request)); 179 | 
Ok(Async::Ready(Some(()))) 180 | } 181 | State::Sending(req) => match req.poll() { 182 | Err(err) => { 183 | error!("post failed"; "sink" => conf.name.as_str(), "error" => err.to_string()); 184 | { 185 | let mut queue = self.queue.lock().map_err(|err| format_err!("{}", err))?; 186 | while let Ok(file) = self.files.pop() { 187 | debug!("push back file in queue"; "sink" => conf.name.as_str(), "path" => file.to_str()); 188 | queue.push_front(file); 189 | } 190 | } 191 | 192 | BEAMIUM_PUSH_ERRORS 193 | .with_label_values(&[conf.name.as_str()]) 194 | .inc(); 195 | 196 | let delay = self 197 | .backoff 198 | .next_backoff() 199 | .expect("never None as max_elapsed_time is None"); 200 | if delay > BACKOFF_WARN { 201 | warn!("backoff on request"; "delay" => format!("{:?}", delay), "sink" => conf.name.as_str()); 202 | } 203 | 204 | let delay = 205 | Delay::new(Instant::now() + delay).map_err(|err| format_err!("{}", err)); 206 | 207 | self.state = State::Backoff(Box::new(delay)); 208 | Ok(Async::Ready(Some(()))) 209 | } 210 | Ok(poll) => { 211 | if let Async::Ready(_) = poll { 212 | self.backoff.reset(); 213 | self.state = State::Idle; 214 | return Ok(Async::Ready(Some(()))); 215 | } 216 | 217 | // Should return not ready else all cpus goes straight to full usage 218 | Ok(Async::NotReady) 219 | } 220 | }, 221 | State::Waiting => { 222 | try_ready!(self.interval.poll().map_err(|err| format_err!("{}", err))); 223 | 224 | self.state = State::Idle; 225 | Ok(Async::Ready(Some(()))) 226 | } 227 | State::Backoff(delay) => { 228 | try_ready!(delay.poll().map_err(|err| format_err!("{}", err))); 229 | 230 | self.state = State::Idle; 231 | Ok(Async::Ready(Some(()))) 232 | } 233 | } 234 | } 235 | } 236 | 237 | impl Sender { 238 | fn remove(path: PathBuf) -> impl Future { 239 | remove_file(path) 240 | .map_err(|err| format_err!("{}", err)) 241 | .and_then(|_| ok(())) 242 | } 243 | } 244 | 245 | pub struct Body { 246 | queue: Arc>>, 247 | conf: Arc, 248 | params: Arc, 249 | files: Arc>, 
250 | current_batch_size: u64, 251 | current_batch_count: u64, 252 | reader: Option>, 253 | } 254 | 255 | impl 256 | From<( 257 | Arc>>, 258 | Arc, 259 | Arc, 260 | )> for Body 261 | { 262 | fn from( 263 | tuple: ( 264 | Arc>>, 265 | Arc, 266 | Arc, 267 | ), 268 | ) -> Self { 269 | let (queue, conf, params) = tuple; 270 | 271 | Self { 272 | queue, 273 | conf, 274 | params, 275 | files: Arc::new(SegQueue::new()), 276 | current_batch_size: 0, 277 | current_batch_count: 0, 278 | reader: None, 279 | } 280 | } 281 | } 282 | 283 | impl Payload for Body { 284 | type Data = Chunk; 285 | type Error = Error; 286 | 287 | fn poll_data(&mut self) -> Poll, Self::Error> { 288 | match &mut self.reader { 289 | None => { 290 | if self.current_batch_count >= self.params.batch_count 291 | || self.current_batch_size >= self.params.batch_size 292 | { 293 | // We reach the maximum of the batch 294 | return Ok(Async::Ready(None)); 295 | } 296 | 297 | let path = { 298 | match self.queue.try_lock() { 299 | Ok(mut queue) => queue.pop_front(), 300 | Err(_) => { 301 | task::current().notify(); 302 | return Ok(Async::NotReady); 303 | } 304 | } 305 | }; 306 | 307 | let path = match path { 308 | Some(path) => path, 309 | None => { 310 | // We don't have more files to send 311 | return Ok(Async::Ready(None)); 312 | } 313 | }; 314 | 315 | trace!("open file"; "sink" => self.conf.name.as_str(), "path" => path.to_str()); 316 | self.reader = Some(BufReader::new(File::open(path.to_owned())?)); 317 | self.files.push(path); 318 | self.current_batch_count += 1; 319 | 320 | task::current().notify(); 321 | Ok(Async::NotReady) 322 | } 323 | Some(reader) => { 324 | let mut acc = String::new(); 325 | let mut len = 0; 326 | 327 | while len < CHUNK_SIZE { 328 | let mut line = String::new(); 329 | let mut line_len = reader.read_line(&mut line)? 
as u64; 330 | 331 | if line_len == 0 { 332 | // We have read all the file 333 | trace!("we have read all the file"); 334 | self.reader = None; 335 | break; 336 | } 337 | 338 | if &line == "\n" { 339 | continue; 340 | } 341 | 342 | if !line.ends_with('\n') { 343 | line += "\n"; 344 | line_len += "\n".len() as u64; 345 | } 346 | 347 | acc += &line; 348 | len += line_len; 349 | 350 | self.current_batch_size += line_len; 351 | 352 | BEAMIUM_PUSH_DP 353 | .with_label_values(&[self.conf.name.as_str()]) 354 | .inc(); 355 | } 356 | 357 | Ok(Async::Ready(Some(Chunk::from(acc)))) 358 | } 359 | } 360 | } 361 | } 362 | 363 | impl Body { 364 | pub fn get_files(&self) -> Arc> { 365 | self.files.to_owned() 366 | } 367 | } 368 | -------------------------------------------------------------------------------- /src/conf.rs: -------------------------------------------------------------------------------- 1 | //! # Conf module. 2 | //! 3 | //! The Conf module provides the beamium configuration. 4 | //! It set defaults and then load config from '/etc', local dir and provided path. 5 | use std::collections::HashMap; 6 | use std::convert::TryFrom; 7 | use std::net::SocketAddr; 8 | use std::path::PathBuf; 9 | use std::sync::mpsc::{channel, Receiver, Sender}; 10 | use std::time::Duration; 11 | 12 | use config::{Config, File}; 13 | use failure::{format_err, Error, ResultExt}; 14 | use humanize_rs::bytes::{Bytes, Unit}; 15 | use humanize_rs::duration::parse; 16 | use hyper::Uri; 17 | use notify::{watcher, DebouncedEvent, RecommendedWatcher, RecursiveMode, Watcher}; 18 | use regex::{Regex, RegexSet}; 19 | use serde_derive::{Deserialize, Serialize}; 20 | use std::env; 21 | 22 | use glob::glob; 23 | 24 | /// `Scraper` config. 
25 | #[derive(Deserialize, Serialize, Clone, Debug)] 26 | pub(crate) struct RawScraper { 27 | pub url: String, 28 | pub period: String, 29 | pub format: Option, 30 | pub metrics: Option>, 31 | pub headers: Option>, 32 | pub labels: Option>, 33 | #[serde(rename = "filtered-labels")] 34 | pub filtered_labels: Option>, 35 | pub pool: Option, 36 | } 37 | 38 | /// `RawSink` config. 39 | #[derive(Deserialize, Serialize, Clone, Debug)] 40 | pub(crate) struct RawSink { 41 | pub url: String, 42 | pub token: String, 43 | #[serde(rename = "token-header")] 44 | pub token_header: Option, 45 | pub selector: Option, 46 | pub ttl: Option, 47 | pub size: Option, 48 | pub parallel: Option, 49 | #[serde(rename = "keep-alive")] 50 | pub keep_alive: Option, 51 | #[serde(rename = "keep-alive-timeout")] 52 | pub keep_alive_timeout: Option, 53 | } 54 | 55 | /// `RawBackoff` config. 56 | #[derive(Deserialize, Serialize, Clone, Debug)] 57 | pub(crate) struct RawBackoff { 58 | pub initial: String, 59 | pub max: String, 60 | pub multiplier: f64, 61 | pub randomization: f64, 62 | } 63 | 64 | /// `RawParameters` config. 65 | #[derive(Deserialize, Serialize, Clone, Debug)] 66 | pub(crate) struct RawParameters { 67 | #[serde(rename = "scan-period")] 68 | pub scan_period: String, 69 | #[serde(rename = "sink-dir")] 70 | pub sink_dir: String, 71 | #[serde(rename = "source-dir")] 72 | pub source_dir: String, 73 | #[serde(rename = "batch-size")] 74 | pub batch_size: String, 75 | #[serde(rename = "batch-count")] 76 | pub batch_count: u64, 77 | #[serde(rename = "log-file")] 78 | pub log_file: String, 79 | #[serde(rename = "log-level")] 80 | pub log_level: usize, 81 | pub syslog: bool, 82 | pub timeout: String, 83 | #[serde(rename = "router-parallel")] 84 | pub router_parallel: usize, 85 | pub backoff: RawBackoff, 86 | pub metrics: Option, 87 | #[serde(rename = "filesystem-threads")] 88 | pub filesystem_threads: usize, 89 | } 90 | 91 | /// `RawConfig` root. 
92 | #[derive(Deserialize, Serialize, Clone, Debug)] 93 | pub(crate) struct RawConf { 94 | pub sources: Option>, 95 | pub scrapers: Option>, 96 | pub sinks: Option>, 97 | pub labels: Option>, 98 | pub parameters: RawParameters, 99 | } 100 | 101 | impl TryFrom<&PathBuf> for RawConf { 102 | type Error = Error; 103 | 104 | /// Import configuration from a file 105 | fn try_from(path: &PathBuf) -> Result { 106 | let mut config = Self::initialize() 107 | .with_context(|err| format!("could not initialize the configuration, {}", err))?; 108 | 109 | config 110 | .merge(File::from(path.to_owned()).required(true)) 111 | .with_context(|err| format!("could not merge configuration with file, {}", err))?; 112 | 113 | Ok(config.try_into::()?) 114 | } 115 | } 116 | 117 | impl RawConf { 118 | fn initialize() -> Result { 119 | let mut config = Config::default(); 120 | 121 | // parameters 122 | config.set_default("parameters.scan-period", "1s")?; 123 | config.set_default("parameters.sink-dir", "sinks")?; 124 | config.set_default("parameters.source-dir", "sources")?; 125 | config.set_default("parameters.batch-size", 200_000)?; 126 | config.set_default("parameters.batch-count", 250)?; 127 | config.set_default("parameters.log-file", "beamium.log")?; 128 | config.set_default("parameters.log-level", 4)?; 129 | config.set_default("parameters.syslog", false)?; 130 | config.set_default("parameters.timeout", "500s")?; 131 | config.set_default("parameters.router-parallel", 1)?; 132 | config.set_default("parameters.filesystem-threads", 100)?; 133 | 134 | // backoff parameters 135 | config.set_default("parameters.backoff.initial", "500ms")?; 136 | config.set_default("parameters.backoff.max", "1m")?; 137 | config.set_default("parameters.backoff.multiplier", 1.5)?; 138 | config.set_default("parameters.backoff.randomization", 0.3)?; 139 | 140 | Ok(config) 141 | } 142 | 143 | /// Import configuration from '/etc' and local directory 144 | pub(crate) fn default() -> Result { 145 | let mut config = 
Self::initialize()?; 146 | let mut paths = glob("/etc/beamium.d/**/*")? 147 | .filter_map(|result| { 148 | if result.is_err() { 149 | return None; 150 | } 151 | 152 | let path = result.expect("error is filter above"); 153 | 154 | Some(File::from(path).required(false)) 155 | }) 156 | .collect::>(); 157 | 158 | paths.push(File::with_name("/etc/beamium/config").required(false)); 159 | 160 | paths.append( 161 | &mut glob(format!("{}/.beamium.d/**/*", env!("HOME")).as_str())? 162 | .filter_map(|result| { 163 | if result.is_err() { 164 | return None; 165 | } 166 | 167 | let path = result.expect("error is filter above"); 168 | 169 | Some(File::from(path).required(false)) 170 | }) 171 | .collect::>(), 172 | ); 173 | 174 | paths.push( 175 | File::with_name(format!("{}/.beamium/config", env!("HOME")).as_str()).required(false), 176 | ); 177 | 178 | config.merge(paths).with_context(|err| { 179 | format!("could not merge configuration using default paths, {}", err) 180 | })?; 181 | 182 | Ok(config.try_into::()?) 183 | } 184 | } 185 | 186 | /// `Scraper` format. 187 | #[derive(Debug, Clone)] 188 | pub enum ScraperFormat { 189 | Prometheus, 190 | Sensision, 191 | } 192 | 193 | impl TryFrom<&str> for ScraperFormat { 194 | type Error = Error; 195 | 196 | fn try_from(v: &str) -> Result { 197 | match v { 198 | "sensision" => Ok(ScraperFormat::Sensision), 199 | "prometheus" => Ok(ScraperFormat::Prometheus), 200 | _ => Err(format_err!( 201 | "the scraper's format field should be one of 'sensision' or 'prometheus'" 202 | )), 203 | } 204 | } 205 | } 206 | 207 | /// `Scraper` config. 
208 | #[derive(Clone, Debug)] 209 | pub struct Scraper { 210 | pub name: String, 211 | pub url: Uri, 212 | pub period: Duration, 213 | pub format: ScraperFormat, 214 | pub metrics: Option, 215 | pub headers: HashMap, 216 | pub labels: HashMap, 217 | pub filtered_labels: Vec, 218 | pub pool: usize, 219 | } 220 | 221 | impl TryFrom<(String, RawScraper)> for Scraper { 222 | type Error = Error; 223 | 224 | fn try_from(value: (String, RawScraper)) -> Result { 225 | let name = value.0; 226 | let raw_scraper = value.1; 227 | 228 | let metrics = match raw_scraper.metrics { 229 | Some(ref patterns) => Some(RegexSet::new(patterns).with_context(|err| { 230 | format!("Could not create regex set from 'metrics' field, {}", err) 231 | })?), 232 | None => None, 233 | }; 234 | 235 | let period = match raw_scraper.period.parse::() { 236 | Ok(period) => Duration::from_millis(period), 237 | Err(_) => parse(raw_scraper.period.as_str()) 238 | .with_context(|err| format!("could not parse 'period' setting, {}", err))?, 239 | }; 240 | 241 | let headers = match raw_scraper.headers { 242 | None => HashMap::new(), 243 | Some(headers) => headers, 244 | }; 245 | 246 | let mut labels = match raw_scraper.labels { 247 | None => HashMap::new(), 248 | Some(mut labels) => { 249 | for (k, v) in labels.to_owned() { 250 | match Conf::replace_env(v.to_string()) { 251 | Some(v) => labels.insert(k, v), 252 | None => labels.remove(&k), 253 | }; 254 | } 255 | 256 | labels 257 | } 258 | }; 259 | 260 | for (k, v) in Conf::env_labels(format!( 261 | "BEAMIUM_{}_LABEL_", 262 | name.to_uppercase().replace("-", "_") 263 | )) { 264 | labels.insert(k, v); 265 | } 266 | 267 | let pool = match raw_scraper.pool { 268 | Some(pool) => pool, 269 | None => 1, 270 | }; 271 | 272 | let filtered_labels = match raw_scraper.filtered_labels { 273 | Some(filtered_labels) => filtered_labels, 274 | None => vec![], 275 | }; 276 | 277 | let format = match raw_scraper.format { 278 | None => String::from("prometheus"), 279 | 
Some(format) => format, 280 | }; 281 | 282 | if !raw_scraper.url.starts_with("http://") && !raw_scraper.url.starts_with("https://") { 283 | Err(format_err!( 284 | "protocol is missing or incorrect, it should be one of 'http' or 'https'" 285 | )) 286 | .with_context(|err| format!("could not parse 'url' setting, {}", err))?; 287 | } 288 | 289 | Ok(Self { 290 | name, 291 | url: raw_scraper 292 | .url 293 | .parse::() 294 | .with_context(|err| format!("could not parse 'url' setting, {}", err))?, 295 | period, 296 | format: ScraperFormat::try_from(format.as_str()) 297 | .with_context(|err| format!("could not parse 'format' setting, {}", err))?, 298 | metrics, 299 | headers, 300 | labels, 301 | filtered_labels, 302 | pool, 303 | }) 304 | } 305 | } 306 | 307 | /// `Sink` config. 308 | #[derive(Clone, Debug)] 309 | pub struct Sink { 310 | pub name: String, 311 | pub url: Uri, 312 | pub token: String, 313 | pub token_header: String, 314 | pub selector: Option, 315 | pub ttl: Duration, 316 | pub size: u64, 317 | pub parallel: usize, 318 | pub keep_alive: bool, 319 | pub keep_alive_timeout: Duration, 320 | } 321 | 322 | impl TryFrom<(String, RawSink)> for Sink { 323 | type Error = Error; 324 | 325 | fn try_from(value: (String, RawSink)) -> Result { 326 | let name = value.0; 327 | let raw_sink = value.1; 328 | 329 | let selector = match raw_sink.selector { 330 | None => None, 331 | Some(ref pattern) => { 332 | Some(Regex::new(&format!("^{}", pattern)).with_context(|err| { 333 | format!("could not create regex from 'selector' field, {}", err) 334 | })?) 
335 | } 336 | }; 337 | 338 | let keep_alive_timeout = match raw_sink.keep_alive_timeout { 339 | None => Duration::from_secs(3600), 340 | Some(timeout) => parse(timeout.as_str()).with_context(|err| { 341 | format!("could not parse 'keep-alive-timeout' setting, {}", err) 342 | })?, 343 | }; 344 | 345 | let token_header = match raw_sink.token_header { 346 | None => String::from("X-Warp10-Token"), 347 | Some(token_header) => token_header, 348 | }; 349 | 350 | let ttl = match raw_sink.ttl { 351 | None => String::from("1h"), 352 | Some(ttl) => ttl, 353 | }; 354 | 355 | let ttl = match ttl.parse::() { 356 | Ok(ttl) => Duration::from_secs(ttl), 357 | Err(_) => parse(ttl.as_str()) 358 | .with_context(|err| format!("could not parse 'ttl' setting, {}", err))?, 359 | }; 360 | 361 | let size = match raw_sink.size { 362 | None => String::from("1Gb"), 363 | Some(size) => size, 364 | }; 365 | 366 | let size = match size.parse::() { 367 | Ok(size) => Bytes::new(size, Unit::Byte)?.size() as u64, 368 | Err(_) => size 369 | .parse::() 370 | .with_context(|err| format!("could not parse 'size' setting, {}", err))? 371 | .size() as u64, 372 | }; 373 | 374 | let parallel = match raw_sink.parallel { 375 | None => 1, 376 | Some(parallel) => parallel, 377 | }; 378 | 379 | let keep_alive = match raw_sink.keep_alive { 380 | None => true, 381 | Some(keep_alive) => keep_alive, 382 | }; 383 | 384 | Ok(Self { 385 | name, 386 | url: raw_sink 387 | .url 388 | .parse::() 389 | .with_context(|err| format!("could not parse 'url' setting, {}", err))?, 390 | token: raw_sink.token, 391 | token_header, 392 | ttl, 393 | size, 394 | selector, 395 | parallel, 396 | keep_alive, 397 | keep_alive_timeout, 398 | }) 399 | } 400 | } 401 | 402 | /// `Backoff` config. 
403 | #[derive(Clone, Debug)] 404 | pub struct Backoff { 405 | pub initial: Duration, 406 | pub max: Duration, 407 | pub multiplier: f64, 408 | pub randomization: f64, 409 | } 410 | 411 | impl TryFrom<&RawBackoff> for Backoff { 412 | type Error = Error; 413 | 414 | fn try_from(raw_backoff: &RawBackoff) -> Result { 415 | Ok(Self { 416 | initial: parse(raw_backoff.initial.as_str()).with_context(|err| { 417 | format!("could not parse 'backoff.initial' setting, {}", err) 418 | })?, 419 | max: parse(raw_backoff.max.as_str()) 420 | .with_context(|err| format!("could not parse 'backoff.max' setting, {}", err))?, 421 | multiplier: raw_backoff.multiplier, 422 | randomization: raw_backoff.randomization, 423 | }) 424 | } 425 | } 426 | 427 | /// `RawParameters` config. 428 | #[derive(Clone, Debug)] 429 | pub struct Parameters { 430 | pub scan_period: Duration, 431 | pub sink_dir: String, 432 | pub source_dir: String, 433 | pub batch_size: u64, 434 | pub batch_count: u64, 435 | pub log_file: String, 436 | pub log_level: usize, 437 | pub syslog: bool, 438 | pub timeout: Duration, 439 | pub router_parallel: usize, 440 | pub backoff: Backoff, 441 | pub metrics: Option, 442 | pub filesystem_threads: usize, 443 | } 444 | 445 | impl TryFrom for Parameters { 446 | type Error = Error; 447 | 448 | fn try_from(raw_parameters: RawParameters) -> Result { 449 | let scan_period = match raw_parameters.scan_period.parse::() { 450 | Ok(scan_period) => Duration::from_millis(scan_period), 451 | Err(_) => parse(raw_parameters.scan_period.as_str()) 452 | .with_context(|err| format!("could not parse 'scan-period' setting, {}", err))?, 453 | }; 454 | 455 | let timeout = match raw_parameters.timeout.parse::() { 456 | Ok(timeout) => Duration::from_secs(timeout), 457 | Err(_) => parse(raw_parameters.timeout.as_str()) 458 | .with_context(|err| format!("could not parse 'timeout' setting, {}", err))?, 459 | }; 460 | 461 | let batch_size = match raw_parameters.batch_size.parse::() { 462 | Ok(batch_size) => 
batch_size, 463 | Err(_) => raw_parameters 464 | .batch_size 465 | .parse::() 466 | .with_context(|err| format!("could not parse 'size' setting, {}", err))? 467 | .size() as u64, 468 | }; 469 | 470 | let metrics = match raw_parameters.metrics { 471 | None => None, 472 | Some(metrics) => Some( 473 | metrics 474 | .parse::() 475 | .with_context(|err| format!("could not parse 'metrics' setting, {}", err))?, 476 | ), 477 | }; 478 | 479 | Ok(Self { 480 | scan_period, 481 | sink_dir: raw_parameters.sink_dir, 482 | source_dir: raw_parameters.source_dir, 483 | batch_size, 484 | batch_count: raw_parameters.batch_count, 485 | log_file: raw_parameters.log_file, 486 | log_level: raw_parameters.log_level, 487 | syslog: raw_parameters.syslog, 488 | timeout, 489 | router_parallel: raw_parameters.router_parallel, 490 | backoff: Backoff::try_from(&raw_parameters.backoff)?, 491 | metrics, 492 | filesystem_threads: raw_parameters.filesystem_threads, 493 | }) 494 | } 495 | } 496 | 497 | /// `Config` root. 498 | #[derive(Clone, Debug)] 499 | pub struct Conf { 500 | pub scrapers: Vec, 501 | pub sinks: Vec, 502 | pub labels: HashMap, 503 | pub parameters: Parameters, 504 | } 505 | 506 | impl TryFrom for Conf { 507 | type Error = Error; 508 | 509 | fn try_from(raw_config: RawConf) -> Result { 510 | let mut scrapers: Vec = vec![]; 511 | let mut sinks: Vec = vec![]; 512 | 513 | if let Some(raw_scrapers) = raw_config.sources { 514 | warn!("configuration key 'sources' is deprecated and will be removed in further revision. 
Please use 'scrapers' instead."); 515 | for (name, raw_scraper) in raw_scrapers { 516 | scrapers.push( 517 | Scraper::try_from((name.to_owned(), raw_scraper.to_owned())) 518 | .with_context(|err| format!("source '{}' is malformed, {}", name, err))?, 519 | ); 520 | } 521 | } 522 | 523 | if let Some(raw_scrapers) = raw_config.scrapers { 524 | for (name, raw_scraper) in raw_scrapers { 525 | scrapers.push( 526 | Scraper::try_from((name.to_owned(), raw_scraper.to_owned())) 527 | .with_context(|err| format!("scraper '{}' is malformed, {}", name, err))?, 528 | ); 529 | } 530 | } 531 | 532 | if let Some(raw_sinks) = raw_config.sinks { 533 | for (name, raw_sink) in raw_sinks { 534 | sinks.push( 535 | Sink::try_from((name.to_owned(), raw_sink.to_owned())) 536 | .with_context(|err| format!("sink '{}' is malformed, {}", name, err))?, 537 | ) 538 | } 539 | } 540 | 541 | let mut labels = match raw_config.labels { 542 | None => HashMap::new(), 543 | Some(mut labels) => { 544 | for (k, v) in labels.to_owned() { 545 | match Conf::replace_env(v) { 546 | Some(v) => labels.insert(k, v), 547 | None => labels.remove(&k), 548 | }; 549 | } 550 | 551 | labels 552 | } 553 | }; 554 | 555 | for (k, v) in Conf::env_labels("BEAMIUM_LABEL_".into()) { 556 | labels.insert(k, v); 557 | } 558 | 559 | Ok(Self { 560 | scrapers, 561 | sinks, 562 | labels, 563 | parameters: Parameters::try_from(raw_config.parameters) 564 | .with_context(|err| format!("'parameters' is malformed, {}", err))?, 565 | }) 566 | } 567 | } 568 | 569 | impl TryFrom<&PathBuf> for Conf { 570 | type Error = Error; 571 | 572 | /// Import configuration using raw configuration 573 | fn try_from(path: &PathBuf) -> Result { 574 | let config = RawConf::try_from(path)?; 575 | 576 | Ok(Self::try_from(config)?) 
577 | } 578 | } 579 | 580 | impl Conf { 581 | /// Import configuration from '/etc' and local directory using raw config 582 | pub fn default() -> Result { 583 | let config = RawConf::default()?; 584 | 585 | Ok(Self::try_from(config)?) 586 | } 587 | 588 | fn replace_env(value: String) -> Option { 589 | if !value.starts_with("env:") { 590 | return Some(value); 591 | } 592 | 593 | let striped = value.trim_start_matches("env:"); 594 | 595 | match env::var(striped) { 596 | Ok(v) => Some(v), 597 | Err(_) => { 598 | warn!("could not retrieve environment variable '{}'", striped); 599 | None 600 | } 601 | } 602 | } 603 | 604 | fn env_labels(prefix: String) -> HashMap { 605 | env::vars() 606 | .filter(|(k, _)| k.starts_with(&prefix)) 607 | .map(|(k, v)| ((&k.trim_start_matches(&prefix)).to_lowercase(), v)) 608 | .collect() 609 | } 610 | 611 | pub fn watch( 612 | path: Option, 613 | ) -> Result< 614 | ( 615 | Sender, 616 | Receiver, 617 | RecommendedWatcher, 618 | ), 619 | Error, 620 | > { 621 | let (tx, rx) = channel(); 622 | let mut w = watcher(tx.to_owned(), Duration::from_secs(2)) 623 | .with_context(|err| format!("could not create watcher, {}", err))?; 624 | 625 | match path { 626 | Some(path) => { 627 | w.watch(path.to_owned(), RecursiveMode::NonRecursive) 628 | .with_context(|err| { 629 | format!("could not put a watch on '{:?}', {}", path, err) 630 | })?; 631 | } 632 | None => { 633 | let result = w 634 | .watch("/etc/beamium.d", RecursiveMode::Recursive) 635 | .with_context(|err| { 636 | format!("could not put a watch on '/etc/beamium.d', {}", err) 637 | }); 638 | 639 | if let Err(err) = result { 640 | warn!("could not put a watch"; "error" => err.to_string()); 641 | } 642 | 643 | let result = w 644 | .watch("/etc/beamium", RecursiveMode::Recursive) 645 | .with_context(|err| { 646 | format!("could not put a watch on '/etc/beamium', {}", err) 647 | }); 648 | 649 | if let Err(err) = result { 650 | warn!("could not put a watch"; "error" => err.to_string()); 651 | } 652 
| 653 | let result = w 654 | .watch( 655 | format!("{}/.beamium.d", env!("HOME")), 656 | RecursiveMode::Recursive, 657 | ) 658 | .with_context(|err| { 659 | format!( 660 | "could not put a watch on '{}/.beamium.d', {}", 661 | env!("HOME"), 662 | err 663 | ) 664 | }); 665 | 666 | if let Err(err) = result { 667 | warn!("could not put a watch"; "error" => err.to_string()); 668 | } 669 | 670 | let result = w 671 | .watch( 672 | format!("{}/.beamium", env!("HOME")), 673 | RecursiveMode::Recursive, 674 | ) 675 | .with_context(|err| { 676 | format!( 677 | "could not put a watch on '{}/.beamium', {}", 678 | env!("HOME"), 679 | err 680 | ) 681 | }); 682 | 683 | if let Err(err) = result { 684 | warn!("could not put a watch"; "error" => err.to_string()); 685 | } 686 | } 687 | } 688 | 689 | Ok((tx, rx, w)) 690 | } 691 | } 692 | --------------------------------------------------------------------------------