├── .github ├── dependabot.yml └── workflows │ └── container.yml ├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── README.md ├── assets ├── example.png └── speedtest.service ├── container ├── Dockerfile ├── entrypoint.sh └── wrapper ├── plotscript ├── requirements.in ├── requirements.txt └── speedtest-plotter /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | 4 | - package-ecosystem: docker 5 | directory: container/ 6 | schedule: 7 | interval: daily 8 | open-pull-requests-limit: 10 9 | 10 | - package-ecosystem: github-actions 11 | directory: / 12 | schedule: 13 | interval: daily 14 | open-pull-requests-limit: 10 15 | 16 | - package-ecosystem: pip 17 | directory: / 18 | schedule: 19 | interval: weekly # very noisy if not updated all at once 20 | open-pull-requests-limit: 1 21 | -------------------------------------------------------------------------------- /.github/workflows/container.yml: -------------------------------------------------------------------------------- 1 | name: Build container image 2 | 3 | on: 4 | push: 5 | branches: [ main, devel ] 6 | tags: [ v*.*.* ] 7 | pull_request: 8 | types: [ assigned, opened, synchronize, reopened ] 9 | schedule: 10 | - cron: "0 13 * * 1" 11 | workflow_dispatch: 12 | 13 | env: 14 | 15 | # build for multiple platforms when not a pull-request 16 | PLATFORMS: ${{ fromJSON('[ "linux/amd64", "linux/amd64,linux/arm64,linux/arm/v7,linux/arm/v6,linux/ppc64le" ]')[ github.event_name != 'pull_request' ] }} 17 | 18 | # how to name the image 19 | IMAGENAME: speedtest 20 | TESTIMAGE: "${{ github.repository_owner }}/speedtest:testing" 21 | 22 | # dockerhub credentials 23 | DOCKERHUB_USER: ansemjo 24 | #DOCKERHUB_TOKEN - add as secret value 25 | 26 | jobs: 27 | 28 | container: 29 | name: build image 📦 30 | runs-on: ubuntu-latest 31 | steps: 32 | - name: Checkout 33 | uses: actions/checkout@v4 34 | 35 | - name: Prepare Tags 36 | id: prep 37 | shell: 
bash 38 | run: | 39 | TAGS=() 40 | case "${GITHUB_REF}" in 41 | # version releases 42 | refs/tags/*) 43 | VERSION="${GITHUB_REF#refs/tags/}" 44 | if [[ ${VERSION} =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)$ ]]; then 45 | V=("${BASH_REMATCH[@]}") 46 | TAGS+=("${{ env.IMAGENAME }}:${V[1]}" \ 47 | "${{ env.IMAGENAME }}:${V[1]}.${V[2]}" \ 48 | "${{ env.IMAGENAME }}:${V[1]}.${V[2]}.${V[3]}") 49 | else 50 | TAGS+=("${{ env.IMAGENAME }}:${VERSION}") 51 | fi 52 | ;& 53 | # branch heads (+ fallthorugh) 54 | refs/heads/*) 55 | TAGS+=("${{ env.IMAGENAME }}:latest") 56 | TAGS=$({ IFS=","; echo "${TAGS[*]/#/${{ env.DOCKERHUB_USER }}/}","${TAGS[*]/#/ghcr.io/${{ github.repository_owner }}/}"; }) 57 | ;; 58 | # pull requests 59 | refs/pull/*) 60 | TAGS=("${{ github.repository_owner }}/${{ env.IMAGENAME }}:pr-${{ github.event.number }}") 61 | ;; 62 | esac 63 | echo "TAGS ${TAGS}" 64 | echo "tags=${TAGS}" >> $GITHUB_OUTPUT 65 | echo "head=${GITHUB_REF#refs/heads/}" >> $GITHUB_OUTPUT 66 | echo "created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')" >> $GITHUB_OUTPUT 67 | 68 | - name: Set up QEMU 69 | uses: docker/setup-qemu-action@v3.6.0 70 | 71 | - name: Set up Docker Buildx 72 | uses: docker/setup-buildx-action@v3.11.1 73 | 74 | - name: Login to Docker Hub 75 | uses: docker/login-action@v3.4.0 76 | if: ${{ github.event_name != 'pull_request' && steps.prep.outputs.head != 'devel' }} 77 | with: 78 | username: ${{ env.DOCKERHUB_USER }} 79 | password: ${{ secrets.DOCKERHUB_TOKEN }} 80 | 81 | - name: Login to GitHub Container Registry 82 | uses: docker/login-action@v3.4.0 83 | if: ${{ github.event_name != 'pull_request' && steps.prep.outputs.head != 'devel' }} 84 | with: 85 | registry: ghcr.io 86 | username: ${{ github.repository_owner }} 87 | password: ${{ secrets.GITHUB_TOKEN }} 88 | 89 | - name: Build and export to Docker 90 | uses: docker/build-push-action@v6.18.0 91 | with: 92 | context: . 
93 | file: ./container/Dockerfile 94 | push: false 95 | load: true 96 | tags: ${{ env.TESTIMAGE }} 97 | 98 | - name: Run a test measurement in built image 99 | run: | 100 | docker run --rm ${{ env.TESTIMAGE }} measure 101 | 102 | - name: Build and push 103 | uses: docker/build-push-action@v6.18.0 104 | if: ${{ github.event_name != 'pull_request' && steps.prep.outputs.head != 'devel' }} 105 | with: 106 | context: . 107 | file: ./container/Dockerfile 108 | platforms: ${{ env.PLATFORMS }} 109 | push: true 110 | tags: ${{ steps.prep.outputs.tags }} 111 | labels: | 112 | org.opencontainers.image.title=${{ github.event.repository.name }} 113 | org.opencontainers.image.description=${{ github.event.repository.description }} 114 | org.opencontainers.image.url=${{ github.event.repository.html_url }} 115 | org.opencontainers.image.source=${{ github.event.repository.clone_url }} 116 | org.opencontainers.image.created=${{ steps.prep.outputs.created }} 117 | org.opencontainers.image.revision=${{ github.sha }} 118 | org.opencontainers.image.licenses=${{ github.event.repository.license.spdx_id }} 119 | 120 | dependabot: 121 | # https://nicolasiensen.github.io/2022-07-23-automating-dependency-updates-with-dependabot-github-auto-merge-and-github-actions/ 122 | name: merge dependabot pr 🏗️ 123 | needs: [ container ] 124 | runs-on: ubuntu-latest 125 | if: ${{ github.event_name == 'pull_request' && github.actor == 'dependabot[bot]' }} 126 | permissions: 127 | pull-requests: write 128 | contents: write 129 | steps: 130 | 131 | - name: Get Dependabot metadata 132 | id: dependabot-metadata 133 | uses: dependabot/fetch-metadata@v2.4.0 134 | 135 | - name: Enable auto-merge for PR 136 | run: | 137 | gh pr merge --auto --rebase "$PR" 138 | env: 139 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 140 | PR: ${{ github.event.pull_request.html_url }} 141 | 142 | - name: Approve patch and minor updates 143 | if: ${{ steps.dependabot-metadata.outputs.update-type == 'version-update:semver-patch' || 
steps.dependabot-metadata.outputs.update-type == 'version-update:semver-minor' }} 144 | run: | 145 | gh pr review "$PR" --approve --body "Automatically **approving** this pull request because it includes a **patch or minor** update." 146 | env: 147 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 148 | PR: ${{ github.event.pull_request.html_url }} 149 | 150 | - name: Comment on major updates 151 | if: ${{ steps.dependabot-metadata.outputs.update-type == 'version-update:semver-major' }} 152 | run: | 153 | gh pr comment "$PR" --body "Requires manual approval due to **major update**." 154 | gh pr edit "$PR" --add-label "dependabot-major" 155 | env: 156 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 157 | PR: ${{ github.event.pull_request.html_url }} 158 | 159 | 160 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # python virtualenv 2 | venv/ 3 | 4 | # temporary local databases 5 | *.db 6 | *.db-wal 7 | *.db-shm 8 | *.sqlite 9 | 10 | # visual studio code 11 | .vscode/ 12 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Changelog 2 | 3 | ## Unreleased 4 | 5 | ## v0.6.0 - 2023-05-23 6 | 7 | ### Added 8 | - Various GitHub workflow fixes and an added auto-approve for dependabot updates 9 | - Treat anything not parseable as a number in Gnuplot as missing data for continuous lines (#127) 10 | 11 | ### Changed 12 | - Bumped various package and workflow action versions 13 | - Fixed `ytics` interval calculation for arbitrarily large `ymax` values (#117) 14 | - Use `ghcr.io` image references everywhere 15 | 16 | ## v0.5.2 - 2022-08-10 17 | 18 | ### Added 19 | - Run a test measurement using built `linux/amd64` image before pushing to repositories 20 | 21 | ### Changed 22 | - Bump `alpine` image to 3.16.2 (#100, see also #92
#93 #94 #97) 23 | - Bump flask to 2.1.3 (#96) 24 | - Bump docker/build-push-action 3.1.1 (#99, see also #98) 25 | 26 | ## v0.5.1 - 2022-05-07 27 | 28 | ### Added 29 | - Added `FETCH_LIMIT`/`--fetch-limit` to configure the default time to fetch for plotting (requested in #53) 30 | 31 | ### Changed 32 | - Bumped Flask version to 2.1.2 (#87) 33 | - Numerous Dependabot updates to workflow action versions 34 | 35 | ## v0.5.0 - 2022-05-07 36 | 37 | ### Added 38 | - Retry measurements when the server list is temporarily unavailable (fixed #72) 39 | - Started this `CHANGELOG.md` 40 | 41 | ### Changed 42 | - Use line plot with steps for Ping as well (fixed #62) 43 | - Remove leading slash from resources to allow subdirectories behind reverse-proxies (fixed #76) 44 | - Lots of Dependabot updates to workflow actions and pip packages 45 | 46 | ## v0.4.0 - 2021-09-09 47 | 48 | Last release before starting a changelog. Please just check the commit log for 49 | anything before this release. 50 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Anton Semjonov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software.
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # speedtest-plotter 2 | 3 | This is a collection of scripts, which takes internet speedtest measurements 4 | against the speedtest.net network with [taganaka/SpeedTest](https://github.com/taganaka/SpeedTest) and plots them 5 | with [gnuplot](http://gnuplot.sourceforge.net). A crontab schedule is used 6 | to automate measurements every couple of minutes and save them to a database. 7 | The results can be displayed through a simple Flask webserver. 8 | 9 | ![example plot of speedtest results](assets/example.png) 10 | 11 | ## USAGE 12 | 13 | For changes between releases check the [changelog](CHANGELOG.md). 14 | 15 | ### CONTAINER 16 | 17 | 18 | 19 | ![GitHub Workflow Status](https://github.com/ansemjo/speedtest-plotter/actions/workflows/container.yml/badge.svg) 20 | 21 | 22 | 23 | The main distribution method is the automatically built container 24 | [ghcr.io/ansemjo/speedtest](https://github.com/ansemjo/speedtest-plotter/pkgs/container/speedtest). 25 | Obviously, you need to have a container runtime like `docker` or `podman` 26 | installed to run the container. 27 | 28 | **Note:** please update your image name to use the Github container registry. 29 | I will delete the DockerHub project sometime in the future.
30 | 31 | To start the container with default settings run: 32 | 33 | docker run -d -p 8000:8000 ghcr.io/ansemjo/speedtest 34 | 35 | This will take a measurement every 15 minutes, save them to a SQLite database 36 | in `/data/speedtests.db` and run the webserver on port `8000`. Visit http://localhost:8000 37 | to look at the plotted results. (*Note: The smoothed bezier curves require at least two 38 | measurements and the image will stay blank otherwise. So you might have to wait a while first.*) 39 | 40 | #### TIMEZONE 41 | 42 | Your local timezone can be set with the `TZ` environment variable and a string from 43 | `tzselect`. If none is set usually UTC is assumed. For example users in Japan should use: 44 | 45 | docker run -d -p 8000:8000 -e TZ=Asia/Tokyo ghcr.io/ansemjo/speedtest 46 | 47 | #### DATABASE 48 | 49 | For data persistence, either mount a volume at `/data` to save the database file 50 | or set the environment variable `DATABASE` to an SQLAlchemy-compatible URI. A PostgreSQL 51 | URI might look like this: 52 | 53 | docker run -d \ 54 | -p 8000:8000 \ 55 | -e TZ=Europe/Berlin \ 56 | -e DATABASE='postgresql://user:password@hostname:5432/database' \ 57 | ghcr.io/ansemjo/speedtest 58 | 59 | #### SCHEDULE 60 | 61 | You can modify the measurement schedule with the environment variables `MINUTES` and 62 | `SCHEDULE`. The former takes a measurement every `n` minutes and the latter may define 63 | an entirely custom cron schedule like "four times a day": 64 | 65 | docker run -d -p 8000:8000 -e SCHEDULE="0 3,9,15,21 * * *" ghcr.io/ansemjo/speedtest 66 | 67 | #### MARKERS AND SCALING 68 | 69 | To add horizontal dashed lines in the plot (e.g. to mark your expected bandwidths) 70 | you can use environment variables `MARKER_DOWNLOAD` and `MARKER_UPLOAD`. The values 71 | are given in `MBit/s`. 72 | 73 | In addition or independently from that you can also set a range scaling for the upload 74 | plot relative to the download range with `UPLOAD_SCALE`.
For highly asymmetrical connections 75 | this makes it easier to see the upload bandwidth. For example, the above example 76 | picture was created with: 77 | 78 | docker run -d \ 79 | [...] \ 80 | -e MARKER_DOWNLOAD=800 \ 81 | -e MARKER_UPLOAD=40 \ 82 | -e UPLOAD_SCALE=10 \ 83 | ghcr.io/ansemjo/speedtest 84 | 85 | #### DEFAULT FETCH LIMIT 86 | 87 | By default, the webserver will fetch the last seven days (`7d`) for plotting. This can be configured 88 | with the `limit=` query parameter per request and then bookmark this URL; i.e. 89 | `http://localhost:8000/?limit=30d` will fetch the last 30 days. Alternatively, you can set the 90 | environment variable `FETCH_LIMIT` to configure a different default value for all requests 91 | without the query parameter above. 92 | 93 | #### FONT AND RESOLUTION 94 | 95 | The resolution and font of the SVG output can be configured with environment variables `RESOLUTION` and `FONT` respectively. Output resolution is expected as a comma-separated value of x- and y-size; the default is `1280,800`. The font can take either only a name (`Arial`), only a size (`,18`) or both (`Arial, 18`). Note that for a font in an SVG to work, the client needs to have the font, *not* the server. For example: 96 | 97 | docker run -d \ 98 | [...] \ 99 | -e RESOLUTION=1920,1080 \ 100 | -e FONT="Fira Sans, 14" \ 101 | ghcr.io/ansemjo/speedtest 102 | 103 | #### SPECIFIC TESTSERVER 104 | 105 | If you want to test against a specific server, you can give a `host:port` combination 106 | in the environment variable `TESTSERVER`. You can use the API at 107 | [www.speedtest.net/api/js/servers](https://www.speedtest.net/api/js/servers?&limit=10&search=) 108 | to pick a suitable `host` key from the JSON; supply a parameter for `?search=...` if you need to. 109 | By default it lists servers close to you. **Note** that this is *different* from the 110 | `SERVERID` used previously! But you can use `?id=...` to search for a specific ID. 
111 | 112 | For example, to test against wilhelm.tel in Norderstedt with the server ID 4087, you'd use: 113 | 114 | docker run -d \ 115 | [...] \ 116 | -e TESTSERVER=speedtest.wtnet.de:8080 \ 117 | ghcr.io/ansemjo/speedtest 118 | 119 | #### DISABLE WEBSERVER 120 | 121 | The webserver is a single-threaded Flask application and pipes the data to gnuplot in a subprocess, which may not be suitable 122 | for production usage. To disable the webserver completely set the `PORT` environment 123 | variable to an empty string. This will only take measurements and save them to the 124 | database. 125 | 126 | docker run -d -e PORT="" -v speedtests:/data ghcr.io/ansemjo/speedtest 127 | 128 | #### SHORTHAND COMMANDS 129 | 130 | To dump the results as CSV from a running container use the `dump` command: 131 | 132 | docker exec $containerid dump > results.csv 133 | 134 | To trigger a measurement manually use the `measure` command: 135 | 136 | docker exec $containerid measure 137 | 138 | To reimport a previous dump in a fresh container use `import`: 139 | 140 | docker exec -i $containerid import < results.csv 141 | 142 | This can also be used to import results obtained manually with `speedtest-cli`. 143 | 144 | ### PYTHON SCRIPT 145 | 146 | You can use the Python script by itself locally, too. First install the requirements: 147 | 148 | pip install -r requirements.txt 149 | 150 | Choose a database location and take any number of measurements: 151 | 152 | ./speedtest-plotter -d sqlite:///$PWD/measurements.db measure 153 | ... 154 | 155 | Then start the flask webserver to look at the results: 156 | 157 | TZ=Europe/Berlin ./speedtest-plotter -d sqlite:///$PWD/measurements.db serve 158 | 159 | ### GNUPLOT SCRIPT 160 | 161 | To keep things really simple, you can also take measurements manually with `speedtest-cli` and only 162 | plot an image with `gnuplot`. 
163 | 164 | The [`plotscript`](plotscript) expects the format that `speedtest-cli` outputs when using the `--csv` flag 165 | and a header line from `--csv-header`. To take some measurements manually with a simple sleep-loop: 166 | 167 | speedtest-cli --csv-header > results.csv 168 | while true; do speedtest-cli --csv | tee -a results.csv; sleep 600; done 169 | ^C 170 | 171 | Afterwards plot the results to an SVG picture with: 172 | 173 | gnuplot -c plotscript results.csv plot.svg 174 | 175 | ## BREITBANDMESSUNG 176 | 177 | If you're in Germany and you have found that your measured speed regularly does not meet minimum contractual obligations ("erhebliche, kontinuierliche oder regelmäßig wiederkehrende Abweichung bei der Geschwindigkeit") and your provider is not responsive to your complaints, you could use the [Breitbandmessung App](https://www.breitbandmessung.de/) as the next step. It helps you prepare a well-formatted measurement report, which you could use to file a complaint with the Bundesnetzagentur (BNetzA). 
178 | 179 | ## LICENSE 180 | 181 | Copyright (c) 2019 Anton Semjonov 182 | Licensed under the MIT License 183 | -------------------------------------------------------------------------------- /assets/example.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ansemjo/speedtest-plotter/ae2d209b314fab6c5a398a973d7ddc88756fcf19/assets/example.png -------------------------------------------------------------------------------- /assets/speedtest.service: -------------------------------------------------------------------------------- 1 | # Example unit file for ansemjo/speedtest using podman container runtime: 2 | 3 | [Unit] 4 | Description=Speedtest Container 5 | After=network.target 6 | 7 | [Service] 8 | Type=simple 9 | Restart=on-failure 10 | 11 | #ExecStartPre=-/usr/bin/podman pull ghcr.io/ansemjo/speedtest 12 | ExecStartPre=-/usr/bin/podman create --net host -v /etc/speedtest:/data --name speedtest ghcr.io/ansemjo/speedtest 13 | ExecStart=/usr/bin/podman start -a --sig-proxy speedtest 14 | 15 | [Install] 16 | WantedBy=multi-user.target 17 | 18 | -------------------------------------------------------------------------------- /container/Dockerfile: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 Anton Semjonov 2 | # Licensed under the MIT License 3 | 4 | # Check with hadolint: 5 | # $ docker run --rm -i hadolint/hadolint < Dockerfile 6 | 7 | # ---------- build taganaka/SpeedTest binary ---------- 8 | FROM alpine:3.22 as compiler 9 | 10 | # install build framework and libraries 11 | # hadolint ignore=DL3018 12 | RUN apk add --no-cache alpine-sdk cmake curl-dev libxml2-dev 13 | 14 | # configure and build binary 15 | WORKDIR /build 16 | RUN git clone https://github.com/taganaka/SpeedTest.git . \ 17 | && cmake \ 18 | -DCMAKE_BUILD_TYPE=Release \ 19 | -DCMAKE_CXX_FLAGS="-Wno-psabi" \ 20 | . 
\ 21 | && make 22 | 23 | # --------- build application container ---------- 24 | FROM alpine:3.22 as runtime 25 | 26 | # install necessary packages and fonts 27 | # hadolint ignore=DL3018 28 | RUN apk add --no-cache gnuplot ttf-droid libcurl libxml2 libstdc++ libgcc tini 29 | 30 | # install python3 31 | RUN apk add --no-cache python3 py3-pip 32 | 33 | # copy requirements file and install with pip 34 | COPY requirements.txt /requirements.txt 35 | # hadolint ignore=DL3018 36 | RUN apk add --no-cache --virtual build-deps python3-dev musl-dev gcc g++ postgresql-dev \ 37 | && apk add --no-cache postgresql-libs \ 38 | && pip install --no-cache-dir --break-system-packages -r /requirements.txt \ 39 | && apk del --purge build-deps 40 | 41 | # default cron interval 42 | ENV MINUTES="15" 43 | 44 | # listening port, set to empty string for no webserver 45 | ENV PORT="8000" 46 | 47 | # database uri (sqlalchemy uri) 48 | ENV DATABASE="sqlite:////data/speedtests.db" 49 | 50 | # copy built binary from first stage 51 | COPY --from=compiler /build/SpeedTest /usr/local/bin/SpeedTest 52 | 53 | # copy entrypoint and scripts 54 | WORKDIR /opt/speedtest-plotter 55 | ENV PATH="/opt/speedtest-plotter:${PATH}" 56 | COPY container/entrypoint.sh /entrypoint.sh 57 | COPY plotscript speedtest-plotter ./ 58 | 59 | # wrapper script to ease docker exec usage 60 | COPY container/wrapper /usr/local/bin/dump 61 | COPY container/wrapper /usr/local/bin/import 62 | COPY container/wrapper /usr/local/bin/measure 63 | 64 | # start with entrypoint which exec's crond 65 | ENTRYPOINT ["/sbin/tini", "-wg", "--", "/bin/ash", "/entrypoint.sh"] 66 | CMD ["cron"] 67 | -------------------------------------------------------------------------------- /container/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/ash 2 | 3 | # Copyright (c) 2019 Anton Semjonov 4 | # Licensed under the MIT License 5 | 6 | # if DATABASE is an SQLite uri, create base directory 7 | if
[[ ${DATABASE%%://*} == sqlite ]]; then 8 | mkdir -pv "$(dirname "${DATABASE##*:///}")" 9 | fi 10 | 11 | # check container cmd for cron entrypoint 12 | if [[ $1 == cron ]]; then 13 | 14 | # assemble cron schedule from env 15 | # either use '-e MINUTES=n' to run test every n minutes or 16 | # define the complete schedule part with '-e SCHEDULE=...' 17 | export SCHEDULE="${SCHEDULE:-"*/${MINUTES:-15} * * * *"}" 18 | 19 | # install crontab with schedule from env 20 | echo "${SCHEDULE} /opt/speedtest-plotter/speedtest-plotter measure" | crontab - 21 | 22 | # run server in background if WEBSERVER is truthy 23 | if [[ -n "${PORT}" ]]; then 24 | (cd /opt/speedtest-plotter/ && ./speedtest-plotter serve) & 25 | fi 26 | 27 | # start crontab for regular tests 28 | exec crond -f 29 | 30 | else 31 | 32 | # otherwise exec speedtest plotter with passed arguments 33 | exec speedtest-plotter "$@" 34 | 35 | fi 36 | -------------------------------------------------------------------------------- /container/wrapper: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ash 2 | # wrapper for speedtest-plotter to ease `docker exec` usage 3 | 4 | cmd="$(basename "$0")" 5 | if expr match "$cmd" "\(dump\|import\|measure\)" >/dev/null; then 6 | exec speedtest-plotter "$cmd" "$@" 7 | fi 8 | 9 | echo "unknown wrapper command: $cmd" >&2 10 | exit 1 11 | -------------------------------------------------------------------------------- /plotscript: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2019 Anton Semjonov 2 | # Licensed under the MIT License 3 | 4 | # general I/O and data options, find some stats 5 | set datafile separator "," missing NaN 6 | stats ARG1 using 6 name "PING" 7 | stats ARG1 using 7 name "DOWNLOAD" 8 | stats ARG1 using 8 name "UPLOAD" 9 | set xdata time 10 | set timefmt "%Y-%m-%dT%H:%M:%S.*Z" 11 | 12 | # set output resolution and font size 13 | # config with: gnuplot -e 'font 
= "Name, size"' -e 'resolution = 1920,1080' ... 14 | if (!exists("font")) font = ",12"; 15 | if (!exists("resolution")) resolution = "1280,800"; 16 | resX = substr(resolution, 1, strstrt(resolution, ",")); 17 | resY = substr(resolution, strstrt(resolution, ",") + 1, strlen(resolution)); 18 | set terminal svg enhanced font font size resX,resY dynamic 19 | set output ARG2 20 | set multiplot 21 | 22 | # manual margins for multiplot 23 | set bmargin 5 24 | set tmargin 4 25 | set lmargin 18 26 | set rmargin 12 27 | 28 | # pick colors 29 | rgb_download = "#00000080" 30 | rgb_download_fade = "#702185ff" 31 | rgb_download_text = "#00000080" 32 | rgb_upload = "#00ff3371" 33 | rgb_upload_fade = "#70ff3371" 34 | rgb_upload_text = "#00ff3371" 35 | rgb_ping = "#0070e000" 36 | 37 | # check if bandwidth markers are given and scale to mbits 38 | # config with: gnuplot -e 'marker_download = ' ... 39 | if (exists("marker_download") && marker_download > 0) \ 40 | marker_download = marker_download * 1000000; \ 41 | dlmarker = 1; else dlmarker = 0; 42 | if (exists("marker_upload") && marker_upload > 0) \ 43 | marker_upload = marker_upload * 1000000; \ 44 | ulmarker = 1; else ulmarker = 0; 45 | 46 | # scaling of upload plot relative to download range 47 | # config with: gnuplot -e 'upload_scale = ' ... 
48 | if (exists("upload_scale") && upload_scale > 0 && upload_scale != 1) \ 49 | ulscale = upload_scale; else ulscale = 1.0; 50 | 51 | # marker set for empty ranges 52 | if (!(exists("empty_range") && empty_range == 1)) \ 53 | empty_range = 0; 54 | 55 | # set specific xrange if bounds are given 56 | if (empty_range != 1 && exists("xrange_from") && exists("xrange_to")) \ 57 | set xrange [xrange_from:xrange_to]; 58 | 59 | # ---------- plot 1/3: ping and labels ---------- 60 | 61 | # enable all titles and labels that need to be plotted once 62 | set title "Speedtest Results\ngithub.com/ansemjo/speedtest-plotter" 63 | set timestamp "plotted %F %T %Z" 64 | set xlabel "Measurement Date" 65 | set ylabel "Bandwidth" offset -13, 0 66 | set xtics format "%Y-%m-%d\n%H:%M:%S" out 67 | set key right top 68 | 69 | # set upper ping range 70 | pingrange = PING_mean + (3 * PING_stddev) 71 | if (pingrange < 50) pingrange = 50; 72 | 73 | # --> plot ping 74 | unset ytics 75 | unset yrange 76 | set y2label "Ping" 77 | set y2tics format "%.0f ms" nomirror out 78 | set y2range [0:pingrange] 79 | set linetype 1 lw 1 lc rgb rgb_ping pt 1 80 | plot \ 81 | keyentry title " ", \ 82 | keyentry title " ", \ 83 | ARG1 using 4:6 title gprintf("Ping (avg. 
%.0f ms)", PING_mean) axes x1y2 with steps lt 1 84 | 85 | # unset elements that would be duplicate 86 | unset title 87 | unset timestamp 88 | unset xlabel 89 | unset ylabel 90 | unset y2label 91 | unset y2tics 92 | set xtics format "" out 93 | set grid 94 | 95 | # ---------- plot 2/3: download ---------- 96 | 97 | # if download marker is given, use it for yrange 98 | if (dlmarker) \ 99 | set arrow from graph 0,first marker_download to graph 1,first marker_download \ 100 | nohead lw 2 dt "-" lc rgb rgb_download_text back; \ 101 | set yrange [0:marker_download*1.5]; \ 102 | else set yrange [0:*] 103 | 104 | # mark the average 105 | set arrow from graph 0,first DOWNLOAD_mean to graph 1,first DOWNLOAD_mean \ 106 | nohead lw 1 dt "-" lc rgb rgb_download_fade back; 107 | 108 | # find a sensible tick interval manually 109 | if (dlmarker) ymax = marker_download * 1.2 / 1000000; \ 110 | else ymax = DOWNLOAD_max / 1000000; 111 | # scale interval into appropriate magnitude 112 | interval = 1000000; 113 | while (ymax >= 100) { 114 | ymax = ymax / 10; 115 | interval = interval * 10; 116 | } 117 | if (ymax / 2 < 10) interval = interval * 2; else \ 118 | if (ymax / 5 < 10) interval = interval * 5; else \ 119 | if (ymax / 10 < 10) interval = interval * 10; 120 | set ytics format "%.2s %cBit/s" nomirror out 121 | 122 | # maybe split tick labels 123 | if (ulscale != 1.0) set ytics offset 0,0.4 interval tc rgb rgb_download_text; 124 | 125 | # --> plot download 126 | set linetype 1 lw 1 lc rgb rgb_download_fade 127 | set linetype 2 lw 3 lc rgb rgb_download 128 | plot \ 129 | ARG1 using 4:7 notitle with steps, \ 130 | ARG1 using 4:7 title gprintf("Download (avg. 
%.2s %cBit/s)", DOWNLOAD_mean) sm bezier 131 | 132 | # ---------- plot 3/3: upload ---------- 133 | 134 | # add warning if no data was in range 135 | if (empty_range) \ 136 | set label "NO DATA IN RANGE" at graph 0.5,0.5 center font "default,20" 137 | 138 | # if upload marker is given, use it for yrange 139 | if (ulmarker) \ 140 | set arrow from graph 0,first marker_upload to graph 1,first marker_upload \ 141 | nohead lw 2 dt "-" lc rgb rgb_upload_text back; 142 | set yrange [0:GPVAL_Y_MAX/ulscale] 143 | 144 | # mark the average 145 | set arrow from graph 0,first UPLOAD_mean to graph 1,first UPLOAD_mean \ 146 | nohead lw 1 dt "-" lc rgb rgb_upload_fade back; 147 | 148 | # either split tick labels or simply unset 149 | if (ulscale != 1.0) \ 150 | set ytics offset 0,-0.4 interval/ulscale textcolor rgb rgb_upload_text; \ 151 | else unset ytics; 152 | 153 | # --> plot upload 154 | set linetype 1 lw 1 lc rgb rgb_upload_fade 155 | set linetype 2 lw 3 lc rgb rgb_upload 156 | plot \ 157 | keyentry title " ", \ 158 | ARG1 using 4:8 notitle with steps, \ 159 | ARG1 using 4:8 title gprintf("Upload (avg. 
%.2s %cBit/s)", UPLOAD_mean) sm bezier 160 | -------------------------------------------------------------------------------- /requirements.in: -------------------------------------------------------------------------------- 1 | python-dateutil 2 | dataset 3 | psycopg2 4 | flask 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file is autogenerated by pip-compile with Python 3.11 3 | # by the following command: 4 | # 5 | # pip-compile --strip-extras requirements.in 6 | # 7 | alembic==1.13.0 8 | # via dataset 9 | banal==1.0.6 10 | # via dataset 11 | blinker==1.9.0 12 | # via flask 13 | click==8.1.7 14 | # via flask 15 | dataset==1.6.2 16 | # via -r requirements.in 17 | flask==3.1.1 18 | # via -r requirements.in 19 | greenlet==3.0.2 20 | # via sqlalchemy 21 | itsdangerous==2.2.0 22 | # via flask 23 | jinja2==3.1.6 24 | # via flask 25 | mako==1.3.0 26 | # via alembic 27 | markupsafe==2.1.3 28 | # via 29 | # flask 30 | # jinja2 31 | # mako 32 | # werkzeug 33 | psycopg2==2.9.10 34 | # via -r requirements.in 35 | python-dateutil==2.9.0.post0 36 | # via -r requirements.in 37 | six==1.16.0 38 | # via python-dateutil 39 | sqlalchemy==1.4.50 40 | # via 41 | # alembic 42 | # dataset 43 | typing-extensions==4.9.0 44 | # via alembic 45 | werkzeug==3.1.3 46 | # via flask 47 | -------------------------------------------------------------------------------- /speedtest-plotter: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Copyright (c) 2019 Anton Semjonov 4 | # Licensed under the MIT License 5 | 6 | import subprocess, csv, json, tempfile, sys, argparse, os, io, signal, re, datetime, logging, time 7 | import dataset 8 | from dateutil import parser as dateparser, tz 9 | from urllib.parse import urlparse 10 | 11 | # default / environment variables 12 | DATABASE = 
os.environ.get("DATABASE", "sqlite:///speedtest.db")
PORT = os.environ.get("PORT", "8000")
SERVER = os.environ.get("TESTSERVER", None)
MARKER_DOWNLOAD = os.environ.get("MARKER_DOWNLOAD", 0)
MARKER_UPLOAD = os.environ.get("MARKER_UPLOAD", 0)
UPLOAD_SCALE = os.environ.get("UPLOAD_SCALE", 1)
FETCH_LIMIT = os.environ.get("FETCH_LIMIT", "7d")
PLOT_FONT = os.environ.get("FONT", ",12")
PLOT_RESOLUTION = os.environ.get("RESOLUTION", "1280,800")

# commandline parser; environment variables above only provide the defaults
p = argparse.ArgumentParser()
p.add_argument("-d", dest="database", help="database connection uri", default=DATABASE)
p.add_argument("-p", dest="port", help="port to run application server on", default=PORT)
p.add_argument("-s", dest="server", help="take measurements against specific server:port", default=SERVER)
p.add_argument("-l", dest="limit", help="limit number of results when dumping", default=32768)
p.add_argument("-o", dest="order", help="order retrieval, desc/before or asc/after the date", choices=("asc", "desc"), default="desc")
p.add_argument("-t", dest="date", help="dump up to or after this iso timestamp", default="now")
p.add_argument("--marker-download", help="add expected download marker in plot", default=MARKER_DOWNLOAD)
p.add_argument("--marker-upload", help="add expected upload marker in plot", default=MARKER_UPLOAD)
p.add_argument("--upload-scale", help="scale upload yrange relative to download", default=UPLOAD_SCALE)
p.add_argument("--fetch-limit", help="default time limit to fetch for display", default=FETCH_LIMIT)
p.add_argument("--font", help="use different font for plot (Name,size)", default=PLOT_FONT)
p.add_argument("--resolution", help="change svg output resolution (resx,resy)", default=PLOT_RESOLUTION)
p.add_argument("--debug", help="enable flask and sql debugging", action="store_true")
p.add_argument("command", choices=["serve", "measure", "dump", "import"],
    help="start webserver, take a measurement, dump results to csv or reimport csv data")
args = p.parse_args()

# print sql engine messages when debugging
if args.debug:
    sqllog = logging.StreamHandler()
    sqllog.setFormatter(logging.Formatter("SQL Engine> %(message)s"))
    engine = logging.getLogger('sqlalchemy.engine')
    engine.setLevel(logging.INFO)
    engine.addHandler(sqllog)

# if database argument has no dialect, assume sqlite
if re.match(r"^\w+://", args.database) is None:
    args.database = "sqlite:///" + args.database

# connect to database
db = dataset.connect(args.database, sqlite_wal_mode=False)
table = db["speedtest"]

# handle ctrl-c: close the database cleanly before exiting
# NOTE(fix): the second handler parameter was previously named `time`, which
# shadowed the imported `time` module inside the handler's scope
def quit(signum, frame):
    db.close()
    print(" quit.")
    sys.exit(0)
signal.signal(signal.SIGINT, quit)

# run a subcommand and capture its output
def run(cmd, stdin=None):
    return subprocess.run(cmd, capture_output=True, stdin=stdin)

# return current utc time, truncated to whole seconds
def utcnow():
    return datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0)

# get local timezone from TZ env
localtz = tz.gettz()

# transform a timestamp string (assume utc if unqualified) to local timezone (given in TZ env)
def isoformat_to_local(then):
    try:
        t = datetime.datetime.fromisoformat(then)
    except ValueError:
        # fall back to the more lenient dateutil parser for non-iso inputs
        t = dateparser.parse(then)
    if t.tzinfo is None:
        t = t.replace(tzinfo=datetime.timezone.utc)
    return t.astimezone(localtz)

# format a datetime (assume utc if naive) as a local-timezone iso string for gnuplot xrange
def pltrange(t):
    if t.tzinfo is None:
        t = t.replace(tzinfo=datetime.timezone.utc)
    return t.astimezone(localtz).replace(microsecond=0).isoformat()

# parse a possibly unqualified timestamp to utc for searching;
# naive inputs are interpreted in the local timezone
def parse_to_utc(then):
    try:
        if then == "now":
            t = utcnow()
        else:
            t = dateparser.parse(then)
    except Exception as e:
        raise ValueError(f"cannot parse iso date: {e}")
    if t.tzinfo is None:
        t = t.replace(tzinfo=localtz)
    return t.astimezone(datetime.timezone.utc)

# column names in speedtest-cli csv output
FIELDNAMES = ("Server ID", "Sponsor", "Server Name", "Timestamp", "Distance",
    "Ping", "Download", "Upload", "Share", "IP Address")

# custom exception when SpeedTest had an error
class SpeedTestError(Exception): pass

# parse the json output of taganaka/SpeedTest into a row dict keyed by FIELDNAMES
def parse_measurement(js, now=None):
    # NOTE(fix): the default was previously `now=utcnow().isoformat()`, which
    # python evaluates once at import time -- callers relying on the default
    # would have recorded the process start time for every measurement.
    # Resolve the default per call instead.
    if now is None:
        now = utcnow().isoformat()
    if (err := js.get("error")) is not None:
        raise SpeedTestError(err)
    server, client = js.get("server", {}), js.get("client", {})
    return dict(zip(FIELDNAMES, (
        server.get("host", ""),
        server.get("sponsor", ""),
        server.get("name", ""),
        now,
        server.get("distance", ""),
        js["ping"],
        js["download"],
        js["upload"],
        js.get("share", ""),
        client.get("ip", ""),
    )))

# take a new measurement with the SpeedTest binary and store it in the table;
# retries up to 3 times when the server list endpoint was unavailable
def take_measurement(attempt=0):
    cmd = ["SpeedTest", "--output", "json"]
    if args.server:
        cmd += ["--test-server", args.server]
    now = utcnow().isoformat()
    res = run(cmd)
    if res.returncode != 0:
        stderr = res.stderr.decode()
        if "download server list" in stderr and attempt < 3:
            # silently retry when server list endpoint was unavailable
            time.sleep(15)
            return take_measurement(attempt+1)
        else:
            raise SpeedTestError(stderr)
    r = parse_measurement(json.loads(res.stdout), now)
    table.insert(r)
    print(r)

# parse csv results from speedtest-cli format to dict generator, skipping header lines
def csv_reader(reader):
    cr = csv.DictReader(reader, fieldnames=FIELDNAMES)
    for line in cr:
        if list(line.values()) == list(FIELDNAMES):
            continue # skip header line
        yield line

# output stored database rows to writer as speedtest-cli csv with header
def csv_writer(rows, writer): 156 | wr = csv.DictWriter(writer, fieldnames=FIELDNAMES) 157 | wr.writeheader() 158 | for row in rows: #sorted(rows, key=lambda r: r["id"]): 159 | try: del row["id"] 160 | except: pass 161 | row["Timestamp"] = isoformat_to_local(row["Timestamp"]).isoformat() 162 | wr.writerow(row) 163 | return writer 164 | 165 | # write retrieved measurements to a spooled temporary file 166 | def spooler(rows): 167 | spool = tempfile.SpooledTemporaryFile(mode="wt+") 168 | csv_writer(rows, spool) 169 | spool.seek(0) 170 | return spool 171 | 172 | # retrieve only the latest measurement 173 | def latest(): 174 | return table.find_one(order_by=["-Timestamp"]) 175 | 176 | # retrieve measurements from table applying optional filters 177 | def retrieve_measurements(limit, order, date, **rest): 178 | delta = None 179 | ascend = True 180 | 181 | # check ordering argument 182 | if order == "asc": 183 | ascend = True 184 | elif order == "desc": 185 | ascend = False 186 | else: 187 | raise ValueError("order must be either \"asc\" or \"desc\"") 188 | 189 | # fill date if none given 190 | if date is None or date == "": 191 | date = parse_to_utc(latest()["Timestamp"]) or utcnow() 192 | else: 193 | date = parse_to_utc(date) 194 | 195 | # maybe interpret limit as a timeframe 196 | if match := re.match(r"^([0-9]+)([dh]?)$", str(limit)): 197 | num, mod = match.groups() 198 | if mod == "d": delta = datetime.timedelta(days=int(num)) 199 | elif mod == "h": delta = datetime.timedelta(hours=int(num)) 200 | else: limit = int(num) 201 | else: 202 | raise ValueError("unknown limit format! 
must match: /^([0-9]+)([dh]?)$/") 203 | 204 | if delta: 205 | if ascend: 206 | pr = (pltrange(date), pltrange(date+delta)) 207 | return pr, table.find(Timestamp={'>=': date.isoformat(), '<=': (date+delta).isoformat()}, order_by=["-Timestamp"]) 208 | else: 209 | pr = (pltrange(date-delta), pltrange(date)) 210 | return pr, table.find(Timestamp={'>=': (date-delta).isoformat(), '<=': date.isoformat()}, order_by=["-Timestamp"]) 211 | else: 212 | if ascend: 213 | return None, table.find(Timestamp={'>=': date.isoformat()}, order_by=["Timestamp"], _limit=limit) 214 | else: 215 | return None, table.find(Timestamp={'<=': date.isoformat()}, order_by=["-Timestamp"], _limit=limit) 216 | 217 | 218 | def flask_server(): 219 | 220 | # initialize flask 221 | import flask 222 | print(" * Connected database: {}".format(args.database)) 223 | app = flask.Flask("speedtest", root_path="./") 224 | app.env = "development" 225 | 226 | # retrieve common request parameters for retrieval and plotting 227 | def request_params(req): 228 | # get interesting parameters 229 | limit = req.args.get("limit", None) or args.fetch_limit 230 | order = req.args.get("order", None) or "desc" 231 | date = req.args.get("date", None) or "now" 232 | # save the entire query component to pass on 233 | query = urlparse(req.url).query 234 | return (query, dict(limit=limit, order=order, date=date)) 235 | 236 | # scale number as mbit/s 237 | @app.template_filter("mbits") 238 | def to_mbits(d, fact=1_000_000, unit="MBit/s"): 239 | scaled = float(d) / fact 240 | return f"{scaled:.2f} {unit}" 241 | 242 | # apply isoformat_to_local to timestamps 243 | @app.template_filter("datefmt") 244 | def to_local_datefmt(t): 245 | return isoformat_to_local(t).replace(microsecond=0).strftime("%F %T %Z") 246 | 247 | # homepage with rendered results 248 | @app.route("/") 249 | def home(): 250 | try: 251 | (query, params) = request_params(flask.request) 252 | # dummy check if all parameters are legal 253 | _, rows = 
retrieve_measurements(**params) 254 | rows = list(rows) 255 | last = rows[0] if len(rows) else None 256 | total = len(table) 257 | 258 | TEMPLATE = """ 259 | 260 | 261 | 262 | 263 | Speedtest Plot 264 | 288 | 289 | 290 |
291 |
292 | Show 293 | 294 | 298 | 299 | 300 |
301 | 302 | 303 | 304 | {% if last is not none %} 305 |

306 | 307 | 308 | 309 | 310 | 311 | 312 | 313 | 314 | 315 | 316 | 317 | 318 | 319 | 320 | 321 | 322 | 323 | 324 | 325 | 326 | 327 | 328 |
Latest Measurement in View
Timestamp {{ last['Timestamp'] | datefmt }}
Server {{ last['Server ID'] }}
Download {{ last['Download'] | mbits }}
Upload {{ last['Upload'] | mbits }}
Ping {{ "%d" | format( last['Ping'] | int ) }} ms
329 |

330 | {% endif %} 331 |
332 | 333 | 334 | """ 335 | return flask.render_template_string(TEMPLATE, total=total, query=query, last=last, **params) 336 | 337 | except Exception as e: 338 | err = str(e) 339 | if "no such table: speedtest" in err: 340 | err = "No measurements taken yet!\nPlease wait for cron to trigger a measurement or start one yourself with 'speedtest-plotter measure'.\n\n" + err 341 | print(err) 342 | return flask.Response(err, mimetype="text/plain", status=500) 343 | 344 | # return rendered plot picture 345 | @app.route("/results.svg") 346 | def render(): 347 | try: 348 | _, params = request_params(flask.request) 349 | pr, rows = retrieve_measurements(**params) 350 | rows = list(rows) 351 | empty = False 352 | if len(rows) <= 0: 353 | # spoof a "null" row so gnuplot doesn't complain 354 | spoof = dict(([k,"0"] for k in list(FIELDNAMES) + ["id"])) 355 | spoof["Timestamp"] = "1970-01-01" 356 | rows = [spoof] 357 | empty = True 358 | data = spooler(rows) 359 | #return flask.Response(data, mimetype="text/plain") 360 | pr = ["-e", f"xrange_from = \"{pr[0]}\"", "-e", f"xrange_to = \"{pr[1]}\""] if pr else [] 361 | empty = ["-e", "empty_range = 1"] if empty else [] 362 | plot = run(["gnuplot", *pr, *empty, 363 | # TODO: proper escaping to prevent injection 364 | "-e", f"marker_download = {args.marker_download}", 365 | "-e", f"marker_upload = {args.marker_upload}", 366 | "-e", f"upload_scale = {args.upload_scale}", 367 | "-e", f"font = \"{args.font}\"", 368 | "-e", f"resolution = \"{args.resolution}\"", 369 | "-c", "plotscript", "/dev/stdin", "/dev/stdout"], stdin=data) 370 | data.close() 371 | if plot.returncode != 0: 372 | raise ValueError(plot.stderr.decode()) 373 | return flask.Response(plot.stdout, mimetype="image/svg+xml") 374 | except Exception as e: 375 | print(e, file=sys.stderr) 376 | return flask.Response(str(e), mimetype="text/plain", status=500) 377 | 378 | # return raw data in formatted csv 379 | @app.route("/results.csv") 380 | def results(): 381 | try: 382 | _, 
params = request_params(flask.request) 383 | _, rows = retrieve_measurements(**params) 384 | data = spooler(rows) 385 | return flask.Response(data, mimetype="text/plain") 386 | except Exception as e: 387 | return flask.Response(str(e), mimetype="text/plain", status=500) 388 | 389 | app.run(threaded=False, port=args.port, host="0.0.0.0", debug=args.debug) 390 | 391 | # ---------- main() ---------- 392 | try: 393 | 394 | if args.command == "serve": 395 | flask_server() 396 | 397 | elif args.command == "measure": 398 | take_measurement() 399 | 400 | elif args.command == "import": 401 | for m in csv_reader(sys.stdin): 402 | table.insert(m) 403 | print(m) 404 | 405 | elif args.command == "dump": 406 | _, rows = retrieve_measurements(args.limit, args.order, args.date) 407 | csv_writer(rows, sys.stdout) 408 | 409 | else: 410 | raise ValueError("unknown command: " + args.command) 411 | 412 | except Exception as e: 413 | print(e) 414 | exit(1) 415 | --------------------------------------------------------------------------------