├── .bra.toml ├── .circleci └── config.yml ├── .editorconfig ├── .gitignore ├── .gitmodules ├── .hooks └── pre-commit ├── CHANGELOG.md ├── LICENSE.md ├── Makefile ├── README.md ├── apiary.apib ├── conf ├── defaults.ini └── sample.ini ├── go.mod ├── go.sum ├── main.go ├── pkg ├── alerting │ ├── alerting_eval_test.go │ ├── alerting_executor_test.go │ ├── check_state.go │ ├── executor.go │ ├── init.go │ ├── jobqueue │ │ ├── jobqueue.go │ │ ├── jobqueue_test.go │ │ ├── pubsub.go │ │ └── pubsub_test.go │ ├── offset.go │ └── schedule.go ├── api │ ├── admin.go │ ├── admin_test.go │ ├── api.go │ ├── api_test.go │ ├── collector.go │ ├── elasticsearch.go │ ├── endpoint.go │ ├── graphite.go │ ├── graphite_test.go │ ├── monitor.go │ ├── quota.go │ ├── rbody │ │ └── rbody.go │ ├── socketio.go │ ├── socketio_test.go │ ├── sockets │ │ ├── probe.go │ │ └── sockets.go │ ├── v1_test.go │ ├── v2_endpoint.go │ ├── v2_probe.go │ └── v2_test.go ├── cmd │ └── web.go ├── elasticsearch │ └── elasticsearch.go ├── events │ ├── endpoint.go │ ├── events.go │ ├── events_test.go │ ├── probe.go │ └── pubsub.go ├── log │ ├── console.go │ ├── file.go │ ├── log.go │ └── syslog.go ├── middleware │ ├── middleware.go │ └── quota.go ├── models │ ├── alert_scheduler_value.go │ ├── alerting.go │ ├── emails.go │ ├── endpoint.go │ ├── errors.go │ ├── monitor.go │ ├── probe.go │ ├── quotas.go │ ├── usage.go │ └── v1Api.go ├── services │ ├── endpointdiscovery │ │ └── endpointdiscovery.go │ ├── interfaces.go │ ├── notifications │ │ ├── email.go │ │ ├── mailer.go │ │ └── notifications.go │ └── sqlstore │ │ ├── alert_scheduler_value.go │ │ ├── endpoint.go │ │ ├── endpoint_test.go │ │ ├── migrations │ │ ├── alert_scheduler_value.go │ │ ├── check.go │ │ ├── common.go │ │ ├── endpoint_mig.go │ │ ├── migrations.go │ │ ├── migrations_test.go │ │ ├── probe.go │ │ └── quota_mig.go │ │ ├── migrator │ │ ├── column.go │ │ ├── conditions.go │ │ ├── dialect.go │ │ ├── migrations.go │ │ ├── migrator.go │ │ ├── mysql_dialect.go │ │ ├── postgres_dialect.go │ │ ├── sqlite_dialect.go │ │ └── types.go │ │ ├── monitor_test.go │ │ ├── probe.go │ │ ├── probe_test.go │ │ ├── quota.go │ │ ├── quota_test.go │ │ ├── shared.go │ │ ├── sqlstore.go │ │ ├── sqlstore.goconvey │ │ ├── sqlutil │ │ └── sqlutil.go │ │ ├── usage.go │ │ └── usage_test.go ├── setting │ ├── alerting.go │ ├── kafka_settings.go │ ├── setting.go │ ├── setting_quota.go │ ├── setting_smtp.go │ └── setting_test.go └── util │ ├── remoteIp.go │ └── time.go ├── public ├── emails │ ├── alerting_notification.html │ ├── invited_to_org.html │ ├── new_user_invite.html │ ├── reset_password.html │ └── welcome_on_signup.html └── robots.txt ├── scripts ├── Dockerfile ├── build.sh ├── build_docker.sh ├── deploy_docker.sh ├── docker-compose.yml └── docker │ ├── entrypoint.sh │ └── worldping-api.ini └── tests └── config-files └── override.ini /.bra.toml: -------------------------------------------------------------------------------- 1 | [run] 2 | init_cmds = [ 3 | ["go", "build", "-o", "./bin/worldping-api"], 4 | ["./bin/worldping-api"] 5 | ] 6 | watch_all = true 7 | watch_dirs = [ 8 | "$WORKDIR/pkg", 9 | "$WORKDIR/conf", 10 | ] 11 | watch_exts = [".go", ".ini", ".toml", ".html"] 12 | build_delay = 1500 13 | cmds = [ 14 | ["godep", "go", "build", "-o", "./bin/worldping-api"], 15 | ["./bin/worldping-api"] 16 | ] 17 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 
2 2 | jobs: 3 | build: 4 | docker: 5 | - image: circleci/golang:1.13.5 6 | steps: 7 | - checkout 8 | - run: scripts/build.sh 9 | - persist_to_workspace: 10 | root: . 11 | paths: 12 | - build 13 | 14 | test: 15 | docker: 16 | - image: circleci/golang:1.13.5 17 | steps: 18 | - checkout 19 | - run: test -z "$(gofmt -s -l ./ | tee /dev/stderr)" 20 | - run: go test -race ./... 21 | - run: go vet ./... 22 | 23 | deploy: 24 | docker: 25 | - image: circleci/golang:1.13.5 26 | steps: 27 | - checkout 28 | - attach_workspace: 29 | at: . 30 | - setup_remote_docker 31 | - run: 32 | name: build docker image 33 | command: scripts/build_docker.sh 34 | - deploy: 35 | command: | 36 | if [ "${CIRCLE_BRANCH}" == "master" ]; then 37 | docker login -u $DOCKER_USER -p $DOCKER_PASS 38 | scripts/deploy_docker.sh 39 | fi 40 | 41 | workflows: 42 | version: 2 43 | build_accept_deploy: 44 | jobs: 45 | - build 46 | - test 47 | - deploy: 48 | requires: 49 | - build 50 | - test 51 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # http://editorconfig.org 2 | root = true 3 | 4 | [*] 5 | indent_style = space 6 | indent_size = 2 7 | charset = utf-8 8 | trim_trailing_whitespace = true 9 | insert_final_newline = true 10 | 11 | [*.md] 12 | trim_trailing_whitespace = false 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | coverage/ 3 | .aws-config.json 4 | awsconfig 5 | /dist 6 | /emails/dist 7 | /public_gen 8 | /tmp 9 | 10 | # Editor junk 11 | *.sublime-workspace 12 | *.swp 13 | .idea/ 14 | *.iml 15 | 16 | /data/* 17 | /bin/* 18 | 19 | conf/custom* 20 | fig.yml 21 | profile.cov 22 | 23 | build/ 24 | .notouch 25 | 26 | worldping-api 27 | .notouch 28 | 29 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/raintank/worldping-api/0bdedc6dd6302c2b0d7f8b10ec0a040ffd5c6928/.gitmodules -------------------------------------------------------------------------------- /.hooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | test -z "$(gofmt -s -l . | grep -v Godeps/_workspace/src/ | tee /dev/stderr)" 4 | if [ $? -gt 0 ]; then 5 | echo "Some files aren't formatted, please run 'go fmt ./pkg/...' to format your source code before committing" 6 | exit 1 7 | fi 8 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 0.0.1 (2016-03-31) 2 | 3 | Beta Release 4 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2015-2016 Raintank Inc. 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); you 4 | may not use this file except in compliance with the License. 
You may 5 | obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | implied. See the License for the specific language governing 13 | permissions and limitations under the License. 14 | 15 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: test build docker all 2 | 3 | clean: 4 | rm -rf build/worldping-api 5 | 6 | default: 7 | $(MAKE) all 8 | 9 | test: 10 | bash -c "./scripts/test.sh $(TEST)" 11 | 12 | all: docker 13 | 14 | build: build/worldping-api 15 | 16 | build/worldping-api: 17 | bash -c "./scripts/build.sh" 18 | 19 | docker: build 20 | bash -c "./scripts/build_docker.sh" 21 | 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### Notice: worldPing has reached End-Of-Life (EOL) on April 1, 2021 2 | 3 | Everything you love about worldPing is now available with the new synthetic monitoring feature in Grafana Cloud, plus you’ll get reduced complexity and all the benefits of Grafana Cloud. 4 | 5 | Take the first step to get started with synthetic monitoring by signing up for a Grafana Cloud account. [Get started for free.](https://go2.grafana.com/worldPing-EOL-grafana-cloud.html?pg=plugins-wp&plcmt=body-txt) 6 | 7 | 8 | [worldPing](https://worldping.raintank.io) [![Circle CI](https://circleci.com/gh/raintank/worldping-api.svg?style=shield)](https://circleci.com/gh/raintank/worldping-api) 9 | ================ 10 | [Website](https://worldping.raintank.io) | 11 | [Twitter](https://twitter.com/raintankSaaS) | 12 | [Slack](https://raintank.slack.com) | 13 | [Email](mailto:hello@raintank.io) 14 | 15 | 16 | Worldping-api is the backend service for the worldPing-app available from [grafana.net](https://grafana.net/plugins/raintank-worldping-app) 17 | 18 | Everything that Worldping is capable of doing is exposed through a REST based API. Documentation for this API is available at 19 | [API Docs](http://docs.worldping.apiary.io/#) 20 | -------------------------------------------------------------------------------- /conf/defaults.ini: -------------------------------------------------------------------------------- 1 | ##################### WorldpingApi Configuration Defaults ##################### 2 | # all grafana instances in your environment should have a unique instance_id 3 | instance_id = default 4 | app_mode = development 5 | 6 | #################################### Paths #################################### 7 | [paths] 8 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) 9 | # note: must be unique if you run multiple grafana processes on the same machine. 
10 | data = data 11 | 12 | # Directory where grafana can store logs 13 | # 14 | logs = data/log 15 | 16 | #################################### Server #################################### 17 | [server] 18 | # Protocol (http or https) 19 | protocol = http 20 | 21 | # The ip address to bind to, empty will bind to all interfaces 22 | http_addr = 23 | 24 | # The http port to use 25 | http_port = 3000 26 | 27 | # Log web requests 28 | router_logging = false 29 | 30 | root_url = %(protocol)s://worldping-api:%(http_port)s/ 31 | 32 | static_root_path = public 33 | 34 | # enable gzip 35 | enable_gzip = false 36 | 37 | # https certs & key file 38 | cert_file = 39 | cert_key = 40 | 41 | admin_key = changeme 42 | 43 | #################################### Database #################################### 44 | [database] 45 | # Either "mysql", "postgres" or "sqlite3", it's your choice 46 | type = sqlite3 47 | host = 127.0.0.1:3306 48 | name = grafana 49 | user = root 50 | password = 51 | 52 | # For "postgres" only, either "disable", "require" or "verify-full" 53 | ssl_mode = disable 54 | 55 | # For "sqlite3" only, path relative to data_path setting 56 | path = worldping-api.db 57 | 58 | #################################### SMTP / Emailing ########################## 59 | [smtp] 60 | enabled = false 61 | host = localhost:25 62 | user = 63 | password = 64 | cert_file = 65 | key_file = 66 | skip_verify = false 67 | from_address = admin@grafana.localhost 68 | 69 | [emails] 70 | templates_pattern = emails/*.html 71 | 72 | #################################### Logging ########################## 73 | [log] 74 | # Either "console", "file", default is "console" 75 | # Use comma to separate multiple modes, e.g. "console, file" 76 | mode = console, file 77 | 78 | # Buffer length of channel, keep it as it is if you don't know what it is. 79 | buffer_len = 10000 80 | 81 | # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" 82 | level = Info 83 | 84 | # For "console" mode only 85 | [log.console] 86 | level = 87 | # Set formatting to "false" to disable color formatting of console logs 88 | formatting = false 89 | 90 | # For "file" mode only 91 | [log.file] 92 | level = 93 | # This enables automated log rotate(switch of following options), default is true 94 | log_rotate = true 95 | 96 | # Max line number of single file, default is 1000000 97 | max_lines = 1000000 98 | 99 | # Max size shift of single file, default is 28 means 1 << 28, 256MB 100 | max_lines_shift = 28 101 | 102 | # Segment log daily, default is true 103 | daily_rotate = true 104 | 105 | # Expired days of log file(delete after max days), default is 7 106 | max_days = 7 107 | 108 | [raintank] 109 | graphite_url = http://graphite-api:8888/ 110 | elasticsearch_url = http://localhost:9200/ 111 | tsdb_url = http://tsdb-gw/ 112 | 113 | [telemetry] 114 | stats_enabled = false 115 | stats_addr = localhost:2003 116 | stats_prefix = worldping.worldping-api.stats.default.$hostname 117 | stats_interval = 10 118 | stats_timeout = 10s 119 | stats_buffer_size = 20000 120 | 121 | [kafka] 122 | enabled = false 123 | brokers = localhost:9092 124 | topic = worldping 125 | 126 | [quota] 127 | enabled = false 128 | 129 | # limit number of endpoints per Org. 130 | org_endpoint = 10 131 | 132 | # limit number of collectors per Org. 
133 | org_probe = 10 134 | 135 | # global limit of endpoints 136 | global_endpoint = -1 137 | 138 | # global limit of collectors 139 | global_probe = -1 140 | 141 | #################################### Alerting ########################## 142 | [alerting] 143 | enabled = false 144 | distributed = false 145 | topic = worldping-alerts 146 | tickqueue_size = 20 147 | internal_jobqueue_size = 1000 148 | executor_lru_size = 10000 149 | enable_scheduler = true 150 | enable_worker = true 151 | graphite_url = http://graphite-api:8888/ -------------------------------------------------------------------------------- /conf/sample.ini: -------------------------------------------------------------------------------- 1 | ##################### WorldpingApi Configuration Example ##################### 2 | # 3 | # Everything has defaults so you only need to uncomment things you want to 4 | # change 5 | 6 | # possible values : production, development 7 | ; app_mode = production 8 | 9 | #################################### Paths #################################### 10 | [paths] 11 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) 12 | # 13 | ;data = /var/lib/grafana 14 | # 15 | # Directory where grafana can store logs 16 | # 17 | ;logs = /var/log/grafana 18 | 19 | #################################### Server #################################### 20 | [server] 21 | # Protocol (http or https) 22 | ;protocol = http 23 | 24 | # The ip address to bind to, empty will bind to all interfaces 25 | ;http_addr = 26 | 27 | # The http port to use 28 | ;http_port = 3000 29 | 30 | # The full public facing url 31 | ;root_url = %(protocol)s://%(domain)s:%(http_port)s/ 32 | 33 | # Log web requests 34 | ;router_logging = false 35 | 36 | # the path relative to the working directory 37 | ;static_root_path = public 38 | 39 | # enable gzip 40 | ;enable_gzip = false 41 | 42 | # https certs & key file 43 | ;cert_file = 44 | ;cert_key = 45 | 46 | ;admin_key = changeme 47 | 48 | #################################### Database #################################### 49 | [database] 50 | # Either "mysql", "postgres" or "sqlite3", it's your choice 51 | ;type = sqlite3 52 | ;host = 127.0.0.1:3306 53 | ;name = grafana 54 | ;user = root 55 | ;password = 56 | 57 | # For "postgres" only, either "disable", "require" or "verify-full" 58 | ;ssl_mode = disable 59 | 60 | # For "sqlite3" only, path relative to data_path setting 61 | ;path = grafana.db 62 | 63 | #################################### SMTP / Emailing ########################## 64 | [smtp] 65 | ;enabled = false 66 | ;host = localhost:25 67 | ;user = 68 | ;password = 69 | ;cert_file = 70 | ;key_file = 71 | ;skip_verify = false 72 | ;from_address = admin@grafana.localhost 73 | 74 | [emails] 75 | ;welcome_email_on_sign_up = false 76 | 77 | #################################### Logging ########################## 78 | [log] 79 | # Either "console", "file", default is "console" 80 | # Use comma to separate multiple modes, e.g. "console, file" 81 | ;mode = console, file 82 | 83 | # Buffer length of channel, keep it as it is if you don't know what it is. 
84 | ;buffer_len = 10000 85 | 86 | # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" 87 | ;level = Info 88 | 89 | # For "console" mode only 90 | [log.console] 91 | ;level = 92 | 93 | # For "file" mode only 94 | [log.file] 95 | ;level = 96 | # This enables automated log rotate(switch of following options), default is true 97 | ;log_rotate = true 98 | 99 | # Max line number of single file, default is 1000000 100 | ;max_lines = 1000000 101 | 102 | # Max size shift of single file, default is 28 means 1 << 28, 256MB 103 | ;max_lines_shift = 28 104 | 105 | # Segment log daily, default is true 106 | ;daily_rotate = true 107 | 108 | # Expired days of log file(delete after max days), default is 7 109 | ;max_days = 7 110 | 111 | #################################### Kafka Event Publisher ########################## 112 | [kafka] 113 | ;enabled = false 114 | ;brokers = localhost:9092 115 | ;topic = worldping 116 | 117 | #################################### Alerting ########################## 118 | [alerting] 119 | ;enabled = false 120 | ;distributed = false 121 | ;topic = worldping-alerts 122 | ;tickqueue_size = 20 123 | ;internal_jobqueue_size = 1000 124 | ;executor_lru_size = 10000 125 | ;enable_scheduler = true 126 | ;graphite_url = http://graphite-api:8888/ 127 | 128 | [raintank] 129 | ;graphite_url = http://graphite-api:8888/ 130 | ;elasticsearch_url = http://localhost:9200/ 131 | ;tsdb_url = http://tsdb-gw/ 132 | 133 | [telemetry] 134 | ;stats_enabled = false 135 | ;stats_addr = localhost:2003 136 | ;stats_prefix = worldping.worldping-api.stats.default.$hostname 137 | ;stats_interval = 10 138 | ;stats_timeout = 10s 139 | ;stats_buffer_size = 20000 140 | 141 | 142 | -------------------------------------------------------------------------------- /go.mod: -------------------------------------------------------------------------------- 1 | module github.com/raintank/worldping-api 2 | 3 | require ( 4 | bosun.org v0.0.0-20160609184141-bb4e4366b216 5 | github.com/DataDog/datadog-go v0.0.0-20150618030719-a27810dd518c 6 | github.com/DataDog/zstd v1.4.0 7 | github.com/Dieterbe/artisanalhistogram v0.0.0-20170619072513-f61b7225d304 // indirect 8 | github.com/Dieterbe/go-metrics v0.0.0-20181015090856-87383909479d 9 | github.com/Dieterbe/profiletrigger v0.0.0-20150625092948-98e0b12887d3 10 | github.com/Shopify/sarama v0.0.0-20190527091752-41f00bce7fd3 11 | github.com/Unknwon/com v0.0.0-20151008135407-28b053d5a292 12 | github.com/alexcesaro/statsd v0.0.0-20151215225140-bbc5756ebb41 13 | github.com/bsm/sarama-cluster v0.0.0-20161206102625-11887f57ba85 14 | github.com/codeskyblue/go-uuid v0.0.0-20140208115753-952abbca900b // indirect 15 | github.com/davecgh/go-spew v1.1.1 16 | github.com/dgryski/go-linlog v0.0.0-20180207191225-edcf2dfd90ff // indirect 17 | github.com/eapache/go-resiliency v1.1.0 18 | github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 19 | github.com/eapache/queue v1.1.0 20 | github.com/fiorix/freegeoip v0.0.0-20160530125603-3e5065fca718 21 | github.com/go-macaron/binding v0.0.0-20161115070950-a453235199f8 22 | github.com/go-macaron/inject v0.0.0-20160627170012-d8a0b8677191 23 | github.com/go-macaron/toolbox v0.0.0-20170220183756-6766b8f16d1b 24 | github.com/go-sql-driver/mysql v0.0.0-20160125151823-7c7f55628262 25 | github.com/go-xorm/core v0.5.6 26 | github.com/go-xorm/xorm v0.5.4 27 | github.com/golang/snappy v0.0.1 28 | github.com/googollee/go-engine.io v0.0.0-20150203162345-597faf3df88a 29 | github.com/googollee/go-socket.io 
v0.0.0-20150201044939-31886c563625 30 | github.com/gopherjs/gopherjs v0.0.0-20200217142428-fce0ec30dd00 31 | github.com/gorilla/websocket v0.0.0-20150515162638-1551221275a7 32 | github.com/graarh/golang-socketio v0.0.0-20160527125345-533dd6f9e8e9 33 | github.com/grafana/grafana v4.2.0+incompatible 34 | github.com/grafana/metrictank v0.12.0 35 | github.com/hashicorp/go-version v0.0.0-20160519212729-0181db470237 36 | github.com/hashicorp/golang-lru v0.0.0-20150706174024-61b79a797853 37 | github.com/howeyc/fsnotify v0.0.0-20151003194602-f0c08ee9c607 38 | github.com/inconshreveable/log15 v0.0.0-20180818164646-67afb5ed74ec // indirect 39 | github.com/jarcoal/httpmock v1.0.4 // indirect 40 | github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 41 | github.com/jtolds/gls v4.20.0+incompatible 42 | github.com/klauspost/crc32 v0.0.0-20170210140523-1bab8b35b6bb 43 | github.com/lib/pq v0.0.0-20150109111458-19eeca3e30d2 44 | github.com/mattn/go-colorable v0.1.2 // indirect 45 | github.com/mattn/go-sqlite3 v0.0.0-20150323234855-e28cd440fabd 46 | github.com/metrics20/go-metrics20 v0.0.0-20180821133656-717ed3a27bf9 // indirect 47 | github.com/opentracing/opentracing-go v1.1.0 48 | github.com/oschwald/maxminddb-golang v0.0.0-20160523021551-03a3feaec4e5 49 | github.com/philhofer/fwd v0.0.0-20150722221502-4dbda5e2904d 50 | github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41 51 | github.com/pierrec/xxHash v0.0.0-20160112165351-5a004441f897 52 | github.com/prometheus/client_golang v0.9.3 // indirect 53 | github.com/raintank/met v0.0.0-20160113084835-daf6d57fc205 54 | github.com/raintank/raintank-probe v0.0.0-20170509153314-3969b73d30db 55 | github.com/raintank/schema v1.0.0 // indirect 56 | github.com/raintank/tsdb-gw v0.0.0-20190408195803-9b50e6fd3947 57 | github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a 58 | github.com/sirupsen/logrus v1.2.0 59 | github.com/smartystreets/assertions v1.1.0 60 | github.com/smartystreets/goconvey v1.6.4 61 | github.com/tinylib/msgp v0.0.0-20170101023110-362bfb3384d5 62 | github.com/uber/jaeger-client-go v2.16.0+incompatible 63 | github.com/uber/jaeger-lib v2.0.0+incompatible // indirect 64 | github.com/weaveworks/common v0.0.0-20190714171817-ddeaa31513fd // indirect 65 | github.com/yanzay/log v0.0.0-20160419144809-87352bb23506 66 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b 67 | golang.org/x/sys v0.0.0-20190412213103-97732733099d 68 | golang.org/x/tools v0.0.0-20200515220128-d3bf790afa53 // indirect 69 | gopkg.in/ini.v1 v1.21.1 70 | gopkg.in/macaron.v1 v1.1.8 71 | gopkg.in/raintank/schema.v0 v0.0.0-20160713163449-b1d2969aa4a5 72 | gopkg.in/raintank/schema.v1 v1.0.0-20170112123755-a323316458b5 73 | ) 74 | 75 | go 1.13 76 | -------------------------------------------------------------------------------- /main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "io" 6 | "io/ioutil" 7 | "net/url" 8 | "os" 9 | "os/signal" 10 | "strconv" 11 | "strings" 12 | "syscall" 13 | "time" 14 | 15 | "github.com/grafana/metrictank/stats" 16 | "github.com/raintank/raintank-probe/publisher" 17 | "github.com/raintank/worldping-api/pkg/alerting" 18 | "github.com/raintank/worldping-api/pkg/api" 19 | "github.com/raintank/worldping-api/pkg/cmd" 20 | "github.com/raintank/worldping-api/pkg/events" 21 | "github.com/raintank/worldping-api/pkg/log" 22 | "github.com/raintank/worldping-api/pkg/middleware" 23 | "github.com/raintank/worldping-api/pkg/services/endpointdiscovery" 24 | 
"github.com/raintank/worldping-api/pkg/services/notifications" 25 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 26 | "github.com/raintank/worldping-api/pkg/setting" 27 | jaegercfg "github.com/uber/jaeger-client-go/config" 28 | ) 29 | 30 | var version = "master" 31 | var commit = "NA" 32 | var buildstamp string 33 | 34 | var configFile = flag.String("config", "", "path to config file") 35 | var homePath = flag.String("homepath", "", "path to grafana install/home path, defaults to working directory") 36 | var exitChan = make(chan int) 37 | 38 | func main() { 39 | buildstampInt64, _ := strconv.ParseInt(buildstamp, 10, 64) 40 | 41 | setting.BuildVersion = version 42 | setting.BuildCommit = commit 43 | setting.BuildStamp = buildstampInt64 44 | notifyShutdown := make(chan struct{}) 45 | go listenToSystemSignels(notifyShutdown) 46 | 47 | flag.Parse() 48 | initRuntime() 49 | 50 | if setting.StatsEnabled { 51 | stats.NewMemoryReporter() 52 | hostname, _ := os.Hostname() 53 | prefix := strings.Replace(setting.StatsPrefix, "$hostname", strings.Replace(hostname, ".", "_", -1), -1) 54 | stats.NewGraphite(prefix, setting.StatsAddr, setting.StatsInterval, setting.StatsBufferSize, setting.StatsTimeout) 55 | } else { 56 | stats.NewDevnull() 57 | } 58 | 59 | events.Init() 60 | tsdbUrl, _ := url.Parse(setting.TsdbUrl) 61 | tsdbPublisher := publisher.NewTsdb(tsdbUrl, setting.AdminKey, 1) 62 | api.InitCollectorController(tsdbPublisher) 63 | if setting.Alerting.Enabled { 64 | closer := initTracing() 65 | defer closer.Close() 66 | alerting.Init(tsdbPublisher) 67 | alerting.Construct() 68 | } 69 | 70 | if err := notifications.Init(); err != nil { 71 | log.Fatal(3, "Notification service failed to initialize", err) 72 | } 73 | 74 | if err := endpointdiscovery.InitEndpointDiscovery(); err != nil { 75 | log.Fatal(3, "EndpointDiscovery service failed to initialize.", err) 76 | } 77 | 78 | cmd.StartServer(notifyShutdown) 79 | exitChan <- 0 80 | } 81 | 82 | func initRuntime() { 83 | err := setting.NewConfigContext(&setting.CommandLineArgs{ 84 | Config: *configFile, 85 | HomePath: *homePath, 86 | Args: flag.Args(), 87 | }) 88 | 89 | if err != nil { 90 | log.Fatal(3, err.Error()) 91 | } 92 | 93 | log.Info("Starting worldping-api") 94 | log.Info("Version: %v, Commit: %v, Build date: %v", setting.BuildVersion, setting.BuildCommit, time.Unix(setting.BuildStamp, 0)) 95 | setting.LogConfigurationInfo() 96 | 97 | sqlstore.NewEngine() 98 | middleware.Init(setting.AdminKey) 99 | } 100 | 101 | func listenToSystemSignels(notifyShutdown chan struct{}) { 102 | signalChan := make(chan os.Signal, 1) 103 | code := 0 104 | 105 | signal.Notify(signalChan, os.Interrupt) 106 | signal.Notify(signalChan, os.Kill) 107 | signal.Notify(signalChan, syscall.SIGTERM) 108 | 109 | select { 110 | case sig := <-signalChan: 111 | log.Info("Received signal %s. 
shutting down", sig) 112 | case code = <-exitChan: 113 | switch code { 114 | case 0: 115 | log.Info("Shutting down") 116 | default: 117 | log.Warn("Shutting down") 118 | } 119 | } 120 | close(notifyShutdown) 121 | 122 | publisher.Publisher.Close() 123 | api.ShutdownController() 124 | log.Close() 125 | os.Exit(code) 126 | } 127 | 128 | func initTracing() io.Closer { 129 | log.Info("initializing jaeger") 130 | cfg, err := jaegercfg.FromEnv() 131 | if err != nil { 132 | log.Fatal(3, err.Error()) 133 | } 134 | if cfg.Sampler.SamplingServerURL == "" && cfg.Reporter.LocalAgentHostPort == "" { 135 | log.Info("Jaeger tracer disabled: No trace report agent or config server specified") 136 | return ioutil.NopCloser(nil) 137 | } 138 | 139 | closer, err := cfg.InitGlobalTracer("worldping-alerting") 140 | if err != nil { 141 | log.Fatal(3, err.Error()) 142 | } 143 | 144 | return closer 145 | } 146 | -------------------------------------------------------------------------------- /pkg/alerting/alerting_eval_test.go: -------------------------------------------------------------------------------- 1 | package alerting 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "testing" 7 | 8 | "bosun.org/graphite" 9 | m "github.com/raintank/worldping-api/pkg/models" 10 | . "github.com/smartystreets/goconvey/convey" 11 | ) 12 | 13 | func getSeries(vals []int) graphite.Series { 14 | s := graphite.Series{ 15 | Target: "test", 16 | Datapoints: make([]graphite.DataPoint, len(vals)), 17 | } 18 | for i, v := range vals { 19 | s.Datapoints[i] = []json.Number{json.Number(fmt.Sprintf("%d", v)), json.Number(fmt.Sprintf("%d", i))} 20 | } 21 | return s 22 | } 23 | 24 | func check(series []graphite.Series, steps, numProbes int) m.CheckEvalResult { 25 | res := graphite.Response(series) 26 | healthSettings := m.CheckHealthSettings{ 27 | NumProbes: numProbes, 28 | Steps: steps, 29 | } 30 | result, err := eval(res, 1, &healthSettings) 31 | So(err, ShouldBeNil) 32 | return result 33 | } 34 | 35 | func TestAlertingEval(t *testing.T) { 36 | Convey("check steps=3, numProbes=1", t, func() { 37 | So(check( 38 | []graphite.Series{ 39 | getSeries([]int{0, 0, 0}), 40 | }, 41 | 3, 42 | 1, 43 | ), ShouldEqual, m.EvalResultOK) 44 | 45 | So(check( 46 | []graphite.Series{ 47 | getSeries([]int{0, 1, 1}), 48 | }, 49 | 3, 50 | 1, 51 | ), ShouldEqual, m.EvalResultOK) 52 | 53 | So(check( 54 | []graphite.Series{ 55 | getSeries([]int{1, 1, 1}), 56 | }, 57 | 3, 58 | 1, 59 | ), ShouldEqual, m.EvalResultCrit) 60 | 61 | So(check( 62 | []graphite.Series{ 63 | getSeries([]int{1, 1}), 64 | }, 65 | 3, 66 | 1, 67 | ), ShouldEqual, m.EvalResultOK) 68 | 69 | So(check( 70 | []graphite.Series{ 71 | getSeries([]int{1, 1, 0, 1}), 72 | }, 73 | 3, 74 | 1, 75 | ), ShouldEqual, m.EvalResultOK) 76 | 77 | So(check( 78 | []graphite.Series{ 79 | getSeries([]int{1, 1, 1}), 80 | getSeries([]int{0, 0, 0}), 81 | }, 82 | 3, 83 | 1, 84 | ), ShouldEqual, m.EvalResultCrit) 85 | 86 | So(check( 87 | []graphite.Series{ 88 | getSeries([]int{1, 1, 1}), 89 | getSeries([]int{1, 1, 1}), 90 | }, 91 | 3, 92 | 1, 93 | ), ShouldEqual, m.EvalResultCrit) 94 | }) 95 | 96 | Convey("check steps=3, numProbes=2", t, func() { 97 | So(check( 98 | []graphite.Series{ 99 | getSeries([]int{0, 0, 0}), 100 | getSeries([]int{1, 1, 1}), 101 | }, 102 | 3, 103 | 2, 104 | ), ShouldEqual, m.EvalResultOK) 105 | 106 | So(check( 107 | []graphite.Series{ 108 | getSeries([]int{1, 1, 1}), 109 | getSeries([]int{1, 1, 1}), 110 | }, 111 | 3, 112 | 2, 113 | ), ShouldEqual, m.EvalResultCrit) 114 | 115 | So(check( 116 | 
[]graphite.Series{ 117 | getSeries([]int{1, 1, 1}), 118 | getSeries([]int{1, 1, 1}), 119 | getSeries([]int{1, 1, 1}), 120 | }, 121 | 3, 122 | 2, 123 | ), ShouldEqual, m.EvalResultCrit) 124 | 125 | So(check( 126 | []graphite.Series{ 127 | getSeries([]int{1, 0, 1}), 128 | getSeries([]int{1, 0, 1}), 129 | getSeries([]int{1, 0, 1}), 130 | }, 131 | 3, 132 | 2, 133 | ), ShouldEqual, m.EvalResultOK) 134 | 135 | So(check( 136 | []graphite.Series{ 137 | getSeries([]int{1, 1, 1}), 138 | getSeries([]int{0, 0, 0}), 139 | getSeries([]int{0, 1, 1}), 140 | getSeries([]int{1, 1, 1}), 141 | getSeries([]int{0, 0, 0}), 142 | }, 143 | 3, 144 | 2, 145 | ), ShouldEqual, m.EvalResultCrit) 146 | }) 147 | } 148 | -------------------------------------------------------------------------------- /pkg/alerting/alerting_executor_test.go: -------------------------------------------------------------------------------- 1 | package alerting 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "strconv" 8 | "strings" 9 | "testing" 10 | "time" 11 | 12 | "bosun.org/graphite" 13 | "github.com/hashicorp/golang-lru" 14 | "github.com/raintank/worldping-api/pkg/alerting/jobqueue" 15 | m "github.com/raintank/worldping-api/pkg/models" 16 | "github.com/raintank/worldping-api/pkg/setting" 17 | . "github.com/smartystreets/goconvey/convey" 18 | "gopkg.in/raintank/schema.v1" 19 | ) 20 | 21 | type mockTransport struct { 22 | queries chan graphite.Request 23 | } 24 | 25 | func (m *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) { 26 | response := &http.Response{ 27 | Header: make(http.Header), 28 | Request: req, 29 | StatusCode: http.StatusOK, 30 | } 31 | req.ParseForm() 32 | start, err := strconv.ParseInt(req.FormValue("from"), 10, 64) 33 | if err != nil { 34 | return nil, err 35 | } 36 | end, err := strconv.ParseInt(req.FormValue("until"), 10, 64) 37 | if err != nil { 38 | return nil, err 39 | } 40 | startTs := time.Unix(start, 0) 41 | endTs := time.Unix(end, 0) 42 | request := graphite.Request{ 43 | Targets: req.Form["target"], 44 | Start: &startTs, 45 | End: &endTs, 46 | } 47 | m.queries <- request 48 | response.Header.Set("Content-Type", "application/json") 49 | responseBody := ` 50 | [ 51 | {"target": "endpoint1", "datapoints": [[0, 1], [0,2], [0,3]]}, 52 | {"target": "endpoint2", "datapoints": [[0, 1], [1,2], [1,3]]} 53 | ]` 54 | response.Body = ioutil.NopCloser(strings.NewReader(responseBody)) 55 | return response, nil 56 | } 57 | 58 | type mockPublisher struct { 59 | } 60 | 61 | func (m *mockPublisher) Add(metrics []*schema.MetricData) { 62 | return 63 | } 64 | 65 | func init() { 66 | Init(&mockPublisher{}) 67 | } 68 | 69 | func TestExecutor(t *testing.T) { 70 | transport := &mockTransport{ 71 | queries: make(chan graphite.Request, 10), 72 | } 73 | graphite.DefaultClient.Transport = transport 74 | setting.Alerting.Distributed = false 75 | ResultQueue = make(chan *m.AlertingJob, 1000) 76 | 77 | Convey("executor must do the right thing", t, func() { 78 | jobAt := func(ts int64) *m.AlertingJob { 79 | return &m.AlertingJob{ 80 | CheckForAlertDTO: &m.CheckForAlertDTO{ 81 | HealthSettings: &m.CheckHealthSettings{ 82 | NumProbes: 1, 83 | Steps: 3, 84 | }, 85 | Slug: "test", 86 | Type: "http", 87 | Frequency: 10, 88 | }, 89 | LastPointTs: time.Unix(ts, 0), 90 | GeneratedAt: time.Now(), 91 | } 92 | } 93 | jobQ := jobqueue.NewJobQueue() 94 | cache, err := lru.New(1000) 95 | if err != nil { 96 | panic(fmt.Sprintf("Can't create LRU: %s", err.Error())) 97 | } 98 | done := make(chan struct{}) 99 | jobsChan := 
jobQ.Jobs() 100 | go func() { 101 | ChanExecutor(jobsChan, cache) 102 | close(done) 103 | }() 104 | 105 | jobQ.QueueJob(jobAt(0)) 106 | jobQ.QueueJob(jobAt(1)) 107 | jobQ.QueueJob(jobAt(2)) 108 | jobQ.QueueJob(jobAt(2)) 109 | jobQ.QueueJob(jobAt(1)) 110 | jobQ.QueueJob(jobAt(0)) 111 | jobQ.Close() 112 | <-done 113 | close(transport.queries) 114 | count := int64(0) 115 | for q := range transport.queries { 116 | So(q.Targets, ShouldHaveLength, 1) 117 | So(q.Targets[0], ShouldEqual, "worldping.test.*.http.error_state") 118 | count++ 119 | } 120 | So(count, ShouldEqual, 3) 121 | }) 122 | } 123 | -------------------------------------------------------------------------------- /pkg/alerting/check_state.go: -------------------------------------------------------------------------------- 1 | package alerting 2 | 3 | import ( 4 | "strings" 5 | "time" 6 | 7 | "github.com/raintank/worldping-api/pkg/log" 8 | m "github.com/raintank/worldping-api/pkg/models" 9 | "github.com/raintank/worldping-api/pkg/services/notifications" 10 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 11 | "github.com/raintank/worldping-api/pkg/util" 12 | ) 13 | 14 | var ( 15 | ResultQueue chan *m.AlertingJob 16 | ) 17 | 18 | func InitResultHandler() { 19 | ResultQueue = make(chan *m.AlertingJob, 1000) 20 | 21 | stateChanges := make(chan *m.AlertingJob, 1000) 22 | for i := 0; i < 5; i++ { 23 | go storeResults(stateChanges) 24 | } 25 | go handleStateChange(stateChanges) 26 | 27 | } 28 | 29 | func storeResults(stateChanges chan *m.AlertingJob) { 30 | for j := range ResultQueue { 31 | saved := false 32 | attempts := 0 33 | for !saved && attempts < 3 { 34 | attempts++ 35 | pre := time.Now() 36 | change, err := sqlstore.UpdateCheckState(j) 37 | executorStateDBUpdate.Value(util.Since(pre)) 38 | if err != nil { 39 | log.Warn("failed to update checkState for checkId=%d. %s", j.Id, err) 40 | continue 41 | } 42 | saved = true 43 | log.Debug("updated state of checkId=%d stateChange=%v", j.Id, change) 44 | if change { 45 | stateChanges <- j 46 | } 47 | executorStateSaveDelay.Value(util.Since(j.TimeExec)) 48 | } 49 | if !saved { 50 | log.Error(3, "failed to update checkState for checkId=%d", j.Id) 51 | } 52 | } 53 | } 54 | 55 | func ProcessResult(job *m.AlertingJob) { 56 | ResultQueue <- job 57 | } 58 | 59 | func handleStateChange(c chan *m.AlertingJob) { 60 | for job := range c { 61 | log.Debug("state change: orgId=%d, monitorId=%d, endpointSlug=%s, state=%s", job.OrgId, job.Id, job.Slug, job.NewState.String()) 62 | if job.HealthSettings.Notifications.Enabled { 63 | emails := strings.Split(job.HealthSettings.Notifications.Addresses, ",") 64 | if len(emails) < 1 { 65 | log.Debug("no email addresses provided. OrgId: %d monitorId: %d", job.OrgId, job.Id) 66 | } else { 67 | emailTo := make([]string, 0) 68 | for _, email := range emails { 69 | email := strings.TrimSpace(email) 70 | if email == "" { 71 | continue 72 | } 73 | log.Info("sending email. 
addr=%s, orgId=%d, monitorId=%d, endpointSlug=%s, state=%s", email, job.OrgId, job.Id, job.Slug, job.NewState.String()) 74 | emailTo = append(emailTo, email) 75 | } 76 | if len(emailTo) == 0 { 77 | continue 78 | } 79 | sendCmd := m.SendEmailCommand{ 80 | To: emailTo, 81 | Template: "alerting_notification.html", 82 | Data: map[string]interface{}{ 83 | "EndpointId": job.EndpointId, 84 | "EndpointName": job.Name, 85 | "EndpointSlug": job.Slug, 86 | "Settings": job.Settings, 87 | "CheckType": job.Type, 88 | "State": job.NewState.String(), 89 | "TimeLastData": job.LastPointTs, // timestamp of the most recent data used 90 | "TimeExec": job.TimeExec, // when we executed the alerting rule and made the determination 91 | }, 92 | } 93 | go func(sendCmd *m.SendEmailCommand, job *m.AlertingJob) { 94 | if err := notifications.SendEmail(sendCmd); err != nil { 95 | log.Error(3, "failed to send email to %s. OrgId: %d monitorId: %d due to: %s", sendCmd.To, job.OrgId, job.Id, err) 96 | executorEmailFailed.Inc() 97 | } else { 98 | executorEmailSent.Inc() 99 | } 100 | }(&sendCmd, job) 101 | } 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /pkg/alerting/init.go: -------------------------------------------------------------------------------- 1 | package alerting 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/grafana/metrictank/stats" 7 | lru "github.com/hashicorp/golang-lru" 8 | "github.com/raintank/worldping-api/pkg/alerting/jobqueue" 9 | "github.com/raintank/worldping-api/pkg/log" 10 | "github.com/raintank/worldping-api/pkg/services" 11 | "github.com/raintank/worldping-api/pkg/setting" 12 | ) 13 | 14 | var ( 15 | tickQueueItems = stats.NewMeter32("alert-tickqueue.items", true) 16 | tickQueueSize = stats.NewGauge32("alert-tickqueue.size") 17 | dispatcherJobsSkippedDueToSlowJobQueueInternal = stats.NewCounterRate32("alert-dispatcher.jobs-skipped-due-to-slow-internal-jobqueue") 18 | dispatcherTicksSkippedDueToSlowTickQueue = stats.NewCounterRate32("alert-dispatcher.ticks-skipped-due-to-slow-tickqueue") 19 | 20 | dispatcherGetSchedules = stats.NewMeter32("alert-dispatcher.get-schedules", true) 21 | dispatcherNumGetSchedules = stats.NewCounterRate32("alert-dispatcher.num-getschedules") 22 | dispatcherJobsScheduled = stats.NewCounterRate32("alert-dispatcher.jobs-scheduled") 23 | 24 | executorNum = stats.NewGauge32("alert-executor.num") 25 | 26 | executorNumTooOld = stats.NewCounterRate32("alert-executor.too-old") 27 | executorNumAlreadyDone = stats.NewCounterRate32("alert-executor.already-done") 28 | executorNumExecuted = stats.NewCounterRate32("alert-executor.executed") 29 | executorAlertOutcomesErr = stats.NewCounterRate32("alert-executor.alert-outcomes.error") 30 | executorAlertOutcomesOk = stats.NewCounterRate32("alert-executor.alert-outcomes.ok") 31 | executorAlertOutcomesCrit = stats.NewCounterRate32("alert-executor.alert-outcomes.critical") 32 | executorAlertOutcomesUnkn = stats.NewCounterRate32("alert-executor.alert-outcomes.unknown") 33 | executorGraphiteEmptyResponse = stats.NewCounterRate32("alert-executor.graphite-emptyresponse") 34 | 35 | executorJobExecDelay = stats.NewMeter32("alert-executor.job_execution_delay", true) 36 | executorStateSaveDelay = stats.NewMeter32("alert-executor.state_save_delay", true) 37 | executorStateDBUpdate = stats.NewMeter32("alert-executor.state_db_update", true) 38 | executorJobQueryGraphite = stats.NewMeter32("alert-executor.job_query_graphite", true) 39 | executorGraphiteMissingVals = 
stats.NewCounterRate32("alert-executor.graphite-missingVals") 40 | 41 | executorEmailSent = stats.NewCounterRate32("alert-executor.emails.sent") 42 | executorEmailFailed = stats.NewCounterRate32("alert-executor.emails.failed") 43 | 44 | metricsPublisher services.MetricsPublisher 45 | ) 46 | 47 | // Init initializes the alerting engine. 48 | func Init(publisher services.MetricsPublisher) { 49 | 50 | metricsPublisher = publisher 51 | } 52 | 53 | func Construct() { 54 | cache, err := lru.New(setting.Alerting.ExecutorLRUSize) 55 | if err != nil { 56 | panic(fmt.Sprintf("Can't create LRU: %s", err.Error())) 57 | } 58 | 59 | if !setting.Alerting.Distributed && !(setting.Alerting.EnableScheduler && setting.Alerting.EnableWorker) { 60 | log.Fatal(3, "Alerting in standalone mode requires a scheduler and a worker (enable_scheduler = true and enabled_worker = true)") 61 | } 62 | 63 | if !setting.Alerting.EnableScheduler && !setting.Alerting.EnableWorker { 64 | log.Fatal(3, "Alerting requires a scheduler or a worker (enable_scheduler = true or enable_worker = true)") 65 | } 66 | 67 | jobQ := jobqueue.NewJobQueue() 68 | 69 | // create jobs 70 | if setting.Alerting.EnableScheduler { 71 | log.Info("Alerting: starting job Dispatcher") 72 | go dispatchJobs(jobQ) 73 | } 74 | 75 | //worker to execute the checks. 76 | if setting.Alerting.EnableWorker { 77 | log.Info("Alerting: starting alert executor") 78 | go ChanExecutor(jobQ.Jobs(), cache) 79 | } 80 | 81 | InitResultHandler() 82 | } 83 | -------------------------------------------------------------------------------- /pkg/alerting/jobqueue/jobqueue.go: -------------------------------------------------------------------------------- 1 | package jobqueue 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/grafana/metrictank/stats" 7 | m "github.com/raintank/worldping-api/pkg/models" 8 | "github.com/raintank/worldping-api/pkg/setting" 9 | ) 10 | 11 | var ( 12 | jobQueueInItems = stats.NewGauge32("alert-jobqueue.in-items") 13 | jobQueueOutItems = stats.NewGauge32("alert-jobqueue.out-items") 14 | jobQueueSize = stats.NewGauge32("alert-jobqueue.size") 15 | jobsDroppedCount = stats.NewCounterRate32("kafka-pubsub.jobs-dropped") 16 | jobsConsumedCount = stats.NewCounterRate32("kafka-pubsub.jobs-consumed") 17 | jobsPublishedCount = stats.NewCounterRate32("kafka-pubsub.jobs-published") 18 | consumerMessageDelay = stats.NewMeter32("kafka-pubsub.message_delay", true) 19 | ) 20 | 21 | type JobQueue struct { 22 | jobsIn chan *m.AlertingJob 23 | jobsOut chan *m.AlertingJob 24 | pubSub *KafkaPubSub 25 | } 26 | 27 | func NewJobQueue() *JobQueue { 28 | q := new(JobQueue) 29 | if setting.Alerting.Distributed { 30 | in := make(chan *m.AlertingJob, setting.Alerting.InternalJobQueueSize) 31 | out := make(chan *m.AlertingJob, setting.Alerting.InternalJobQueueSize) 32 | pubSub := NewKafkaPubSub(setting.Kafka.Brokers, setting.Alerting.Topic, in, out) 33 | pubSub.Run() 34 | q.pubSub = pubSub 35 | q.jobsIn = in 36 | q.jobsOut = out 37 | } else { 38 | jobCh := make(chan *m.AlertingJob, setting.Alerting.InternalJobQueueSize) 39 | q.jobsIn = jobCh 40 | q.jobsOut = jobCh 41 | } 42 | go q.stats() 43 | return q 44 | } 45 | 46 | func (q *JobQueue) stats() { 47 | ticker := time.NewTicker(time.Second * 2) 48 | for range ticker.C { 49 | jobQueueInItems.Set(len(q.jobsIn)) 50 | jobQueueOutItems.Set(len(q.jobsOut)) 51 | jobQueueSize.Set(setting.Alerting.InternalJobQueueSize) 52 | } 53 | } 54 | 55 | func (q *JobQueue) QueueJob(job *m.AlertingJob) { 56 | q.jobsIn <- job 57 | } 58 | 59 | func (q 
*JobQueue) Jobs() <-chan *m.AlertingJob { 60 | return q.jobsOut 61 | } 62 | 63 | func (q *JobQueue) Close() { 64 | close(q.jobsIn) 65 | if q.pubSub != nil { 66 | q.pubSub.Close() 67 | } 68 | } 69 | -------------------------------------------------------------------------------- /pkg/alerting/jobqueue/jobqueue_test.go: -------------------------------------------------------------------------------- 1 | package jobqueue 2 | 3 | import ( 4 | "testing" 5 | "time" 6 | 7 | m "github.com/raintank/worldping-api/pkg/models" 8 | "github.com/raintank/worldping-api/pkg/setting" 9 | . "github.com/smartystreets/goconvey/convey" 10 | ) 11 | 12 | func TestJobQueuePublish(t *testing.T) { 13 | setting.Alerting.InternalJobQueueSize = 1 14 | jobQ := NewJobQueue() 15 | 16 | jobs := jobQ.Jobs() 17 | Convey("When queuing job", t, func() { 18 | jobQ.QueueJob(&m.AlertingJob{ 19 | CheckForAlertDTO: &m.CheckForAlertDTO{ 20 | Id: 1, 21 | Slug: "test", 22 | }, 23 | }) 24 | var job *m.AlertingJob 25 | select { 26 | case job = <-jobs: 27 | default: 28 | } 29 | So(job, ShouldNotBeNil) 30 | So(job.Id, ShouldEqual, 1) 31 | So(job.Slug, ShouldEqual, "test") 32 | }) 33 | Convey("When queue fills up adding job should block", t, func() { 34 | done := make(chan struct{}) 35 | go func() { 36 | jobQ.QueueJob(&m.AlertingJob{ 37 | CheckForAlertDTO: &m.CheckForAlertDTO{ 38 | Id: 2, 39 | Slug: "test", 40 | }, 41 | }) 42 | jobQ.QueueJob(&m.AlertingJob{ 43 | CheckForAlertDTO: &m.CheckForAlertDTO{ 44 | Id: 3, 45 | Slug: "test", 46 | }, 47 | }) 48 | close(done) 49 | }() 50 | queued := false 51 | select { 52 | case <-time.After(time.Second): 53 | case <-done: 54 | queued = true 55 | } 56 | So(queued, ShouldBeFalse) 57 | 58 | count := 0 59 | LOOP: 60 | for { 61 | select { 62 | case <-jobs: 63 | count++ 64 | case <-done: 65 | break LOOP 66 | } 67 | } 68 | So(count, ShouldEqual, 2) 69 | 70 | }) 71 | 72 | } 73 | -------------------------------------------------------------------------------- /pkg/alerting/jobqueue/pubsub_test.go: -------------------------------------------------------------------------------- 1 | package jobqueue 2 | 3 | import ( 4 | "encoding/json" 5 | "testing" 6 | "time" 7 | 8 | "github.com/Shopify/sarama" 9 | "github.com/Shopify/sarama/mocks" 10 | m "github.com/raintank/worldping-api/pkg/models" 11 | . 
"github.com/smartystreets/goconvey/convey" 12 | ) 13 | 14 | func TestPublish(t *testing.T) { 15 | config := sarama.NewConfig() 16 | config.Producer.Return.Successes = true 17 | mp := mocks.NewSyncProducer(t, config) 18 | pubSub := &KafkaPubSub{ 19 | instance: "test", 20 | producer: mp, 21 | topic: "jq", 22 | shutdown: make(chan struct{}), 23 | } 24 | pub := make(chan *m.AlertingJob, 1) 25 | go pubSub.produce(pub) 26 | 27 | verifyChan := make(chan []byte) 28 | handler := func(msg []byte) error { 29 | verifyChan <- msg 30 | return nil 31 | } 32 | Convey("when publishing jobs", t, func() { 33 | for i := int64(1); i <= 3; i++ { 34 | mp.ExpectSendMessageWithCheckerFunctionAndSucceed(handler) 35 | } 36 | for i := int64(1); i <= 3; i++ { 37 | pub <- &m.AlertingJob{ 38 | CheckForAlertDTO: &m.CheckForAlertDTO{ 39 | Id: i, 40 | Slug: "test", 41 | }, 42 | } 43 | var j m.AlertingJob 44 | msg := <-verifyChan 45 | err := json.Unmarshal(msg, &j) 46 | So(err, ShouldBeNil) 47 | So(j.Id, ShouldEqual, i) 48 | } 49 | }) 50 | Convey("when broker errors", t, func() { 51 | for i := int64(1); i <= 3; i++ { 52 | // fail i times, before succeeding 53 | for e := int64(0); e < i; e++ { 54 | mp.ExpectSendMessageAndFail(sarama.ErrBrokerNotAvailable) 55 | } 56 | mp.ExpectSendMessageWithCheckerFunctionAndSucceed(handler) 57 | pre := time.Now() 58 | pub <- &m.AlertingJob{ 59 | CheckForAlertDTO: &m.CheckForAlertDTO{ 60 | Id: i, 61 | Slug: "test", 62 | }, 63 | } 64 | var j m.AlertingJob 65 | msg := <-verifyChan 66 | end := time.Now() 67 | err := json.Unmarshal(msg, &j) 68 | So(err, ShouldBeNil) 69 | So(j.Id, ShouldEqual, i) 70 | So(end, ShouldHappenOnOrAfter, pre.Add(time.Second*time.Duration(i))) 71 | } 72 | }) 73 | 74 | pubSub.Close() 75 | } 76 | -------------------------------------------------------------------------------- /pkg/alerting/offset.go: -------------------------------------------------------------------------------- 1 | package alerting 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | 7 | "github.com/raintank/worldping-api/pkg/log" 8 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 9 | ) 10 | 11 | func LoadOrSetOffset() int { 12 | offset, err := sqlstore.GetAlertSchedulerValue("offset") 13 | if err != nil { 14 | log.Error(3, "failure querying for current offset: %q", err) 15 | return 30 16 | } 17 | if offset == "" { 18 | log.Debug("initializing offset to default value of 30 seconds.") 19 | setOffset(30) 20 | return 30 21 | } 22 | i, err := strconv.Atoi(offset) 23 | if err != nil { 24 | panic(fmt.Sprintf("failure reading in offset: %q. input value was: %q", err, offset)) 25 | } 26 | return i 27 | } 28 | 29 | func setOffset(offset int) { 30 | err := sqlstore.UpdateAlertSchedulerValue("offset", fmt.Sprintf("%d", offset)) 31 | if err != nil { 32 | log.Error(3, "Could not persist offset: %q", err) 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /pkg/alerting/schedule.go: -------------------------------------------------------------------------------- 1 | package alerting 2 | 3 | import ( 4 | "time" 5 | 6 | "github.com/raintank/worldping-api/pkg/alerting/jobqueue" 7 | "github.com/raintank/worldping-api/pkg/log" 8 | m "github.com/raintank/worldping-api/pkg/models" 9 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 10 | "github.com/raintank/worldping-api/pkg/util" 11 | ) 12 | 13 | // getJobs retrieves all jobs for which lastPointAt % their freq == their offset. 
14 | func getJobs(lastPointAt int64) ([]*m.AlertingJob, error) { 15 | checks, err := sqlstore.GetChecksForAlerts(lastPointAt) 16 | if err != nil { 17 | return nil, err 18 | } 19 | 20 | jobs := make([]*m.AlertingJob, 0) 21 | for i := range checks { 22 | check := &checks[i] 23 | if check.HealthSettings == nil { 24 | continue 25 | } 26 | if check.Frequency == 0 || check.HealthSettings.Steps == 0 || check.HealthSettings.NumProbes == 0 { 27 | continue 28 | } 29 | jobs = append(jobs, &m.AlertingJob{CheckForAlertDTO: check}) 30 | 31 | } 32 | return jobs, nil 33 | } 34 | 35 | func dispatchJobs(jobQ *jobqueue.JobQueue) { 36 | ticker := time.NewTicker(time.Second) 37 | offsetTicker := time.NewTicker(time.Minute) 38 | newOffsetChan := make(chan int) 39 | offset := LoadOrSetOffset() 40 | log.Info("Alerting using offset %d", offset) 41 | next := time.Now().Unix() - int64(offset) 42 | for { 43 | select { 44 | case lastPointAt := <-ticker.C: 45 | for next <= lastPointAt.Unix()-int64(offset) { 46 | pre := time.Now() 47 | jobs, err := getJobs(next) 48 | next++ 49 | dispatcherNumGetSchedules.Inc() 50 | dispatcherGetSchedules.Value(util.Since(pre)) 51 | 52 | if err != nil { 53 | log.Error(0, "Alerting failed to get jobs from DB: %q", err) 54 | continue 55 | } 56 | log.Debug("%d jobs found for TS: %d", len(jobs), next) 57 | for _, job := range jobs { 58 | job.GeneratedAt = time.Now() 59 | job.LastPointTs = time.Unix(next-1, 0) 60 | jobQ.QueueJob(job) 61 | dispatcherJobsScheduled.Inc() 62 | } 63 | } 64 | case <-offsetTicker.C: 65 | // run this in a separate goroutine so we dont block the scheduler. 66 | go func() { 67 | newOffset := LoadOrSetOffset() 68 | if newOffset != offset { 69 | newOffsetChan <- newOffset 70 | } 71 | }() 72 | case newOffset := <-newOffsetChan: 73 | log.Info("Alerting offset updated to %d", offset) 74 | offset = newOffset 75 | } 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /pkg/api/admin.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "github.com/raintank/worldping-api/pkg/api/rbody" 5 | "github.com/raintank/worldping-api/pkg/middleware" 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 8 | ) 9 | 10 | func GetUsage(c *middleware.Context) *rbody.ApiResponse { 11 | usage, err := sqlstore.GetUsage() 12 | if err != nil { 13 | return rbody.ErrResp(err) 14 | } 15 | 16 | return rbody.OkResp("usage", usage) 17 | } 18 | 19 | func GetBilling(c *middleware.Context) *rbody.ApiResponse { 20 | usage := make(map[int64]float64) 21 | probes, err := sqlstore.GetOnlineProbes() 22 | if err != nil { 23 | return rbody.ErrResp(err) 24 | } 25 | for _, probe := range probes { 26 | checks, err := sqlstore.GetProbeChecks(&m.ProbeDTO{Id: probe.Id}) 27 | if err != nil { 28 | return rbody.ErrResp(err) 29 | } 30 | for _, check := range checks { 31 | if _, ok := usage[check.OrgId]; !ok { 32 | usage[check.OrgId] = 0 33 | } 34 | usage[check.OrgId] += (60.0 / float64(check.Frequency)) 35 | } 36 | } 37 | 38 | resp := make([]m.BillingUsage, len(usage)) 39 | counter := 0 40 | for org, checks := range usage { 41 | resp[counter] = m.BillingUsage{ 42 | OrgId: org, 43 | ChecksPerMinute: checks, 44 | } 45 | counter++ 46 | } 47 | 48 | return rbody.OkResp("billing", resp) 49 | } 50 | 51 | func GetApiKey(ctx *middleware.Context) *rbody.ApiResponse { 52 | return rbody.OkResp("apiKey", map[string]string{"apiKey": ctx.ApiKey}) 53 | } 54 | 
-------------------------------------------------------------------------------- /pkg/api/admin_test.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | "net/http/httptest" 7 | "testing" 8 | 9 | "github.com/raintank/worldping-api/pkg/api/rbody" 10 | m "github.com/raintank/worldping-api/pkg/models" 11 | "github.com/raintank/worldping-api/pkg/setting" 12 | . "github.com/smartystreets/goconvey/convey" 13 | "gopkg.in/macaron.v1" 14 | ) 15 | 16 | func TestUsageApi(t *testing.T) { 17 | InitTestDB(t) 18 | r := macaron.Classic() 19 | setting.AdminKey = "test" 20 | Register(r) 21 | populateEndpoints(t) 22 | populateCollectors(t) 23 | 24 | Convey("When getting usage stats", t, func() { 25 | resp := httptest.NewRecorder() 26 | req, err := http.NewRequest("GET", "/api/v2/admin/usage", nil) 27 | So(err, ShouldBeNil) 28 | addAuthHeader(req) 29 | 30 | r.ServeHTTP(resp, req) 31 | Convey("should return 200", func() { 32 | So(resp.Code, ShouldEqual, 200) 33 | Convey("usage response should be valid", func() { 34 | response := rbody.ApiResponse{} 35 | err := json.Unmarshal(resp.Body.Bytes(), &response) 36 | So(err, ShouldBeNil) 37 | 38 | So(response.Meta.Code, ShouldEqual, 200) 39 | So(response.Meta.Type, ShouldEqual, "usage") 40 | So(response.Meta.Message, ShouldEqual, "success") 41 | Convey("response body should be usage data", func() { 42 | usage := m.Usage{} 43 | err := json.Unmarshal(response.Body, &usage) 44 | So(err, ShouldBeNil) 45 | So(usage.Endpoints.Total, ShouldEqual, 6) 46 | }) 47 | }) 48 | }) 49 | }) 50 | 51 | } 52 | -------------------------------------------------------------------------------- /pkg/api/api_test.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httptest" 6 | "testing" 7 | 8 | . 
"github.com/smartystreets/goconvey/convey" 9 | "gopkg.in/macaron.v1" 10 | ) 11 | 12 | func TestHttpApi(t *testing.T) { 13 | 14 | m := macaron.New() 15 | Register(m) 16 | 17 | Convey("Given request for /foobar", t, func() { 18 | resp := httptest.NewRecorder() 19 | req, err := http.NewRequest("GET", "/foobar", nil) 20 | So(err, ShouldBeNil) 21 | m.ServeHTTP(resp, req) 22 | Convey("should return 404", func() { 23 | So(resp.Code, ShouldEqual, 404) 24 | }) 25 | }) 26 | Convey("Given request for /login", t, func() { 27 | resp := httptest.NewRecorder() 28 | req, err := http.NewRequest("GET", "/login", nil) 29 | So(err, ShouldBeNil) 30 | m.ServeHTTP(resp, req) 31 | Convey("should return 200", func() { 32 | So(resp.Code, ShouldEqual, 200) 33 | }) 34 | }) 35 | 36 | } 37 | -------------------------------------------------------------------------------- /pkg/api/collector.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | //"github.com/raintank/worldping-api/pkg/log" 5 | "github.com/raintank/worldping-api/pkg/middleware" 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 8 | ) 9 | 10 | func V1GetCollectors(c *middleware.Context, query m.GetProbesQuery) { 11 | query.OrgId = int64(c.User.ID) 12 | probes, err := sqlstore.GetProbes(&query) 13 | if err != nil { 14 | handleError(c, err) 15 | return 16 | } 17 | c.JSON(200, probes) 18 | return 19 | } 20 | 21 | func V1GetCollectorLocations(c *middleware.Context) { 22 | query := m.GetProbesQuery{ 23 | OrgId: int64(c.User.ID), 24 | } 25 | 26 | probes, err := sqlstore.GetProbes(&query) 27 | if err != nil { 28 | handleError(c, err) 29 | return 30 | } 31 | 32 | locations := make([]m.ProbeLocationDTO, len(probes)) 33 | for i, c := range probes { 34 | locations[i] = m.ProbeLocationDTO{ 35 | Key: c.Slug, 36 | Latitude: c.Latitude, 37 | Longitude: c.Longitude, 38 | Name: c.Name, 39 | } 40 | } 41 | 42 | c.JSON(200, locations) 43 | return 44 | } 45 | 46 | func V1GetCollectorById(c *middleware.Context) { 47 | id := c.ParamsInt64(":id") 48 | 49 | probe, err := sqlstore.GetProbeById(id, int64(c.User.ID)) 50 | if err != nil { 51 | handleError(c, err) 52 | return 53 | } 54 | 55 | c.JSON(200, probe) 56 | return 57 | } 58 | 59 | func V1DeleteCollector(c *middleware.Context) { 60 | id := c.ParamsInt64(":id") 61 | 62 | err := sqlstore.DeleteProbe(id, int64(c.User.ID)) 63 | if err != nil { 64 | handleError(c, err) 65 | return 66 | } 67 | 68 | c.JSON(200, "collector deleted") 69 | return 70 | } 71 | 72 | func V1AddCollector(c *middleware.Context, probe m.ProbeDTO) { 73 | probe.OrgId = int64(c.User.ID) 74 | if probe.Id != 0 { 75 | c.JSON(400, "Id already set. 
Try update instead of create.") 76 | return 77 | } 78 | if probe.Name == "" { 79 | c.JSON(400, "Collector Name not set.") 80 | return 81 | } 82 | 83 | if err := sqlstore.AddProbe(&probe); err != nil { 84 | handleError(c, err) 85 | return 86 | } 87 | 88 | c.JSON(200, probe) 89 | return 90 | } 91 | 92 | func V1UpdateCollector(c *middleware.Context, probe m.ProbeDTO) { 93 | probe.OrgId = int64(c.User.ID) 94 | if probe.Name == "" { 95 | c.JSON(400, "Collector Name not set.") 96 | return 97 | } 98 | 99 | if probe.Public { 100 | if !c.IsAdmin { 101 | c.JSON(400, "Only admins can make public collectors") 102 | return 103 | } 104 | } 105 | 106 | if err := sqlstore.UpdateProbe(&probe); err != nil { 107 | handleError(c, err) 108 | return 109 | } 110 | 111 | c.JSON(200, probe) 112 | return 113 | } 114 | -------------------------------------------------------------------------------- /pkg/api/elasticsearch.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/raintank/worldping-api/pkg/elasticsearch" 8 | "github.com/raintank/worldping-api/pkg/middleware" 9 | ) 10 | 11 | func V1ElasticsearchProxy(c *middleware.Context) { 12 | proxyPath := c.Params("*") 13 | y, m, d := time.Now().Date() 14 | idxDate := fmt.Sprintf("%s-%d-%02d-%02d", elasticsearch.IndexName, y, m, d) 15 | if c.Req.Request.Method == "GET" && proxyPath == fmt.Sprintf("%s/_stats", idxDate) { 16 | c.JSON(200, "ok") 17 | return 18 | } 19 | if c.Req.Request.Method == "POST" && proxyPath == "_msearch" { 20 | elasticsearch.Proxy(c.User.ID, c.Context) 21 | return 22 | } 23 | c.JSON(404, "Not Found") 24 | } 25 | -------------------------------------------------------------------------------- /pkg/api/endpoint.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | "github.com/raintank/worldping-api/pkg/log" 8 | "github.com/raintank/worldping-api/pkg/middleware" 9 | m "github.com/raintank/worldping-api/pkg/models" 10 | "github.com/raintank/worldping-api/pkg/services/endpointdiscovery" 11 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 12 | ) 13 | 14 | func V1GetEndpointById(c *middleware.Context) { 15 | id := c.ParamsInt64(":id") 16 | 17 | endpoint, err := sqlstore.GetEndpointById(int64(c.User.ID), id) 18 | if err != nil { 19 | handleError(c, err) 20 | return 21 | } 22 | 23 | c.JSON(200, endpoint) 24 | return 25 | } 26 | 27 | func V1GetEndpoints(c *middleware.Context, query m.GetEndpointsQuery) { 28 | query.OrgId = int64(c.User.ID) 29 | log.Info("calling sqlstore.GetEndpoints") 30 | endpoints, err := sqlstore.GetEndpoints(&query) 31 | if err != nil { 32 | handleError(c, err) 33 | return 34 | } 35 | c.JSON(200, endpoints) 36 | } 37 | 38 | func V1DeleteEndpoint(c *middleware.Context) { 39 | id := c.ParamsInt64(":id") 40 | 41 | err := sqlstore.DeleteEndpoint(int64(c.User.ID), id) 42 | if err != nil { 43 | handleError(c, err) 44 | return 45 | } 46 | 47 | c.JSON(200, "endpoint deleted") 48 | return 49 | } 50 | 51 | func V1AddEndpoint(c *middleware.Context, cmd m.AddEndpointCommand) { 52 | cmd.OrgId = int64(c.User.ID) 53 | if cmd.Name == "" { 54 | c.JSON(400, "Endpoint name not set.") 55 | return 56 | } 57 | checks := make([]m.Check, len(cmd.Monitors)) 58 | for i, mon := range cmd.Monitors { 59 | checks[i] = m.Check{ 60 | OrgId: int64(c.User.ID), 61 | EndpointId: 0, 62 | Type: m.MonitorTypeToCheckTypeMap[mon.MonitorTypeId-1], 63 | Frequency: 
mon.Frequency, 64 | Enabled: mon.Enabled, 65 | HealthSettings: mon.HealthSettings, 66 | Route: &m.CheckRoute{}, 67 | Settings: m.MonitorSettingsDTO(mon.Settings).ToV2Setting(m.MonitorTypeToCheckTypeMap[mon.MonitorTypeId-1]), 68 | } 69 | if len(mon.CollectorTags) > 0 { 70 | checks[i].Route.Type = m.RouteByTags 71 | checks[i].Route.Config = map[string]interface{}{"tags": mon.CollectorTags} 72 | } else { 73 | checks[i].Route.Type = m.RouteByIds 74 | checks[i].Route.Config = map[string]interface{}{"ids": mon.CollectorIds} 75 | } 76 | err := sqlstore.ValidateCheckRoute(&checks[i]) 77 | if err != nil { 78 | handleError(c, err) 79 | return 80 | } 81 | 82 | } 83 | endpoint := m.EndpointDTO{ 84 | OrgId: cmd.OrgId, 85 | Name: cmd.Name, 86 | Tags: cmd.Tags, 87 | Created: time.Now(), 88 | Updated: time.Now(), 89 | Checks: checks, 90 | } 91 | err := sqlstore.AddEndpoint(&endpoint) 92 | if err != nil { 93 | handleError(c, err) 94 | return 95 | } 96 | 97 | c.JSON(200, endpoint) 98 | } 99 | 100 | func V1UpdateEndpoint(c *middleware.Context, cmd m.UpdateEndpointCommand) { 101 | cmd.OrgId = int64(c.User.ID) 102 | if cmd.Name == "" { 103 | c.JSON(400, "Endpoint name not set.") 104 | return 105 | } 106 | // get existing endpoint. 107 | endpoint, err := sqlstore.GetEndpointById(cmd.OrgId, cmd.Id) 108 | if err != nil { 109 | handleError(c, err) 110 | return 111 | } 112 | if endpoint == nil { 113 | c.JSON(404, "Endpoint not found") 114 | return 115 | } 116 | 117 | endpoint.Name = cmd.Name 118 | endpoint.Tags = cmd.Tags 119 | 120 | err = sqlstore.UpdateEndpoint(endpoint) 121 | if err != nil { 122 | handleError(c, err) 123 | return 124 | } 125 | 126 | c.JSON(200, "Endpoint updated") 127 | } 128 | 129 | func V1DiscoverEndpoint(c *middleware.Context, cmd m.DiscoverEndpointCmd) { 130 | endpoint, err := endpointdiscovery.Discover(cmd.Name) 131 | if err != nil { 132 | handleError(c, err) 133 | return 134 | } 135 | // convert from checks to v1api SuggestedMonitor 136 | monitors := make([]m.SuggestedMonitor, len(endpoint.Checks)) 137 | for i, check := range endpoint.Checks { 138 | monitors[i] = m.SuggestedMonitor{ 139 | MonitorTypeId: checkTypeToId(check.Type), 140 | Settings: checkSettingToMonitorSetting(check.Settings), 141 | } 142 | } 143 | c.JSON(200, monitors) 144 | } 145 | 146 | func checkTypeToId(t m.CheckType) int64 { 147 | lookup := map[m.CheckType]int64{ 148 | m.HTTP_CHECK: 1, 149 | m.HTTPS_CHECK: 2, 150 | m.PING_CHECK: 3, 151 | m.DNS_CHECK: 4, 152 | } 153 | typeNum, exists := lookup[t] 154 | if !exists { 155 | return 0 156 | } 157 | return typeNum 158 | } 159 | 160 | func checkSettingToMonitorSetting(settings map[string]interface{}) []m.MonitorSettingDTO { 161 | monSetting := make([]m.MonitorSettingDTO, 0) 162 | for key, val := range settings { 163 | monSetting = append(monSetting, m.MonitorSettingDTO{ 164 | Variable: key, 165 | Value: fmt.Sprintf("%v", val), 166 | }) 167 | } 168 | return monSetting 169 | } 170 | -------------------------------------------------------------------------------- /pkg/api/graphite.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "net/http" 5 | "net/http/httputil" 6 | "net/url" 7 | "regexp" 8 | "strings" 9 | 10 | "github.com/grafana/grafana/pkg/util" 11 | "github.com/raintank/worldping-api/pkg/middleware" 12 | m "github.com/raintank/worldping-api/pkg/models" 13 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 14 | "github.com/raintank/worldping-api/pkg/setting" 15 | ) 16 | 17 | var ( 18 | 
GraphiteUrl *url.URL 19 | gProxy httputil.ReverseProxy 20 | ) 21 | 22 | func initGraphiteProxy() error { 23 | GraphiteUrl, err := url.Parse(setting.TsdbUrl + "graphite/") 24 | if err != nil { 25 | return err 26 | } 27 | gProxy.Director = func(req *http.Request) { 28 | req.URL.Scheme = GraphiteUrl.Scheme 29 | req.URL.Host = GraphiteUrl.Host 30 | req.URL.Path = strings.TrimPrefix(req.URL.Path, "/api") 31 | } 32 | return nil 33 | } 34 | 35 | func V1GraphiteProxy(c *middleware.Context) { 36 | proxyPath := c.Params("*") 37 | 38 | // check if this is a special raintank_db requests 39 | if proxyPath == "metrics/find" { 40 | query := c.Query("query") 41 | if strings.HasPrefix(query, "raintank_db") { 42 | response, err := executeRaintankDbQuery(query, int64(c.User.ID)) 43 | if err != nil { 44 | handleError(c, err) 45 | return 46 | } 47 | c.JSON(200, response) 48 | return 49 | } 50 | } 51 | 52 | // forward to tsdb-gw 53 | gProxy.ServeHTTP(c.Resp, c.Req.Request) 54 | } 55 | 56 | func executeRaintankDbQuery(query string, orgId int64) (interface{}, error) { 57 | values := []map[string]interface{}{} 58 | 59 | regex := regexp.MustCompile(`^raintank_db\.tags\.(\w+)\.(\w+|\*)`) 60 | matches := regex.FindAllStringSubmatch(query, -1) 61 | 62 | if len(matches) == 0 { 63 | return values, nil 64 | } 65 | 66 | tagType := matches[0][1] 67 | tagValue := matches[0][2] 68 | 69 | if tagType == "collectors" || tagType == "probes" { 70 | if tagValue == "*" { 71 | // return all tags 72 | tags, err := sqlstore.GetProbeTags(orgId) 73 | if err != nil { 74 | return nil, err 75 | } 76 | 77 | for _, tag := range tags { 78 | values = append(values, util.DynMap{"text": tag, "expandable": false}) 79 | } 80 | return values, nil 81 | } else if tagValue != "" { 82 | // return tag values for key 83 | collectorsQuery := m.GetProbesQuery{OrgId: orgId, Tag: tagValue} 84 | probes, err := sqlstore.GetProbes(&collectorsQuery) 85 | if err != nil { 86 | return nil, err 87 | } 88 | for _, collector := range probes { 89 | values = append(values, util.DynMap{"text": collector.Slug, "expandable": false}) 90 | } 91 | } 92 | } else if tagType == "endpoints" { 93 | if tagValue == "*" { 94 | // return all tags 95 | tags, err := sqlstore.GetEndpointTags(orgId) 96 | if err != nil { 97 | return nil, err 98 | } 99 | 100 | for _, tag := range tags { 101 | values = append(values, util.DynMap{"text": tag, "expandable": false}) 102 | } 103 | return values, nil 104 | } else if tagValue != "" { 105 | // return tag values for key 106 | endpointsQuery := m.GetEndpointsQuery{OrgId: orgId, Tag: tagValue} 107 | endpoints, err := sqlstore.GetEndpoints(&endpointsQuery) 108 | if err != nil { 109 | return nil, err 110 | } 111 | 112 | for _, endpoint := range endpoints { 113 | values = append(values, util.DynMap{"text": endpoint.Slug, "expandable": false}) 114 | } 115 | 116 | } 117 | } 118 | 119 | return values, nil 120 | } 121 | -------------------------------------------------------------------------------- /pkg/api/graphite_test.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "testing" 5 | 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 8 | . 
"github.com/smartystreets/goconvey/convey" 9 | ) 10 | 11 | func populateDB(t *testing.T) { 12 | if err := sqlstore.AddProbe(&m.ProbeDTO{ 13 | Name: "dev1", 14 | Tags: []string{"tag1", "tag2"}, 15 | OrgId: 10, 16 | }); err != nil { 17 | t.Fatal(err) 18 | } 19 | 20 | if err := sqlstore.AddEndpoint(&m.EndpointDTO{ 21 | Name: "dev2", 22 | Tags: []string{"Dev"}, 23 | OrgId: 10, 24 | }); err != nil { 25 | t.Fatal(err) 26 | } 27 | } 28 | 29 | func TestGraphiteRaintankQueries(t *testing.T) { 30 | InitTestDB(t) 31 | populateDB(t) 32 | 33 | Convey("Given raintank collector tags query", t, func() { 34 | resp, err := executeRaintankDbQuery("raintank_db.tags.collectors.*", 10) 35 | So(err, ShouldBeNil) 36 | 37 | Convey("should return tags", func() { 38 | array := resp.([]map[string]interface{}) 39 | So(len(array), ShouldEqual, 2) 40 | So(array[0]["text"], ShouldEqual, "tag1") 41 | }) 42 | }) 43 | 44 | Convey("Given raintank collector tag values query", t, func() { 45 | resp, err := executeRaintankDbQuery("raintank_db.tags.collectors.tag1.*", 10) 46 | So(err, ShouldBeNil) 47 | 48 | Convey("should return tags", func() { 49 | array := resp.([]map[string]interface{}) 50 | So(len(array), ShouldEqual, 1) 51 | So(array[0]["text"], ShouldEqual, "dev1") 52 | }) 53 | }) 54 | 55 | Convey("Given raintank endpoint tags query", t, func() { 56 | resp, err := executeRaintankDbQuery("raintank_db.tags.endpoints.*", 10) 57 | So(err, ShouldBeNil) 58 | 59 | Convey("should return tags", func() { 60 | array := resp.([]map[string]interface{}) 61 | So(len(array), ShouldEqual, 1) 62 | So(array[0]["text"], ShouldEqual, "Dev") 63 | }) 64 | }) 65 | 66 | Convey("Given raintank endpoint tag values query", t, func() { 67 | 68 | resp, err := executeRaintankDbQuery("raintank_db.tags.endpoints.Dev.*", 10) 69 | So(err, ShouldBeNil) 70 | 71 | Convey("should return tags", func() { 72 | array := resp.([]map[string]interface{}) 73 | So(len(array), ShouldEqual, 1) 74 | So(array[0]["text"], ShouldEqual, "dev2") 75 | }) 76 | }) 77 | } 78 | -------------------------------------------------------------------------------- /pkg/api/quota.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "github.com/raintank/worldping-api/pkg/api/rbody" 5 | "github.com/raintank/worldping-api/pkg/middleware" 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 8 | "github.com/raintank/worldping-api/pkg/setting" 9 | ) 10 | 11 | func V1GetOrgQuotas(c *middleware.Context) { 12 | var quotas []m.OrgQuotaDTO 13 | var err error 14 | if setting.Quota.Enabled { 15 | quotas, err = sqlstore.GetOrgQuotas(int64(c.User.ID)) 16 | if err != nil { 17 | handleError(c, err) 18 | return 19 | } 20 | } else { 21 | quotas = []m.OrgQuotaDTO{ 22 | { 23 | OrgId: int64(c.User.ID), 24 | Target: "endpoint", 25 | Limit: -1, 26 | Used: -10, 27 | }, 28 | { 29 | OrgId: int64(c.User.ID), 30 | Target: "probe", 31 | Limit: -1, 32 | Used: -10, 33 | }, 34 | { 35 | OrgId: int64(c.User.ID), 36 | Target: "downloadLimit", 37 | Limit: -1, 38 | Used: -1, 39 | }, 40 | } 41 | } 42 | c.JSON(200, quotas) 43 | } 44 | 45 | func GetQuotas(c *middleware.Context) *rbody.ApiResponse { 46 | var quotas []m.OrgQuotaDTO 47 | var err error 48 | if setting.Quota.Enabled { 49 | quotas, err = sqlstore.GetOrgQuotas(int64(c.User.ID)) 50 | if err != nil { 51 | return rbody.ErrResp(err) 52 | } 53 | } else { 54 | quotas = []m.OrgQuotaDTO{ 55 | { 56 | OrgId: int64(c.User.ID), 57 | Target: "endpoint", 
58 | Limit: -1, 59 | Used: -1, 60 | }, 61 | { 62 | OrgId: int64(c.User.ID), 63 | Target: "probe", 64 | Limit: -1, 65 | Used: -1, 66 | }, 67 | { 68 | OrgId: int64(c.User.ID), 69 | Target: "downloadLimit", 70 | Limit: -1, 71 | Used: -1, 72 | }, 73 | } 74 | } 75 | 76 | return rbody.OkResp("quotas", quotas) 77 | } 78 | 79 | func GetOrgQuotas(c *middleware.Context) *rbody.ApiResponse { 80 | var quotas []m.OrgQuotaDTO 81 | var err error 82 | org := c.ParamsInt64("orgId") 83 | if setting.Quota.Enabled { 84 | quotas, err = sqlstore.GetOrgQuotas(org) 85 | if err != nil { 86 | return rbody.ErrResp(err) 87 | } 88 | } else { 89 | quotas = []m.OrgQuotaDTO{ 90 | { 91 | OrgId: org, 92 | Target: "endpoint", 93 | Limit: -1, 94 | Used: -1, 95 | }, 96 | { 97 | OrgId: org, 98 | Target: "probe", 99 | Limit: -1, 100 | Used: -1, 101 | }, 102 | { 103 | OrgId: int64(c.User.ID), 104 | Target: "downloadLimit", 105 | Limit: -1, 106 | Used: -1, 107 | }, 108 | } 109 | } 110 | 111 | return rbody.OkResp("quotas", quotas) 112 | } 113 | 114 | func UpdateOrgQuota(c *middleware.Context) *rbody.ApiResponse { 115 | orgId := c.ParamsInt64(":orgId") 116 | target := c.Params(":target") 117 | limit := c.ParamsInt64(":limit") 118 | 119 | if _, ok := setting.Quota.Org.ToMap()[target]; !ok { 120 | return rbody.ErrResp(m.NewNotFoundError("quota target not found")) 121 | } 122 | 123 | quota := m.OrgQuotaDTO{ 124 | OrgId: orgId, 125 | Target: target, 126 | Limit: limit, 127 | } 128 | err := sqlstore.UpdateOrgQuota(&quota) 129 | if err != nil { 130 | return rbody.ErrResp(err) 131 | } 132 | return rbody.OkResp("quota", quota) 133 | } 134 | -------------------------------------------------------------------------------- /pkg/api/rbody/rbody.go: -------------------------------------------------------------------------------- 1 | package rbody 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "strings" 7 | "time" 8 | 9 | "github.com/raintank/worldping-api/pkg/log" 10 | "github.com/raintank/worldping-api/pkg/middleware" 11 | m "github.com/raintank/worldping-api/pkg/models" 12 | "gopkg.in/macaron.v1" 13 | ) 14 | 15 | type ApiError struct { 16 | Code int 17 | Message string 18 | } 19 | 20 | func (e ApiError) Error() string { 21 | return fmt.Sprintf("%d: %s", e.Code, e.Message) 22 | } 23 | 24 | type ApiResponse struct { 25 | Meta *ResponseMeta `json:"meta"` 26 | Body json.RawMessage `json:"body"` 27 | } 28 | 29 | type ResponseMeta struct { 30 | Code int `json:"code"` 31 | Message string `json:"message"` 32 | Type string `json:"type"` 33 | } 34 | 35 | func (r *ApiResponse) Error() error { 36 | if r.Meta.Code == 200 { 37 | return nil 38 | } 39 | return ApiError{Code: r.Meta.Code, Message: r.Meta.Message} 40 | } 41 | 42 | func OkResp(t string, body interface{}) *ApiResponse { 43 | bRaw, err := json.Marshal(body) 44 | if err != nil { 45 | return ErrResp(err) 46 | } 47 | resp := &ApiResponse{ 48 | Meta: &ResponseMeta{ 49 | Code: 200, 50 | Message: "success", 51 | Type: t, 52 | }, 53 | Body: json.RawMessage(bRaw), 54 | } 55 | return resp 56 | } 57 | 58 | func ErrResp(err error) *ApiResponse { 59 | code := 500 60 | message := err.Error() 61 | 62 | if e, ok := err.(m.AppError); ok { 63 | code = e.Code() 64 | message = e.Message() 65 | } 66 | 67 | resp := &ApiResponse{ 68 | Meta: &ResponseMeta{ 69 | Code: code, 70 | Message: message, 71 | Type: "error", 72 | }, 73 | Body: json.RawMessage([]byte("null")), 74 | } 75 | return resp 76 | } 77 | 78 | func Wrap(action interface{}) macaron.Handler { 79 | return func(c *middleware.Context) { 80 | pre := time.Now()
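// Wrap adapts handlers of the form `func(c *middleware.Context) *rbody.ApiResponse` into macaron handlers:
// the action is invoked via injection, handler failures and 500 responses are logged, a per-path timing
// metric is recorded, and the result is written as JSON with HTTP 200 while the effective status code
// travels in Meta.Code.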
81 | var res *ApiResponse 82 | val, err := c.Invoke(action) 83 | if err != nil { 84 | log.Error(3, "request handler error: %s", err.Error()) 85 | c.JSON(500, err.Error()) 86 | } else if val != nil && len(val) > 0 { 87 | res = val[0].Interface().(*ApiResponse) 88 | } else { 89 | log.Error(3, "request handler error: No response generated") 90 | c.JSON(500, "No response generated.") 91 | } 92 | 93 | if res.Meta.Code == 500 { 94 | log.Error(3, "internal server error: %s", res.Meta.Message) 95 | } 96 | timer(c, time.Since(pre)) 97 | c.JSON(200, res) 98 | } 99 | } 100 | 101 | func timer(c *middleware.Context, duration time.Duration) { 102 | path := strings.Replace(strings.Trim(c.Req.URL.Path, "/"), "/", ".", -1) 103 | log.Debug("%s.%s took %s.", path, c.Req.Method, duration) 104 | } 105 | -------------------------------------------------------------------------------- /pkg/api/sockets/sockets.go: -------------------------------------------------------------------------------- 1 | package sockets 2 | 3 | import ( 4 | "math/rand" 5 | "sync" 6 | "time" 7 | 8 | "github.com/grafana/metrictank/stats" 9 | "github.com/raintank/worldping-api/pkg/log" 10 | m "github.com/raintank/worldping-api/pkg/models" 11 | "github.com/raintank/worldping-api/pkg/services" 12 | ) 13 | 14 | var ( 15 | ProbesConnected = stats.NewGauge32("api.probes.connected") 16 | UpdatesSent = stats.NewCounter32("api.probes.updates-sent") 17 | CreatesSent = stats.NewCounter32("api.probes.creates-sent") 18 | RemovesSent = stats.NewCounter32("api.probes.removes-sent") 19 | 20 | socketCache *Cache 21 | publisher services.MetricsEventsPublisher 22 | ) 23 | 24 | type Cache struct { 25 | sync.RWMutex 26 | Sockets map[string]*ProbeSocket 27 | done chan struct{} 28 | refreshChan chan int64 29 | } 30 | 31 | func InitCache(pub services.MetricsEventsPublisher) { 32 | publisher = pub 33 | if socketCache != nil { 34 | return 35 | } 36 | socketCache = &Cache{ 37 | Sockets: make(map[string]*ProbeSocket), 38 | done: make(chan struct{}), 39 | refreshChan: make(chan int64, 100), 40 | } 41 | 42 | go socketCache.refreshLoop() 43 | go socketCache.refreshQueue() 44 | } 45 | 46 | func Shutdown() { 47 | socketCache.Shutdown() 48 | } 49 | 50 | func Set(id string, sock *ProbeSocket) { 51 | socketCache.Set(id, sock) 52 | } 53 | 54 | func Remove(id string) { 55 | socketCache.Remove(id) 56 | } 57 | 58 | func Emit(id string, event string, payload interface{}) { 59 | socketCache.Emit(id, event, payload) 60 | } 61 | 62 | func Refresh(id int64) { 63 | socketCache.Refresh(id) 64 | } 65 | 66 | func UpdateProbe(probe *m.ProbeDTO) { 67 | socketCache.UpdateProbe(probe) 68 | } 69 | 70 | func (c *Cache) Set(id string, sock *ProbeSocket) { 71 | c.Lock() 72 | c.Sockets[id] = sock 73 | c.Unlock() 74 | ProbesConnected.Inc() 75 | } 76 | 77 | func (c *Cache) Remove(id string) { 78 | c.Lock() 79 | delete(c.Sockets, id) 80 | c.Unlock() 81 | ProbesConnected.Dec() 82 | } 83 | 84 | func (c *Cache) Shutdown() { 85 | c.done <- struct{}{} 86 | sessList := make([]*ProbeSocket, 0) 87 | c.Lock() 88 | for _, sock := range c.Sockets { 89 | sessList = append(sessList, sock) 90 | } 91 | c.Sockets = make(map[string]*ProbeSocket) 92 | c.Unlock() 93 | for _, sock := range sessList { 94 | sock.Remove() 95 | } 96 | return 97 | } 98 | 99 | func (c *Cache) Emit(id string, event string, payload interface{}) { 100 | c.RLock() 101 | socket, ok := c.Sockets[id] 102 | if !ok { 103 | log.Info("socket " + id + " is not local.") 104 | c.RUnlock() 105 | return 106 | } 107 | c.RUnlock() 108 | socket.emit(event, 
payload) 109 | switch event { 110 | case "updated": 111 | UpdatesSent.Inc() 112 | case "created": 113 | CreatesSent.Inc() 114 | case "removed": 115 | RemovesSent.Inc() 116 | } 117 | } 118 | 119 | func (c *Cache) Refresh(id int64) { 120 | c.refreshChan <- id 121 | } 122 | 123 | func (c *Cache) refresh(id int64) { 124 | sessList := make([]*ProbeSocket, 0) 125 | c.RLock() 126 | for _, sock := range c.Sockets { 127 | if sock.Probe.Id == id { 128 | sessList = append(sessList, sock) 129 | } 130 | } 131 | c.RUnlock() 132 | for _, sock := range sessList { 133 | sock.Refresh() 134 | } 135 | } 136 | 137 | func (c *Cache) refreshQueue() { 138 | ticker := time.NewTicker(time.Second * 2) 139 | buffer := make([]int64, 0) 140 | for { 141 | select { 142 | case <-c.done: 143 | log.Info("RefreshQueue terminating due to shutdown signal.") 144 | ticker.Stop() 145 | return 146 | case <-ticker.C: 147 | if len(buffer) == 0 { 148 | break 149 | } 150 | log.Debug("processing %d queued probe refreshes.", len(buffer)) 151 | ids := make(map[int64]struct{}) 152 | for _, id := range buffer { 153 | ids[id] = struct{}{} 154 | } 155 | log.Debug("%d refreshes are for %d probes", len(buffer), len(ids)) 156 | for id := range ids { 157 | c.refresh(id) 158 | } 159 | buffer = buffer[:0] 160 | case id := <-c.refreshChan: 161 | log.Debug("adding refresh of %d to buffer", id) 162 | buffer = append(buffer, id) 163 | } 164 | } 165 | } 166 | 167 | func (c *Cache) refreshLoop() { 168 | ticker := time.NewTicker(time.Second * 30) 169 | 170 | // allow up to 10 probes to be refreshing concurrently. 171 | // we constrain this as querying the DB and marshaling all of the checks into the refresh 172 | // payload can be quite resource intensive. 173 | limiter := make(chan struct{}, 10) 174 | for { 175 | select { 176 | case <-c.done: 177 | log.Info("RefreshLoop terminating due to shutdown signal.") 178 | ticker.Stop() 179 | return 180 | case <-ticker.C: 181 | sessList := make([]*ProbeSocket, 0) 182 | c.RLock() 183 | for _, sock := range c.Sockets { 184 | sessList = append(sessList, sock) 185 | } 186 | c.RUnlock() 187 | 188 | for _, sock := range sessList { 189 | // add some jitter so that we avoid all probes refreshing at the same time. 190 | // Probes will refresh between every 5 and 10minutes. 191 | maxRefreshDelay := time.Second * time.Duration(300+rand.Intn(240)) 192 | if time.Since(sock.LastRefresh()) >= maxRefreshDelay { 193 | limiter <- struct{}{} 194 | go func(sock *ProbeSocket) { 195 | sock.Refresh() 196 | <-limiter 197 | }(sock) 198 | } 199 | } 200 | } 201 | } 202 | } 203 | 204 | func (c *Cache) UpdateProbe(probe *m.ProbeDTO) { 205 | c.RLock() 206 | defer c.RUnlock() 207 | // get list of local sockets for this collector. 
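// A probe can hold several live sockets at once (for example across reconnects), so every socket whose
// Probe.Id matches is handed the fresh probe definition and re-announces itself via EmitReady.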
208 | sockets := make([]*ProbeSocket, 0) 209 | for _, sock := range c.Sockets { 210 | if sock.Probe.Id == probe.Id { 211 | sockets = append(sockets, sock) 212 | } 213 | } 214 | if len(sockets) > 0 { 215 | for _, sock := range sockets { 216 | sock.Probe = probe 217 | sock.EmitReady() 218 | } 219 | } 220 | } 221 | -------------------------------------------------------------------------------- /pkg/api/v2_endpoint.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "github.com/raintank/worldping-api/pkg/api/rbody" 5 | "github.com/raintank/worldping-api/pkg/middleware" 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/services/endpointdiscovery" 8 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 9 | ) 10 | 11 | func GetEndpoints(c *middleware.Context, query m.GetEndpointsQuery) *rbody.ApiResponse { 12 | query.OrgId = int64(c.User.ID) 13 | 14 | endpoints, err := sqlstore.GetEndpoints(&query) 15 | if err != nil { 16 | return rbody.ErrResp(err) 17 | } 18 | 19 | return rbody.OkResp("endpoints", endpoints) 20 | } 21 | 22 | func GetEndpointById(c *middleware.Context) *rbody.ApiResponse { 23 | id := c.ParamsInt64(":id") 24 | 25 | endpoint, err := sqlstore.GetEndpointById(int64(c.User.ID), id) 26 | if err != nil { 27 | return rbody.ErrResp(err) 28 | } 29 | 30 | return rbody.OkResp("endpoint", endpoint) 31 | } 32 | 33 | func DeleteEndpoint(c *middleware.Context) *rbody.ApiResponse { 34 | id := c.ParamsInt64(":id") 35 | 36 | err := sqlstore.DeleteEndpoint(int64(c.User.ID), id) 37 | if err != nil { 38 | return rbody.ErrResp(err) 39 | } 40 | 41 | return rbody.OkResp("endpoint", nil) 42 | } 43 | 44 | func AddEndpoint(c *middleware.Context, endpoint m.EndpointDTO) *rbody.ApiResponse { 45 | endpoint.OrgId = int64(c.User.ID) 46 | if endpoint.Name == "" { 47 | return rbody.ErrResp(m.NewValidationError("Endpoint name not set.")) 48 | } 49 | 50 | quotas, err := sqlstore.GetOrgQuotas(int64(c.User.ID)) 51 | if err != nil { 52 | return rbody.ErrResp(m.NewValidationError("Error checking quota")) 53 | } 54 | 55 | for i := range endpoint.Checks { 56 | check := endpoint.Checks[i] 57 | check.OrgId = int64(c.User.ID) 58 | if !check.Enabled { 59 | continue 60 | } 61 | if err := check.Validate(quotas); err != nil { 62 | return rbody.ErrResp(err) 63 | } 64 | 65 | err := sqlstore.ValidateCheckRoute(&check) 66 | if err != nil { 67 | return rbody.ErrResp(err) 68 | } 69 | } 70 | 71 | err = sqlstore.AddEndpoint(&endpoint) 72 | if err != nil { 73 | return rbody.ErrResp(err) 74 | } 75 | 76 | return rbody.OkResp("endpoint", endpoint) 77 | } 78 | 79 | func UpdateEndpoint(c *middleware.Context, endpoint m.EndpointDTO) *rbody.ApiResponse { 80 | endpoint.OrgId = int64(c.User.ID) 81 | if endpoint.Name == "" { 82 | return rbody.ErrResp(m.NewValidationError("Endpoint name not set.")) 83 | } 84 | if endpoint.Id == 0 { 85 | return rbody.ErrResp(m.NewValidationError("Endpoint id not set.")) 86 | } 87 | 88 | quotas, err := sqlstore.GetOrgQuotas(int64(c.User.ID)) 89 | if err != nil { 90 | return rbody.ErrResp(m.NewValidationError("Error checking quota")) 91 | } 92 | 93 | for i := range endpoint.Checks { 94 | check := endpoint.Checks[i] 95 | if !check.Enabled { 96 | continue 97 | } 98 | if err := check.Validate(quotas); err != nil { 99 | return rbody.ErrResp(err) 100 | } 101 | } 102 | 103 | err = sqlstore.UpdateEndpoint(&endpoint) 104 | if err != nil { 105 | return rbody.ErrResp(err) 106 | } 107 | 108 | return 
rbody.OkResp("endpoint", endpoint) 109 | } 110 | 111 | func DiscoverEndpoint(c *middleware.Context, cmd m.DiscoverEndpointCmd) *rbody.ApiResponse { 112 | endpoint, err := endpointdiscovery.Discover(cmd.Name) 113 | if err != nil { 114 | return rbody.ErrResp(err) 115 | } 116 | 117 | return rbody.OkResp("endpoint", endpoint) 118 | } 119 | 120 | func DisableEndpoints(c *middleware.Context) *rbody.ApiResponse { 121 | query := m.GetEndpointsQuery{ 122 | OrgId: int64(c.User.ID), 123 | } 124 | 125 | endpoints, err := sqlstore.GetEndpoints(&query) 126 | if err != nil { 127 | return rbody.ErrResp(err) 128 | } 129 | disabledChecks := make(map[string][]string) 130 | 131 | for i := range endpoints { 132 | e := &endpoints[i] 133 | for j := range e.Checks { 134 | c := &e.Checks[j] 135 | if c.Enabled { 136 | c.Enabled = false 137 | disabledChecks[e.Slug] = append(disabledChecks[e.Slug], string(c.Type)) 138 | } 139 | } 140 | err := sqlstore.UpdateEndpoint(e) 141 | if err != nil { 142 | return rbody.ErrResp(err) 143 | } 144 | } 145 | 146 | return rbody.OkResp("disabledChecks", disabledChecks) 147 | } 148 | -------------------------------------------------------------------------------- /pkg/api/v2_probe.go: -------------------------------------------------------------------------------- 1 | package api 2 | 3 | import ( 4 | "github.com/raintank/worldping-api/pkg/api/rbody" 5 | "github.com/raintank/worldping-api/pkg/middleware" 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 8 | ) 9 | 10 | func GetProbes(c *middleware.Context, query m.GetProbesQuery) *rbody.ApiResponse { 11 | query.OrgId = int64(c.User.ID) 12 | 13 | probes, err := sqlstore.GetProbes(&query) 14 | if err != nil { 15 | return rbody.ErrResp(err) 16 | } 17 | 18 | return rbody.OkResp("probes", probes) 19 | } 20 | 21 | func GetProbeById(c *middleware.Context) *rbody.ApiResponse { 22 | id := c.ParamsInt64(":id") 23 | 24 | probe, err := sqlstore.GetProbeById(id, int64(c.User.ID)) 25 | if err != nil { 26 | return rbody.ErrResp(err) 27 | } 28 | 29 | return rbody.OkResp("probe", probe) 30 | } 31 | 32 | func DeleteProbe(c *middleware.Context) *rbody.ApiResponse { 33 | id := c.ParamsInt64(":id") 34 | 35 | err := sqlstore.DeleteProbe(id, int64(c.User.ID)) 36 | if err != nil { 37 | return rbody.ErrResp(err) 38 | } 39 | 40 | return rbody.OkResp("probe", nil) 41 | } 42 | 43 | func AddProbe(c *middleware.Context, probe m.ProbeDTO) *rbody.ApiResponse { 44 | probe.OrgId = int64(c.User.ID) 45 | if probe.Id != 0 { 46 | return rbody.ErrResp(m.NewValidationError("Id already set. 
Try update instead of create.")) 47 | } 48 | if probe.Name == "" { 49 | return rbody.ErrResp(m.NewValidationError("Probe name not set.")) 50 | } 51 | if probe.Public { 52 | if !c.IsAdmin { 53 | return rbody.ErrResp(m.NewValidationError("Only admins can make public probes.")) 54 | } 55 | } 56 | 57 | if err := sqlstore.AddProbe(&probe); err != nil { 58 | return rbody.ErrResp(err) 59 | } 60 | 61 | return rbody.OkResp("probe", probe) 62 | } 63 | 64 | func UpdateProbe(c *middleware.Context, probe m.ProbeDTO) *rbody.ApiResponse { 65 | probe.OrgId = int64(c.User.ID) 66 | if probe.Name == "" { 67 | return rbody.ErrResp(m.NewValidationError("Probe name not set.")) 68 | } 69 | 70 | if err := sqlstore.UpdateProbe(&probe); err != nil { 71 | return rbody.ErrResp(err) 72 | } 73 | 74 | return rbody.OkResp("probe", probe) 75 | } 76 | -------------------------------------------------------------------------------- /pkg/cmd/web.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 Unknwon 2 | // Copyright 2014 Torkel Ödegaard 3 | 4 | package cmd 5 | 6 | import ( 7 | "crypto/tls" 8 | _ "expvar" 9 | "fmt" 10 | "net" 11 | "net/http" 12 | "time" 13 | 14 | "github.com/go-macaron/toolbox" 15 | "gopkg.in/macaron.v1" 16 | 17 | "github.com/raintank/worldping-api/pkg/api" 18 | "github.com/raintank/worldping-api/pkg/log" 19 | "github.com/raintank/worldping-api/pkg/setting" 20 | ) 21 | 22 | func newMacaron() *macaron.Macaron { 23 | macaron.Env = setting.Env 24 | m := macaron.Classic() 25 | m.Use(toolbox.Toolboxer(m)) 26 | m.Use(func(ctx *macaron.Context) { 27 | if ctx.Req.URL.Path == "/debug/vars" { 28 | http.DefaultServeMux.ServeHTTP(ctx.Resp, ctx.Req.Request) 29 | } 30 | }) 31 | 32 | return m 33 | } 34 | 35 | func StartServer(notifyShutdown chan struct{}) { 36 | var err error 37 | m := newMacaron() 38 | api.Register(m) 39 | 40 | listenAddr := fmt.Sprintf("%s:%s", setting.HttpAddr, setting.HttpPort) 41 | log.Info("Listen: %v://%s%s", setting.Protocol, listenAddr, setting.AppSubUrl) 42 | 43 | // define our own listner so we can call Close on it 44 | l, err := net.Listen("tcp", listenAddr) 45 | if err != nil { 46 | log.Fatal(4, err.Error()) 47 | } 48 | go handleShutdown(notifyShutdown, l) 49 | srv := http.Server{ 50 | Addr: listenAddr, 51 | Handler: m, 52 | } 53 | if setting.Protocol == setting.HTTPS { 54 | cert, err := tls.LoadX509KeyPair(setting.CertFile, setting.KeyFile) 55 | if err != nil { 56 | log.Fatal(4, "Fail to start server: %v", err) 57 | } 58 | srv.TLSConfig = &tls.Config{ 59 | Certificates: []tls.Certificate{cert}, 60 | NextProtos: []string{"http/1.1"}, 61 | } 62 | tlsListener := tls.NewListener(tcpKeepAliveListener{l.(*net.TCPListener)}, srv.TLSConfig) 63 | err = srv.Serve(tlsListener) 64 | } else { 65 | err = srv.Serve(tcpKeepAliveListener{l.(*net.TCPListener)}) 66 | } 67 | 68 | if err != nil { 69 | log.Info(err.Error()) 70 | } 71 | } 72 | 73 | func handleShutdown(notifyShutdown chan struct{}, l net.Listener) { 74 | <-notifyShutdown 75 | log.Info("shutdown started.") 76 | l.Close() 77 | } 78 | 79 | type tcpKeepAliveListener struct { 80 | *net.TCPListener 81 | } 82 | 83 | func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) { 84 | tc, err := ln.AcceptTCP() 85 | if err != nil { 86 | return 87 | } 88 | tc.SetKeepAlive(true) 89 | tc.SetKeepAlivePeriod(3 * time.Minute) 90 | return tc, nil 91 | } 92 | -------------------------------------------------------------------------------- /pkg/elasticsearch/elasticsearch.go: 
-------------------------------------------------------------------------------- 1 | package elasticsearch 2 | 3 | import ( 4 | "bytes" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "net/url" 10 | "regexp" 11 | "strings" 12 | 13 | "github.com/raintank/tsdb-gw/util" 14 | log "github.com/sirupsen/logrus" 15 | "gopkg.in/macaron.v1" 16 | ) 17 | 18 | var ( 19 | ElasticsearchURL *url.URL 20 | IndexName string 21 | ) 22 | 23 | func Init(elasticsearchURL, indexName string) error { 24 | var err error 25 | IndexName = indexName 26 | ElasticsearchURL, err = url.Parse(elasticsearchURL) 27 | return err 28 | } 29 | 30 | func Proxy(orgID int, c *macaron.Context) { 31 | proxyPath := c.Params("*") 32 | body, err := ioutil.ReadAll(c.Req.Request.Body) 33 | if err != nil { 34 | c.JSON(http.StatusBadRequest, fmt.Sprintf("unable to read request body. %s", err)) 35 | return 36 | } 37 | searchBody, err := restrictSearch(orgID, body) 38 | if err != nil { 39 | c.JSON(http.StatusBadRequest, fmt.Sprintf("unable to read request body. %s", err)) 40 | return 41 | } 42 | log.Debugf("search body is: %s", string(searchBody)) 43 | 44 | url := new(url.URL) 45 | *url = *ElasticsearchURL 46 | url.Path = util.JoinUrlFragments(ElasticsearchURL.Path, proxyPath) 47 | url.RawQuery = c.Req.URL.RawQuery 48 | request := http.Request{ 49 | Method: "POST", 50 | URL: url, 51 | Body: ioutil.NopCloser(bytes.NewReader(searchBody)), 52 | } 53 | 54 | resp, err := http.DefaultClient.Do(&request) 55 | if err != nil { 56 | c.JSON(http.StatusServiceUnavailable, err.Error()) 57 | return 58 | } 59 | 60 | respBody, err := ioutil.ReadAll(resp.Body) 61 | if err != nil { 62 | c.JSON(http.StatusServiceUnavailable, err.Error()) 63 | } 64 | c.WriteHeader(resp.StatusCode) 65 | c.Write(respBody) 66 | } 67 | 68 | func restrictSearch(orgID int, body []byte) ([]byte, error) { 69 | var newBody bytes.Buffer 70 | 71 | lines := strings.Split(string(body), "\n") 72 | for i := 0; i < len(lines); i += 2 { 73 | if lines[i] == "" { 74 | continue 75 | } 76 | if err := validateHeader([]byte(lines[i])); err != nil { 77 | return newBody.Bytes(), err 78 | } 79 | newBody.Write([]byte(lines[i] + "\n")) 80 | 81 | s, err := transformSearch(orgID, []byte(lines[i+1])) 82 | if err != nil { 83 | return newBody.Bytes(), err 84 | } 85 | newBody.Write(s) 86 | newBody.Write([]byte("\n")) 87 | } 88 | return newBody.Bytes(), nil 89 | } 90 | 91 | type msearchHeader struct { 92 | SearchType string `json:"search_type"` 93 | IgnoreUnavailable bool `json:"ignore_unavailable,omitempty"` 94 | Index []string `json:"index"` 95 | } 96 | 97 | func validateHeader(header []byte) error { 98 | h := msearchHeader{} 99 | log.Debugf("validating search header: %s", string(header)) 100 | if err := json.Unmarshal(header, &h); err != nil { 101 | return err 102 | } 103 | if h.SearchType != "query_then_fetch" && h.SearchType != "count" { 104 | return fmt.Errorf("invalid search_type %s", h.SearchType) 105 | } 106 | 107 | for _, index := range h.Index { 108 | if match, err := regexp.Match("^events-\\d\\d\\d\\d-\\d\\d-\\d\\d$", []byte(index)); err != nil || !match { 109 | return fmt.Errorf("invalid index name. 
%s", index) 110 | } 111 | } 112 | 113 | return nil 114 | } 115 | 116 | type esSearch struct { 117 | Size int `json:"size"` 118 | Query interface{} `json:"query"` 119 | Sort interface{} `json:"sort,omitempty"` 120 | Fields interface{} `json:"fields,omitempty"` 121 | ScriptFields interface{} `json:"script_fields,omitempty"` 122 | FielddataFields interface{} `json:"fielddata_fields,omitempty"` 123 | Aggs interface{} `json:"aggs,omitempty"` 124 | } 125 | 126 | type esQueryWrapper struct { 127 | Bool esBool `json:"bool"` 128 | } 129 | 130 | type esBool struct { 131 | Must []interface{} `json:"must"` 132 | Filter interface{} `json:"filter"` 133 | } 134 | 135 | func transformSearch(orgID int, search []byte) ([]byte, error) { 136 | // remove all "format": "epoch_millis" entries, since our timestamp isn't a date 137 | re := regexp.MustCompile(`,\s*"format"\s*:\s*"epoch_millis"`) 138 | cleanSearch := re.ReplaceAllLiteral(search, []byte("")) 139 | re = regexp.MustCompile(`"format"\s*:\s*"epoch_millis"\s*,`) 140 | cleanSearch = re.ReplaceAllLiteral(cleanSearch, []byte("")) 141 | 142 | s := esSearch{} 143 | if err := json.Unmarshal(cleanSearch, &s); err != nil { 144 | return nil, err 145 | } 146 | 147 | // wrap provided query in a bool query with a filter clause restricting matches to the specified org 148 | orgCondition := map[string]map[string]int{"term": {"org_id": orgID}} 149 | 150 | Query := &esQueryWrapper{} 151 | Query.Bool.Must = append(Query.Bool.Must, s.Query) 152 | Query.Bool.Filter = orgCondition 153 | s.Query = Query 154 | 155 | return json.Marshal(s) 156 | } 157 | -------------------------------------------------------------------------------- /pkg/events/endpoint.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | 8 | m "github.com/raintank/worldping-api/pkg/models" 9 | ) 10 | 11 | type EndpointCreated struct { 12 | Ts time.Time 13 | Payload *m.EndpointDTO 14 | } 15 | 16 | func (a *EndpointCreated) Id() string { 17 | return fmt.Sprintf("%d", a.Payload.Id) 18 | } 19 | 20 | func (a *EndpointCreated) Type() string { 21 | return "Endpoint.created" 22 | } 23 | 24 | func (a *EndpointCreated) Timestamp() time.Time { 25 | return a.Ts 26 | } 27 | 28 | func (a *EndpointCreated) Body() ([]byte, error) { 29 | return json.Marshal(a.Payload) 30 | } 31 | 32 | type EndpointDeleted struct { 33 | Ts time.Time 34 | Payload *m.EndpointDTO 35 | } 36 | 37 | func (a *EndpointDeleted) Id() string { 38 | return fmt.Sprintf("%d", a.Payload.Id) 39 | } 40 | 41 | func (a *EndpointDeleted) Type() string { 42 | return "Endpoint.deleted" 43 | } 44 | 45 | func (a *EndpointDeleted) Timestamp() time.Time { 46 | return a.Ts 47 | } 48 | 49 | func (a *EndpointDeleted) Body() ([]byte, error) { 50 | return json.Marshal(a.Payload) 51 | } 52 | 53 | type EndpointUpdated struct { 54 | Ts time.Time 55 | Payload struct { 56 | Last *m.EndpointDTO `json:"last"` 57 | Current *m.EndpointDTO `json:"current"` 58 | } 59 | } 60 | 61 | func (a *EndpointUpdated) Id() string { 62 | return fmt.Sprintf("%d", a.Payload.Current.Id) 63 | } 64 | 65 | func (a *EndpointUpdated) Type() string { 66 | return "Endpoint.updated" 67 | } 68 | 69 | func (a *EndpointUpdated) Timestamp() time.Time { 70 | return a.Ts 71 | } 72 | 73 | func (a *EndpointUpdated) Body() ([]byte, error) { 74 | return json.Marshal(a.Payload) 75 | } 76 | -------------------------------------------------------------------------------- /pkg/events/events.go: 
-------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "os" 6 | "sync" 7 | "time" 8 | 9 | "github.com/raintank/worldping-api/pkg/log" 10 | "github.com/raintank/worldping-api/pkg/setting" 11 | ) 12 | 13 | var hostname string 14 | 15 | func init() { 16 | hostname, _ = os.Hostname() 17 | } 18 | 19 | type Event interface { 20 | Type() string 21 | Timestamp() time.Time 22 | Body() ([]byte, error) 23 | Id() string 24 | } 25 | 26 | type RawEvent struct { 27 | Id string `json:"id"` 28 | Type string `json:"type"` 29 | Timestamp time.Time `json:"timestamp"` 30 | Body json.RawMessage `json:"payload"` 31 | Source string `json:"source"` 32 | Attempts int `json:"attempts"` 33 | } 34 | 35 | func NewRawEventFromEvent(e Event) (*RawEvent, error) { 36 | payload, err := e.Body() 37 | if err != nil { 38 | return nil, err 39 | } 40 | 41 | raw := &RawEvent{ 42 | Id: e.Id(), 43 | Type: e.Type(), 44 | Timestamp: e.Timestamp(), 45 | Source: hostname, 46 | Body: payload, 47 | } 48 | return raw, nil 49 | } 50 | 51 | type Handlers struct { 52 | sync.Mutex 53 | Listeners map[string][]chan<- RawEvent 54 | } 55 | 56 | func (h *Handlers) Add(key string, ch chan<- RawEvent) { 57 | h.Lock() 58 | if l, ok := h.Listeners[key]; !ok { 59 | l = make([]chan<- RawEvent, 0) 60 | h.Listeners[key] = l 61 | } 62 | h.Listeners[key] = append(h.Listeners[key], ch) 63 | h.Unlock() 64 | } 65 | 66 | func (h *Handlers) GetListeners(key string) []chan<- RawEvent { 67 | listeners := make([]chan<- RawEvent, 0) 68 | h.Lock() 69 | for rk, l := range h.Listeners { 70 | if rk == "*" || rk == key { 71 | listeners = append(listeners, l...) 72 | } 73 | } 74 | h.Unlock() 75 | return listeners 76 | } 77 | 78 | var ( 79 | handlers *Handlers 80 | pubChan chan Message 81 | subChan chan Message 82 | ) 83 | 84 | func Init() { 85 | handlers = &Handlers{ 86 | Listeners: make(map[string][]chan<- RawEvent), 87 | } 88 | pubChan = make(chan Message, 100) 89 | 90 | if setting.Kafka.Enabled { 91 | // use rabbitmq for message distribution. 92 | subChan = make(chan Message, 10) 93 | go Run(setting.Kafka.Brokers, setting.Kafka.Topic, pubChan, subChan) 94 | go handleMessages(subChan) 95 | } else { 96 | // handle all message written to the publish chan. 97 | go handleMessages(pubChan) 98 | } 99 | return 100 | } 101 | 102 | func Subscribe(t string, channel chan<- RawEvent) { 103 | handlers.Add(t, channel) 104 | } 105 | 106 | func Publish(e Event, attempts int) error { 107 | if handlers == nil { 108 | // not initialized. 109 | return nil 110 | } 111 | raw, err := NewRawEventFromEvent(e) 112 | if err != nil { 113 | return err 114 | } 115 | raw.Attempts = attempts + 1 116 | 117 | body, err := json.Marshal(raw) 118 | if err != nil { 119 | return err 120 | } 121 | msg := Message{ 122 | Id: e.Id(), 123 | Payload: body, 124 | } 125 | ticker := time.NewTicker(2 * time.Second) 126 | pre := time.Now() 127 | WAITLOOP: 128 | for { 129 | select { 130 | case <-ticker.C: 131 | log.Error(3, "blocked writing to event publish channel for %f seconds", time.Since(pre).Seconds()) 132 | case pubChan <- msg: 133 | ticker.Stop() 134 | break WAITLOOP 135 | } 136 | } 137 | 138 | return nil 139 | } 140 | 141 | func handleMessages(c chan Message) { 142 | for m := range c { 143 | go func(msg Message) { 144 | e := RawEvent{} 145 | err := json.Unmarshal(msg.Payload, &e) 146 | if err != nil { 147 | log.Error(3, "unable to unmarshal event Message. 
%s", err) 148 | return 149 | } 150 | 151 | log.Debug("processing event of type %s", e.Type) 152 | //broadcast the event to listeners. 153 | for _, ch := range handlers.GetListeners(e.Type) { 154 | ch <- e 155 | } 156 | }(m) 157 | } 158 | } 159 | -------------------------------------------------------------------------------- /pkg/events/probe.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "time" 7 | 8 | m "github.com/raintank/worldping-api/pkg/models" 9 | ) 10 | 11 | type ProbeCreated struct { 12 | Ts time.Time 13 | Payload *m.ProbeDTO 14 | } 15 | 16 | func (a *ProbeCreated) Id() string { 17 | return fmt.Sprintf("%d", a.Payload.Id) 18 | } 19 | 20 | func (a *ProbeCreated) Type() string { 21 | return "Probe.created" 22 | } 23 | 24 | func (a *ProbeCreated) Timestamp() time.Time { 25 | return a.Ts 26 | } 27 | 28 | func (a *ProbeCreated) Body() ([]byte, error) { 29 | return json.Marshal(a.Payload) 30 | } 31 | 32 | type ProbeDeleted struct { 33 | Ts time.Time 34 | Payload *m.ProbeDTO 35 | } 36 | 37 | func (a *ProbeDeleted) Id() string { 38 | return fmt.Sprintf("%d", a.Payload.Id) 39 | } 40 | 41 | func (a *ProbeDeleted) Type() string { 42 | return "Probe.deleted" 43 | } 44 | 45 | func (a *ProbeDeleted) Timestamp() time.Time { 46 | return a.Ts 47 | } 48 | 49 | func (a *ProbeDeleted) Body() ([]byte, error) { 50 | return json.Marshal(a.Payload) 51 | } 52 | 53 | type ProbeUpdated struct { 54 | Ts time.Time 55 | Payload struct { 56 | Last *m.ProbeDTO `json:"last"` 57 | Current *m.ProbeDTO `json:"current"` 58 | } 59 | } 60 | 61 | func (a *ProbeUpdated) Id() string { 62 | return fmt.Sprintf("%d", a.Payload.Current.Id) 63 | } 64 | 65 | func (a *ProbeUpdated) Type() string { 66 | return "Probe.updated" 67 | } 68 | 69 | func (a *ProbeUpdated) Timestamp() time.Time { 70 | return a.Ts 71 | } 72 | 73 | func (a *ProbeUpdated) Body() ([]byte, error) { 74 | return json.Marshal(a.Payload) 75 | } 76 | 77 | type ProbeOnline struct { 78 | Ts time.Time 79 | Payload *m.ProbeDTO 80 | } 81 | 82 | func (a *ProbeOnline) Id() string { 83 | return fmt.Sprintf("%d", a.Payload.Id) 84 | } 85 | 86 | func (a *ProbeOnline) Type() string { 87 | return "Probe.online" 88 | } 89 | 90 | func (a *ProbeOnline) Timestamp() time.Time { 91 | return a.Ts 92 | } 93 | 94 | func (a *ProbeOnline) Body() ([]byte, error) { 95 | return json.Marshal(a.Payload) 96 | } 97 | 98 | type ProbeOffline struct { 99 | Ts time.Time 100 | Payload *m.ProbeDTO 101 | } 102 | 103 | func (a *ProbeOffline) Id() string { 104 | return fmt.Sprintf("%d", a.Payload.Id) 105 | } 106 | 107 | func (a *ProbeOffline) Type() string { 108 | return "Probe.offline" 109 | } 110 | 111 | func (a *ProbeOffline) Timestamp() time.Time { 112 | return a.Ts 113 | } 114 | 115 | func (a *ProbeOffline) Body() ([]byte, error) { 116 | return json.Marshal(a.Payload) 117 | } 118 | 119 | type ProbeSessionCreated struct { 120 | Ts time.Time 121 | Payload *m.ProbeSession 122 | } 123 | 124 | func (a *ProbeSessionCreated) Id() string { 125 | return fmt.Sprintf("%d", a.Payload.Id) 126 | } 127 | 128 | func (a *ProbeSessionCreated) Type() string { 129 | return "ProbeSession.created" 130 | } 131 | 132 | func (a *ProbeSessionCreated) Timestamp() time.Time { 133 | return a.Ts 134 | } 135 | 136 | func (a *ProbeSessionCreated) Body() ([]byte, error) { 137 | return json.Marshal(a.Payload) 138 | } 139 | 140 | type ProbeSessionDeleted struct { 141 | Ts time.Time 142 | Payload *m.ProbeSession 143 | } 144 | 
145 | func (a *ProbeSessionDeleted) Id() string { 146 | return fmt.Sprintf("%d", a.Payload.Id) 147 | } 148 | 149 | func (a *ProbeSessionDeleted) Type() string { 150 | return "ProbeSession.deleted" 151 | } 152 | 153 | func (a *ProbeSessionDeleted) Timestamp() time.Time { 154 | return a.Ts 155 | } 156 | 157 | func (a *ProbeSessionDeleted) Body() ([]byte, error) { 158 | return json.Marshal(a.Payload) 159 | } 160 | -------------------------------------------------------------------------------- /pkg/events/pubsub.go: -------------------------------------------------------------------------------- 1 | package events 2 | 3 | import ( 4 | "strings" 5 | "sync" 6 | 7 | "github.com/Shopify/sarama" 8 | "github.com/raintank/worldping-api/pkg/log" 9 | "github.com/raintank/worldping-api/pkg/setting" 10 | ) 11 | 12 | // message is the application type for a message. This can contain identity, 13 | // or a reference to the recevier chan for further demuxing. 14 | type Message struct { 15 | Id string 16 | Payload []byte 17 | } 18 | 19 | type KafkaPubSub struct { 20 | instance string 21 | client sarama.Client 22 | consumer sarama.Consumer 23 | producer sarama.AsyncProducer 24 | partitions []int32 25 | topic string 26 | 27 | wg sync.WaitGroup 28 | shutdown chan struct{} 29 | } 30 | 31 | func Run(brokersStr, topic string, pub, sub chan Message) { 32 | brokers := strings.Split(brokersStr, ",") 33 | config := sarama.NewConfig() 34 | config.ClientID = setting.InstanceId 35 | config.Version = sarama.V2_0_0_0 36 | config.Producer.RequiredAcks = sarama.WaitForAll // Wait for all in-sync replicas to ack the message 37 | config.Producer.Retry.Max = 10 // Retry up to 10 times to produce the message 38 | config.Producer.Compression = sarama.CompressionSnappy 39 | config.Producer.Return.Successes = true 40 | err := config.Validate() 41 | if err != nil { 42 | log.Fatal(2, "kafka: invalid consumer config: %s", err) 43 | } 44 | 45 | client, err := sarama.NewClient(brokers, config) 46 | if err != nil { 47 | log.Fatal(4, "kafka: failed to create client. %s", err) 48 | } 49 | 50 | // validate our partitions 51 | partitions, err := client.Partitions(topic) 52 | if err != nil { 53 | log.Fatal(4, "kafka: failed to get partitions for topic %s: %s", topic, err.Error()) 54 | } 55 | 56 | consumer, err := sarama.NewConsumerFromClient(client) 57 | if err != nil { 58 | log.Fatal(2, "kafka: failed to initialize consumer: %s", err) 59 | } 60 | log.Info("kafka: consumer initialized without error") 61 | 62 | producer, err := sarama.NewAsyncProducerFromClient(client) 63 | if err != nil { 64 | log.Fatal(2, "kafka: failed to initialize producer: %s", err) 65 | } 66 | 67 | pubSub := &KafkaPubSub{ 68 | instance: setting.InstanceId, 69 | client: client, 70 | consumer: consumer, 71 | producer: producer, 72 | partitions: partitions, 73 | topic: topic, 74 | shutdown: make(chan struct{}), 75 | } 76 | 77 | go pubSub.consume(sub) 78 | go pubSub.produce(pub) 79 | } 80 | 81 | func (ps *KafkaPubSub) consume(sub chan Message) { 82 | for _, p := range ps.partitions { 83 | ps.wg.Add(1) 84 | go ps.consumePartition(sub, p) 85 | } 86 | } 87 | 88 | func (ps *KafkaPubSub) consumePartition(sub chan Message, partition int32) { 89 | defer ps.wg.Done() 90 | pc, err := ps.consumer.ConsumePartition(ps.topic, partition, sarama.OffsetNewest) 91 | if err != nil { 92 | log.Fatal(4, "kafka: failed to start partitionConsumer for %s:%d. 
%s", ps.topic, partition, err) 93 | } 94 | log.Info("kafka: consuming from %s:%d", ps.topic, partition) 95 | 96 | messages := pc.Messages() 97 | for { 98 | select { 99 | case msg, ok := <-messages: 100 | if !ok { 101 | log.Info("kafka: consumer for %s:%d ended.", ps.topic, partition) 102 | return 103 | } 104 | log.Debug("kafka received message: Topic %s, Partition: %d, Offset: %d, Key: %s", msg.Topic, msg.Partition, msg.Offset, msg.Key) 105 | sub <- Message{ 106 | Id: string(msg.Key), 107 | Payload: msg.Value, 108 | } 109 | case <-ps.shutdown: 110 | pc.Close() 111 | log.Info("kafka: consumer for %s:%d ended.", ps.topic, partition) 112 | return 113 | } 114 | } 115 | } 116 | 117 | func (ps *KafkaPubSub) produce(pub chan Message) { 118 | input := ps.producer.Input() 119 | success := ps.producer.Successes() 120 | errors := ps.producer.Errors() 121 | done := make(chan struct{}) 122 | for { 123 | select { 124 | case msg := <-pub: 125 | pm := &sarama.ProducerMessage{ 126 | Topic: ps.topic, 127 | Value: sarama.ByteEncoder(msg.Payload), 128 | Key: sarama.StringEncoder(msg.Id), 129 | } 130 | input <- pm 131 | case pm := <-success: 132 | log.Debug("kafka sent message: Topic: %s, Partition: %d, Offset: %d, key: %s", pm.Topic, pm.Partition, pm.Offset, pm.Key) 133 | case pe := <-errors: 134 | log.Error(3, "kafka failed to send message. %s: Topic: %s, Partition: %d, Offset: %d, key: %s", pe.Error(), pe.Msg.Topic, pe.Msg.Partition, pe.Msg.Offset, pe.Msg.Key) 135 | case <-ps.shutdown: 136 | go func() { 137 | ps.producer.Close() 138 | close(done) 139 | }() 140 | <-done 141 | return 142 | } 143 | } 144 | } 145 | -------------------------------------------------------------------------------- /pkg/log/console.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The Gogs Authors. All rights reserved. 2 | // Use of this source code is governed by a MIT-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package log 6 | 7 | import ( 8 | "encoding/json" 9 | "fmt" 10 | "log" 11 | "os" 12 | "runtime" 13 | ) 14 | 15 | type Brush func(string) string 16 | 17 | func NewBrush(color string) Brush { 18 | pre := "\033[" 19 | reset := "\033[0m" 20 | return func(text string) string { 21 | return pre + color + "m" + text + reset 22 | } 23 | } 24 | 25 | var ( 26 | Red = NewBrush("1;31") 27 | Purple = NewBrush("1;35") 28 | Yellow = NewBrush("1;33") 29 | Green = NewBrush("1;32") 30 | Blue = NewBrush("1;34") 31 | Cyan = NewBrush("1;36") 32 | 33 | colors = []Brush{ 34 | Cyan, // Trace cyan 35 | Blue, // Debug blue 36 | Green, // Info green 37 | Yellow, // Warn yellow 38 | Red, // Error red 39 | Purple, // Critical purple 40 | Red, // Fatal red 41 | } 42 | consoleWriter = &ConsoleWriter{lg: log.New(os.Stdout, "", 0), 43 | Level: TRACE} 44 | ) 45 | 46 | // ConsoleWriter implements LoggerInterface and writes messages to terminal. 47 | type ConsoleWriter struct { 48 | lg *log.Logger 49 | Level LogLevel `json:"level"` 50 | Formatting bool `json:"formatting"` 51 | } 52 | 53 | // create ConsoleWriter returning as LoggerInterface. 
54 | func NewConsole() LoggerInterface { 55 | return &ConsoleWriter{ 56 | lg: log.New(os.Stderr, "", log.Ldate|log.Ltime), 57 | Level: TRACE, 58 | Formatting: true, 59 | } 60 | } 61 | 62 | func (cw *ConsoleWriter) Init(config string) error { 63 | return json.Unmarshal([]byte(config), cw) 64 | } 65 | 66 | func (cw *ConsoleWriter) WriteMsg(msg string, skip int, level LogLevel) error { 67 | if cw.Level > level { 68 | return nil 69 | } 70 | if runtime.GOOS == "windows" || !cw.Formatting { 71 | cw.lg.Println(msg) 72 | } else { 73 | cw.lg.Println(colors[level](msg)) 74 | } 75 | return nil 76 | } 77 | 78 | func (_ *ConsoleWriter) Flush() { 79 | 80 | } 81 | 82 | func (_ *ConsoleWriter) Destroy() { 83 | } 84 | 85 | func printConsole(level LogLevel, msg string) { 86 | consoleWriter.WriteMsg(msg, 0, level) 87 | } 88 | 89 | func printfConsole(level LogLevel, format string, v ...interface{}) { 90 | consoleWriter.WriteMsg(fmt.Sprintf(format, v...), 0, level) 91 | } 92 | 93 | // ConsoleTrace prints to stdout using TRACE colors 94 | func ConsoleTrace(s string) { 95 | printConsole(TRACE, s) 96 | } 97 | 98 | // ConsoleTracef prints a formatted string to stdout using TRACE colors 99 | func ConsoleTracef(format string, v ...interface{}) { 100 | printfConsole(TRACE, format, v...) 101 | } 102 | 103 | // ConsoleDebug prints to stdout using DEBUG colors 104 | func ConsoleDebug(s string) { 105 | printConsole(DEBUG, s) 106 | } 107 | 108 | // ConsoleDebugf prints a formatted string to stdout using DEBUG colors 109 | func ConsoleDebugf(format string, v ...interface{}) { 110 | printfConsole(DEBUG, format, v...) 111 | } 112 | 113 | // ConsoleInfo prints to stdout using INFO colors 114 | func ConsoleInfo(s string) { 115 | printConsole(INFO, s) 116 | } 117 | 118 | // ConsoleInfof prints a formatted string to stdout using INFO colors 119 | func ConsoleInfof(format string, v ...interface{}) { 120 | printfConsole(INFO, format, v...) 121 | } 122 | 123 | // ConsoleWarn prints to stdout using WARN colors 124 | func ConsoleWarn(s string) { 125 | printConsole(WARN, s) 126 | } 127 | 128 | // ConsoleWarnf prints a formatted string to stdout using WARN colors 129 | func ConsoleWarnf(format string, v ...interface{}) { 130 | printfConsole(WARN, format, v...) 131 | } 132 | 133 | // ConsoleError prints to stdout using ERROR colors 134 | func ConsoleError(s string) { 135 | printConsole(ERROR, s) 136 | } 137 | 138 | // ConsoleErrorf prints a formatted string to stdout using ERROR colors 139 | func ConsoleErrorf(format string, v ...interface{}) { 140 | printfConsole(ERROR, format, v...) 141 | } 142 | 143 | // ConsoleFatal prints to stdout using FATAL colors 144 | func ConsoleFatal(s string) { 145 | printConsole(FATAL, s) 146 | os.Exit(1) 147 | } 148 | 149 | // ConsoleFatalf prints a formatted string to stdout using FATAL colors 150 | func ConsoleFatalf(format string, v ...interface{}) { 151 | printfConsole(FATAL, format, v...) 
152 | os.Exit(1) 153 | } 154 | 155 | func init() { 156 | Register("console", NewConsole) 157 | } 158 | -------------------------------------------------------------------------------- /pkg/log/syslog.go: -------------------------------------------------------------------------------- 1 | //+build !windows,!nacl,!plan9 2 | 3 | package log 4 | 5 | import ( 6 | "encoding/json" 7 | "errors" 8 | "log/syslog" 9 | ) 10 | 11 | type SyslogWriter struct { 12 | syslog *syslog.Writer 13 | Network string `json:"network"` 14 | Address string `json:"address"` 15 | Facility string `json:"facility"` 16 | Tag string `json:"tag"` 17 | } 18 | 19 | func NewSyslog() LoggerInterface { 20 | return new(SyslogWriter) 21 | } 22 | 23 | func (sw *SyslogWriter) Init(config string) error { 24 | if err := json.Unmarshal([]byte(config), sw); err != nil { 25 | return err 26 | } 27 | 28 | prio, err := parseFacility(sw.Facility) 29 | if err != nil { 30 | return err 31 | } 32 | 33 | w, err := syslog.Dial(sw.Network, sw.Address, prio, sw.Tag) 34 | if err != nil { 35 | return err 36 | } 37 | 38 | sw.syslog = w 39 | return nil 40 | } 41 | 42 | func (sw *SyslogWriter) WriteMsg(msg string, skip int, level LogLevel) error { 43 | var err error 44 | 45 | switch level { 46 | case TRACE, DEBUG: 47 | err = sw.syslog.Debug(msg) 48 | case INFO: 49 | err = sw.syslog.Info(msg) 50 | case WARN: 51 | err = sw.syslog.Warning(msg) 52 | case ERROR: 53 | err = sw.syslog.Err(msg) 54 | case CRITICAL: 55 | err = sw.syslog.Crit(msg) 56 | case FATAL: 57 | err = sw.syslog.Alert(msg) 58 | default: 59 | err = errors.New("invalid syslog level") 60 | } 61 | 62 | return err 63 | } 64 | 65 | func (sw *SyslogWriter) Destroy() { 66 | sw.syslog.Close() 67 | } 68 | 69 | func (sw *SyslogWriter) Flush() {} 70 | 71 | var facilities = map[string]syslog.Priority{ 72 | "user": syslog.LOG_USER, 73 | "daemon": syslog.LOG_DAEMON, 74 | "local0": syslog.LOG_LOCAL0, 75 | "local1": syslog.LOG_LOCAL1, 76 | "local2": syslog.LOG_LOCAL2, 77 | "local3": syslog.LOG_LOCAL3, 78 | "local4": syslog.LOG_LOCAL4, 79 | "local5": syslog.LOG_LOCAL5, 80 | "local6": syslog.LOG_LOCAL6, 81 | "local7": syslog.LOG_LOCAL7, 82 | } 83 | 84 | func parseFacility(facility string) (syslog.Priority, error) { 85 | prio, ok := facilities[facility] 86 | if !ok { 87 | return syslog.LOG_LOCAL0, errors.New("invalid syslog facility") 88 | } 89 | 90 | return prio, nil 91 | } 92 | 93 | func init() { 94 | Register("syslog", NewSyslog) 95 | } 96 | -------------------------------------------------------------------------------- /pkg/middleware/middleware.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "encoding/base64" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | "sync" 9 | "time" 10 | 11 | "github.com/grafana/metrictank/stats" 12 | "github.com/raintank/tsdb-gw/auth" 13 | "github.com/raintank/tsdb-gw/auth/gcom" 14 | "gopkg.in/macaron.v1" 15 | ) 16 | 17 | type Context struct { 18 | *macaron.Context 19 | *auth.User 20 | ApiKey string 21 | } 22 | 23 | type requestStats struct { 24 | sync.Mutex 25 | responseCounts map[string]map[int]*stats.CounterRate32 26 | latencyHistograms map[string]*stats.LatencyHistogram15s32 27 | sizeMeters map[string]*stats.Meter32 28 | } 29 | 30 | func (r *requestStats) StatusCount(handler string, status int) { 31 | metricKey := fmt.Sprintf("api.request.%s.status.%d", handler, status) 32 | r.Lock() 33 | p, ok := r.responseCounts[handler] 34 | if !ok { 35 | p = make(map[int]*stats.CounterRate32) 36 | 
r.responseCounts[handler] = p 37 | } 38 | c, ok := p[status] 39 | if !ok { 40 | c = stats.NewCounterRate32(metricKey) 41 | p[status] = c 42 | } 43 | r.Unlock() 44 | c.Inc() 45 | } 46 | 47 | func (r *requestStats) Latency(handler string, dur time.Duration) { 48 | r.Lock() 49 | p, ok := r.latencyHistograms[handler] 50 | if !ok { 51 | p = stats.NewLatencyHistogram15s32(fmt.Sprintf("api.request.%s", handler)) 52 | r.latencyHistograms[handler] = p 53 | } 54 | r.Unlock() 55 | p.Value(dur) 56 | } 57 | 58 | func (r *requestStats) PathSize(handler string, size int) { 59 | r.Lock() 60 | p, ok := r.sizeMeters[handler] 61 | if !ok { 62 | p = stats.NewMeter32(fmt.Sprintf("api.request.%s.size", handler), false) 63 | r.sizeMeters[handler] = p 64 | } 65 | r.Unlock() 66 | p.Value(size) 67 | } 68 | 69 | // RequestStats returns a middleware that tracks request metrics. 70 | func RequestStats(handler string) macaron.Handler { 71 | return func(ctx *Context) { 72 | if handlerStats == nil { 73 | return 74 | } 75 | start := time.Now() 76 | rw := ctx.Resp.(macaron.ResponseWriter) 77 | // call next handler. This will return after all handlers 78 | // have completed and the request has been sent. 79 | ctx.Next() 80 | status := rw.Status() 81 | handlerStats.StatusCount(handler, status) 82 | handlerStats.Latency(handler, time.Since(start)) 83 | // only record the request size if the request succeeded. 84 | if status < 300 { 85 | handlerStats.PathSize(handler, rw.Size()) 86 | } 87 | } 88 | } 89 | 90 | var authPlugin auth.AuthPlugin 91 | var handlerStats *requestStats 92 | 93 | func Init(adminKey string) { 94 | authPlugin = auth.GetAuthPlugin("grafana") 95 | auth.AdminKey = adminKey 96 | handlerStats = &requestStats{ 97 | responseCounts: make(map[string]map[int]*stats.CounterRate32), 98 | latencyHistograms: make(map[string]*stats.LatencyHistogram15s32), 99 | sizeMeters: make(map[string]*stats.Meter32), 100 | } 101 | } 102 | 103 | func GetContextHandler() macaron.Handler { 104 | return func(c *macaron.Context) { 105 | ctx := &Context{ 106 | Context: c, 107 | User: &auth.User{}, 108 | } 109 | c.Map(ctx) 110 | } 111 | } 112 | 113 | func RequireAdmin() macaron.Handler { 114 | return func(ctx *Context) { 115 | if !ctx.IsAdmin { 116 | ctx.JSON(403, "Permission denied") 117 | } 118 | } 119 | } 120 | 121 | func RoleAuth(roles ...gcom.RoleType) macaron.Handler { 122 | return func(c *Context) { 123 | ok := false 124 | for _, role := range roles { 125 | if role == c.Role { 126 | ok = true 127 | break 128 | } 129 | } 130 | if !ok { 131 | c.JSON(403, "Permission denied") 132 | } 133 | } 134 | } 135 | 136 | func GetUser(adminKey, key string) (*auth.User, error) { 137 | return authPlugin.Auth("api_key", key) 138 | } 139 | 140 | func Auth(adminKey string) macaron.Handler { 141 | if authPlugin == nil { 142 | Init(adminKey) 143 | } 144 | return func(ctx *Context) { 145 | key, err := getApiKey(ctx) 146 | if err != nil { 147 | ctx.JSON(401, "Invalid Authentication header.") 148 | return 149 | } 150 | if key == "" { 151 | ctx.JSON(401, "Unauthorized") 152 | return 153 | } 154 | 155 | user, err := GetUser(adminKey, key) 156 | if err != nil { 157 | if err == auth.ErrInvalidCredentials { 158 | ctx.JSON(401, "Unauthorized") 159 | return 160 | } 161 | ctx.JSON(500, err) 162 | return 163 | } 164 | // allow admin users to impersonate other orgs. 
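RequestStats, Auth and GetContextHandler are designed to be chained per route; the context handler must run first so the *Context it maps is available to the rest of the chain. A hedged wiring sketch (the route path, admin key and handler body are invented, not taken from this repo):

// Hypothetical wiring; only the middleware constructors come from this package.
m := macaron.New()
m.Use(GetContextHandler()) // maps *Context for every downstream handler
Init("example-admin-key")  // registers the auth plugin and the stats counters

m.Get("/api/v2/quotas",
	Auth("example-admin-key"), // resolves the API key into ctx.User or responds 401
	RequestStats("quotas"),    // records status, latency and response size
	func(ctx *Context) {
		ctx.JSON(200, "ok")
	})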
165 | if user.IsAdmin { 166 | header := ctx.Req.Header.Get("X-Worldping-Org") 167 | if header != "" { 168 | orgId, err := strconv.ParseInt(header, 10, 64) 169 | if err == nil && orgId != 0 { 170 | user.ID = int(orgId) 171 | } 172 | } 173 | } 174 | ctx.User = user 175 | ctx.ApiKey = key 176 | } 177 | } 178 | 179 | func getApiKey(c *Context) (string, error) { 180 | header := c.Req.Header.Get("Authorization") 181 | parts := strings.SplitN(header, " ", 2) 182 | if len(parts) == 2 && parts[0] == "Bearer" { 183 | key := parts[1] 184 | return key, nil 185 | } 186 | 187 | if len(parts) == 2 && parts[0] == "Basic" { 188 | decoded, err := base64.StdEncoding.DecodeString(parts[1]) 189 | if err != nil { 190 | return "", err 191 | } 192 | userAndPass := strings.SplitN(string(decoded), ":", 2) 193 | if userAndPass[0] == "api_key" { 194 | return userAndPass[1], nil 195 | } 196 | } 197 | 198 | return "", nil 199 | } 200 | -------------------------------------------------------------------------------- /pkg/middleware/quota.go: -------------------------------------------------------------------------------- 1 | package middleware 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/raintank/worldping-api/pkg/log" 7 | m "github.com/raintank/worldping-api/pkg/models" 8 | "github.com/raintank/worldping-api/pkg/services/sqlstore" 9 | "github.com/raintank/worldping-api/pkg/setting" 10 | "gopkg.in/macaron.v1" 11 | ) 12 | 13 | func Quota(target string) macaron.Handler { 14 | return func(c *Context) { 15 | limitReached, err := QuotaReached(c, target) 16 | if err != nil { 17 | c.JSON(500, fmt.Sprintf("failed to get quota: %s", err)) 18 | return 19 | } 20 | if limitReached { 21 | c.JSON(403, fmt.Sprintf("%s Quota reached", target)) 22 | return 23 | } 24 | } 25 | } 26 | 27 | func QuotaReached(c *Context, target string) (bool, error) { 28 | if !setting.Quota.Enabled { 29 | return false, nil 30 | } 31 | 32 | // get the list of scopes that this target is valid for. 
Org, User, Global 33 | scopes, err := m.GetQuotaScopes(target) 34 | if err != nil { 35 | return false, err 36 | } 37 | 38 | log.Debug("checking quota for %s in scopes %v", target, scopes) 39 | 40 | for _, scope := range scopes { 41 | log.Debug("checking scope %s", scope.Name) 42 | 43 | switch scope.Name { 44 | case "global": 45 | if scope.DefaultLimit < 0 { 46 | continue 47 | } 48 | if scope.DefaultLimit == 0 { 49 | return true, nil 50 | } 51 | quota, err := sqlstore.GetGlobalQuotaByTarget(scope.Target) 52 | if err != nil { 53 | return true, err 54 | } 55 | if quota.Used >= scope.DefaultLimit { 56 | return true, nil 57 | } 58 | case "org": 59 | quota, err := sqlstore.GetOrgQuotaByTarget(int64(c.User.ID), scope.Target, scope.DefaultLimit) 60 | if err != nil { 61 | return true, err 62 | } 63 | if quota.Limit < 0 { 64 | continue 65 | } 66 | if quota.Limit == 0 { 67 | return true, nil 68 | } 69 | 70 | if quota.Used >= quota.Limit { 71 | return true, nil 72 | } 73 | } 74 | } 75 | 76 | return false, nil 77 | } 78 | -------------------------------------------------------------------------------- /pkg/models/alert_scheduler_value.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | type AlertSchedulerValue struct { 4 | Id string 5 | Value string 6 | } 7 | -------------------------------------------------------------------------------- /pkg/models/alerting.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | ) 7 | 8 | // Job is a job for an alert execution 9 | // note that LastPointTs is a time denoting the timestamp of the last point to run against 10 | // this way the check runs always on the right data, irrespective of execution delays 11 | // that said, for convenience, we track the generatedAt timestamp 12 | type AlertingJob struct { 13 | *CheckForAlertDTO 14 | GeneratedAt time.Time 15 | LastPointTs time.Time 16 | NewState CheckEvalResult 17 | TimeExec time.Time 18 | } 19 | 20 | func (job *AlertingJob) String() string { 21 | return fmt.Sprintf(" checkId=%d generatedAt=%s lastPointTs=%s definition: %d probes for %d steps", job.Id, job.GeneratedAt, job.LastPointTs, job.HealthSettings.NumProbes, job.HealthSettings.Steps) 22 | } 23 | -------------------------------------------------------------------------------- /pkg/models/emails.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import "errors" 4 | 5 | var ErrInvalidEmailCode = errors.New("Invalid or expired email code") 6 | 7 | type SendEmailCommand struct { 8 | To []string 9 | Template string 10 | Data map[string]interface{} 11 | Massive bool 12 | Info string 13 | } 14 | -------------------------------------------------------------------------------- /pkg/models/errors.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "fmt" 5 | ) 6 | 7 | type AppError interface { 8 | Code() int 9 | Message() string 10 | } 11 | 12 | type ValidationError struct { 13 | Msg string 14 | } 15 | 16 | func NewValidationError(msg string) ValidationError { 17 | return ValidationError{Msg: msg} 18 | } 19 | 20 | func (e ValidationError) Code() int { 21 | return 400 22 | } 23 | func (e ValidationError) Message() string { 24 | return e.Msg 25 | } 26 | 27 | func (e ValidationError) Error() string { 28 | return fmt.Sprintf("%d: %s", 400, e.Msg) 29 | } 30 | 31 | type NotFoundError struct { 32 
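Because QuotaReached walks every scope returned by m.GetQuotaScopes, the Quota middleware is attached to routes that create the object being limited. A hedged sketch; the route, key and handler name are invented for illustration:

// Hypothetical route guarding endpoint creation with the "endpoint" quota.
m.Post("/api/v2/endpoints",
	Auth("example-admin-key"),
	Quota("endpoint"),  // responds 403 once the org or global limit is used up
	addEndpointHandler) // placeholder handler, not defined in this repo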
| Msg string 33 | } 34 | 35 | func NewNotFoundError(msg string) NotFoundError { 36 | return NotFoundError{Msg: msg} 37 | } 38 | 39 | func (e NotFoundError) Code() int { 40 | return 404 41 | } 42 | func (e NotFoundError) Message() string { 43 | return e.Msg 44 | } 45 | 46 | func (e NotFoundError) Error() string { 47 | return fmt.Sprintf("%d: %s", 404, e.Msg) 48 | } 49 | -------------------------------------------------------------------------------- /pkg/models/probe.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "regexp" 5 | "strings" 6 | "time" 7 | ) 8 | 9 | // Typed errors 10 | var ( 11 | ErrProbeNotFound = NewNotFoundError("Probe not found") 12 | ErrProbeWithSameCodeExists = NewValidationError("A Probe with the same code already exists") 13 | ) 14 | 15 | type Probe struct { 16 | Id int64 17 | OrgId int64 18 | Slug string 19 | Name string 20 | Public bool 21 | Latitude float64 22 | Longitude float64 23 | Created time.Time 24 | Updated time.Time 25 | Online bool 26 | OnlineChange time.Time 27 | Enabled bool 28 | EnabledChange time.Time 29 | } 30 | 31 | type ProbeTag struct { 32 | Id int64 33 | OrgId int64 34 | ProbeId int64 35 | Tag string 36 | Created time.Time 37 | } 38 | 39 | type ProbeSession struct { 40 | Id int64 41 | OrgId int64 42 | ProbeId int64 43 | SocketId string 44 | Version string 45 | InstanceId string 46 | RemoteIp string 47 | Updated time.Time 48 | } 49 | 50 | // ---------------------- 51 | // DTO 52 | type ProbeDTO struct { 53 | Id int64 `json:"id" binding:"required"` 54 | OrgId int64 `json:"org_id"` 55 | Slug string `json:"slug"` 56 | Name string `json:"name" binding:"required"` 57 | Tags []string `json:"tags"` 58 | Public bool `json:"public"` 59 | Latitude float64 `json:"latitude"` 60 | Longitude float64 `json:"longitude"` 61 | Online bool `json:"online"` 62 | OnlineChange time.Time `json:"online_change"` 63 | Enabled bool `json:"enabled"` 64 | EnabledChange time.Time `json:"enabled_change"` 65 | Created time.Time `json:"created"` 66 | Updated time.Time `json:"updated"` 67 | RemoteIp []string `json:"remoteIp"` 68 | } 69 | 70 | type ProbeLocationDTO struct { 71 | Key string `json:"key"` 72 | Latitude float64 `json:"latitude"` 73 | Longitude float64 `json:"longitude"` 74 | Name string `json:"name"` 75 | } 76 | 77 | type ProbeReadyPayload struct { 78 | Collector *ProbeDTO `json:"collector"` 79 | MonitorTypes []MonitorTypeDTO `json:"monitor_types"` 80 | SocketId string `json:"socket_id"` 81 | } 82 | 83 | // --------------------- 84 | // QUERIES 85 | 86 | type GetProbesQuery struct { 87 | OrgId int64 `form:"-"` 88 | Public string `form:"public"` 89 | Enabled string `form:"enabled"` 90 | Online string `form:"online"` 91 | Name string `form:"name"` 92 | Slug string `form:"slug"` 93 | Tag string `form:"tag"` 94 | OrderBy string `form:"orderBy" binding:"In(name,slug,created,updated,)"` 95 | } 96 | 97 | func (collector *Probe) UpdateSlug() { 98 | name := strings.ToLower(collector.Name) 99 | re := regexp.MustCompile("[^\\w ]+") 100 | re2 := regexp.MustCompile("\\s") 101 | collector.Slug = re2.ReplaceAllString(re.ReplaceAllString(name, ""), "-") 102 | } 103 | -------------------------------------------------------------------------------- /pkg/models/quotas.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | import ( 4 | "errors" 5 | "github.com/raintank/worldping-api/pkg/setting" 6 | "time" 7 | ) 8 | 9 | var ErrInvalidQuotaTarget = 
errors.New("Invalid quota target") 10 | 11 | type Quota struct { 12 | Id int64 13 | OrgId int64 14 | Target string 15 | Limit int64 16 | Created time.Time 17 | Updated time.Time 18 | } 19 | 20 | type QuotaScope struct { 21 | Name string 22 | Target string 23 | DefaultLimit int64 24 | } 25 | 26 | type OrgQuotaDTO struct { 27 | OrgId int64 `json:"org_id"` 28 | Target string `json:"target"` 29 | Limit int64 `json:"limit"` 30 | Used int64 `json:"used"` 31 | } 32 | 33 | type GlobalQuotaDTO struct { 34 | Target string `json:"target"` 35 | Limit int64 `json:"limit"` 36 | Used int64 `json:"used"` 37 | } 38 | 39 | func GetQuotaScopes(target string) ([]QuotaScope, error) { 40 | scopes := make([]QuotaScope, 0) 41 | switch target { 42 | case "endpoint": 43 | scopes = append(scopes, 44 | QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.Endpoint}, 45 | QuotaScope{Name: "org", Target: target, DefaultLimit: setting.Quota.Org.Endpoint}, 46 | ) 47 | return scopes, nil 48 | case "probe": 49 | scopes = append(scopes, 50 | QuotaScope{Name: "global", Target: target, DefaultLimit: setting.Quota.Global.Probe}, 51 | QuotaScope{Name: "org", Target: target, DefaultLimit: setting.Quota.Org.Probe}, 52 | ) 53 | return scopes, nil 54 | case "downloadLimit": 55 | scopes = append(scopes, 56 | QuotaScope{Name: "org", Target: target, DefaultLimit: setting.Quota.Org.DownloadLimit}, 57 | ) 58 | return scopes, nil 59 | default: 60 | return scopes, ErrInvalidQuotaTarget 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /pkg/models/usage.go: -------------------------------------------------------------------------------- 1 | package models 2 | 3 | type Usage struct { 4 | Endpoints EndpointUsage 5 | Probes ProbeUsage 6 | Checks CheckUsage 7 | } 8 | 9 | type EndpointUsage struct { 10 | Total int64 11 | PerOrg map[string]int64 12 | } 13 | 14 | type ProbeUsage struct { 15 | Total int64 16 | PerOrg map[string]int64 17 | } 18 | 19 | type CheckUsage struct { 20 | Total int64 21 | HTTP CheckHTTPUsage 22 | HTTPS CheckHTTPSUsage 23 | PING CheckPINGUsage 24 | DNS CheckDNSUsage 25 | } 26 | 27 | type CheckHTTPUsage struct { 28 | Total int64 29 | PerOrg map[string]int64 30 | } 31 | 32 | type CheckHTTPSUsage struct { 33 | Total int64 34 | PerOrg map[string]int64 35 | } 36 | type CheckPINGUsage struct { 37 | Total int64 38 | PerOrg map[string]int64 39 | } 40 | type CheckDNSUsage struct { 41 | Total int64 42 | PerOrg map[string]int64 43 | } 44 | 45 | func NewUsage() *Usage { 46 | return &Usage{ 47 | Endpoints: EndpointUsage{ 48 | PerOrg: make(map[string]int64), 49 | }, 50 | Probes: ProbeUsage{ 51 | PerOrg: make(map[string]int64), 52 | }, 53 | Checks: CheckUsage{ 54 | HTTP: CheckHTTPUsage{ 55 | PerOrg: make(map[string]int64), 56 | }, 57 | HTTPS: CheckHTTPSUsage{ 58 | PerOrg: make(map[string]int64), 59 | }, 60 | PING: CheckPINGUsage{ 61 | PerOrg: make(map[string]int64), 62 | }, 63 | DNS: CheckDNSUsage{ 64 | PerOrg: make(map[string]int64), 65 | }, 66 | }, 67 | } 68 | } 69 | 70 | type BillingUsage struct { 71 | OrgId int64 72 | ChecksPerMinute float64 73 | } 74 | -------------------------------------------------------------------------------- /pkg/services/interfaces.go: -------------------------------------------------------------------------------- 1 | package services 2 | 3 | import ( 4 | "gopkg.in/raintank/schema.v1" 5 | ) 6 | 7 | type MetricsPublisher interface { 8 | Add([]*schema.MetricData) 9 | } 10 | 11 | type EventsPublisher interface { 12 | 
AddEvent(*schema.ProbeEvent) 13 | } 14 | 15 | type MetricsEventsPublisher interface { 16 | MetricsPublisher 17 | EventsPublisher 18 | } 19 | -------------------------------------------------------------------------------- /pkg/services/notifications/email.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "github.com/raintank/worldping-api/pkg/setting" 5 | ) 6 | 7 | type Message struct { 8 | To []string 9 | From string 10 | Subject string 11 | Body string 12 | Massive bool 13 | Info string 14 | } 15 | 16 | // create mail content 17 | func (m *Message) Content() string { 18 | contentType := "text/html; charset=UTF-8" 19 | content := "From: " + m.From + "\r\nSubject: " + m.Subject + "\r\nContent-Type: " + contentType + "\r\n\r\n" + m.Body 20 | return content 21 | } 22 | 23 | func setDefaultTemplateData(data map[string]interface{}) { 24 | data["AppUrl"] = setting.AppUrl 25 | data["BuildVersion"] = setting.BuildVersion 26 | data["BuildStamp"] = setting.BuildStamp 27 | data["Subject"] = map[string]interface{}{} 28 | } 29 | -------------------------------------------------------------------------------- /pkg/services/notifications/mailer.go: -------------------------------------------------------------------------------- 1 | // Copyright 2014 The Gogs Authors. All rights reserved. 2 | // Use of this source code is governed by a MIT-style 3 | // license that can be found in the LICENSE file. 4 | 5 | package notifications 6 | 7 | import ( 8 | "crypto/tls" 9 | "fmt" 10 | "net" 11 | "net/mail" 12 | "net/smtp" 13 | "os" 14 | "strings" 15 | 16 | "github.com/raintank/worldping-api/pkg/log" 17 | "github.com/raintank/worldping-api/pkg/setting" 18 | ) 19 | 20 | var mailQueue chan *Message 21 | 22 | func initMailQueue() { 23 | mailQueue = make(chan *Message, 10) 24 | go processMailQueue() 25 | } 26 | 27 | func processMailQueue() { 28 | for { 29 | select { 30 | case msg := <-mailQueue: 31 | num, err := buildAndSend(msg) 32 | tos := strings.Join(msg.To, "; ") 33 | info := "" 34 | if err != nil { 35 | if len(msg.Info) > 0 { 36 | info = ", info: " + msg.Info 37 | } 38 | log.Error(4, fmt.Sprintf("Async sent email %d succeed, not send emails: %s%s err: %s", num, tos, info, err)) 39 | } else { 40 | log.Trace(fmt.Sprintf("Async sent email %d succeed, sent emails: %s%s", num, tos, info)) 41 | } 42 | } 43 | } 44 | } 45 | 46 | var addToMailQueue = func(msg *Message) { 47 | mailQueue <- msg 48 | } 49 | 50 | func sendToSmtpServer(recipients []string, msgContent []byte) error { 51 | host, port, err := net.SplitHostPort(setting.Smtp.Host) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | tlsconfig := &tls.Config{ 57 | InsecureSkipVerify: setting.Smtp.SkipVerify, 58 | ServerName: host, 59 | } 60 | 61 | if setting.Smtp.CertFile != "" { 62 | cert, err := tls.LoadX509KeyPair(setting.Smtp.CertFile, setting.Smtp.KeyFile) 63 | if err != nil { 64 | return err 65 | } 66 | tlsconfig.Certificates = []tls.Certificate{cert} 67 | } 68 | 69 | conn, err := net.Dial("tcp", net.JoinHostPort(host, port)) 70 | if err != nil { 71 | return err 72 | } 73 | defer conn.Close() 74 | 75 | isSecureConn := false 76 | // Start TLS directly if the port ends with 465 (SMTPS protocol) 77 | if strings.HasSuffix(port, "465") { 78 | conn = tls.Client(conn, tlsconfig) 79 | isSecureConn = true 80 | } 81 | 82 | client, err := smtp.NewClient(conn, host) 83 | if err != nil { 84 | return err 85 | } 86 | 87 | hostname, err := os.Hostname() 88 | if err != nil { 89 | return err 90 
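Message.Content only assembles the From, Subject and Content-Type headers plus the body; recipients are added later when the message is written to the SMTP connection. A short sketch (the addresses are placeholders, and the queue must already have been started via initMailQueue):

// Illustrative only; the addresses are made up.
msg := &Message{
	To:      []string{"ops@example.com"},
	From:    "worldping@example.com",
	Subject: "Probe offline",
	Body:    "<b>probe-1 has gone offline</b>",
}
// msg.Content() renders:
//   From: worldping@example.com
//   Subject: Probe offline
//   Content-Type: text/html; charset=UTF-8
//
//   <b>probe-1 has gone offline</b>
addToMailQueue(msg) // consumed asynchronously by processMailQueue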
| } 91 | 92 | if err = client.Hello(hostname); err != nil { 93 | return err 94 | } 95 | 96 | // If not using SMTPS, alway use STARTTLS if available 97 | hasStartTLS, _ := client.Extension("STARTTLS") 98 | if !isSecureConn && hasStartTLS { 99 | if err = client.StartTLS(tlsconfig); err != nil { 100 | return err 101 | } 102 | } 103 | 104 | canAuth, options := client.Extension("AUTH") 105 | 106 | if canAuth && len(setting.Smtp.User) > 0 { 107 | var auth smtp.Auth 108 | 109 | if strings.Contains(options, "CRAM-MD5") { 110 | auth = smtp.CRAMMD5Auth(setting.Smtp.User, setting.Smtp.Password) 111 | } else if strings.Contains(options, "PLAIN") { 112 | auth = smtp.PlainAuth("", setting.Smtp.User, setting.Smtp.Password, host) 113 | } 114 | 115 | if auth != nil { 116 | if err = client.Auth(auth); err != nil { 117 | return err 118 | } 119 | } 120 | } 121 | 122 | if fromAddress, err := mail.ParseAddress(setting.Smtp.FromAddress); err != nil { 123 | return err 124 | } else { 125 | if err = client.Mail(fromAddress.Address); err != nil { 126 | return err 127 | } 128 | } 129 | 130 | for _, rec := range recipients { 131 | if err = client.Rcpt(rec); err != nil { 132 | return err 133 | } 134 | } 135 | 136 | w, err := client.Data() 137 | if err != nil { 138 | return err 139 | } 140 | if _, err = w.Write([]byte(msgContent)); err != nil { 141 | return err 142 | } 143 | 144 | if err = w.Close(); err != nil { 145 | return err 146 | } 147 | 148 | return client.Quit() 149 | } 150 | 151 | func buildAndSend(msg *Message) (int, error) { 152 | log.Trace("Sending mails to: %s", strings.Join(msg.To, "; ")) 153 | 154 | // get message body 155 | content := msg.Content() 156 | 157 | if len(msg.To) == 0 { 158 | return 0, fmt.Errorf("empty receive emails") 159 | } else if len(msg.Body) == 0 { 160 | return 0, fmt.Errorf("empty email body") 161 | } 162 | 163 | if msg.Massive { 164 | // send mail to multiple emails one by one 165 | num := 0 166 | for _, to := range msg.To { 167 | body := []byte("To: " + to + "\r\n" + content) 168 | err := sendToSmtpServer([]string{to}, body) 169 | if err != nil { 170 | return num, err 171 | } 172 | num++ 173 | } 174 | return num, nil 175 | } else { 176 | body := []byte("To: " + strings.Join(msg.To, ";") + "\r\n" + content) 177 | 178 | // send to multiple emails in one message 179 | err := sendToSmtpServer(msg.To, body) 180 | if err != nil { 181 | return 0, err 182 | } else { 183 | return 1, nil 184 | } 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /pkg/services/notifications/notifications.go: -------------------------------------------------------------------------------- 1 | package notifications 2 | 3 | import ( 4 | "bytes" 5 | "errors" 6 | "fmt" 7 | "html/template" 8 | 9 | "path/filepath" 10 | 11 | "github.com/grafana/grafana/pkg/util" 12 | "github.com/raintank/worldping-api/pkg/log" 13 | m "github.com/raintank/worldping-api/pkg/models" 14 | "github.com/raintank/worldping-api/pkg/setting" 15 | ) 16 | 17 | var mailTemplates *template.Template 18 | 19 | func Init() error { 20 | initMailQueue() 21 | 22 | mailTemplates = template.New("name") 23 | mailTemplates.Funcs(template.FuncMap{ 24 | "Subject": subjectTemplateFunc, 25 | }) 26 | 27 | templatePattern := filepath.Join(setting.StaticRootPath, setting.Smtp.TemplatesPattern) 28 | _, err := mailTemplates.ParseGlob(templatePattern) 29 | if err != nil { 30 | return err 31 | } 32 | 33 | if !util.IsEmail(setting.Smtp.FromAddress) { 34 | return errors.New("Invalid email address for smpt from_adress 
config") 35 | } 36 | 37 | return nil 38 | } 39 | 40 | func subjectTemplateFunc(obj map[string]interface{}, value string) string { 41 | obj["value"] = value 42 | return "" 43 | } 44 | 45 | func SendEmail(cmd *m.SendEmailCommand) error { 46 | if !setting.Smtp.Enabled { 47 | return errors.New("Worldping mailing/smtp options not configured, contact your network admin") 48 | } 49 | if mailTemplates == nil { 50 | log.Fatal(4, "email templates not yet initialized.") 51 | } 52 | 53 | var buffer bytes.Buffer 54 | var err error 55 | var subjectText interface{} 56 | 57 | data := cmd.Data 58 | if data == nil { 59 | data = make(map[string]interface{}, 10) 60 | } 61 | 62 | setDefaultTemplateData(data) 63 | err = mailTemplates.ExecuteTemplate(&buffer, cmd.Template, data) 64 | if err != nil { 65 | return err 66 | } 67 | 68 | subjectData := data["Subject"].(map[string]interface{}) 69 | subjectText, hasSubject := subjectData["value"] 70 | 71 | if !hasSubject { 72 | return errors.New(fmt.Sprintf("Missing subject in Template %s", cmd.Template)) 73 | } 74 | 75 | subjectTmpl, err := template.New("subject").Parse(subjectText.(string)) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | var subjectBuffer bytes.Buffer 81 | err = subjectTmpl.ExecuteTemplate(&subjectBuffer, "subject", data) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | addToMailQueue(&Message{ 87 | To: cmd.To, 88 | From: setting.Smtp.FromAddress, 89 | Subject: subjectBuffer.String(), 90 | Body: buffer.String(), 91 | Massive: true, 92 | }) 93 | 94 | return nil 95 | } 96 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/alert_scheduler_value.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | m "github.com/raintank/worldping-api/pkg/models" 5 | ) 6 | 7 | func GetAlertSchedulerValue(id string) (string, error) { 8 | sess, err := newSession(false, "alert_scheduler_value") 9 | if err != nil { 10 | return "", err 11 | } 12 | return getAlertSchedulerValue(sess, id) 13 | } 14 | 15 | func getAlertSchedulerValue(sess *session, id string) (string, error) { 16 | rawSql := "SELECT value from alert_scheduler_value where id=?" 17 | results, err := sess.Query(rawSql, id) 18 | 19 | if err != nil { 20 | return "", err 21 | } 22 | 23 | if len(results) == 0 { 24 | return "", nil 25 | } 26 | 27 | return string(results[0]["value"]), nil 28 | } 29 | 30 | func UpdateAlertSchedulerValue(id, value string) error { 31 | sess, err := newSession(true, "alert_scheduler_value") 32 | if err != nil { 33 | return err 34 | } 35 | defer sess.Cleanup() 36 | 37 | if err = updateAlertSchedulerValue(sess, id, value); err != nil { 38 | return err 39 | } 40 | // audit log? 41 | 42 | sess.Complete() 43 | return nil 44 | } 45 | 46 | func updateAlertSchedulerValue(sess *session, id, value string) error { 47 | entity := m.AlertSchedulerValue{ 48 | Id: id, 49 | Value: value, 50 | } 51 | 52 | affected, err := sess.Update(&entity) 53 | if err == nil && affected == 0 { 54 | _, err = sess.Insert(&entity) 55 | } 56 | 57 | return err 58 | } 59 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrations/alert_scheduler_value.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import . 
"github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 4 | 5 | func addAlertSchedulerValueMigration(mg *Migrator) { 6 | 7 | var alertSchedV1 = Table{ 8 | Name: "alert_scheduler_value", 9 | Columns: []*Column{ 10 | {Name: "id", Type: DB_Varchar, Length: 255, IsPrimaryKey: true}, 11 | {Name: "value", Type: DB_Varchar, Length: 255, Nullable: false}, 12 | }, 13 | } 14 | mg.AddMigration("create alert_scheduler_value table v1", NewAddTableMigration(alertSchedV1)) 15 | 16 | } 17 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrations/common.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | "fmt" 5 | 6 | . "github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 7 | ) 8 | 9 | func addDropAllIndicesMigrations(mg *Migrator, versionSuffix string, table Table) { 10 | for _, index := range table.Indices { 11 | migrationId := fmt.Sprintf("drop index %s - %s", index.XName(table.Name), versionSuffix) 12 | mg.AddMigration(migrationId, NewDropIndexMigration(table, index)) 13 | } 14 | } 15 | 16 | func addTableIndicesMigrations(mg *Migrator, versionSuffix string, table Table) { 17 | for _, index := range table.Indices { 18 | migrationId := fmt.Sprintf("create index %s - %s", index.XName(table.Name), versionSuffix) 19 | mg.AddMigration(migrationId, NewAddIndexMigration(table, index)) 20 | } 21 | } 22 | 23 | func addTableRenameMigration(mg *Migrator, oldName string, newName string, versionSuffix string) { 24 | migrationId := fmt.Sprintf("Rename table %s to %s - %s", oldName, newName, versionSuffix) 25 | mg.AddMigration(migrationId, NewRenameTableMigration(oldName, newName)) 26 | } 27 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrations/endpoint_mig.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | "github.com/go-xorm/xorm" 5 | m "github.com/raintank/worldping-api/pkg/models" 6 | . 
"github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 7 | ) 8 | 9 | func addEndpointMigration(mg *Migrator) { 10 | 11 | var endpointV1 = Table{ 12 | Name: "endpoint", 13 | Columns: []*Column{ 14 | {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, 15 | {Name: "org_id", Type: DB_BigInt, Nullable: false}, 16 | {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, 17 | {Name: "created", Type: DB_DateTime, Nullable: false}, 18 | {Name: "updated", Type: DB_DateTime, Nullable: false}, 19 | }, 20 | Indices: []*Index{ 21 | {Cols: []string{"org_id", "name"}, Type: UniqueIndex}, 22 | }, 23 | } 24 | mg.AddMigration("create endpoint table v1", NewAddTableMigration(endpointV1)) 25 | 26 | //------- indexes ------------------ 27 | addTableIndicesMigrations(mg, "v1", endpointV1) 28 | 29 | slugCol := &Column{Name: "slug", Type: DB_NVarchar, Length: 255, Nullable: true} 30 | migration := NewAddColumnMigration(endpointV1, slugCol) 31 | migration.OnSuccess = func(sess *xorm.Session) error { 32 | sess.Table("endpoint") 33 | endpoints := make([]m.Endpoint, 0) 34 | if err := sess.Find(&endpoints); err != nil { 35 | return err 36 | } 37 | for _, e := range endpoints { 38 | e.UpdateSlug() 39 | if _, err := sess.Id(e.Id).Update(e); err != nil { 40 | return err 41 | } 42 | } 43 | return nil 44 | } 45 | mg.AddMigration("add slug column to endpoint v1", migration) 46 | 47 | // add endpoint_tags 48 | var endpointTagV1 = Table{ 49 | Name: "endpoint_tag", 50 | Columns: []*Column{ 51 | {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, 52 | {Name: "org_id", Type: DB_BigInt, Nullable: false}, 53 | {Name: "endpoint_id", Type: DB_BigInt, Nullable: false}, 54 | {Name: "tag", Type: DB_NVarchar, Length: 255, Nullable: false}, 55 | }, 56 | Indices: []*Index{ 57 | {Cols: []string{"org_id", "endpoint_id"}}, 58 | {Cols: []string{"endpoint_id", "tag"}, Type: UniqueIndex}, 59 | }, 60 | } 61 | mg.AddMigration("create endpoint_tag table v1", NewAddTableMigration(endpointTagV1)) 62 | 63 | //------- indexes ------------------ 64 | addTableIndicesMigrations(mg, "v1", endpointTagV1) 65 | 66 | mg.AddMigration("endpoint_tag add created v1", NewAddColumnMigration(endpointTagV1, &Column{ 67 | Name: "created", Type: DB_DateTime, Nullable: true, 68 | })) 69 | 70 | } 71 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrations/migrations.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import . "github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 4 | 5 | // --- Migration Guide line --- 6 | // 1. Never change a migration that is committed and pushed to master 7 | // 2. Always add new migrations (to change or undo previous migrations) 8 | // 3. 
Some migraitons are not yet written (rename column, table, drop table, index etc) 9 | 10 | func AddMigrations(mg *Migrator) { 11 | addMigrationLogMigrations(mg) 12 | addCollectorMigration(mg) 13 | addCheckMigration(mg) 14 | addEndpointMigration(mg) 15 | addAlertSchedulerValueMigration(mg) 16 | addQuotaMigration(mg) 17 | } 18 | 19 | func addMigrationLogMigrations(mg *Migrator) { 20 | migrationLogV1 := Table{ 21 | Name: "migration_log", 22 | Columns: []*Column{ 23 | {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, 24 | {Name: "migration_id", Type: DB_NVarchar, Length: 255}, 25 | {Name: "sql", Type: DB_Text}, 26 | {Name: "success", Type: DB_Bool}, 27 | {Name: "error", Type: DB_Text}, 28 | {Name: "timestamp", Type: DB_DateTime}, 29 | }, 30 | } 31 | 32 | mg.AddMigration("create migration_log table", NewAddTableMigration(migrationLogV1)) 33 | } 34 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrations/migrations_test.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | "testing" 5 | 6 | "github.com/go-xorm/xorm" 7 | . "github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 8 | "github.com/raintank/worldping-api/pkg/services/sqlstore/sqlutil" 9 | 10 | . "github.com/smartystreets/goconvey/convey" 11 | ) 12 | 13 | var indexTypes = []string{"Unknown", "INDEX", "UNIQUE INDEX"} 14 | 15 | func TestMigrations(t *testing.T) { 16 | //log.NewLogger(0, "console", `{"level": 0}`) 17 | 18 | testDBs := []sqlutil.TestDB{ 19 | sqlutil.TestDB_Sqlite3, 20 | } 21 | 22 | for _, testDB := range testDBs { 23 | 24 | Convey("Initial "+testDB.DriverName+" migration", t, func() { 25 | x, err := xorm.NewEngine(testDB.DriverName, testDB.ConnStr) 26 | So(err, ShouldBeNil) 27 | 28 | sqlutil.CleanDB(x) 29 | 30 | mg := NewMigrator(x) 31 | //mg.LogLevel = log.DEBUG 32 | AddMigrations(mg) 33 | 34 | err = mg.Start() 35 | So(err, ShouldBeNil) 36 | 37 | // tables, err := x.DBMetas() 38 | // So(err, ShouldBeNil) 39 | // 40 | // fmt.Printf("\nDB Schema after migration: table count: %v\n", len(tables)) 41 | // 42 | // for _, table := range tables { 43 | // fmt.Printf("\nTable: %v \n", table.Name) 44 | // for _, column := range table.Columns() { 45 | // fmt.Printf("\t %v \n", column.String(x.Dialect())) 46 | // } 47 | // 48 | // if len(table.Indexes) > 0 { 49 | // fmt.Printf("\n\tIndexes:\n") 50 | // for _, index := range table.Indexes { 51 | // fmt.Printf("\t %v (%v) %v \n", index.Name, strings.Join(index.Cols, ","), indexTypes[index.Type]) 52 | // } 53 | // } 54 | // } 55 | }) 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrations/quota_mig.go: -------------------------------------------------------------------------------- 1 | package migrations 2 | 3 | import ( 4 | . 
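The guideline at the top of migrations.go (never rewrite a migration that has shipped, always append a new one) is what the helpers in this package support. A hedged sketch of a follow-up change; the column and migration id are invented:

// Hypothetical follow-up migration: add a nullable column in a new migration
// instead of editing "create alert_scheduler_value table v1".
func addAlertSchedulerValueV2(mg *Migrator) {
	tbl := Table{Name: "alert_scheduler_value"}
	mg.AddMigration("alert_scheduler_value add updated v2",
		NewAddColumnMigration(tbl, &Column{
			Name: "updated", Type: DB_DateTime, Nullable: true,
		}))
}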
"github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 5 | ) 6 | 7 | func addQuotaMigration(mg *Migrator) { 8 | 9 | var quotaV1 = Table{ 10 | Name: "quota", 11 | Columns: []*Column{ 12 | {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, 13 | {Name: "org_id", Type: DB_BigInt, Nullable: false}, 14 | {Name: "target", Type: DB_NVarchar, Length: 255, Nullable: false}, 15 | {Name: "limit", Type: DB_BigInt, Nullable: false}, 16 | {Name: "created", Type: DB_DateTime, Nullable: false}, 17 | {Name: "updated", Type: DB_DateTime, Nullable: false}, 18 | }, 19 | Indices: []*Index{ 20 | {Cols: []string{"org_id", "target"}, Type: UniqueIndex}, 21 | }, 22 | } 23 | mg.AddMigration("create quota table v1", NewAddTableMigration(quotaV1)) 24 | 25 | //------- indexes ------------------ 26 | addTableIndicesMigrations(mg, "v1", quotaV1) 27 | 28 | // move to new table schema. 29 | //------- drop indexes ------------------ 30 | addDropAllIndicesMigrations(mg, "v1", quotaV1) 31 | 32 | //------- rename table ------------------ 33 | addTableRenameMigration(mg, "quota", "quota_v1", "v1") 34 | 35 | var quotaV2 = Table{ 36 | Name: "quota", 37 | Columns: []*Column{ 38 | {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, 39 | {Name: "org_id", Type: DB_BigInt, Nullable: true}, 40 | {Name: "user_id", Type: DB_BigInt, Nullable: true}, 41 | {Name: "target", Type: DB_NVarchar, Length: 255, Nullable: false}, 42 | {Name: "limit", Type: DB_BigInt, Nullable: false}, 43 | {Name: "created", Type: DB_DateTime, Nullable: false}, 44 | {Name: "updated", Type: DB_DateTime, Nullable: false}, 45 | }, 46 | Indices: []*Index{ 47 | {Cols: []string{"org_id", "user_id", "target"}, Type: UniqueIndex}, 48 | }, 49 | } 50 | mg.AddMigration("create quota table v2", NewAddTableMigration(quotaV2)) 51 | 52 | //------- indexes ------------------ 53 | addTableIndicesMigrations(mg, "v2", quotaV2) 54 | 55 | //------- copy data from v1 to v2 ------------------- 56 | mg.AddMigration("copy quota v1 to v2", NewCopyTableDataMigration("quota", "quota_v1", map[string]string{ 57 | "id": "id", 58 | "org_id": "org_id", 59 | "target": "target", 60 | "limit": "limit", 61 | "created": "created", 62 | "updated": "updated", 63 | })) 64 | mg.AddMigration("Drop old table quota_v1", NewDropTableMigration("quota_v1")) 65 | 66 | //------- drop indexes ------------------ 67 | addDropAllIndicesMigrations(mg, "v2", quotaV2) 68 | 69 | //------- rename table ------------------ 70 | addTableRenameMigration(mg, "quota", "quota_v2", "v2") 71 | 72 | var quotaV3 = Table{ 73 | Name: "quota", 74 | Columns: []*Column{ 75 | {Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true}, 76 | {Name: "org_id", Type: DB_BigInt, Nullable: true}, 77 | {Name: "target", Type: DB_NVarchar, Length: 255, Nullable: false}, 78 | {Name: "limit", Type: DB_BigInt, Nullable: false}, 79 | {Name: "created", Type: DB_DateTime, Nullable: false}, 80 | {Name: "updated", Type: DB_DateTime, Nullable: false}, 81 | }, 82 | Indices: []*Index{ 83 | {Cols: []string{"org_id", "target"}, Type: UniqueIndex}, 84 | }, 85 | } 86 | mg.AddMigration("create quota table v3", NewAddTableMigration(quotaV3)) 87 | 88 | //------- indexes ------------------ 89 | addTableIndicesMigrations(mg, "v3", quotaV3) 90 | 91 | //------- copy data from v1 to v2 ------------------- 92 | mg.AddMigration("copy quota v2 to v3", NewCopyTableDataMigration("quota", "quota_v2", map[string]string{ 93 | "id": "id", 94 | "org_id": "org_id", 95 | "target": "target", 96 | "limit": "limit", 
97 | "created": "created", 98 | "updated": "updated", 99 | })) 100 | mg.AddMigration("Drop old table quota_v2", NewDropTableMigration("quota_v2")) 101 | 102 | } 103 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/column.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | // Notice 4 | // code based on parts from from https://github.com/go-xorm/core/blob/3e0fa232ab5c90996406c0cd7ae86ad0e5ecf85f/column.go 5 | 6 | type Column struct { 7 | Name string 8 | Type string 9 | Length int 10 | Length2 int 11 | Nullable bool 12 | IsPrimaryKey bool 13 | IsAutoIncrement bool 14 | Default string 15 | } 16 | 17 | func (col *Column) String(d Dialect) string { 18 | sql := d.QuoteStr() + col.Name + d.QuoteStr() + " " 19 | 20 | sql += d.SqlType(col) + " " 21 | 22 | if col.IsPrimaryKey { 23 | sql += "PRIMARY KEY " 24 | if col.IsAutoIncrement { 25 | sql += d.AutoIncrStr() + " " 26 | } 27 | } 28 | 29 | if d.ShowCreateNull() { 30 | if col.Nullable { 31 | sql += "NULL " 32 | } else { 33 | sql += "NOT NULL " 34 | } 35 | } 36 | 37 | if col.Default != "" { 38 | sql += "DEFAULT " + col.Default + " " 39 | } 40 | 41 | return sql 42 | } 43 | 44 | func (col *Column) StringNoPk(d Dialect) string { 45 | sql := d.QuoteStr() + col.Name + d.QuoteStr() + " " 46 | 47 | sql += d.SqlType(col) + " " 48 | 49 | if d.ShowCreateNull() { 50 | if col.Nullable { 51 | sql += "NULL " 52 | } else { 53 | sql += "NOT NULL " 54 | } 55 | } 56 | 57 | if col.Default != "" { 58 | sql += "DEFAULT " + col.Default + " " 59 | } 60 | 61 | return sql 62 | } 63 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/conditions.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | type MigrationCondition interface { 4 | Sql(dialect Dialect) (string, []interface{}) 5 | } 6 | 7 | type IfTableExistsCondition struct { 8 | TableName string 9 | } 10 | 11 | func (c *IfTableExistsCondition) Sql(dialect Dialect) (string, []interface{}) { 12 | return dialect.TableCheckSql(c.TableName) 13 | } 14 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/dialect.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | ) 7 | 8 | type Dialect interface { 9 | DriverName() string 10 | QuoteStr() string 11 | Quote(string) string 12 | AndStr() string 13 | AutoIncrStr() string 14 | OrStr() string 15 | EqStr() string 16 | ShowCreateNull() bool 17 | SqlType(col *Column) string 18 | SupportEngine() bool 19 | LikeStr() string 20 | 21 | CreateIndexSql(tableName string, index *Index) string 22 | CreateTableSql(table *Table) string 23 | AddColumnSql(tableName string, Col *Column) string 24 | CopyTableData(sourceTable string, targetTable string, sourceCols []string, targetCols []string) string 25 | DropTable(tableName string) string 26 | DropIndexSql(tableName string, index *Index) string 27 | 28 | TableCheckSql(tableName string) (string, []interface{}) 29 | RenameTable(oldName string, newName string) string 30 | } 31 | 32 | func NewDialect(name string) Dialect { 33 | switch name { 34 | case MYSQL: 35 | return NewMysqlDialect() 36 | case SQLITE: 37 | return NewSqlite3Dialect() 38 | case POSTGRES: 39 | return NewPostgresDialect() 40 | } 41 | 42 | panic("Unsupported database type: " + name) 43 
| } 44 | 45 | type BaseDialect struct { 46 | dialect Dialect 47 | driverName string 48 | } 49 | 50 | func (d *BaseDialect) DriverName() string { 51 | return d.driverName 52 | } 53 | 54 | func (b *BaseDialect) ShowCreateNull() bool { 55 | return true 56 | } 57 | 58 | func (b *BaseDialect) AndStr() string { 59 | return "AND" 60 | } 61 | 62 | func (b *BaseDialect) LikeStr() string { 63 | return "LIKE" 64 | } 65 | 66 | func (b *BaseDialect) OrStr() string { 67 | return "OR" 68 | } 69 | 70 | func (b *BaseDialect) EqStr() string { 71 | return "=" 72 | } 73 | 74 | func (b *BaseDialect) CreateTableSql(table *Table) string { 75 | var sql string 76 | sql = "CREATE TABLE IF NOT EXISTS " 77 | sql += b.dialect.Quote(table.Name) + " (\n" 78 | 79 | pkList := table.PrimaryKeys 80 | 81 | for _, col := range table.Columns { 82 | if col.IsPrimaryKey && len(pkList) == 1 { 83 | sql += col.String(b.dialect) 84 | } else { 85 | sql += col.StringNoPk(b.dialect) 86 | } 87 | sql = strings.TrimSpace(sql) 88 | sql += "\n, " 89 | } 90 | 91 | if len(pkList) > 1 { 92 | sql += "PRIMARY KEY ( " 93 | sql += b.dialect.Quote(strings.Join(pkList, b.dialect.Quote(","))) 94 | sql += " ), " 95 | } 96 | 97 | sql = sql[:len(sql)-2] + ")" 98 | if b.dialect.SupportEngine() { 99 | sql += " ENGINE=InnoDB DEFAULT CHARSET UTF8 " 100 | } 101 | 102 | sql += ";" 103 | return sql 104 | } 105 | 106 | func (db *BaseDialect) AddColumnSql(tableName string, col *Column) string { 107 | return fmt.Sprintf("alter table %s ADD COLUMN %s", db.dialect.Quote(tableName), col.StringNoPk(db.dialect)) 108 | } 109 | 110 | func (db *BaseDialect) CreateIndexSql(tableName string, index *Index) string { 111 | quote := db.dialect.Quote 112 | var unique string 113 | if index.Type == UniqueIndex { 114 | unique = " UNIQUE" 115 | } 116 | 117 | idxName := index.XName(tableName) 118 | 119 | return fmt.Sprintf("CREATE%s INDEX %v ON %v (%v);", unique, 120 | quote(idxName), quote(tableName), 121 | quote(strings.Join(index.Cols, quote(",")))) 122 | } 123 | 124 | func (db *BaseDialect) QuoteColList(cols []string) string { 125 | var sourceColsSql = "" 126 | for _, col := range cols { 127 | sourceColsSql += db.dialect.Quote(col) 128 | sourceColsSql += "\n, " 129 | } 130 | return strings.TrimSuffix(sourceColsSql, "\n, ") 131 | } 132 | 133 | func (db *BaseDialect) CopyTableData(sourceTable string, targetTable string, sourceCols []string, targetCols []string) string { 134 | sourceColsSql := db.QuoteColList(sourceCols) 135 | targetColsSql := db.QuoteColList(targetCols) 136 | 137 | quote := db.dialect.Quote 138 | return fmt.Sprintf("INSERT INTO %s (%s) SELECT %s FROM %s", quote(targetTable), targetColsSql, sourceColsSql, quote(sourceTable)) 139 | } 140 | 141 | func (db *BaseDialect) DropTable(tableName string) string { 142 | quote := db.dialect.Quote 143 | return fmt.Sprintf("DROP TABLE IF EXISTS %s", quote(tableName)) 144 | } 145 | 146 | func (db *BaseDialect) RenameTable(oldName string, newName string) string { 147 | quote := db.dialect.Quote 148 | return fmt.Sprintf("ALTER TABLE %s RENAME TO %s", quote(oldName), quote(newName)) 149 | } 150 | 151 | func (db *BaseDialect) DropIndexSql(tableName string, index *Index) string { 152 | quote := db.dialect.Quote 153 | var name string 154 | name = index.XName(tableName) 155 | return fmt.Sprintf("DROP INDEX %v ON %s", quote(name), quote(tableName)) 156 | } 157 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/migrations.go: 
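To make the string assembly in BaseDialect concrete, here is a hedged sketch of rendering a two-column table through the MySQL dialect (fmt assumed imported; the output comment is approximate):

d := NewMysqlDialect()
t := Table{
	Name: "example",
	Columns: []*Column{
		{Name: "id", Type: DB_BigInt, IsPrimaryKey: true, IsAutoIncrement: true},
		{Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false},
	},
	PrimaryKeys: []string{"id"},
}
fmt.Println(d.CreateTableSql(&t))
// Roughly:
// CREATE TABLE IF NOT EXISTS `example` (
// `id` BIGINT(20) PRIMARY KEY AUTO_INCREMENT NOT NULL
// , `name` VARCHAR(255) NOT NULL) ENGINE=InnoDB DEFAULT CHARSET UTF8 ;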
-------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "fmt" 5 | "strings" 6 | 7 | "github.com/go-xorm/xorm" 8 | ) 9 | 10 | type OnSuccess func(*xorm.Session) error 11 | 12 | type MigrationBase struct { 13 | id string 14 | Condition MigrationCondition 15 | OnSuccess OnSuccess 16 | } 17 | 18 | func (m *MigrationBase) Id() string { 19 | return m.id 20 | } 21 | 22 | func (m *MigrationBase) SetId(id string) { 23 | m.id = id 24 | } 25 | 26 | func (m *MigrationBase) GetCondition() MigrationCondition { 27 | return m.Condition 28 | } 29 | 30 | func (m *MigrationBase) ExecOnSuccess(sess *xorm.Session) error { 31 | if m.OnSuccess != nil { 32 | return m.OnSuccess(sess) 33 | } 34 | return nil 35 | } 36 | 37 | type RawSqlMigration struct { 38 | MigrationBase 39 | 40 | sqlite string 41 | mysql string 42 | postgres string 43 | } 44 | 45 | func (m *RawSqlMigration) Sql(dialect Dialect) string { 46 | switch dialect.DriverName() { 47 | case MYSQL: 48 | return m.mysql 49 | case SQLITE: 50 | return m.sqlite 51 | case POSTGRES: 52 | return m.postgres 53 | } 54 | 55 | panic("db type not supported") 56 | } 57 | 58 | func (m *RawSqlMigration) Sqlite(sql string) *RawSqlMigration { 59 | m.sqlite = sql 60 | return m 61 | } 62 | 63 | func (m *RawSqlMigration) Mysql(sql string) *RawSqlMigration { 64 | m.mysql = sql 65 | return m 66 | } 67 | 68 | func (m *RawSqlMigration) Postgres(sql string) *RawSqlMigration { 69 | m.postgres = sql 70 | return m 71 | } 72 | 73 | type AddColumnMigration struct { 74 | MigrationBase 75 | tableName string 76 | column *Column 77 | } 78 | 79 | func NewAddColumnMigration(table Table, col *Column) *AddColumnMigration { 80 | return &AddColumnMigration{tableName: table.Name, column: col} 81 | } 82 | 83 | func (m *AddColumnMigration) Table(tableName string) *AddColumnMigration { 84 | m.tableName = tableName 85 | return m 86 | } 87 | 88 | func (m *AddColumnMigration) Column(col *Column) *AddColumnMigration { 89 | m.column = col 90 | return m 91 | } 92 | 93 | func (m *AddColumnMigration) Sql(dialect Dialect) string { 94 | return dialect.AddColumnSql(m.tableName, m.column) 95 | } 96 | 97 | type AddIndexMigration struct { 98 | MigrationBase 99 | tableName string 100 | index *Index 101 | } 102 | 103 | func NewAddIndexMigration(table Table, index *Index) *AddIndexMigration { 104 | return &AddIndexMigration{tableName: table.Name, index: index} 105 | } 106 | 107 | func (m *AddIndexMigration) Table(tableName string) *AddIndexMigration { 108 | m.tableName = tableName 109 | return m 110 | } 111 | 112 | func (m *AddIndexMigration) Sql(dialect Dialect) string { 113 | return dialect.CreateIndexSql(m.tableName, m.index) 114 | } 115 | 116 | type DropIndexMigration struct { 117 | MigrationBase 118 | tableName string 119 | index *Index 120 | } 121 | 122 | func NewDropIndexMigration(table Table, index *Index) *DropIndexMigration { 123 | return &DropIndexMigration{tableName: table.Name, index: index} 124 | } 125 | 126 | func (m *DropIndexMigration) Sql(dialect Dialect) string { 127 | if m.index.Name == "" { 128 | m.index.Name = fmt.Sprintf("%s", strings.Join(m.index.Cols, "_")) 129 | } 130 | return dialect.DropIndexSql(m.tableName, m.index) 131 | } 132 | 133 | type AddTableMigration struct { 134 | MigrationBase 135 | table Table 136 | } 137 | 138 | func NewAddTableMigration(table Table) *AddTableMigration { 139 | for _, col := range table.Columns { 140 | if col.IsPrimaryKey { 141 | table.PrimaryKeys = append(table.PrimaryKeys, col.Name) 142 | } 143 
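RawSqlMigration above carries one statement per supported dialect and panics if asked for any other driver. A hedged sketch of registering one, given a *Migrator named mg; the table and SQL are invented:

// Hypothetical raw-SQL migration with per-dialect statements.
mg.AddMigration("backfill probe.enabled default",
	new(RawSqlMigration).
		Sqlite("UPDATE probe SET enabled = 1 WHERE enabled IS NULL").
		Mysql("UPDATE probe SET enabled = 1 WHERE enabled IS NULL").
		Postgres("UPDATE probe SET enabled = true WHERE enabled IS NULL"))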
| } 144 | return &AddTableMigration{table: table} 145 | } 146 | 147 | func (m *AddTableMigration) Sql(d Dialect) string { 148 | return d.CreateTableSql(&m.table) 149 | } 150 | 151 | type DropTableMigration struct { 152 | MigrationBase 153 | tableName string 154 | } 155 | 156 | func NewDropTableMigration(tableName string) *DropTableMigration { 157 | return &DropTableMigration{tableName: tableName} 158 | } 159 | 160 | func (m *DropTableMigration) Sql(d Dialect) string { 161 | return d.DropTable(m.tableName) 162 | } 163 | 164 | type RenameTableMigration struct { 165 | MigrationBase 166 | oldName string 167 | newName string 168 | } 169 | 170 | func NewRenameTableMigration(oldName string, newName string) *RenameTableMigration { 171 | return &RenameTableMigration{oldName: oldName, newName: newName} 172 | } 173 | 174 | func (m *RenameTableMigration) IfTableExists(tableName string) *RenameTableMigration { 175 | m.Condition = &IfTableExistsCondition{TableName: tableName} 176 | return m 177 | } 178 | 179 | func (m *RenameTableMigration) Rename(oldName string, newName string) *RenameTableMigration { 180 | m.oldName = oldName 181 | m.newName = newName 182 | return m 183 | } 184 | 185 | func (m *RenameTableMigration) Sql(d Dialect) string { 186 | return d.RenameTable(m.oldName, m.newName) 187 | } 188 | 189 | type CopyTableDataMigration struct { 190 | MigrationBase 191 | sourceTable string 192 | targetTable string 193 | sourceCols []string 194 | targetCols []string 195 | colMap map[string]string 196 | } 197 | 198 | func NewCopyTableDataMigration(targetTable string, sourceTable string, colMap map[string]string) *CopyTableDataMigration { 199 | m := &CopyTableDataMigration{sourceTable: sourceTable, targetTable: targetTable} 200 | for key, value := range colMap { 201 | m.targetCols = append(m.targetCols, key) 202 | m.sourceCols = append(m.sourceCols, value) 203 | } 204 | return m 205 | } 206 | 207 | func (m *CopyTableDataMigration) IfTableExists(tableName string) *CopyTableDataMigration { 208 | m.Condition = &IfTableExistsCondition{TableName: tableName} 209 | return m 210 | } 211 | 212 | func (m *CopyTableDataMigration) Sql(d Dialect) string { 213 | return d.CopyTableData(m.sourceTable, m.targetTable, m.sourceCols, m.targetCols) 214 | } 215 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/migrator.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "time" 5 | 6 | _ "github.com/go-sql-driver/mysql" 7 | "github.com/go-xorm/xorm" 8 | _ "github.com/lib/pq" 9 | _ "github.com/mattn/go-sqlite3" 10 | "github.com/raintank/worldping-api/pkg/log" 11 | ) 12 | 13 | type Migrator struct { 14 | LogLevel log.LogLevel 15 | 16 | x *xorm.Engine 17 | dialect Dialect 18 | migrations []Migration 19 | } 20 | 21 | type MigrationLog struct { 22 | Id int64 23 | MigrationId string 24 | Sql string 25 | Success bool 26 | Error string 27 | Timestamp time.Time 28 | } 29 | 30 | func NewMigrator(engine *xorm.Engine) *Migrator { 31 | mg := &Migrator{} 32 | mg.x = engine 33 | mg.LogLevel = log.WARN 34 | mg.migrations = make([]Migration, 0) 35 | mg.dialect = NewDialect(mg.x.DriverName()) 36 | return mg 37 | } 38 | 39 | func (mg *Migrator) AddMigration(id string, m Migration) { 40 | m.SetId(id) 41 | mg.migrations = append(mg.migrations, m) 42 | } 43 | 44 | func (mg *Migrator) GetMigrationLog() (map[string]MigrationLog, error) { 45 | logMap := make(map[string]MigrationLog) 46 | logItems := 
make([]MigrationLog, 0) 47 | 48 | exists, err := mg.x.IsTableExist(new(MigrationLog)) 49 | if err != nil { 50 | return nil, err 51 | } 52 | 53 | if !exists { 54 | return logMap, nil 55 | } 56 | 57 | if err = mg.x.Find(&logItems); err != nil { 58 | return nil, err 59 | } 60 | 61 | for _, logItem := range logItems { 62 | if !logItem.Success { 63 | continue 64 | } 65 | logMap[logItem.MigrationId] = logItem 66 | } 67 | 68 | return logMap, nil 69 | } 70 | 71 | func (mg *Migrator) Start() error { 72 | if mg.LogLevel <= log.INFO { 73 | log.Info("Migrator: Starting DB migration") 74 | } 75 | 76 | logMap, err := mg.GetMigrationLog() 77 | if err != nil { 78 | return err 79 | } 80 | 81 | for _, m := range mg.migrations { 82 | _, exists := logMap[m.Id()] 83 | if exists { 84 | if mg.LogLevel <= log.DEBUG { 85 | log.Debug("Migrator: Skipping migration: %v, Already executed", m.Id()) 86 | } 87 | continue 88 | } 89 | 90 | sql := m.Sql(mg.dialect) 91 | 92 | record := MigrationLog{ 93 | MigrationId: m.Id(), 94 | Sql: sql, 95 | Timestamp: time.Now(), 96 | } 97 | 98 | if mg.LogLevel <= log.DEBUG { 99 | log.Debug("Migrator: Executing SQL: \n %v \n", sql) 100 | } 101 | 102 | if err := mg.exec(m); err != nil { 103 | log.Error(3, "Migrator: error: \n%s:\n%s", err, sql) 104 | record.Error = err.Error() 105 | mg.x.Insert(&record) 106 | return err 107 | } else { 108 | record.Success = true 109 | mg.x.Insert(&record) 110 | } 111 | } 112 | 113 | return nil 114 | } 115 | 116 | func (mg *Migrator) exec(m Migration) error { 117 | if mg.LogLevel <= log.INFO { 118 | log.Info("Migrator: exec migration id: %v", m.Id()) 119 | } 120 | 121 | err := mg.inTransaction(func(sess *xorm.Session) error { 122 | 123 | condition := m.GetCondition() 124 | if condition != nil { 125 | sql, args := condition.Sql(mg.dialect) 126 | results, err := sess.Query(sql, args...) 127 | if err != nil || len(results) == 0 { 128 | log.Info("Migrator: skipping migration id: %v, condition not fulfilled", m.Id()) 129 | return sess.Rollback() 130 | } 131 | } 132 | 133 | _, err := sess.Exec(m.Sql(mg.dialect)) 134 | if err != nil { 135 | log.Error(3, "Migrator: exec FAILED migration id: %v, err: %v", m.Id(), err) 136 | return err 137 | } 138 | //run additiona migration code. 
139 | return m.ExecOnSuccess(sess) 140 | }) 141 | 142 | if err != nil { 143 | return err 144 | } 145 | 146 | return nil 147 | } 148 | 149 | type dbTransactionFunc func(sess *xorm.Session) error 150 | 151 | func (mg *Migrator) inTransaction(callback dbTransactionFunc) error { 152 | var err error 153 | 154 | sess := mg.x.NewSession() 155 | defer sess.Close() 156 | 157 | if err = sess.Begin(); err != nil { 158 | return err 159 | } 160 | 161 | err = callback(sess) 162 | 163 | if err != nil { 164 | sess.Rollback() 165 | return err 166 | } else if err = sess.Commit(); err != nil { 167 | return err 168 | } 169 | 170 | return nil 171 | } 172 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/mysql_dialect.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import "strconv" 4 | 5 | type Mysql struct { 6 | BaseDialect 7 | } 8 | 9 | func NewMysqlDialect() *Mysql { 10 | d := Mysql{} 11 | d.BaseDialect.dialect = &d 12 | d.BaseDialect.driverName = MYSQL 13 | return &d 14 | } 15 | 16 | func (db *Mysql) SupportEngine() bool { 17 | return true 18 | } 19 | 20 | func (db *Mysql) Quote(name string) string { 21 | return "`" + name + "`" 22 | } 23 | 24 | func (db *Mysql) QuoteStr() string { 25 | return "`" 26 | } 27 | 28 | func (db *Mysql) AutoIncrStr() string { 29 | return "AUTO_INCREMENT" 30 | } 31 | 32 | func (db *Mysql) SqlType(c *Column) string { 33 | var res string 34 | switch c.Type { 35 | case DB_Bool: 36 | res = DB_TinyInt 37 | c.Length = 1 38 | case DB_Serial: 39 | c.IsAutoIncrement = true 40 | c.IsPrimaryKey = true 41 | c.Nullable = false 42 | res = DB_Int 43 | case DB_BigSerial: 44 | c.IsAutoIncrement = true 45 | c.IsPrimaryKey = true 46 | c.Nullable = false 47 | res = DB_BigInt 48 | case DB_Bytea: 49 | res = DB_Blob 50 | case DB_TimeStampz: 51 | res = DB_Char 52 | c.Length = 64 53 | case DB_NVarchar: 54 | res = DB_Varchar 55 | default: 56 | res = c.Type 57 | } 58 | 59 | var hasLen1 bool = (c.Length > 0) 60 | var hasLen2 bool = (c.Length2 > 0) 61 | 62 | if res == DB_BigInt && !hasLen1 && !hasLen2 { 63 | c.Length = 20 64 | hasLen1 = true 65 | } 66 | 67 | if hasLen2 { 68 | res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" 69 | } else if hasLen1 { 70 | res += "(" + strconv.Itoa(c.Length) + ")" 71 | } 72 | return res 73 | } 74 | 75 | func (db *Mysql) TableCheckSql(tableName string) (string, []interface{}) { 76 | args := []interface{}{"grafana", tableName} 77 | sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" 
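Putting the migrator together, the intended call sequence is short: build an xorm engine, register the migrations, then Start, which skips anything already recorded in migration_log. A hedged sketch; the driver and connection string are placeholders, and this is not the repo's actual wiring:

// Illustrative bootstrap only.
engine, err := xorm.NewEngine("sqlite3", "file:worldping.db?cache=shared")
if err != nil {
	panic(err)
}
mg := NewMigrator(engine)
mg.LogLevel = log.INFO       // surface per-migration log lines
migrations.AddMigrations(mg) // registers every migration in order
if err := mg.Start(); err != nil {
	panic(err) // the failing statement is also written to migration_log
}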
78 | return sql, args 79 | } 80 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/postgres_dialect.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "fmt" 5 | "strconv" 6 | ) 7 | 8 | type Postgres struct { 9 | BaseDialect 10 | } 11 | 12 | func NewPostgresDialect() *Postgres { 13 | d := Postgres{} 14 | d.BaseDialect.dialect = &d 15 | d.BaseDialect.driverName = POSTGRES 16 | return &d 17 | } 18 | 19 | func (db *Postgres) SupportEngine() bool { 20 | return false 21 | } 22 | 23 | func (db *Postgres) Quote(name string) string { 24 | return "\"" + name + "\"" 25 | } 26 | 27 | func (db *Postgres) QuoteStr() string { 28 | return "\"" 29 | } 30 | 31 | func (b *Postgres) LikeStr() string { 32 | return "ILIKE" 33 | } 34 | 35 | func (db *Postgres) AutoIncrStr() string { 36 | return "" 37 | } 38 | 39 | func (db *Postgres) SqlType(c *Column) string { 40 | var res string 41 | switch t := c.Type; t { 42 | case DB_TinyInt: 43 | res = DB_SmallInt 44 | return res 45 | case DB_MediumInt, DB_Int, DB_Integer: 46 | if c.IsAutoIncrement { 47 | return DB_Serial 48 | } 49 | return DB_Integer 50 | case DB_Serial, DB_BigSerial: 51 | c.IsAutoIncrement = true 52 | c.Nullable = false 53 | res = t 54 | case DB_Binary, DB_VarBinary: 55 | return DB_Bytea 56 | case DB_DateTime: 57 | res = DB_TimeStamp 58 | case DB_TimeStampz: 59 | return "timestamp with time zone" 60 | case DB_Float: 61 | res = DB_Real 62 | case DB_TinyText, DB_MediumText, DB_LongText: 63 | res = DB_Text 64 | case DB_NVarchar: 65 | res = DB_Varchar 66 | case DB_Uuid: 67 | res = DB_Uuid 68 | case DB_Blob, DB_TinyBlob, DB_MediumBlob, DB_LongBlob: 69 | return DB_Bytea 70 | case DB_Double: 71 | return "DOUBLE PRECISION" 72 | default: 73 | if c.IsAutoIncrement { 74 | return DB_Serial 75 | } 76 | res = t 77 | } 78 | 79 | var hasLen1 bool = (c.Length > 0) 80 | var hasLen2 bool = (c.Length2 > 0) 81 | if hasLen2 { 82 | res += "(" + strconv.Itoa(c.Length) + "," + strconv.Itoa(c.Length2) + ")" 83 | } else if hasLen1 { 84 | res += "(" + strconv.Itoa(c.Length) + ")" 85 | } 86 | return res 87 | } 88 | 89 | func (db *Postgres) TableCheckSql(tableName string) (string, []interface{}) { 90 | args := []interface{}{"grafana", tableName} 91 | sql := "SELECT `TABLE_NAME` from `INFORMATION_SCHEMA`.`TABLES` WHERE `TABLE_SCHEMA`=? and `TABLE_NAME`=?" 
92 | return sql, args 93 | } 94 | 95 | func (db *Postgres) DropIndexSql(tableName string, index *Index) string { 96 | quote := db.Quote 97 | idxName := index.XName(tableName) 98 | return fmt.Sprintf("DROP INDEX %v", quote(idxName)) 99 | } 100 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/sqlite_dialect.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import "fmt" 4 | 5 | type Sqlite3 struct { 6 | BaseDialect 7 | } 8 | 9 | func NewSqlite3Dialect() *Sqlite3 { 10 | d := Sqlite3{} 11 | d.BaseDialect.dialect = &d 12 | d.BaseDialect.driverName = SQLITE 13 | return &d 14 | } 15 | 16 | func (db *Sqlite3) SupportEngine() bool { 17 | return false 18 | } 19 | 20 | func (db *Sqlite3) Quote(name string) string { 21 | return "`" + name + "`" 22 | } 23 | 24 | func (db *Sqlite3) QuoteStr() string { 25 | return "`" 26 | } 27 | 28 | func (db *Sqlite3) AutoIncrStr() string { 29 | return "AUTOINCREMENT" 30 | } 31 | 32 | func (db *Sqlite3) SqlType(c *Column) string { 33 | switch c.Type { 34 | case DB_Date, DB_DateTime, DB_TimeStamp, DB_Time: 35 | return DB_DateTime 36 | case DB_TimeStampz: 37 | return DB_Text 38 | case DB_Char, DB_Varchar, DB_NVarchar, DB_TinyText, DB_Text, DB_MediumText, DB_LongText: 39 | return DB_Text 40 | case DB_Bit, DB_TinyInt, DB_SmallInt, DB_MediumInt, DB_Int, DB_Integer, DB_BigInt, DB_Bool: 41 | return DB_Integer 42 | case DB_Float, DB_Double, DB_Real: 43 | return DB_Real 44 | case DB_Decimal, DB_Numeric: 45 | return DB_Numeric 46 | case DB_TinyBlob, DB_Blob, DB_MediumBlob, DB_LongBlob, DB_Bytea, DB_Binary, DB_VarBinary: 47 | return DB_Blob 48 | case DB_Serial, DB_BigSerial: 49 | c.IsPrimaryKey = true 50 | c.IsAutoIncrement = true 51 | c.Nullable = false 52 | return DB_Integer 53 | default: 54 | return c.Type 55 | } 56 | } 57 | 58 | func (db *Sqlite3) TableCheckSql(tableName string) (string, []interface{}) { 59 | args := []interface{}{tableName} 60 | return "SELECT name FROM sqlite_master WHERE type='table' and name = ?", args 61 | } 62 | 63 | func (db *Sqlite3) DropIndexSql(tableName string, index *Index) string { 64 | quote := db.Quote 65 | //var unique string 66 | idxName := index.XName(tableName) 67 | return fmt.Sprintf("DROP INDEX %v", quote(idxName)) 68 | } 69 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/migrator/types.go: -------------------------------------------------------------------------------- 1 | package migrator 2 | 3 | import ( 4 | "fmt" 5 | "github.com/go-xorm/xorm" 6 | "strings" 7 | ) 8 | 9 | const ( 10 | POSTGRES = "postgres" 11 | SQLITE = "sqlite3" 12 | MYSQL = "mysql" 13 | ) 14 | 15 | type Migration interface { 16 | Sql(dialect Dialect) string 17 | Id() string 18 | SetId(string) 19 | GetCondition() MigrationCondition 20 | ExecOnSuccess(*xorm.Session) error 21 | } 22 | 23 | type SQLType string 24 | 25 | type ColumnType string 26 | 27 | const ( 28 | DB_TYPE_STRING ColumnType = "String" 29 | ) 30 | 31 | type Table struct { 32 | Name string 33 | Columns []*Column 34 | PrimaryKeys []string 35 | Indices []*Index 36 | } 37 | 38 | const ( 39 | IndexType = iota + 1 40 | UniqueIndex 41 | ) 42 | 43 | type Index struct { 44 | Name string 45 | Type int 46 | Cols []string 47 | } 48 | 49 | func (index *Index) XName(tableName string) string { 50 | if index.Name == "" { 51 | index.Name = fmt.Sprintf("%s", strings.Join(index.Cols, "_")) 52 | } 53 | 54 | if !strings.HasPrefix(index.Name, 
"UQE_") && 55 | !strings.HasPrefix(index.Name, "IDX_") { 56 | if index.Type == UniqueIndex { 57 | return fmt.Sprintf("UQE_%v_%v", tableName, index.Name) 58 | } 59 | return fmt.Sprintf("IDX_%v_%v", tableName, index.Name) 60 | } 61 | return index.Name 62 | } 63 | 64 | var ( 65 | DB_Bit = "BIT" 66 | DB_TinyInt = "TINYINT" 67 | DB_SmallInt = "SMALLINT" 68 | DB_MediumInt = "MEDIUMINT" 69 | DB_Int = "INT" 70 | DB_Integer = "INTEGER" 71 | DB_BigInt = "BIGINT" 72 | 73 | DB_Enum = "ENUM" 74 | DB_Set = "SET" 75 | 76 | DB_Char = "CHAR" 77 | DB_Varchar = "VARCHAR" 78 | DB_NVarchar = "NVARCHAR" 79 | DB_TinyText = "TINYTEXT" 80 | DB_Text = "TEXT" 81 | DB_MediumText = "MEDIUMTEXT" 82 | DB_LongText = "LONGTEXT" 83 | DB_Uuid = "UUID" 84 | 85 | DB_Date = "DATE" 86 | DB_DateTime = "DATETIME" 87 | DB_Time = "TIME" 88 | DB_TimeStamp = "TIMESTAMP" 89 | DB_TimeStampz = "TIMESTAMPZ" 90 | 91 | DB_Decimal = "DECIMAL" 92 | DB_Numeric = "NUMERIC" 93 | 94 | DB_Real = "REAL" 95 | DB_Float = "FLOAT" 96 | DB_Double = "DOUBLE" 97 | 98 | DB_Binary = "BINARY" 99 | DB_VarBinary = "VARBINARY" 100 | DB_TinyBlob = "TINYBLOB" 101 | DB_Blob = "BLOB" 102 | DB_MediumBlob = "MEDIUMBLOB" 103 | DB_LongBlob = "LONGBLOB" 104 | DB_Bytea = "BYTEA" 105 | 106 | DB_Bool = "BOOL" 107 | 108 | DB_Serial = "SERIAL" 109 | DB_BigSerial = "BIGSERIAL" 110 | ) 111 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/monitor_test.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | m "github.com/raintank/worldping-api/pkg/models" 9 | ) 10 | 11 | type scenario struct { 12 | now int64 13 | inState m.CheckEvalResult 14 | stateCheck int64 15 | stateChange int64 16 | frequency int64 17 | outState m.CheckEvalResult 18 | } 19 | 20 | func (s scenario) String() string { 21 | return fmt.Sprintf(" now=%d, inState=%s, stateCheck=%d, freq=%d, outState=%s", s.now, s.inState, s.stateCheck, s.frequency, s.outState) 22 | } 23 | 24 | func scrutinizeTest(s scenario) m.Check { 25 | mon := m.Check{ 26 | State: s.inState, 27 | StateCheck: time.Unix(s.stateCheck, 0), 28 | StateChange: time.Unix(s.stateChange, 0), 29 | Frequency: s.frequency, 30 | } 31 | scrutinizeState(time.Unix(s.now, 0), &mon) 32 | return mon 33 | } 34 | 35 | // if exec date is in future, func should be safe and not make changes to state 36 | func TestScrutinizeStateFuture(t *testing.T) { 37 | scenarios := []scenario{ 38 | {120, m.EvalResultUnknown, 121, 1, 10, m.EvalResultUnknown}, 39 | {120, m.EvalResultUnknown, 130, 1, 10, m.EvalResultUnknown}, 40 | {120, m.EvalResultUnknown, 140, 1, 10, m.EvalResultUnknown}, 41 | {120, m.EvalResultUnknown, 150, 1, 10, m.EvalResultUnknown}, 42 | {120, m.EvalResultCrit, 121, 1, 10, m.EvalResultCrit}, 43 | {120, m.EvalResultCrit, 130, 1, 10, m.EvalResultCrit}, 44 | {120, m.EvalResultCrit, 140, 1, 10, m.EvalResultCrit}, 45 | {120, m.EvalResultCrit, 150, 1, 10, m.EvalResultCrit}, 46 | } 47 | for _, s := range scenarios { 48 | res := scrutinizeTest(s) 49 | if res.State != s.outState { 50 | t.Errorf("scenario %s: expected %s - got %s", s, s.outState, res.State) 51 | } 52 | } 53 | } 54 | 55 | var realScenarios = []scenario{ 56 | // should run at (or shortly after) 107, 117, 127, 137, etc 57 | // if last run stays at 117, state should become unknown after 117 + 2*10 = 137 58 | {117, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 59 | {118, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 60 | {119, 
m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 61 | {120, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 62 | {121, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 63 | {122, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 64 | {123, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 65 | {124, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 66 | {125, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 67 | {126, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 68 | {127, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 69 | {128, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 70 | {129, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 71 | {130, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 72 | {131, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 73 | {132, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 74 | {133, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 75 | {134, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 76 | {135, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 77 | {136, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 78 | {137, m.EvalResultCrit, 117, 1, 10, m.EvalResultCrit}, 79 | {148, m.EvalResultCrit, 117, 1, 10, m.EvalResultUnknown}, 80 | {149, m.EvalResultCrit, 117, 1, 10, m.EvalResultUnknown}, 81 | {150, m.EvalResultCrit, 117, 1, 10, m.EvalResultUnknown}, 82 | {151, m.EvalResultCrit, 117, 1, 10, m.EvalResultUnknown}, 83 | } 84 | 85 | // crit/warn should become unknown after 2*freq has passed 86 | func TestScrutinizeStateCritToUnknown(t *testing.T) { 87 | for _, s := range realScenarios { 88 | res := scrutinizeTest(s) 89 | if res.State != s.outState { 90 | t.Errorf("scenario %s: expected %s - got %s", s, s.outState, res.State) 91 | } 92 | } 93 | } 94 | 95 | // unknown should just stay unknown 96 | func TestScrutinizeStateStayUnknown(t *testing.T) { 97 | for _, s := range realScenarios { 98 | s.inState = m.EvalResultUnknown 99 | res := scrutinizeTest(s) 100 | if res.State != m.EvalResultUnknown { 101 | t.Errorf("scenario %s: expected %s - got %s", s, s.outState, res.State) 102 | } 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/probe_test.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | "time" 7 | 8 | m "github.com/raintank/worldping-api/pkg/models" 9 | . 
"github.com/smartystreets/goconvey/convey" 10 | ) 11 | 12 | func TestProbes(t *testing.T) { 13 | InitTestDB(t) 14 | probeCount := 0 15 | Convey("When adding probe", t, func() { 16 | pre := time.Now() 17 | p := &m.ProbeDTO{ 18 | Name: fmt.Sprintf("test%d", probeCount), 19 | OrgId: 1, 20 | Tags: []string{"test", "dev"}, 21 | Public: false, 22 | Latitude: 1.0, 23 | Longitude: 1.0, 24 | Online: false, 25 | Enabled: true, 26 | } 27 | err := AddProbe(p) 28 | So(err, ShouldBeNil) 29 | probeCount++ 30 | So(p.Id, ShouldNotEqual, 0) 31 | So(p.Name, ShouldEqual, fmt.Sprintf("test%d", probeCount-1)) 32 | So(p.OrgId, ShouldEqual, 1) 33 | So(len(p.Tags), ShouldEqual, 2) 34 | So(p.Tags, ShouldContain, "test") 35 | So(p.Tags, ShouldContain, "dev") 36 | So(p.Public, ShouldEqual, false) 37 | So(p.Online, ShouldEqual, false) 38 | So(p.Created.Unix(), ShouldBeGreaterThanOrEqualTo, pre.Unix()) 39 | So(p.Created.Unix(), ShouldBeLessThanOrEqualTo, time.Now().Unix()) 40 | So(p.Updated.Unix(), ShouldBeGreaterThanOrEqualTo, pre.Unix()) 41 | So(p.Updated.Unix(), ShouldBeLessThanOrEqualTo, time.Now().Unix()) 42 | So(p.OnlineChange.Unix(), ShouldBeGreaterThanOrEqualTo, pre.Unix()) 43 | So(p.OnlineChange.Unix(), ShouldBeLessThanOrEqualTo, time.Now().Unix()) 44 | Convey("When replacing Probe tags", func() { 45 | p.Tags = []string{"foo", "bar"} 46 | err := UpdateProbe(p) 47 | So(err, ShouldBeNil) 48 | So(p.Updated.Unix(), ShouldBeGreaterThanOrEqualTo, p.Created.Unix()) 49 | So(p.Updated.Unix(), ShouldBeLessThanOrEqualTo, time.Now().Unix()) 50 | Convey("Tags should be updated in DB", func() { 51 | updated, err := GetProbeById(p.Id, p.OrgId) 52 | So(err, ShouldBeNil) 53 | So(len(updated.Tags), ShouldEqual, 2) 54 | So(updated.Tags, ShouldContain, "foo") 55 | So(updated.Tags, ShouldContain, "bar") 56 | }) 57 | 58 | }) 59 | Convey("When removing probe tag", func() { 60 | p.Tags = []string{"test"} 61 | err := UpdateProbe(p) 62 | So(err, ShouldBeNil) 63 | So(p.Updated.Unix(), ShouldBeGreaterThanOrEqualTo, p.Created.Unix()) 64 | So(p.Updated.Unix(), ShouldBeLessThanOrEqualTo, time.Now().Unix()) 65 | Convey("Tags should be updated in DB", func() { 66 | updated, err := GetProbeById(p.Id, p.OrgId) 67 | So(err, ShouldBeNil) 68 | So(len(updated.Tags), ShouldEqual, 1) 69 | So(updated.Tags, ShouldContain, "test") 70 | }) 71 | }) 72 | Convey("When adding additional probe tags", func() { 73 | p.Tags = []string{"test", "dev", "foo"} 74 | err := UpdateProbe(p) 75 | So(err, ShouldBeNil) 76 | So(p.Updated.Unix(), ShouldBeGreaterThanOrEqualTo, p.Created.Unix()) 77 | So(p.Updated.Unix(), ShouldBeLessThanOrEqualTo, time.Now().Unix()) 78 | Convey("Tags should be updated in DB", func() { 79 | updated, err := GetProbeById(p.Id, p.OrgId) 80 | So(err, ShouldBeNil) 81 | So(len(updated.Tags), ShouldEqual, 3) 82 | So(updated.Tags, ShouldContain, "foo") 83 | So(updated.Tags, ShouldContain, "dev") 84 | So(updated.Tags, ShouldContain, "test") 85 | }) 86 | }) 87 | Convey("When deleting probe", func() { 88 | err := DeleteProbe(p.Id, p.OrgId) 89 | So(err, ShouldBeNil) 90 | probeCount-- 91 | Convey("When listing probes for org with probes", func() { 92 | probes, err := GetProbes(&m.GetProbesQuery{ 93 | OrgId: 1, 94 | }) 95 | So(err, ShouldBeNil) 96 | So(len(probes), ShouldEqual, probeCount) 97 | }) 98 | }) 99 | Convey("When listing probes for org with no probes", func() { 100 | probes, err := GetProbes(&m.GetProbesQuery{ 101 | OrgId: 123, 102 | }) 103 | So(err, ShouldBeNil) 104 | So(len(probes), ShouldEqual, 0) 105 | }) 106 | }) 107 | } 108 | 109 | func 
TestProbeSessions(t *testing.T) { 110 | InitTestDB(t) 111 | p := &m.ProbeDTO{ 112 | Name: fmt.Sprintf("test%d", 1), 113 | OrgId: 1, 114 | Tags: []string{"test", "dev"}, 115 | Public: false, 116 | Latitude: 1.0, 117 | Longitude: 1.0, 118 | Online: false, 119 | Enabled: true, 120 | } 121 | err := AddProbe(p) 122 | if err != nil { 123 | t.Fatal(err) 124 | } 125 | Convey("When adding probeSession", t, func() { 126 | pre := time.Now() 127 | session := m.ProbeSession{ 128 | OrgId: 1, 129 | ProbeId: p.Id, 130 | SocketId: "sid1", 131 | Version: "1.0.0", 132 | InstanceId: "default", 133 | RemoteIp: "127.0.0.1", 134 | } 135 | err := AddProbeSession(&session) 136 | So(err, ShouldBeNil) 137 | So(session.Id, ShouldNotEqual, 0) 138 | 139 | Convey("new session should set probe to online", func() { 140 | probe, err := GetProbeById(p.Id, p.OrgId) 141 | So(err, ShouldBeNil) 142 | So(probe, ShouldNotBeNil) 143 | So(probe.Online, ShouldEqual, true) 144 | So(probe.OnlineChange.Unix(), ShouldBeGreaterThanOrEqualTo, pre.Unix()) 145 | 146 | Convey("when deleting probeSession", func() { 147 | err := DeleteProbeSession(&session) 148 | So(err, ShouldBeNil) 149 | Convey("probe should be offline again", func() { 150 | probe, err := GetProbeById(p.Id, p.OrgId) 151 | So(err, ShouldBeNil) 152 | So(probe, ShouldNotBeNil) 153 | So(probe.Online, ShouldEqual, false) 154 | So(probe.OnlineChange.Unix(), ShouldBeGreaterThanOrEqualTo, pre.Unix()) 155 | }) 156 | }) 157 | }) 158 | }) 159 | } 160 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/quota.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "fmt" 5 | m "github.com/raintank/worldping-api/pkg/models" 6 | "github.com/raintank/worldping-api/pkg/setting" 7 | ) 8 | 9 | type targetCount struct { 10 | Count int64 11 | } 12 | 13 | func GetOrgQuotaByTarget(orgId int64, target string, def int64) (*m.OrgQuotaDTO, error) { 14 | sess, err := newSession(false, "quota") 15 | if err != nil { 16 | return nil, err 17 | } 18 | return getOrgQuotaByTarget(sess, orgId, target, def) 19 | } 20 | 21 | func getOrgQuotaByTarget(sess *session, orgId int64, target string, def int64) (*m.OrgQuotaDTO, error) { 22 | quota := m.Quota{ 23 | Target: target, 24 | OrgId: orgId, 25 | } 26 | has, err := sess.Get(&quota) 27 | if err != nil { 28 | return nil, err 29 | } else if !has { 30 | quota.Limit = def 31 | } 32 | 33 | //get quota used. 
34 | var used int64 35 | if target == "downloadLimit" { 36 | used = int64(0) 37 | } else { 38 | rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s where org_id=?", dialect.Quote(target)) 39 | var resp targetCount 40 | if _, err := sess.Sql(rawSql, orgId).Get(&resp); err != nil { 41 | return nil, err 42 | } 43 | used = resp.Count 44 | } 45 | 46 | q := &m.OrgQuotaDTO{ 47 | Target: quota.Target, 48 | Limit: quota.Limit, 49 | OrgId: quota.OrgId, 50 | Used: used, 51 | } 52 | 53 | return q, nil 54 | } 55 | 56 | func GetOrgQuotas(orgId int64) ([]m.OrgQuotaDTO, error) { 57 | sess, err := newSession(false, "quota") 58 | if err != nil { 59 | return nil, err 60 | } 61 | return getOrgQuotas(sess, orgId) 62 | } 63 | 64 | func getOrgQuotas(sess *session, orgId int64) ([]m.OrgQuotaDTO, error) { 65 | quotas := make([]*m.Quota, 0) 66 | if err := sess.Where("org_id=?", orgId).Find(&quotas); err != nil { 67 | return nil, err 68 | } 69 | 70 | defaultQuotas := setting.Quota.Org.ToMap() 71 | 72 | seenTargets := make(map[string]bool) 73 | for _, q := range quotas { 74 | seenTargets[q.Target] = true 75 | } 76 | 77 | for t, v := range defaultQuotas { 78 | if _, ok := seenTargets[t]; !ok { 79 | quotas = append(quotas, &m.Quota{ 80 | OrgId: orgId, 81 | Target: t, 82 | Limit: v, 83 | }) 84 | } 85 | } 86 | 87 | result := make([]m.OrgQuotaDTO, len(quotas)) 88 | for i, q := range quotas { 89 | //get quota used. 90 | var used int64 91 | if q.Target == "downloadLimit" { 92 | used = int64(0) 93 | } else { 94 | rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s where org_id=?", dialect.Quote(q.Target)) 95 | var resp targetCount 96 | if _, err := sess.Sql(rawSql, q.OrgId).Get(&resp); err != nil { 97 | return nil, err 98 | } 99 | used = resp.Count 100 | } 101 | 102 | result[i] = m.OrgQuotaDTO{ 103 | Target: q.Target, 104 | Limit: q.Limit, 105 | OrgId: q.OrgId, 106 | Used: used, 107 | } 108 | } 109 | return result, nil 110 | } 111 | 112 | func UpdateOrgQuota(q *m.OrgQuotaDTO) error { 113 | sess, err := newSession(true, "quota") 114 | if err != nil { 115 | return err 116 | } 117 | defer sess.Cleanup() 118 | 119 | if err = updateOrgQuota(sess, q); err != nil { 120 | return err 121 | } 122 | // audit log? 123 | 124 | sess.Complete() 125 | return nil 126 | } 127 | 128 | func updateOrgQuota(sess *session, q *m.OrgQuotaDTO) error { 129 | //Check if quota is already defined in the DB 130 | quota := m.Quota{ 131 | Target: q.Target, 132 | OrgId: q.OrgId, 133 | } 134 | has, err := sess.Get(&quota) 135 | if err != nil { 136 | return err 137 | } 138 | quota.Limit = q.Limit 139 | if !has { 140 | //No quota in the DB for this target, so create a new one. 141 | if _, err := sess.Insert(&quota); err != nil { 142 | return err 143 | } 144 | } else { 145 | //update existing quota entry in the DB. 146 | if _, err := sess.Id(quota.Id).Update(&quota); err != nil { 147 | return err 148 | } 149 | } 150 | return nil 151 | } 152 | 153 | func GetGlobalQuotaByTarget(target string) (*m.GlobalQuotaDTO, error) { 154 | sess, err := newSession(false, "quota") 155 | if err != nil { 156 | return nil, err 157 | } 158 | return getGlobalQuotaByTarget(sess, target) 159 | } 160 | 161 | func getGlobalQuotaByTarget(sess *session, target string) (*m.GlobalQuotaDTO, error) { 162 | //get quota used. 
163 | rawSql := fmt.Sprintf("SELECT COUNT(*) as count from %s", dialect.Quote(target)) 164 | var resp targetCount 165 | if _, err := sess.Sql(rawSql).Get(&resp); err != nil { 166 | return nil, err 167 | } 168 | 169 | quota := &m.GlobalQuotaDTO{ 170 | Target: target, 171 | Limit: setting.Quota.Global.ToMap()[target], 172 | Used: resp.Count, 173 | } 174 | 175 | return quota, nil 176 | } 177 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/quota_test.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "testing" 5 | 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | "github.com/raintank/worldping-api/pkg/setting" 8 | . "github.com/smartystreets/goconvey/convey" 9 | ) 10 | 11 | func InitTestDB(t *testing.T) { 12 | t.Log("InitTestDB") 13 | err := MockEngine() 14 | if err != nil { 15 | t.Fatalf("failed to init DB. %s", err) 16 | } 17 | } 18 | 19 | func TestQuotaCommandsAndQueries(t *testing.T) { 20 | InitTestDB(t) 21 | setting.Quota = setting.QuotaSettings{ 22 | Enabled: true, 23 | Org: &setting.OrgQuota{ 24 | Endpoint: 5, 25 | Probe: 5, 26 | DownloadLimit: 102400, 27 | }, 28 | Global: &setting.GlobalQuota{ 29 | Endpoint: 5, 30 | Probe: 5, 31 | }, 32 | } 33 | 34 | err := AddEndpoint(&m.EndpointDTO{ 35 | Name: "test1", 36 | OrgId: 1, 37 | }) 38 | if err != nil { 39 | t.Fatal(err) 40 | } 41 | 42 | err = AddProbe(&m.ProbeDTO{ 43 | Name: "test1", 44 | OrgId: 1, 45 | }) 46 | if err != nil { 47 | t.Fatal(err) 48 | } 49 | 50 | err = AddEndpoint(&m.EndpointDTO{ 51 | Name: "test1", 52 | OrgId: 2, 53 | }) 54 | if err != nil { 55 | t.Fatal(err) 56 | } 57 | 58 | err = AddProbe(&m.ProbeDTO{ 59 | Name: "test1", 60 | OrgId: 2, 61 | }) 62 | if err != nil { 63 | t.Fatal(err) 64 | } 65 | 66 | Convey("when org Quota for probes set to 10", t, func() { 67 | newQuota := m.OrgQuotaDTO{ 68 | OrgId: 1, 69 | Target: "probe", 70 | Limit: 10, 71 | } 72 | err := UpdateOrgQuota(&newQuota) 73 | So(err, ShouldBeNil) 74 | 75 | newQuota.OrgId = 4 76 | err = UpdateOrgQuota(&newQuota) 77 | So(err, ShouldBeNil) 78 | 79 | Convey("When geting probe quota for org with 1 probe", func() { 80 | q, err := GetOrgQuotaByTarget(1, "probe", 1) 81 | So(err, ShouldBeNil) 82 | So(q.Limit, ShouldEqual, 10) 83 | So(q.Used, ShouldEqual, 1) 84 | }) 85 | Convey("When geting probe quota for org with 0 probe", func() { 86 | q, err := GetOrgQuotaByTarget(4, "probe", 1) 87 | So(err, ShouldBeNil) 88 | So(q.Limit, ShouldEqual, 10) 89 | So(q.Used, ShouldEqual, 0) 90 | }) 91 | Convey("When getting quota list for org", func() { 92 | quotas, err := GetOrgQuotas(1) 93 | So(err, ShouldBeNil) 94 | So(len(quotas), ShouldEqual, 3) 95 | for _, res := range quotas { 96 | limit := 5 //default quota limit 97 | used := 1 98 | if res.Target == "probe" { 99 | limit = 10 //customized quota limit. 100 | } 101 | if res.Target == "downloadLimit" { 102 | limit = 102400 //customized quota limit. 
103 | used = 0 104 | } 105 | 106 | So(res.Limit, ShouldEqual, limit) 107 | So(res.Used, ShouldEqual, used) 108 | 109 | } 110 | }) 111 | }) 112 | Convey("when org Quota for probes set to default", t, func() { 113 | Convey("When geting probe quota for org with 1 probe", func() { 114 | q, err := GetOrgQuotaByTarget(2, "probe", 3) 115 | So(err, ShouldBeNil) 116 | So(q.Limit, ShouldEqual, 3) 117 | So(q.Used, ShouldEqual, 1) 118 | }) 119 | Convey("When geting probe quota for org with 0 probe", func() { 120 | q, err := GetOrgQuotaByTarget(5, "probe", 3) 121 | So(err, ShouldBeNil) 122 | So(q.Limit, ShouldEqual, 3) 123 | So(q.Used, ShouldEqual, 0) 124 | }) 125 | }) 126 | 127 | Convey("When getting global endpoint quota", t, func() { 128 | q, err := GetGlobalQuotaByTarget("endpoint") 129 | So(err, ShouldBeNil) 130 | 131 | So(q.Limit, ShouldEqual, 5) 132 | So(q.Used, ShouldEqual, 2) 133 | }) 134 | Convey("Should be able to global probe quota", t, func() { 135 | q, err := GetGlobalQuotaByTarget("probe") 136 | So(err, ShouldBeNil) 137 | 138 | So(q.Limit, ShouldEqual, 5) 139 | So(q.Used, ShouldEqual, 2) 140 | }) 141 | 142 | } 143 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/shared.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "github.com/go-xorm/xorm" 5 | ) 6 | 7 | type session struct { 8 | *xorm.Session 9 | transaction bool 10 | complete bool 11 | } 12 | 13 | func newSession(transaction bool, table string) (*session, error) { 14 | if !transaction { 15 | return &session{Session: x.Table(table)}, nil 16 | } 17 | sess := session{Session: x.NewSession(), transaction: true} 18 | if err := sess.Begin(); err != nil { 19 | return nil, err 20 | } 21 | sess.Table(table) 22 | return &sess, nil 23 | } 24 | 25 | func (sess *session) Complete() { 26 | if sess.transaction { 27 | if err := sess.Commit(); err == nil { 28 | sess.complete = true 29 | } 30 | } 31 | } 32 | 33 | func (sess *session) Cleanup() { 34 | if sess.transaction { 35 | if !sess.complete { 36 | sess.Rollback() 37 | } 38 | sess.Close() 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/sqlstore.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "path" 7 | "path/filepath" 8 | "sync" 9 | 10 | "github.com/raintank/worldping-api/pkg/log" 11 | "github.com/raintank/worldping-api/pkg/services/sqlstore/migrations" 12 | "github.com/raintank/worldping-api/pkg/services/sqlstore/migrator" 13 | "github.com/raintank/worldping-api/pkg/services/sqlstore/sqlutil" 14 | "github.com/raintank/worldping-api/pkg/setting" 15 | 16 | _ "github.com/go-sql-driver/mysql" 17 | "github.com/go-xorm/xorm" 18 | _ "github.com/mattn/go-sqlite3" 19 | ) 20 | 21 | var ( 22 | x *xorm.Engine 23 | dialect migrator.Dialect 24 | l sync.Mutex 25 | DbCfg struct { 26 | Type, Host, Name, User, Pwd, Path, SslMode string 27 | } 28 | 29 | UseSQLite3 bool 30 | ) 31 | 32 | // setup and manage an SQLite3 engine for testing. 
33 | func MockEngine() error { 34 | l.Lock() 35 | defer l.Unlock() 36 | if x != nil { 37 | log.Info("cleaning existing DB") 38 | sqlutil.CleanDB(x) 39 | migrator := migrator.NewMigrator(x) 40 | migrator.LogLevel = log.INFO 41 | migrations.AddMigrations(migrator) 42 | 43 | return migrator.Start() 44 | } 45 | e, err := xorm.NewEngine(sqlutil.TestDB_Sqlite3.DriverName, sqlutil.TestDB_Sqlite3.ConnStr) 46 | //x, err := xorm.NewEngine(sqlutil.TestDB_Mysql.DriverName, sqlutil.TestDB_Mysql.ConnStr) 47 | //x, err := xorm.NewEngine(sqlutil.TestDB_Postgres.DriverName, sqlutil.TestDB_Postgres.ConnStr) 48 | e.SetMaxOpenConns(1) 49 | if err != nil { 50 | return err 51 | } 52 | 53 | sqlutil.CleanDB(e) 54 | 55 | return SetEngine(e, false) 56 | } 57 | 58 | func NewEngine() { 59 | x, err := getEngine() 60 | 61 | if err != nil { 62 | log.Fatal(3, "Sqlstore: Fail to connect to database: %v", err) 63 | } 64 | 65 | err = SetEngine(x, setting.Env == setting.DEV) 66 | 67 | if err != nil { 68 | log.Fatal(3, "fail to initialize orm engine: %v", err) 69 | } 70 | x.SetMaxOpenConns(20) 71 | } 72 | 73 | func SetEngine(engine *xorm.Engine, enableLog bool) (err error) { 74 | x = engine 75 | 76 | dialect = migrator.NewDialect(x.DriverName()) 77 | 78 | migrator := migrator.NewMigrator(x) 79 | migrator.LogLevel = log.INFO 80 | migrations.AddMigrations(migrator) 81 | 82 | if err := migrator.Start(); err != nil { 83 | return fmt.Errorf("Sqlstore::Migration failed err: %v\n", err) 84 | } 85 | 86 | if enableLog { 87 | logPath := path.Join(setting.LogsPath, "xorm.log") 88 | os.MkdirAll(path.Dir(logPath), os.ModePerm) 89 | 90 | f, err := os.Create(logPath) 91 | if err != nil { 92 | return fmt.Errorf("sqlstore.init(fail to create xorm.log): %v", err) 93 | } 94 | x.SetLogger(xorm.NewSimpleLogger(f)) 95 | if setting.Env == setting.DEV { 96 | x.ShowSQL(true) 97 | } 98 | } 99 | return nil 100 | } 101 | 102 | func getEngine() (*xorm.Engine, error) { 103 | LoadConfig() 104 | 105 | cnnstr := "" 106 | switch DbCfg.Type { 107 | case "mysql": 108 | cnnstr = fmt.Sprintf("%s:%s@tcp(%s)/%s?charset=utf8", 109 | DbCfg.User, DbCfg.Pwd, DbCfg.Host, DbCfg.Name) 110 | case "sqlite3": 111 | if !filepath.IsAbs(DbCfg.Path) { 112 | DbCfg.Path = filepath.Join(setting.DataPath, DbCfg.Path) 113 | } 114 | os.MkdirAll(path.Dir(DbCfg.Path), os.ModePerm) 115 | cnnstr = "file:" + DbCfg.Path + "?cache=shared&mode=rwc&_loc=Local" 116 | default: 117 | return nil, fmt.Errorf("Unknown database type: %s", DbCfg.Type) 118 | } 119 | 120 | log.Info("Database: %v", DbCfg.Type) 121 | 122 | return xorm.NewEngine(DbCfg.Type, cnnstr) 123 | } 124 | 125 | func LoadConfig() { 126 | sec := setting.Cfg.Section("database") 127 | 128 | DbCfg.Type = sec.Key("type").String() 129 | if DbCfg.Type == "sqlite3" { 130 | UseSQLite3 = true 131 | } 132 | DbCfg.Host = sec.Key("host").String() 133 | DbCfg.Name = sec.Key("name").String() 134 | DbCfg.User = sec.Key("user").String() 135 | if len(DbCfg.Pwd) == 0 { 136 | DbCfg.Pwd = sec.Key("password").String() 137 | } 138 | DbCfg.SslMode = sec.Key("ssl_mode").String() 139 | DbCfg.Path = sec.Key("path").MustString("data/grafana.db") 140 | } 141 | 142 | func TestDB() error { 143 | sess, err := newSession(true, "endpoint") 144 | if err != nil { 145 | return err 146 | } 147 | defer sess.Cleanup() 148 | 149 | if err = testDB(sess); err != nil { 150 | return err 151 | } 152 | sess.Complete() 153 | return nil 154 | } 155 | 156 | func testDB(sess *session) error { 157 | _, err := sess.Query("SELECT 1") 158 | return err 159 | } 160 | 
-------------------------------------------------------------------------------- /pkg/services/sqlstore/sqlstore.goconvey: -------------------------------------------------------------------------------- 1 | -timeout=20s 2 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/sqlutil/sqlutil.go: -------------------------------------------------------------------------------- 1 | package sqlutil 2 | 3 | import ( 4 | "fmt" 5 | 6 | "github.com/go-xorm/xorm" 7 | ) 8 | 9 | type TestDB struct { 10 | DriverName string 11 | ConnStr string 12 | } 13 | 14 | var TestDB_Sqlite3 = TestDB{DriverName: "sqlite3", ConnStr: ":memory:?_loc=Local"} 15 | var TestDB_Mysql = TestDB{DriverName: "mysql", ConnStr: "grafana:password@tcp(localhost:3306)/grafana_tests?charset=utf8"} 16 | var TestDB_Postgres = TestDB{DriverName: "postgres", ConnStr: "user=grafanatest password=grafanatest host=localhost port=5432 dbname=grafanatest sslmode=disable"} 17 | 18 | func CleanDB(x *xorm.Engine) { 19 | if x.DriverName() == "postgres" { 20 | sess := x.NewSession() 21 | defer sess.Close() 22 | 23 | if _, err := sess.Exec("DROP SCHEMA public CASCADE;"); err != nil { 24 | panic("Failed to drop schema public") 25 | } 26 | 27 | if _, err := sess.Exec("CREATE SCHEMA public;"); err != nil { 28 | panic("Failed to create schema public") 29 | } 30 | } else if x.DriverName() == "mysql" { 31 | tables, _ := x.DBMetas() 32 | sess := x.NewSession() 33 | defer sess.Close() 34 | 35 | for _, table := range tables { 36 | if _, err := sess.Exec("set foreign_key_checks = 0"); err != nil { 37 | panic("failed to disable foreign key checks") 38 | } 39 | if _, err := sess.Exec("drop table " + table.Name + " ;"); err != nil { 40 | panic(fmt.Sprintf("failed to delete table: %v, err: %v", table.Name, err)) 41 | } 42 | if _, err := sess.Exec("set foreign_key_checks = 1"); err != nil { 43 | panic("failed to disable foreign key checks") 44 | } 45 | } 46 | } else if x.DriverName() == "sqlite3" { 47 | tables, _ := x.DBMetas() 48 | sess := x.NewSession() 49 | defer sess.Close() 50 | for _, table := range tables { 51 | if _, err := sess.Exec("drop table `" + table.Name + "` ;"); err != nil { 52 | panic(fmt.Sprintf("failed to delete table: %v, err: %v", table.Name, err)) 53 | } 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/usage.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "strconv" 5 | 6 | m "github.com/raintank/worldping-api/pkg/models" 7 | ) 8 | 9 | func GetUsage() (*m.Usage, error) { 10 | sess, err := newSession(false, "endpoint") 11 | if err != nil { 12 | return nil, err 13 | } 14 | return getUsage(sess) 15 | } 16 | 17 | type usageRow struct { 18 | OrgId int64 19 | Count int64 20 | } 21 | 22 | func getUsage(sess *session) (*m.Usage, error) { 23 | usage := m.NewUsage() 24 | 25 | // get endpoints 26 | rows := make([]usageRow, 0) 27 | err := sess.Sql("SELECT org_id, COUNT(*) as count FROM endpoint GROUP BY org_id").Find(&rows) 28 | if err != nil { 29 | return nil, err 30 | } 31 | 32 | for _, row := range rows { 33 | usage.Endpoints.Total += row.Count 34 | usage.Endpoints.PerOrg[strconv.FormatInt(row.OrgId, 10)] = row.Count 35 | } 36 | 37 | rows = rows[:0] 38 | err = sess.Sql("SELECT org_id, COUNT(*) as count FROM probe GROUP BY org_id").Find(&rows) 39 | if err != nil { 40 | return nil, err 41 | } 42 | 43 | for _, row := range rows { 44 | 
usage.Probes.Total += row.Count 45 | usage.Probes.PerOrg[strconv.FormatInt(row.OrgId, 10)] = row.Count 46 | } 47 | 48 | rows = rows[:0] 49 | err = sess.Sql("SELECT org_id, COUNT(*) as count FROM `check` where type='http' GROUP BY org_id").Find(&rows) 50 | if err != nil { 51 | return nil, err 52 | } 53 | 54 | for _, row := range rows { 55 | usage.Checks.Total += row.Count 56 | usage.Checks.HTTP.Total += row.Count 57 | usage.Checks.HTTP.PerOrg[strconv.FormatInt(row.OrgId, 10)] = row.Count 58 | } 59 | 60 | rows = rows[:0] 61 | err = sess.Sql("SELECT org_id, COUNT(*) as count FROM `check` where type='https' GROUP BY org_id").Find(&rows) 62 | if err != nil { 63 | return nil, err 64 | } 65 | 66 | for _, row := range rows { 67 | usage.Checks.Total += row.Count 68 | usage.Checks.HTTPS.Total += row.Count 69 | usage.Checks.HTTPS.PerOrg[strconv.FormatInt(row.OrgId, 10)] = row.Count 70 | } 71 | 72 | rows = rows[:0] 73 | err = sess.Sql("SELECT org_id, COUNT(*) as count FROM `check` where type='ping' GROUP BY org_id").Find(&rows) 74 | if err != nil { 75 | return nil, err 76 | } 77 | 78 | for _, row := range rows { 79 | usage.Checks.Total += row.Count 80 | usage.Checks.PING.Total += row.Count 81 | usage.Checks.PING.PerOrg[strconv.FormatInt(row.OrgId, 10)] = row.Count 82 | } 83 | 84 | rows = rows[:0] 85 | err = sess.Sql("SELECT org_id, COUNT(*) as count FROM `check` where type='dns' GROUP BY org_id").Find(&rows) 86 | if err != nil { 87 | return nil, err 88 | } 89 | 90 | for _, row := range rows { 91 | usage.Checks.Total += row.Count 92 | usage.Checks.DNS.Total += row.Count 93 | usage.Checks.DNS.PerOrg[strconv.FormatInt(row.OrgId, 10)] = row.Count 94 | } 95 | 96 | return usage, nil 97 | } 98 | -------------------------------------------------------------------------------- /pkg/services/sqlstore/usage_test.go: -------------------------------------------------------------------------------- 1 | package sqlstore 2 | 3 | import ( 4 | "fmt" 5 | "testing" 6 | 7 | m "github.com/raintank/worldping-api/pkg/models" 8 | . 
"github.com/smartystreets/goconvey/convey" 9 | ) 10 | 11 | func TestUsageQuery(t *testing.T) { 12 | InitTestDB(t) 13 | err := AddProbe(&m.ProbeDTO{ 14 | Name: "public", 15 | OrgId: 1, 16 | Tags: []string{"test"}, 17 | Public: false, 18 | Latitude: 1.0, 19 | Longitude: 1.0, 20 | Online: false, 21 | Enabled: true, 22 | }) 23 | if err != nil { 24 | t.Fatal(err) 25 | } 26 | for _, i := range []int64{1, 2, 3, 4, 5} { 27 | err := AddProbe(&m.ProbeDTO{ 28 | Name: fmt.Sprintf("test%d", i), 29 | OrgId: i % 2, 30 | Tags: []string{"test"}, 31 | Public: false, 32 | Latitude: 1.0, 33 | Longitude: 1.0, 34 | Online: false, 35 | Enabled: true, 36 | }) 37 | if err != nil { 38 | t.Fatal(err) 39 | } 40 | } 41 | for _, i := range []int64{1, 2, 3, 4, 5, 6} { 42 | e := &m.EndpointDTO{ 43 | Name: fmt.Sprintf("www%d.google.com", i), 44 | OrgId: i % 3, 45 | Checks: []m.Check{ 46 | { 47 | Route: &m.CheckRoute{ 48 | Type: m.RouteByTags, 49 | Config: map[string]interface{}{ 50 | "tags": []string{"test"}, 51 | }, 52 | }, 53 | Frequency: 60, 54 | Type: m.HTTP_CHECK, 55 | Enabled: true, 56 | Settings: map[string]interface{}{ 57 | "host": fmt.Sprintf("www%d.google.com", i), 58 | "path": "/", 59 | "port": 80, 60 | "method": "GET", 61 | "timeout": 5, 62 | }, 63 | HealthSettings: &m.CheckHealthSettings{ 64 | NumProbes: 1, 65 | Steps: 3, 66 | }, 67 | }, 68 | { 69 | Route: &m.CheckRoute{ 70 | Type: m.RouteByIds, 71 | Config: map[string]interface{}{ 72 | "ids": []int64{1}, 73 | }, 74 | }, 75 | Frequency: 60, 76 | Type: m.PING_CHECK, 77 | Enabled: true, 78 | Settings: map[string]interface{}{ 79 | "hostname": fmt.Sprintf("www%d.google.com", i), 80 | "timeout": 5, 81 | }, 82 | HealthSettings: &m.CheckHealthSettings{ 83 | NumProbes: 1, 84 | Steps: 3, 85 | }, 86 | }, 87 | }, 88 | } 89 | err := AddEndpoint(e) 90 | if err != nil { 91 | t.Fatal(err) 92 | } 93 | } 94 | Convey("when getting usage metrics", t, func() { 95 | usage, err := GetUsage() 96 | So(err, ShouldBeNil) 97 | Convey("endpoint data should be accurate", func() { 98 | So(usage.Endpoints.Total, ShouldEqual, 6) 99 | So(len(usage.Endpoints.PerOrg), ShouldEqual, 3) 100 | So(usage.Endpoints.PerOrg["1"], ShouldEqual, 2) 101 | }) 102 | Convey("probe data should be accurate", func() { 103 | So(usage.Probes.Total, ShouldEqual, 6) 104 | So(len(usage.Probes.PerOrg), ShouldEqual, 2) 105 | So(usage.Probes.PerOrg["1"], ShouldEqual, 4) 106 | }) 107 | Convey("checks data should be accurate", func() { 108 | So(usage.Checks.Total, ShouldEqual, 12) 109 | So(usage.Checks.HTTP.Total, ShouldEqual, 6) 110 | So(usage.Checks.HTTPS.Total, ShouldEqual, 0) 111 | So(usage.Checks.PING.Total, ShouldEqual, 6) 112 | So(usage.Checks.DNS.Total, ShouldEqual, 0) 113 | So(usage.Endpoints.PerOrg["1"], ShouldEqual, 2) 114 | So(len(usage.Checks.HTTP.PerOrg), ShouldEqual, 3) 115 | So(len(usage.Checks.HTTPS.PerOrg), ShouldEqual, 0) 116 | So(len(usage.Checks.PING.PerOrg), ShouldEqual, 3) 117 | So(len(usage.Checks.DNS.PerOrg), ShouldEqual, 0) 118 | So(usage.Checks.HTTP.PerOrg["1"], ShouldEqual, 2) 119 | }) 120 | 121 | }) 122 | } 123 | -------------------------------------------------------------------------------- /pkg/setting/alerting.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | import ( 4 | "net/url" 5 | 6 | "github.com/raintank/worldping-api/pkg/log" 7 | ) 8 | 9 | type AlertingSettings struct { 10 | Enabled bool 11 | Topic string 12 | Distributed bool 13 | TickQueueSize int 14 | InternalJobQueueSize int 15 | ExecutorLRUSize int 16 | 
EnableScheduler bool 17 | EnableWorker bool 18 | Executors int 19 | GraphiteUrl string 20 | } 21 | 22 | func readAlertingSettings() { 23 | alerting := Cfg.Section("alerting") 24 | Alerting.Enabled = alerting.Key("enabled").MustBool(false) 25 | Alerting.Distributed = alerting.Key("distributed").MustBool(false) 26 | Alerting.Topic = alerting.Key("topic").MustString("worldping-alerts") 27 | Alerting.TickQueueSize = alerting.Key("tickqueue_size").MustInt(0) 28 | Alerting.InternalJobQueueSize = alerting.Key("internal_jobqueue_size").MustInt(0) 29 | 30 | Alerting.ExecutorLRUSize = alerting.Key("executor_lru_size").MustInt(0) 31 | Alerting.EnableScheduler = alerting.Key("enable_scheduler").MustBool(true) 32 | Alerting.EnableWorker = alerting.Key("enable_worker").MustBool(true) 33 | 34 | Alerting.GraphiteUrl = alerting.Key("graphite_url").MustString("http://localhost:8888/") 35 | if Alerting.GraphiteUrl[len(Alerting.GraphiteUrl)-1] != '/' { 36 | Alerting.GraphiteUrl += "/" 37 | } 38 | // Check if has app suburl. 39 | _, err := url.Parse(Alerting.GraphiteUrl) 40 | if err != nil { 41 | log.Fatal(4, "Invalid graphite_url(%s): %s", Alerting.GraphiteUrl, err) 42 | } 43 | 44 | if Alerting.Distributed && !Kafka.Enabled { 45 | log.Fatal(4, "Kafka must be enabled to use distributed alerting.") 46 | 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /pkg/setting/kafka_settings.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | type KafkaSettings struct { 4 | Enabled bool 5 | Brokers string 6 | Topic string 7 | } 8 | 9 | func readKafkaSettings() { 10 | sec := Cfg.Section("kafka") 11 | Kafka.Enabled = sec.Key("enabled").MustBool(false) 12 | Kafka.Brokers = sec.Key("brokers").MustString("localhost:9092") 13 | Kafka.Topic = sec.Key("topic").MustString("worldping") 14 | } 15 | -------------------------------------------------------------------------------- /pkg/setting/setting_quota.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | import ( 4 | "reflect" 5 | ) 6 | 7 | type OrgQuota struct { 8 | Endpoint int64 `target:"endpoint"` 9 | Probe int64 `target:"probe"` 10 | DownloadLimit int64 `target:"downloadLimit"` 11 | } 12 | 13 | type GlobalQuota struct { 14 | Endpoint int64 `target:"endpoint"` 15 | Probe int64 `target:"probe"` 16 | } 17 | 18 | func (q *OrgQuota) ToMap() map[string]int64 { 19 | return quotaToMap(*q) 20 | } 21 | 22 | func (q *GlobalQuota) ToMap() map[string]int64 { 23 | return quotaToMap(*q) 24 | } 25 | 26 | func quotaToMap(q interface{}) map[string]int64 { 27 | qMap := make(map[string]int64) 28 | typ := reflect.TypeOf(q) 29 | val := reflect.ValueOf(q) 30 | 31 | for i := 0; i < typ.NumField(); i++ { 32 | field := typ.Field(i) 33 | name := field.Tag.Get("target") 34 | if name == "" { 35 | name = field.Name 36 | } 37 | if name == "-" { 38 | continue 39 | } 40 | value := val.Field(i) 41 | qMap[name] = value.Int() 42 | } 43 | return qMap 44 | } 45 | 46 | type QuotaSettings struct { 47 | Enabled bool 48 | Org *OrgQuota 49 | Global *GlobalQuota 50 | } 51 | 52 | func readQuotaSettings() { 53 | // set global defaults. 
54 | quota := Cfg.Section("quota") 55 | Quota.Enabled = quota.Key("enabled").MustBool(false) 56 | 57 | // per ORG Limits 58 | Quota.Org = &OrgQuota{ 59 | Endpoint: quota.Key("org_endpoint").MustInt64(10), 60 | Probe: quota.Key("org_probe").MustInt64(10), 61 | DownloadLimit: quota.Key("org_downloadlimit").MustInt64(100 * 1024), 62 | } 63 | 64 | // Global Limits 65 | Quota.Global = &GlobalQuota{ 66 | Endpoint: quota.Key("global_endpoint").MustInt64(10), 67 | Probe: quota.Key("global_probe").MustInt64(10), 68 | } 69 | 70 | } 71 | -------------------------------------------------------------------------------- /pkg/setting/setting_smtp.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | type SmtpSettings struct { 4 | Enabled bool 5 | Host string 6 | User string 7 | Password string 8 | CertFile string 9 | KeyFile string 10 | FromAddress string 11 | SkipVerify bool 12 | 13 | SendWelcomeEmailOnSignUp bool 14 | TemplatesPattern string 15 | } 16 | 17 | func readSmtpSettings() { 18 | sec := Cfg.Section("smtp") 19 | Smtp.Enabled = sec.Key("enabled").MustBool(false) 20 | Smtp.Host = sec.Key("host").String() 21 | Smtp.User = sec.Key("user").String() 22 | Smtp.Password = sec.Key("password").String() 23 | Smtp.CertFile = sec.Key("cert_file").String() 24 | Smtp.KeyFile = sec.Key("key_file").String() 25 | Smtp.FromAddress = sec.Key("from_address").String() 26 | Smtp.SkipVerify = sec.Key("skip_verify").MustBool(false) 27 | 28 | emails := Cfg.Section("emails") 29 | Smtp.SendWelcomeEmailOnSignUp = emails.Key("welcome_email_on_sign_up").MustBool(false) 30 | Smtp.TemplatesPattern = emails.Key("templates_pattern").MustString("emails/*.html") 31 | } 32 | -------------------------------------------------------------------------------- /pkg/setting/setting_test.go: -------------------------------------------------------------------------------- 1 | package setting 2 | 3 | import ( 4 | "os" 5 | "path/filepath" 6 | "testing" 7 | 8 | . 
"github.com/smartystreets/goconvey/convey" 9 | ) 10 | 11 | func TestLoadingSettings(t *testing.T) { 12 | 13 | Convey("Testing loading settings from ini file", t, func() { 14 | Convey("Given the default ini files", func() { 15 | err := NewConfigContext(&CommandLineArgs{HomePath: "../../"}) 16 | So(err, ShouldBeNil) 17 | 18 | So(AdminKey, ShouldEqual, "changeme") 19 | }) 20 | 21 | Convey("Should be able to override via environment variables", func() { 22 | os.Setenv("WP_SERVER_ADMIN_KEY", "superduper") 23 | NewConfigContext(&CommandLineArgs{HomePath: "../../"}) 24 | 25 | So(AdminKey, ShouldEqual, "superduper") 26 | So(DataPath, ShouldEqual, filepath.Join(HomePath, "data")) 27 | So(LogsPath, ShouldEqual, filepath.Join(DataPath, "log")) 28 | }) 29 | 30 | Convey("Should get property map from command line args array", func() { 31 | props := getCommandLineProperties([]string{"cfg:test=value", "cfg:map.test=1"}) 32 | 33 | So(len(props), ShouldEqual, 2) 34 | So(props["test"], ShouldEqual, "value") 35 | So(props["map.test"], ShouldEqual, "1") 36 | }) 37 | 38 | Convey("Should be able to override via command line", func() { 39 | NewConfigContext(&CommandLineArgs{ 40 | HomePath: "../../", 41 | Args: []string{"cfg:paths.data=/tmp/data", "cfg:paths.logs=/tmp/logs"}, 42 | }) 43 | 44 | So(DataPath, ShouldEqual, "/tmp/data") 45 | So(LogsPath, ShouldEqual, "/tmp/logs") 46 | }) 47 | 48 | Convey("Should be able to override defaults via command line", func() { 49 | NewConfigContext(&CommandLineArgs{ 50 | HomePath: "../../", 51 | Args: []string{ 52 | "cfg:default.server.enable_gzip=true", 53 | }, 54 | Config: filepath.Join(HomePath, "tests/config-files/override.ini"), 55 | }) 56 | 57 | So(EnableGzip, ShouldEqual, true) 58 | }) 59 | 60 | Convey("Defaults can be overriden in specified config file", func() { 61 | NewConfigContext(&CommandLineArgs{ 62 | HomePath: "../../", 63 | Config: filepath.Join(HomePath, "tests/config-files/override.ini"), 64 | Args: []string{"cfg:default.paths.data=/tmp/data"}, 65 | }) 66 | 67 | So(DataPath, ShouldEqual, "/tmp/override") 68 | }) 69 | 70 | Convey("Command line overrides specified config file", func() { 71 | NewConfigContext(&CommandLineArgs{ 72 | HomePath: "../../", 73 | Config: filepath.Join(HomePath, "tests/config-files/override.ini"), 74 | Args: []string{"cfg:paths.data=/tmp/data"}, 75 | }) 76 | 77 | So(DataPath, ShouldEqual, "/tmp/data") 78 | }) 79 | 80 | Convey("Can use environment variables in config values", func() { 81 | os.Setenv("WP_DATA_PATH", "/tmp/env_override") 82 | NewConfigContext(&CommandLineArgs{ 83 | HomePath: "../../", 84 | Args: []string{"cfg:paths.data=${WP_DATA_PATH}"}, 85 | }) 86 | 87 | So(DataPath, ShouldEqual, "/tmp/env_override") 88 | }) 89 | 90 | }) 91 | } 92 | -------------------------------------------------------------------------------- /pkg/util/remoteIp.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | // copied from https://husobee.github.io/golang/ip-address/2015/12/17/remote-ip-go.html 4 | 5 | import ( 6 | "bytes" 7 | "net" 8 | "net/http" 9 | "strings" 10 | ) 11 | 12 | //ipRange - a structure that holds the start and end of a range of ip addresses 13 | type ipRange struct { 14 | start net.IP 15 | end net.IP 16 | } 17 | 18 | // inRange - check to see if a given ip address is within a range given 19 | func inRange(r ipRange, ipAddress net.IP) bool { 20 | // strcmp type byte comparison 21 | if bytes.Compare(ipAddress, r.start) >= 0 && bytes.Compare(ipAddress, r.end) <= 0 { 22 | return 
true 23 | } 24 | return false 25 | } 26 | 27 | var privateRanges = []ipRange{ 28 | { 29 | start: net.ParseIP("10.0.0.0"), 30 | end: net.ParseIP("10.255.255.255"), 31 | }, 32 | { 33 | start: net.ParseIP("100.64.0.0"), 34 | end: net.ParseIP("100.127.255.255"), 35 | }, 36 | { 37 | start: net.ParseIP("172.16.0.0"), 38 | end: net.ParseIP("172.31.255.255"), 39 | }, 40 | { 41 | start: net.ParseIP("192.0.0.0"), 42 | end: net.ParseIP("192.0.0.255"), 43 | }, 44 | { 45 | start: net.ParseIP("192.168.0.0"), 46 | end: net.ParseIP("192.168.255.255"), 47 | }, 48 | { 49 | start: net.ParseIP("198.18.0.0"), 50 | end: net.ParseIP("198.19.255.255"), 51 | }, 52 | } 53 | 54 | // isPrivateSubnet - check to see if this ip is in a private subnet 55 | func isPrivateSubnet(ipAddress net.IP) bool { 56 | // my use case is only concerned with ipv4 atm 57 | if ipCheck := ipAddress.To4(); ipCheck != nil { 58 | // iterate over all our ranges 59 | for _, r := range privateRanges { 60 | // check if this ip is in a private range 61 | if inRange(r, ipAddress) { 62 | return true 63 | } 64 | } 65 | } 66 | return false 67 | } 68 | 69 | func GetRemoteIp(r *http.Request) string { 70 | addr, err := net.ResolveTCPAddr("tcp", r.RemoteAddr) 71 | var ipAddress string 72 | if err == nil { 73 | ipAddress = addr.IP.String() 74 | } 75 | for _, h := range []string{"X-Forwarded-For", "X-Real-Ip"} { 76 | for _, ip := range strings.Split(r.Header.Get(h), ",") { 77 | // header can contain spaces too, strip those out. 78 | realIP := net.ParseIP(strings.TrimSpace(ip)) 79 | if !realIP.IsGlobalUnicast() && !isPrivateSubnet(realIP) { 80 | // bad address, go to next 81 | continue 82 | } else { 83 | ipAddress = ip 84 | goto Done 85 | } 86 | } 87 | } 88 | Done: 89 | return ipAddress 90 | } 91 | -------------------------------------------------------------------------------- /pkg/util/time.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "time" 4 | 5 | // Since returns the number of milliseconds since t. 
6 | func Since(t time.Time) int { 7 | return int(time.Since(t) / time.Millisecond) 8 | } 9 | -------------------------------------------------------------------------------- /public/robots.txt: -------------------------------------------------------------------------------- 1 | User-agent: * 2 | Disallow: / 3 | -------------------------------------------------------------------------------- /scripts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:stable-slim 2 | MAINTAINER Anthony Woods awoods@raintank.io 3 | 4 | RUN apt-get update && apt-get -y install netcat-traditional ca-certificates iputils-ping 5 | 6 | RUN mkdir -p /etc/raintank 7 | COPY docker/worldping-api.ini /etc/raintank/worldping-api.ini 8 | 9 | COPY build/worldping-api /usr/bin/worldping-api 10 | COPY docker/entrypoint.sh /usr/bin/ 11 | RUN mkdir /usr/share/worldping-api 12 | COPY build/public /usr/share/worldping-api/public 13 | COPY build/conf /usr/share/worldping-api/conf 14 | 15 | EXPOSE 80 16 | EXPOSE 443 17 | 18 | RUN mkdir /var/log/worldping-api 19 | RUN mkdir /var/lib/worldping-api 20 | VOLUME /var/log/worldping-api 21 | VOLUME /var/lib/worldping-api 22 | 23 | ENTRYPOINT ["/usr/bin/entrypoint.sh"] 24 | CMD ["--config=/etc/raintank/worldping-api.ini", "--homepath=/usr/share/worldping-api/", "cfg:default.paths.data=/var/lib/worldping-api", "cfg:default.paths.logs=/var/log/worldping-api"] 25 | -------------------------------------------------------------------------------- /scripts/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | # Find the directory we exist within 4 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) 5 | cd ${DIR} 6 | 7 | GITVERSION=`git describe --long` 8 | SOURCEDIR=${DIR}/.. 9 | BUILDDIR=$SOURCEDIR/build 10 | 11 | # Make dir 12 | mkdir -p $BUILDDIR 13 | 14 | # Clean build bin dir 15 | rm -rf $BUILDDIR/* 16 | 17 | # Build binary 18 | cd $SOURCEDIR 19 | go build -ldflags "-X main.commit=$GITVERSION" -o $BUILDDIR/worldping-api -------------------------------------------------------------------------------- /scripts/build_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | # Find the directory we exist within 5 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) 6 | cd ${DIR} 7 | 8 | VERSION=`git describe --always --long` 9 | 10 | mkdir build 11 | cp ../build/worldping-api build/ 12 | cp -a ../public build/ 13 | cp -a ../conf build/ 14 | 15 | docker build -t raintank/worldping-api:$VERSION . 
16 | docker tag raintank/worldping-api:$VERSION raintank/worldping-api:latest 17 | -------------------------------------------------------------------------------- /scripts/deploy_docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | # Find the directory we exist within 5 | DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd ) 6 | cd ${DIR} 7 | 8 | VERSION=`git describe --always --long` 9 | 10 | docker push raintank/worldping-api:$VERSION 11 | docker push raintank/worldping-api:latest 12 | -------------------------------------------------------------------------------- /scripts/docker/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | WAIT_TIMEOUT=${WAIT_TIMEOUT:-10} 4 | 5 | for endpoint in $(echo $WAIT_HOSTS | tr "," "\n"); do 6 | _start_time=$(date +%s) 7 | while true; do 8 | _now=$(date +%s) 9 | _run_time=$(( $_now - $_start_time )) 10 | if [ $_run_time -gt $WAIT_TIMEOUT ]; then 11 | echo "timed out waiting for $endpoint" 12 | break 13 | fi 14 | echo "waiting for $endpoint to become up..." 15 | host=${endpoint%:*} 16 | port=${endpoint#*:} 17 | nc -z $host $port && echo "$endpoint is up!" && break 18 | sleep 1 19 | done 20 | done 21 | 22 | echo executing worldping-api $@ 23 | exec /usr/bin/worldping-api $@ 24 | 25 | -------------------------------------------------------------------------------- /scripts/docker/worldping-api.ini: -------------------------------------------------------------------------------- 1 | ##################### WorldpingApi Configuration Defaults ##################### 2 | # all grafana instances in your environment should have a unique instance_id 3 | instance_id = default 4 | 5 | #################################### Paths #################################### 6 | [paths] 7 | # Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) 8 | # note: must be unique if you run multiple grafana processes on the same machine. 
9 | data = data 10 | 11 | # Directory where grafana can store logs 12 | # 13 | logs = data/log 14 | 15 | #################################### Server #################################### 16 | [server] 17 | # Protocol (http or https) 18 | protocol = http 19 | 20 | # The ip address to bind to, empty will bind to all interfaces 21 | http_addr = 22 | 23 | # The http port to use 24 | http_port = 3000 25 | 26 | # Log web requests 27 | router_logging = false 28 | 29 | root_url = %(protocol)s://worldping-api:%(http_port)s/ 30 | 31 | static_root_path = public 32 | 33 | # enable gzip 34 | enable_gzip = false 35 | 36 | # https certs & key file 37 | cert_file = 38 | cert_key = 39 | 40 | admin_key = changeme 41 | 42 | #################################### Database #################################### 43 | [database] 44 | # Either "mysql", "postgres" or "sqlite3", it's your choice 45 | type = sqlite3 46 | host = 127.0.0.1:3306 47 | name = grafana 48 | user = root 49 | password = 50 | 51 | # For "postgres" only, either "disable", "require" or "verify-full" 52 | ssl_mode = disable 53 | 54 | # For "sqlite3" only, path relative to data_path setting 55 | path = worldping-api.db 56 | 57 | #################################### SMTP / Emailing ########################## 58 | [smtp] 59 | enabled = false 60 | host = localhost:25 61 | user = 62 | password = 63 | cert_file = 64 | key_file = 65 | skip_verify = false 66 | from_address = admin@grafana.localhost 67 | 68 | [emails] 69 | templates_pattern = emails/*.html 70 | 71 | #################################### Logging ########################## 72 | [log] 73 | # Either "console", "file", default is "console" 74 | # Use comma to separate multiple modes, e.g. "console, file" 75 | mode = console, file 76 | 77 | # Buffer length of channel, keep it as it is if you don't know what it is. 78 | buffer_len = 10000 79 | 80 | # Either "Trace", "Debug", "Info", "Warn", "Error", "Critical", default is "Trace" 81 | level = Info 82 | 83 | # For "console" mode only 84 | [log.console] 85 | level = 86 | # Set formatting to "false" to disable color formatting of console logs 87 | formatting = false 88 | 89 | # For "file" mode only 90 | [log.file] 91 | level = 92 | # This enables automated log rotate(switch of following options), default is true 93 | log_rotate = true 94 | 95 | # Max line number of single file, default is 1000000 96 | max_lines = 1000000 97 | 98 | # Max size shift of single file, default is 28 means 1 << 28, 256MB 99 | max_lines_shift = 28 100 | 101 | # Segment log daily, default is true 102 | daily_rotate = true 103 | 104 | # Expired days of log file(delete after max days), default is 7 105 | max_days = 7 106 | 107 | [raintank] 108 | elasticsearch_url = http://elasticsearch:9200/ 109 | tsdb_url = http://tsdb-gw/ 110 | 111 | [telemetry] 112 | stats_enabled = false 113 | stats_addr = localhost:2003 114 | stats_prefix = worldping.worldping-api.stats.default.$hostname 115 | stats_interval = 10 116 | stats_timeout = 10s 117 | stats_buffer_size = 20000 118 | 119 | [kafka] 120 | enabled = false 121 | brokers = kafka:9092 122 | topic = worldping 123 | 124 | [quota] 125 | enabled = false 126 | 127 | # limit number of endpoints per Org. 128 | org_endpoint = 10 129 | 130 | # limit number of collectors per Org. 
131 | org_probe = 10 132 | 133 | # global limit of endpoints 134 | global_endpoint = -1 135 | 136 | # global limit of collectors 137 | global_probe = -1 138 | 139 | #################################### Alerting ########################## 140 | [alerting] 141 | enabled = false 142 | distributed = false 143 | topic = worldping-alerts 144 | tickqueue_size = 20 145 | internal_jobqueue_size = 1000 146 | executor_lru_size = 10000 147 | enable_scheduler = true 148 | graphite_url = http://graphite-api:8080/ -------------------------------------------------------------------------------- /tests/config-files/override.ini: -------------------------------------------------------------------------------- 1 | [paths] 2 | data = /tmp/override 3 | 4 | --------------------------------------------------------------------------------