├── .gitignore ├── .travis.yml ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── bin └── .gitkeep ├── cmd ├── dummy │ ├── data.go │ ├── main.go │ ├── requests.go │ ├── root.go │ └── samples.go ├── item │ ├── main.go │ ├── root.go │ └── runserver.go └── order │ ├── main.go │ ├── root.go │ └── runserver.go ├── deploy ├── docker │ ├── conf │ │ ├── kibana.yml │ │ ├── logstash.conf │ │ ├── logstash.yml │ │ └── prometheus.yml │ ├── dashboards │ │ ├── grafana.json │ │ ├── micro-obs.json │ │ ├── prometheus.json │ │ └── redis.json │ └── docker-compose.yml └── k8s │ ├── 000-monitoring │ ├── 000-namespace.yml │ ├── 100-prometheus │ │ ├── 100-prometheus │ │ │ ├── 010-alertmanager-crd.yml │ │ │ ├── 020-prometheus-crd.yml │ │ │ ├── 030-prometheus-rules-crd.yml │ │ │ ├── 040-servicemonitor-crd.yml │ │ │ ├── 100-prometheus-operator-full.yml │ │ │ ├── 110-prometheus-core-full.yml │ │ │ ├── 120-prometheus-rules.yml │ │ │ └── 130-alertmanager-full.yml │ │ ├── 200-internal │ │ │ ├── etcd-full.yml │ │ │ ├── kube-apiserver-full.yml │ │ │ ├── kube-controller-manager.yml │ │ │ ├── kube-dns-full.yml │ │ │ ├── kube-scheduler-full.yml │ │ │ ├── kube-state-metrics-full.yml │ │ │ ├── kubelet-full.yml │ │ │ └── node-exporter-full.yml │ │ └── 300-grafana │ │ │ ├── 100-grafana-datasources.yml │ │ │ ├── 150-grafana-dashboards.yml │ │ │ └── 200-grafana-full.yml │ ├── 200-elk │ │ ├── 100-elasticsearch-full.yml │ │ ├── 200-logstash-full.yml │ │ ├── 300-filebeat-full.yml │ │ └── 400-kibana-full.yml │ ├── 300-mailhog │ │ └── 100-mailhog-full.yml │ └── 400-jaeger │ │ └── 000-jaeger-dev-full.yml │ └── 100-micro-obs │ ├── 000-namespace.yml │ ├── 100-item-redis.yml │ ├── 150-item-full.yml │ ├── 200-order-redis.yml │ └── 250-order-full.yml ├── go.mod ├── go.sum ├── item ├── handlers.go ├── item.go ├── item_test.go ├── redis.go ├── redis_test.go ├── response.go ├── response_test.go ├── routes.go ├── server.go └── server_test.go ├── order ├── handlers.go ├── order.go ├── order_test.go ├── redis.go ├── 
redis_test.go ├── response.go ├── response_test.go ├── routes.go ├── server.go └── server_test.go ├── static ├── grafana1.PNG ├── grafana2.png ├── jaeger1.PNG ├── jaeger2.PNG ├── kibana1.png ├── micro-obs-kubernetes.png ├── micro-obs-observability.png └── micro-obs-overview.png └── util ├── handlers.go ├── logger.go ├── prometheus.go ├── requestID.go ├── route.go ├── router.go ├── server.go ├── tracer.go ├── util.go └── util_test.go /.gitignore: -------------------------------------------------------------------------------- 1 | /bin/order 2 | /bin/item 3 | /bin/dummy 4 | /deploy/docker/data/* 5 | *.xml 6 | 7 | # Created by https://www.gitignore.io/api/go,macos,python,vagrant,terraform 8 | # Edit at https://www.gitignore.io/?templates=go,macos,python,vagrant,terraform 9 | 10 | ### Go ### 11 | # Binaries for programs and plugins 12 | *.exe 13 | *.exe~ 14 | *.dll 15 | *.so 16 | *.dylib 17 | 18 | # Test binary, build with `go test -c` 19 | *.test 20 | 21 | # Output of the go coverage tool, specifically when used with LiteIDE 22 | *.out 23 | 24 | ### Go Patch ### 25 | /vendor/ 26 | /Godeps/ 27 | 28 | ### macOS ### 29 | # General 30 | .DS_Store 31 | .AppleDouble 32 | .LSOverride 33 | 34 | # Icon must end with two \r 35 | Icon 36 | 37 | # Thumbnails 38 | ._* 39 | 40 | # Files that might appear in the root of a volume 41 | .DocumentRevisions-V100 42 | .fseventsd 43 | .Spotlight-V100 44 | .TemporaryItems 45 | .Trashes 46 | .VolumeIcon.icns 47 | .com.apple.timemachine.donotpresent 48 | 49 | # Directories potentially created on remote AFP share 50 | .AppleDB 51 | .AppleDesktop 52 | Network Trash Folder 53 | Temporary Items 54 | .apdisk 55 | 56 | ### Python ### 57 | # Byte-compiled / optimized / DLL files 58 | __pycache__/ 59 | *.py[cod] 60 | *$py.class 61 | 62 | # C extensions 63 | 64 | # Distribution / packaging 65 | .Python 66 | build/ 67 | develop-eggs/ 68 | dist/ 69 | downloads/ 70 | eggs/ 71 | .eggs/ 72 | lib/ 73 | lib64/ 74 | parts/ 75 | sdist/ 76 | var/ 77 | wheels/ 
78 | share/python-wheels/ 79 | *.egg-info/ 80 | .installed.cfg 81 | *.egg 82 | MANIFEST 83 | 84 | # PyInstaller 85 | # Usually these files are written by a python script from a template 86 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 87 | *.manifest 88 | *.spec 89 | 90 | # Installer logs 91 | pip-log.txt 92 | pip-delete-this-directory.txt 93 | 94 | # Unit test / coverage reports 95 | htmlcov/ 96 | .tox/ 97 | .nox/ 98 | .coverage 99 | .coverage.* 100 | .cache 101 | nosetests.xml 102 | coverage.xml 103 | *.cover 104 | .hypothesis/ 105 | .pytest_cache/ 106 | 107 | # Translations 108 | *.mo 109 | *.pot 110 | 111 | # Django stuff: 112 | *.log 113 | local_settings.py 114 | db.sqlite3 115 | 116 | # Flask stuff: 117 | instance/ 118 | .webassets-cache 119 | 120 | # Scrapy stuff: 121 | .scrapy 122 | 123 | # Sphinx documentation 124 | docs/_build/ 125 | 126 | # PyBuilder 127 | target/ 128 | 129 | # Jupyter Notebook 130 | .ipynb_checkpoints 131 | 132 | # IPython 133 | profile_default/ 134 | ipython_config.py 135 | 136 | # pyenv 137 | .python-version 138 | 139 | # celery beat schedule file 140 | celerybeat-schedule 141 | 142 | # SageMath parsed files 143 | *.sage.py 144 | 145 | # Environments 146 | .env 147 | .venv 148 | env/ 149 | venv/ 150 | ENV/ 151 | env.bak/ 152 | venv.bak/ 153 | 154 | # Spyder project settings 155 | .spyderproject 156 | .spyproject 157 | 158 | # Rope project settings 159 | .ropeproject 160 | 161 | # mkdocs documentation 162 | /site 163 | 164 | # mypy 165 | .mypy_cache/ 166 | .dmypy.json 167 | dmypy.json 168 | 169 | # Pyre type checker 170 | .pyre/ 171 | 172 | ### Python Patch ### 173 | .venv/ 174 | 175 | ### Terraform ### 176 | # Local .terraform directories 177 | **/.terraform/* 178 | 179 | # .tfstate files 180 | *.tfstate 181 | *.tfstate.* 182 | 183 | # Crash log files 184 | crash.log 185 | 186 | # Ignore any .tfvars files that are generated automatically for each Terraform run. 
Most 187 | # .tfvars files are managed as part of configuration and so should be included in 188 | # version control. 189 | # 190 | # example.tfvars 191 | 192 | # Ignore override files as they are usually used to override resources locally and so 193 | # are not checked in 194 | override.tf 195 | override.tf.json 196 | *_override.tf 197 | *_override.tf.json 198 | 199 | # Include override files you do wish to add to version control using negated pattern 200 | # 201 | # !example_override.tf 202 | 203 | ### Vagrant ### 204 | # General 205 | .vagrant/ 206 | 207 | # Log files (if you are creating logs in debug mode, uncomment this) 208 | # *.logs 209 | 210 | ### Vagrant Patch ### 211 | *.box 212 | 213 | # End of https://www.gitignore.io/api/go,macos,python,vagrant,terraform -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | 3 | language: go 4 | go: 5 | - "1.11.x" 6 | 7 | env: 8 | - GO111MODULE=on 9 | 10 | jobs: 11 | include: 12 | - stage: "Test" 13 | name: "Vetting and unit tests" 14 | script: 15 | - go get -u golang.org/x/lint/golint 16 | - make prepare 17 | - make test 18 | - stage: "Build" 19 | name: "Building & verifying binary" 20 | script: 21 | - make build 22 | - ./bin/item --help 23 | - ./bin/order --help 24 | - ./bin/dummy --help 25 | - stage: "Build" 26 | name: "Building & pushing Docker image" 27 | if: (branch = master) OR (tag IS present) 28 | script: 29 | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin 30 | - docker build -t micro-obs . 
31 | - docker images 32 | - docker tag micro-obs $DOCKER_USERNAME/micro-obs:$TRAVIS_BRANCH 33 | - docker push $DOCKER_USERNAME/micro-obs:$TRAVIS_BRANCH 34 | - stage: "Verify" 35 | name: "Pulling & verifying Docker image" 36 | if: (branch = master) OR (tag IS present) 37 | script: 38 | - echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin 39 | - docker pull $DOCKER_USERNAME/micro-obs:$TRAVIS_BRANCH 40 | - docker run $DOCKER_USERNAME/micro-obs:$TRAVIS_BRANCH item --help -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.11 as builder 2 | 3 | ENV GOOS=linux 4 | ENV GOARCH=amd64 5 | ENV CGO_ENABLED=0 6 | 7 | WORKDIR /app 8 | 9 | COPY go.mod go.sum Makefile ./ 10 | RUN go mod download 11 | 12 | COPY util/ util/ 13 | COPY item/ item/ 14 | COPY order/ order/ 15 | COPY cmd/ cmd/ 16 | 17 | RUN make build 18 | FROM alpine:latest 19 | 20 | COPY --from=builder /app/bin/* /usr/local/bin/ -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Alexander Knipping 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all build docker docker-build prepare test build-item build-order build-dummy clean 2 | 3 | TAG=obitech/micro-obs:master 4 | DOCKERFILE=Dockerfile 5 | 6 | all: prepare test build 7 | 8 | build: build-item build-order build-dummy 9 | 10 | prepare: 11 | go mod tidy 12 | go fmt ./... 13 | go vet ./... 14 | golint ./... 15 | 16 | test: 17 | go test ./... 18 | 19 | build-item: 20 | go build -o bin/item ./cmd/item 21 | 22 | build-order: 23 | go build -o bin/order ./cmd/order 24 | 25 | build-dummy: 26 | go build -o bin/dummy ./cmd/dummy 27 | 28 | docker: prepare test docker-build 29 | 30 | docker-build: 31 | docker build -t $(TAG) -f $(DOCKERFILE) . 
32 | 33 | clean: 34 | rm bin/* -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # [micro-obs](https://github.com/obitech/microservices-observability) 2 | 3 | [![Build Status](https://travis-ci.org/obitech/micro-obs.svg?branch=master)](https://travis-ci.org/obitech/micro-obs) [![Go Report Card](https://goreportcard.com/badge/github.com/obitech/micro-obs)](https://goreportcard.com/report/github.com/obitech/micro-obs) [![](https://img.shields.io/docker/automated/obitech/micro-obs.svg)](https://hub.docker.com/r/obitech/micro-obs) 4 | 5 | ## Contents 6 | 7 | - [Overview](#overview) 8 | - [Build it](#build-it) 9 | - [Deploy it](#deploy-it) 10 | - [Docker](#docker) 11 | - [Kubernetes](#kubernetes) 12 | - [Use it](#use-it) 13 | - [Docker](#docker-1) 14 | - [Preparation](#preparation) 15 | - [Jaeger](#jaeger) 16 | - [Prometheus](#prometheus) 17 | - [ELK](#elk) 18 | - [Kubernetes](#kubernetes-1) 19 | - [Preparation](#preparation) 20 | - [Jaeger](#jaeger-1) 21 | - [Prometheus](#prometheus-1) 22 | - [ELK](#elk-1) 23 | - [item](#item) 24 | - [order](#order) 25 | - [util](#util) 26 | - [License](#license) 27 | 28 | ## Overview 29 | 30 | Example of instrumenting a Go microservices application: 31 | 32 | - Structured logging via [zap](https://github.com/uber-go/zap) 33 | - Automatic endpoint monitoring exposing metrics to [Prometheus](https://github.com/prometheus/prometheus) 34 | - Internal & distributed tracing via [Jaeger](https://github.com/jaegertracing/jaeger) 35 | 36 | Deployments can be made via Docker and on Kubernetes.
Additional instrumentation: 37 | 38 | - Monitoring of demo app and internal Kubernetes components via [Prometheus Operator](https://github.com/coreos/prometheus-operator) 39 | - Automatic container log aggregation via [ELK Stack](https://www.elastic.co/elk-stack) 40 | 41 | The example application consists of two services: `item` and `order`. Both services have a separate [redis](https://redis.io) instance as their primary data store. When an order gets placed, `order` contacts `item` to check if the desired items are in stock in the requested quantities: 42 | 43 | ![Application overview](static/micro-obs-overview.png) 44 | 45 | API endpoints of both services are instrumented via Prometheus and export the following metrics: 46 | 47 | - `in_flight_requests`: a gauge of requests currently being served by the wrapped handler 48 | - `api_requests_total`: a counter for requests to the wrapped handler 49 | - `request_duration_seconds`: a histogram for request latencies 50 | - `response_size_bytes`: a histogram for response sizes 51 | 52 | Additionally, all requests are traced via Jaeger: 53 | 54 | ![Observability overview](static/micro-obs-observability.png) 55 | 56 | On Kubernetes, next to the application and node-specific metrics exposed via [node_exporter](https://github.com/prometheus/node_exporter), internal Kubernetes components are monitored as well: 57 | 58 | - `kube-apiserver` 59 | - `kube-dns` 60 | - `etcd` 61 | - `kube-controller-manager` 62 | - `kube-scheduler` 63 | - `kubelet` 64 | - [`kube-state-metrics`](https://github.com/kubernetes/kube-state-metrics) 65 | 66 | Prometheus is fully managed via the Prometheus Operator, allowing flexible deployment of new instances, rules, alertmanagers as well as convenient monitoring of Services via ServiceMonitors. A [Mailhog](https://github.com/mailhog/MailHog) instance is deployed to test the alerting pipeline. 67 | 68 | For the ELK Stack, a Filebeat DaemonSet is watching all container logs on the nodes.
69 | 70 | ![Kubernetes overview, arrows are omitted for visibility](static/micro-obs-kubernetes.png) 71 | 72 | ## Build it 73 | 74 | To build it from source you need [Go 1.11+](https://golang.org/dl/) installed. 75 | 76 | This project uses [Go Modules](https://github.com/golang/go/wiki/Modules) so you can clone the repo to anywhere: 77 | 78 | ```bash 79 | git clone https://github.com/obitech/micro-obs.git 80 | cd micro-obs/ 81 | ``` 82 | 83 | Run `make` to test & build it: 84 | 85 | ```bash 86 | make 87 | # ... 88 | ``` 89 | 90 | Or `make docker` to build a Docker image: 91 | 92 | ```bash 93 | make docker TAG=yourname/yourimage 94 | # ... 95 | ``` 96 | 97 | ## Deploy it 98 | 99 | ### Docker 100 | 101 | Make sure [Docker](https://docs.docker.com/install/) and [Docker Compose](https://docs.docker.com/compose/install/) are installed. 102 | 103 | Deploy the stack: 104 | 105 | ```bash 106 | cd deploy/docker 107 | docker-compose up -d 108 | ``` 109 | 110 | ### Kubernetes 111 | 112 | Make sure [Docker](https://docs.docker.com/install/) and Kubernetes (via [Minikube](https://github.com/kubernetes/minikube) or [microk8s](https://microk8s.io), for example) are installed. 113 | 114 | Deploy the `monitoring` stack first: 115 | 116 | ```bash 117 | kubectl create -f deploy/k8s/000-monitoring --recursive 118 | ``` 119 | 120 | It might take a while for the [Prometheus Operator](https://github.com/coreos/prometheus-operator) CRDs to be created. If the deployment fails, try running the above command again.
121 | 122 | Check if everything is up: 123 | 124 | ``` 125 | $ kubectl get pods -n monitoring 126 | NAME READY STATUS RESTARTS AGE 127 | alertmanager-main-0 2/2 Running 0 1m 128 | elasticsearch-f8dc4db44-l7ddn 2/2 Running 0 1m 129 | filebeat-pnx76 2/2 Running 0 1m 130 | grafana-5bfbfb9665-nf9c9 1/1 Running 0 1m 131 | jaeger-deployment-ff5c4dccc-tfxns 1/1 Running 0 1m 132 | kibana-86c4b5577b-dxtpf 1/1 Running 0 1m 133 | kube-state-metrics-58dcbb8579-hsw78 4/4 Running 0 1m 134 | logstash-74479c885f-5lfcw 1/1 Running 0 1m 135 | mailhog-778bd8484d-8vqfg 1/1 Running 0 1m 136 | node-exporter-swtd6 2/2 Running 0 1m 137 | prometheus-core-0 3/3 Running 0 1m 138 | prometheus-operator-85bb8dc95b-wqpx6 1/1 Running 0 1m 139 | ``` 140 | 141 | Next deploy `micro-obs` stack: 142 | 143 | ```bash 144 | kubectl create -f deploy/k8s/100-micro-obs/ --recursive 145 | ``` 146 | 147 | Check if everything is up: 148 | 149 | ``` 150 | $ kubectl get pods -n micro-obs 151 | NAME READY STATUS RESTARTS AGE 152 | item-54d9d7d554-2thtl 1/1 Running 0 1m 153 | order-6549584969-k2cp8 1/1 Running 0 1m 154 | redis-item-5bcf99c9f7-zdf2r 2/2 Running 0 1m 155 | redis-order-68869c7986-4s7w2 2/2 Running 0 1m 156 | ``` 157 | 158 | ## Use it 159 | 160 | First build the `dummy` CLI application: 161 | 162 | ```make 163 | make build-dummy 164 | ``` 165 | 166 | ### Docker 167 | 168 | Service|Location 169 | ---|--- 170 | item API|http://localhost:8080/ 171 | order API|http://localhost:8090/ 172 | Jaeger Query|http://localhost:16686/ 173 | Prometheus|http://localhost:9090/ 174 | Grafana|http://localhost:3000/ 175 | 176 | #### Preparation 177 | 178 | Create some dummy data: 179 | 180 | ``` 181 | ./bin/dummy data all 182 | ``` 183 | 184 | #### Jaeger 185 | 186 | After creating dummy data, those transactions can be found in the Jaeger Query UI at http://localhost:16686: 187 | 188 | ![Jaeger UI start screen](static/jaeger1.PNG) 189 | ![Jaeger sample trace](static/jaeger2.PNG) 190 | 191 | #### Prometheus 192 | 193 | 
Send some dummy requests: 194 | 195 | ``` 196 | ./bin/dummy requests all / /aksdasd /items /orders /delay /error -n 100 -c 5 197 | ``` 198 | 199 | Start Grafana and upload the `deploy/docker/dashboards/micro-obs.json` dashboard: 200 | 201 | ![Grafana micro-obs dashboard example](static/grafana2.png) 202 | 203 | #### ELK 204 | 205 | After sending some requests, logs can be queried via Kibana running on http://localhost:5601: 206 | 207 | ![Kibana micro-obs error query](static/kibana1.png) 208 | 209 | ### Kubernetes 210 | 211 | Service|Location|Internal FQDN 212 | ---|---|--- 213 | item API|http://localhost:30808|http://item.micro-obs.service.cluster.local:8080 214 | item Redis|.|redis-item.micro-obs.service.cluster.local:3879 215 | order API|http://localhost:30809|http://order.micro-obs.service.cluster.local:8090 216 | order Redis|.|http://redis-order.micro-obs.service.cluster.local:3879 217 | Jaeger Query|http://localhost:30686|. 218 | Prometheus|http://localhost:30900|http://prometheus.monitoring.svc.cluster.local:9090 219 | Grafana|http://localhost:30300|. 220 | ElasticSearch|.|http://elasticsearch.monitoring.svc.cluster.local:9200 221 | Kibana|http://localhost:30601|. 222 | Mailhog|http://localhost:32025|mailhog.svc.cluster.local:1025 223 | 224 | #### Preparation 225 | 226 | Create some dummy data: 227 | 228 | ``` 229 | ./bin/dummy data all -i http://localhost:30808 -o http://localhost:30809 230 | ``` 231 | 232 | #### Jaeger 233 | 234 | After creating the dummy data, those transactions can be found in the Jaeger Query UI at http://localhost:30686: 235 | 236 | ![Jaeger UI start screen](static/jaeger1.PNG) 237 | ![Jaeger sample trace](static/jaeger2.PNG) 238 | 239 | #### Prometheus 240 | 241 | Both the Kubernetes' internal components as well as the `micro-obs` application (TODO) is being monitored by Prometheus. 
Some pre-installed Dashboards can be found in Grafana via http://localhost:30300 (default login is admin:admin): 242 | 243 | ![Grafana Kubernetes dashboard](static/grafana1.PNG) 244 | 245 | #### ELK 246 | 247 | TODO 248 | 249 | ## [item](https://godoc.org/github.com/obitech/micro-obs/item) 250 | [![godoc reference for item](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/obitech/micro-obs/item) 251 | 252 | Method|Endpoint|Comment 253 | ---|---|--- 254 | GET|`/healthz`|Returns `OK` as string 255 | GET|`/ping`|Returns a standard API response 256 | GET|`/items`|Returns all items 257 | GET|`/items/{id:[a-zA-Z0-9]+}`|Returns a single item by ID 258 | DELETE|`/items/{id:[a-zA-Z0-9]+}`|Deletes a single item by ID 259 | POST|`/items`|Sends a JSON body to create a new item. Will not update if item already exists 260 | PUT|`/items`|Sends a JSON body to create or update an item. Will update existing item 261 | 262 | Request: 263 | 264 | ```json 265 | POST http://localhost:8080/items 266 | [ 267 | { 268 | "name": "banana", 269 | "desc": "a yello fruit", 270 | "qty": 5 271 | }, 272 | { 273 | "name": "water", 274 | "desc": "bottles of water", 275 | "qty": 10 276 | }, 277 | { 278 | "name": "apple", 279 | "desc": "delicious", 280 | "qty": 15 281 | } 282 | ] 283 | ``` 284 | 285 | Response: 286 | 287 | ```json 288 | { 289 | "status": 201, 290 | "message": "items BxYs9DiGaIMXuakIxX, GWkUo1hE3u7vTxR, JAQU27CQrTkQCNr, created", 291 | "count": 3, 292 | "data": [ 293 | { 294 | "name": "banana", 295 | "id": "BxYs9DiGaIMXuakIxX", 296 | "desc": "a yello fruit", 297 | "qty": 5 298 | }, 299 | { 300 | "name": "water", 301 | "id": "GWkUo1hE3u7vTxR", 302 | "desc": "bottles of water", 303 | "qty": 10 304 | }, 305 | { 306 | "name": "apple", 307 | "id": "JAQU27CQrTkQCNr", 308 | "desc": "delicious", 309 | "qty": 15 310 | } 311 | ] 312 | } 313 | ``` 314 | 315 | ## [order](https://godoc.org/github.com/obitech/micro-obs/order) 316 | [![godoc reference for 
](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/obitech/micro-obs/order) 317 | 318 | Method|Endpoint|Comment 319 | ---|---|--- 320 | GET|`/healthz`|Returns `OK` as string 321 | GET|`/ping`|Returns a standard API response 322 | GET|`/orders`|Returns all orders 323 | GET|`/orders/{id:[0-9]+}`|Returns a single order by ID 324 | DELETE|`/orders/{id:[a-zA-Z0-9]+}`|Deletes a single order by ID 325 | POST|`/orders/create`|Creates a new order. Will query the `item` service first to check if passed items exist and are present in the wished quantity 326 | 327 | Request: 328 | 329 | ```json 330 | POST http://localhost:8090/orders/create 331 | { 332 | "items": [ 333 | { 334 | "id": "BxYs9DiGaIMXuakIxX", 335 | "qty": 2 336 | }, 337 | { 338 | "id": "GWkUo1hE3u7vTxR", 339 | "qty": 8 340 | } 341 | ] 342 | } 343 | ``` 344 | 345 | Response: 346 | 347 | ```json 348 | { 349 | "status": 201, 350 | "message": "order 1 created", 351 | "count": 1, 352 | "data": [ 353 | { 354 | "id": 1, 355 | "items": [ 356 | { 357 | "id": "BxYs9DiGaIMXuakIxX", 358 | "qty": 2 359 | }, 360 | { 361 | "id": "GWkUo1hE3u7vTxR", 362 | "qty": 8 363 | } 364 | ] 365 | } 366 | ] 367 | } 368 | ``` 369 | 370 | ## [util](https://godoc.org/github.com/obitech/micro-obs/util) 371 | [![godoc reference for util](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/obitech/micro-obs/util) 372 | 373 | ## License 374 | 375 | [MIT](https://spdx.org/licenses/MIT.html) -------------------------------------------------------------------------------- /bin/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/bin/.gitkeep -------------------------------------------------------------------------------- /cmd/dummy/data.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 
| "bytes" 5 | "fmt" 6 | "io/ioutil" 7 | "net/http" 8 | "os" 9 | "time" 10 | 11 | "github.com/pkg/errors" 12 | "github.com/spf13/cobra" 13 | ) 14 | 15 | var ( 16 | dataCmd = &cobra.Command{ 17 | Use: "data [target]", 18 | Short: "populate data to a single or all services", 19 | Long: fmt.Sprintf("Sample data for item service: %s\nSample data for order service: %s", itemJSON, orderJSON), 20 | Args: func(cmd *cobra.Command, args []string) error { 21 | if len(args) != 1 { 22 | return errors.New("requires one argument: [target]") 23 | } 24 | 25 | if _, prs := allowedTargets[args[0]]; !prs { 26 | return errors.New(fmt.Sprintf("invalid argument, must be of [\"item\", \"order\", \"all\"]")) 27 | } 28 | 29 | return nil 30 | }, 31 | Run: sendData, 32 | } 33 | numDataReq = 1 34 | concDataReq = 1 35 | waitData = 0 36 | ) 37 | 38 | func init() { 39 | dataCmd.Flags().IntVarP(&numDataReq, "number", "n", numDataReq, "number of times data should be sent") 40 | dataCmd.Flags().IntVarP(&concDataReq, "concurrency", "c", concDataReq, "number of requests to be sent concurrently") 41 | dataCmd.Flags().IntVarP(&waitData, "wait", "w", waitData, "time to wait between requests in ms") 42 | } 43 | 44 | type dataRequest struct { 45 | method string 46 | url string 47 | data []string 48 | } 49 | 50 | func sendData(cmd *cobra.Command, args []string) { 51 | switch args[0] { 52 | case "item": 53 | dr := dataRequest{ 54 | method: "PUT", 55 | url: fmt.Sprintf("%s/items", itemAddr), 56 | data: itemJSON, 57 | } 58 | distributeWorkData(dr) 59 | 60 | case "order": 61 | dr := dataRequest{ 62 | method: "POST", 63 | url: fmt.Sprintf("%s/orders/create", orderAddr), 64 | data: orderJSON, 65 | } 66 | distributeWorkData(dr) 67 | 68 | default: 69 | dr := dataRequest{ 70 | method: "PUT", 71 | url: fmt.Sprintf("%s/items", itemAddr), 72 | data: itemJSON, 73 | } 74 | distributeWorkData(dr) 75 | 76 | dr = dataRequest{ 77 | method: "POST", 78 | url: fmt.Sprintf("%s/orders/create", orderAddr), 79 | data: orderJSON, 80 
| } 81 | distributeWorkData(dr) 82 | } 83 | } 84 | 85 | func distributeWorkData(dr dataRequest) { 86 | jobQ := make(chan dataRequest, numDataReq) 87 | jobsDone := make(chan bool, numDataReq) 88 | 89 | for w := 0; w < concDataReq; w++ { 90 | go workerData(w, jobQ, jobsDone) 91 | } 92 | 93 | for j := 0; j < numDataReq; j++ { 94 | jobQ <- dr 95 | } 96 | 97 | close(jobQ) 98 | 99 | for j := 0; j < numDataReq; j++ { 100 | <-jobsDone 101 | } 102 | } 103 | 104 | func workerData(id int, jobs <-chan dataRequest, jobsDone chan<- bool) { 105 | for dr := range jobs { 106 | start := time.Now() 107 | for _, js := range dr.data { 108 | buf := bytes.NewBuffer([]byte(js)) 109 | req, err := http.NewRequest(dr.method, dr.url, buf) 110 | if err != nil { 111 | errExit(err) 112 | } 113 | req.Header.Add("Content-Type", "application/JSON; charset=UTF-8") 114 | 115 | vl(fmt.Sprintf("Worker %d -> %s %s\n%s\n", id, dr.method, dr.url, dr.data)) 116 | 117 | c := &http.Client{} 118 | res, err := c.Do(req) 119 | errExit(err) 120 | 121 | b, err := ioutil.ReadAll(res.Body) 122 | errExit(err) 123 | defer res.Body.Close() 124 | 125 | switch res.StatusCode { 126 | case http.StatusOK: 127 | fallthrough 128 | case http.StatusCreated: 129 | vl(fmt.Sprintf("%s", string(b))) 130 | default: 131 | fmt.Printf("Unexpected status code %d: %s", res.StatusCode, b) 132 | os.Exit(1) 133 | } 134 | } 135 | time.Sleep(time.Duration(waitData) * time.Millisecond) 136 | vl(fmt.Sprintf("Workder %d <- Work to %s completed after %v\n", id, dr.url, time.Since(start))) 137 | jobsDone <- true 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /cmd/dummy/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func main() { 4 | Execute() 5 | } 6 | -------------------------------------------------------------------------------- /cmd/dummy/requests.go: 
-------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "math/rand" 6 | "net/http" 7 | "time" 8 | 9 | "github.com/pkg/errors" 10 | "github.com/spf13/cobra" 11 | ) 12 | 13 | var ( 14 | requestsCmd = &cobra.Command{ 15 | Use: "requests [target] [handlers]", 16 | Short: "sends requests to handlers of a single or all services (targets)", 17 | Args: func(cmd *cobra.Command, args []string) error { 18 | if len(args) < 2 { 19 | return errors.New("requires two arguments: [target] [handlers]") 20 | } 21 | 22 | if _, prs := allowedTargets[args[0]]; !prs { 23 | // Format allowed targets 24 | targets := make([]string, len(allowedTargets)) 25 | i := 0 26 | for k := range allowedTargets { 27 | targets[i] = k 28 | i++ 29 | } 30 | return errors.New(fmt.Sprintf("invalid argument, must be of %v", targets)) 31 | } 32 | 33 | return nil 34 | }, 35 | Run: sendRequest, 36 | } 37 | numReq = 15 38 | concReq = 2 39 | wait = 500 40 | ) 41 | 42 | func init() { 43 | requestsCmd.Flags().IntVarP(&numReq, "number", "n", numReq, "number of requests to sent") 44 | requestsCmd.Flags().IntVarP(&concReq, "concurrency", "c", concReq, "number of requests to be sent concurrently") 45 | requestsCmd.Flags().IntVarP(&wait, "wait", "w", wait, "time to wait between requests in ms") 46 | } 47 | 48 | // buildURLS returns a shuffled slice of strings where the length is equal to the total number of requests. 49 | // Each string is an URL and the slice represents the total amount of work to be handled by workers. 
50 | func buildURLs(addr string, handlers ...string) []string { 51 | urls := []string{} 52 | 53 | // Each handler is a single request to be made and gets added to the output slice 54 | for _, handler := range handlers { 55 | for j := 0; j < numReq; j++ { 56 | urls = append(urls, fmt.Sprintf("%s%s", addr, handler)) 57 | } 58 | } 59 | 60 | // Shuffle slice so requests will be evenly distributed 61 | rand.Shuffle(len(urls), func(i, j int) { 62 | urls[i], urls[j] = urls[j], urls[i] 63 | }) 64 | 65 | return urls 66 | } 67 | 68 | // sendRequest creates and distributes work according to passed arguments. 69 | func sendRequest(cmd *cobra.Command, args []string) { 70 | switch args[0] { 71 | case "item": 72 | urls := buildURLs(itemAddr, args[1:]...) 73 | done := distributeRequests(urls...) 74 | for i := 0; i < len(urls); i++ { 75 | <-done 76 | } 77 | 78 | case "order": 79 | urls := buildURLs(orderAddr, args[1:]...) 80 | done := distributeRequests(urls...) 81 | for i := 0; i < len(urls); i++ { 82 | <-done 83 | } 84 | 85 | case "all": 86 | urls := []string{} 87 | urls = append(urls, buildURLs(itemAddr, args[1:]...)...) 88 | urls = append(urls, buildURLs(orderAddr, args[1:]...)...) 89 | rand.Shuffle(len(urls), func(i, j int) { 90 | urls[i], urls[j] = urls[j], urls[i] 91 | }) 92 | 93 | done := distributeRequests(urls...) 94 | for j := 0; j < len(urls); j++ { 95 | <-done 96 | } 97 | } 98 | } 99 | 100 | // distributeRequests takes URLs as work, spwans workers according to flags and dsitributes work to them. 
101 | func distributeRequests(urls ...string) <-chan bool { 102 | numJobs := len(urls) 103 | jobQ := make(chan string, numJobs) 104 | jobsDone := make(chan bool, numJobs) 105 | 106 | // Spawn -c workers 107 | for w := 0; w < concReq; w++ { 108 | go workerRequest(w, jobQ, jobsDone) 109 | } 110 | 111 | // Send requests to jobQ channel 112 | for _, url := range urls { 113 | jobQ <- url 114 | } 115 | 116 | // Work assigned, close jobQ channel 117 | close(jobQ) 118 | 119 | return jobsDone 120 | } 121 | 122 | // workerRequest listenes on a passed channel for work, processes it, waits according to flags and then 123 | // confirms job completion on a different chanel. 124 | func workerRequest(id int, jobs <-chan string, jobsDone chan<- bool) { 125 | // Grab jobs from channel and perform request 126 | for url := range jobs { 127 | start := time.Now() 128 | vl(fmt.Sprintf("Worker %d -> %s\n", id, url)) 129 | 130 | _, err := http.Get(url) 131 | errExit(err) 132 | time.Sleep(time.Duration(wait) * time.Millisecond) 133 | 134 | vl(fmt.Sprintf("Worker %d <- Work to %s completed after %v\n", id, url, time.Since(start))) 135 | jobsDone <- true 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /cmd/dummy/root.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | var ( 11 | rootCmd = &cobra.Command{ 12 | Use: "dummy", 13 | Short: "populate item or order service with dummy data", 14 | Long: "will contact either the item or order service via HTTP to populate data in the underlying datastores or send dummy requests", 15 | } 16 | allowedTargets = map[string]bool{"item": true, "order": true, "all": true} 17 | itemAddr = "http://localhost:8080" 18 | orderAddr = "http://localhost:8090" 19 | verbose bool 20 | ) 21 | 22 | func init() { 23 | rootCmd.AddCommand(dataCmd) 24 | rootCmd.AddCommand(requestsCmd) 25 | 
26 | rootCmd.PersistentFlags().StringVarP(&itemAddr, "item-addr", "i", itemAddr, "address of the item service") 27 | rootCmd.PersistentFlags().StringVarP(&orderAddr, "order-addr", "o", orderAddr, "address of the order service") 28 | rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "verbose output") 29 | } 30 | 31 | // Execute runs the cobra rootCmd. 32 | func Execute() { 33 | if err := rootCmd.Execute(); err != nil { 34 | errExit(err) 35 | } 36 | } 37 | 38 | // Prints error and exits 39 | func errExit(err error) { 40 | if err != nil { 41 | fmt.Println(err) 42 | os.Exit(1) 43 | } 44 | } 45 | 46 | // Verbose logging 47 | func vl(str string) { 48 | if verbose { 49 | fmt.Print(str) 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /cmd/dummy/samples.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | var itemJSON = []string{`[ 4 | { 5 | "name": "banana", 6 | "desc": "a yellow fruit", 7 | "qty": 5 8 | }, 9 | { 10 | "name": "water", 11 | "desc": "bottles of water", 12 | "qty": 10 13 | }, 14 | { 15 | "name": "apple", 16 | "desc": "delicious", 17 | "qty": 15 18 | } 19 | ]`, 20 | } 21 | 22 | var orderJSON = []string{ 23 | `{ 24 | "items": [ 25 | { 26 | "id": "BxYs9DiGaIMXuakIxX", 27 | "qty": 2 28 | }, 29 | { 30 | "id": "GWkUo1hE3u7vTxR", 31 | "qty": 8 32 | } 33 | ] 34 | }`, 35 | `{ 36 | "items": [ 37 | { 38 | "id": "BxYs9DiGaIMXuakIxX", 39 | "qty": 4 40 | }, 41 | { 42 | "id": "GWkUo1hE3u7vTxR", 43 | "qty": 5 44 | }, 45 | { 46 | "id": "JAQU27CQrTkQCNr", 47 | "qty": 15 48 | } 49 | ] 50 | }`, 51 | `{ 52 | "items": [ 53 | { 54 | "id": "JAQU27CQrTkQCNr", 55 | "qty": 3 56 | } 57 | ] 58 | }`, 59 | } 60 | -------------------------------------------------------------------------------- /cmd/item/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func main() { 4 | Execute() 5 | } 6 | 
-------------------------------------------------------------------------------- /cmd/item/root.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | // Default values to be used to initialize the item server 11 | var ( 12 | address = ":8080" 13 | endpoint = "127.0.0.1:8081" 14 | logLevel = "info" 15 | redis = "redis://127.0.0.1:6379/0" 16 | rootCmd = &cobra.Command{ 17 | Use: "item", 18 | Short: "Simple HTTP item serivce", 19 | Run: runServer, 20 | } 21 | ) 22 | 23 | // Execute runs the cobra rootCommand. 24 | func Execute() { 25 | if err := rootCmd.Execute(); err != nil { 26 | fmt.Println(err) 27 | os.Exit(1) 28 | } 29 | } 30 | 31 | func init() { 32 | f := rootCmd.Flags() 33 | f.StringVarP(&address, "address", "a", address, "listening address") 34 | f.StringVarP(&endpoint, "endpoint", "e", endpoint, "endpoint for other services to reach item service") 35 | f.StringVarP(&logLevel, "log-level", "l", logLevel, "log level (debug, info, warn, error), empty or invalid values will fallback to default") 36 | f.StringVarP(&redis, "redis-address", "r", redis, "redis address to connect to") 37 | } 38 | -------------------------------------------------------------------------------- /cmd/item/runserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/obitech/micro-obs/item" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func runServer(cmd *cobra.Command, args []string) { 12 | s, err := item.NewServer( 13 | item.SetServerAddress(address), 14 | item.SetServerEndpoint(endpoint), 15 | item.SetLogLevel(logLevel), 16 | item.SetRedisAddress(redis), 17 | ) 18 | if err != nil { 19 | fmt.Fprintln(os.Stderr, err) 20 | os.Exit(2) 21 | } 22 | s.InitPromReg() 23 | 24 | if err := s.Run(); err != nil { 25 | fmt.Fprintln(os.Stderr, err) 26 | os.Exit(3) 27 | 
} 28 | } 29 | -------------------------------------------------------------------------------- /cmd/order/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | func main() { 4 | Execute() 5 | } 6 | -------------------------------------------------------------------------------- /cmd/order/root.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/spf13/cobra" 8 | ) 9 | 10 | // Default values to be used to initialize the order service 11 | var ( 12 | address = ":8090" 13 | endpoint = "127.0.0.1:9091" 14 | logLevel = "info" 15 | redis = "redis://127.0.0.1:6380/0" 16 | item = "http://127.0.0.1:8080" 17 | rootCmd = &cobra.Command{ 18 | Use: "order", 19 | Short: "Simple HTTP order serivce", 20 | Run: runServer, 21 | } 22 | ) 23 | 24 | // Execute runs the cobra rootCommand. 25 | func Execute() { 26 | if err := rootCmd.Execute(); err != nil { 27 | fmt.Println(err) 28 | os.Exit(1) 29 | } 30 | } 31 | 32 | func init() { 33 | f := rootCmd.Flags() 34 | f.StringVarP(&address, "address", "a", address, "listening address") 35 | f.StringVarP(&endpoint, "endpoint", "e", endpoint, "endpoint for other services to reach order service") 36 | f.StringVarP(&logLevel, "log-level", "l", logLevel, "log level (debug, info, warn, error), empty or invalid values will fallback to default") 37 | f.StringVarP(&redis, "redis-address", "r", redis, "redis address to connect to") 38 | f.StringVarP(&item, "item-address", "i", item, "item service address to query") 39 | } 40 | -------------------------------------------------------------------------------- /cmd/order/runserver.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "github.com/obitech/micro-obs/order" 8 | "github.com/spf13/cobra" 9 | ) 10 | 11 | func runServer(cmd *cobra.Command, args 
[]string) { 12 | s, err := order.NewServer( 13 | order.SetServerAddress(address), 14 | order.SetServerEndpoint(endpoint), 15 | order.SetLogLevel(logLevel), 16 | order.SetRedisAddress(redis), 17 | order.SetItemServiceAddress(item), 18 | ) 19 | if err != nil { 20 | fmt.Fprintln(os.Stderr, err) 21 | os.Exit(2) 22 | } 23 | 24 | s.InitPromReg() 25 | 26 | if err := s.Run(); err != nil { 27 | fmt.Fprintln(os.Stderr, err) 28 | os.Exit(3) 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /deploy/docker/conf/kibana.yml: -------------------------------------------------------------------------------- 1 | server.name: kibana 2 | server.host: "0" 3 | elasticsearch.url: elasticsearch:9200 4 | xpack.security.enabled: false 5 | xpack.monitoring.enabled: false 6 | xpack.graph.enabled: false 7 | xpack.reporting.enabled: false -------------------------------------------------------------------------------- /deploy/docker/conf/logstash.conf: -------------------------------------------------------------------------------- 1 | input { 2 | syslog { 3 | port => 5000 4 | type => "docker" 5 | } 6 | } 7 | 8 | filter { 9 | grok { 10 | pattern_definitions => { 11 | "SYSLOGHEADER" => "%{SYSLOG5424PRI}%{NONNEGINT:ver} +(?:%{TIMESTAMP_ISO8601:ts}|-) +(?:%{HOSTNAME:service}|-) +(?:%{NOTSPACE:containerName}|-) +(?:%{NOTSPACE:proc}|-) +(?:%{WORD:msgid}|-) +(?:%{SYSLOG5424SD:sd}|-|)" 12 | } 13 | match => { "message" => "%{SYSLOGHEADER} %{GREEDYDATA:msg}" } 14 | } 15 | syslog_pri { } 16 | date { 17 | match => [ "syslog_timestamp", "MMM d HH:mm:ss", "MMM dd HH:mm:ss" ] 18 | } 19 | mutate { 20 | remove_field => ["message", "host", "type", "priority", "service", "ts", "severity", "facility", "facility_label", "severity_label", "syslog5424_pri", "proc", "syslog_severity_code", "syslog_facility_code", "syslog_facility", "syslog_severity", "syslog_hostname", "syslog_message", "syslog_timestamp", "ver" ] 21 | } 22 | mutate { 23 | remove_tag => [ 
"_grokparsefailure_sysloginput" ] 24 | } 25 | 26 | json { 27 | source => "msg" 28 | target => "log" 29 | } 30 | } 31 | 32 | output { 33 | elasticsearch { 34 | hosts => "elasticsearch:9200" 35 | } 36 | } -------------------------------------------------------------------------------- /deploy/docker/conf/logstash.yml: -------------------------------------------------------------------------------- 1 | http.host: "0.0.0.0" 2 | xpack.monitoring.enabled: false -------------------------------------------------------------------------------- /deploy/docker/conf/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 15s 3 | evaluation_interval: 30s 4 | 5 | scrape_configs: 6 | - job_name: 'item' 7 | static_configs: 8 | - targets: ['item:8080'] 9 | 10 | - job_name: 'order' 11 | static_configs: 12 | - targets: ['order:8090'] 13 | 14 | - job_name: 'redis_exporter' 15 | static_configs: 16 | - targets: ['redis-exporter:9121'] 17 | 18 | - job_name: 'jaeger' 19 | static_configs: 20 | - targets: ['jaeger:16686'] 21 | 22 | - job_name: 'prometheus' 23 | static_configs: 24 | - targets: ['localhost:9090'] 25 | 26 | - job_name: 'grafana' 27 | static_configs: 28 | - targets: ['grafana:3000'] 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /deploy/docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.3" 2 | services: 3 | jaeger: 4 | container_name: micro-obs-jaeger 5 | image: jaegertracing/all-in-one:1.8 6 | ports: 7 | - "6831:6831/udp" 8 | - "6832:6832/udp" 9 | - "16686:16686" 10 | restart: always 11 | 12 | prometheus: 13 | container_name: micro-obs-prometheus 14 | image: prom/prometheus:v2.5.0 15 | ports: 16 | - "9090:9090" 17 | volumes: 18 | - "./conf/prometheus.yml:/etc/prometheus/prometheus.yml:ro" 19 | restart: always 20 | 21 | grafana: 22 | container_name: micro-obs-grafana 23 | image: 
grafana/grafana:5.4.2 24 | ports: 25 | - "3000:3000" 26 | volumes: 27 | - "./data/grafana:/var/lib/grafana" 28 | restart: always 29 | 30 | elasticsearch: 31 | container_name: micro-obs-elasticsearch 32 | image: elastic/elasticsearch:6.5.4 33 | restart: always 34 | environment: 35 | ES_JAVA_OPTS: "-Xmx256m -Xms256m" 36 | xpack.security.enabled: "false" 37 | xpack.watcher.enabled: "false" 38 | xpack.graph.enabled: "false" 39 | xpack.monitoring.enabled: "false" 40 | 41 | logstash: 42 | container_name: micro-obs-logstash 43 | image: elastic/logstash:6.5.4 44 | ports: 45 | - "5000:5000" 46 | volumes: 47 | - "./conf/logstash.yml:/usr/share/logstash/config/logstash.yml:ro" 48 | - "./conf/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro" 49 | environment: 50 | LS_JAVA_OPTS: "-Xmx256m -Xms256m" 51 | depends_on: 52 | - elasticsearch 53 | restart: always 54 | 55 | kibana: 56 | container_name: micro-obs-kibana 57 | image: elastic/kibana:6.5.4 58 | ports: 59 | - "5601:5601" 60 | volumes: 61 | - "./conf/kibana.yml:/usr/share/kibana/kibana.yml:ro" 62 | depends_on: 63 | - elasticsearch 64 | restart: always 65 | 66 | redis-item: 67 | container_name: micro-obs-redis 68 | image: redis:5 69 | ports: 70 | - "6379:6379" 71 | restart: always 72 | 73 | item: 74 | container_name: micro-obs-item 75 | image: obitech/micro-obs:master 76 | command: item -r redis://redis-item:6379/0 77 | ports: 78 | - "8080:8080" 79 | environment: 80 | - "JAEGER_AGENT_HOST=jaeger" 81 | - "JAEGER_AGENT_PORT=6831" 82 | logging: 83 | driver: syslog 84 | options: 85 | syslog-address: "tcp://127.0.0.1:5000" 86 | syslog-facility: daemon 87 | syslog-format: rfc5424 88 | depends_on: 89 | - redis-item 90 | - logstash 91 | restart: always 92 | 93 | redis-order: 94 | container_name: micro-obs-redis-order 95 | image: redis:5 96 | command: redis-server --port 6380 97 | ports: 98 | - "6380:6380" 99 | restart: always 100 | 101 | order: 102 | container_name: micro-obs-order 103 | image: obitech/micro-obs:master 104 
| command: order -r redis://redis-order:6380/0 -i http://item:8080 105 | ports: 106 | - "8090:8090" 107 | environment: 108 | - "JAEGER_AGENT_HOST=jaeger" 109 | - "JAEGER_AGENT_PORT=6831" 110 | logging: 111 | driver: syslog 112 | options: 113 | syslog-address: "tcp://127.0.0.1:5000" 114 | syslog-facility: daemon 115 | syslog-format: rfc5424 116 | depends_on: 117 | - redis-order 118 | - logstash 119 | restart: always 120 | 121 | redis-exporter: 122 | container_name: micro-obs-redis-exporter 123 | image: oliver006/redis_exporter:v0.24.0 124 | command: --redis.addr=redis://redis-order:6380/0,redis://redis-item:6379/0 125 | depends_on: 126 | - redis-item 127 | - redis-order 128 | restart: always 129 | 130 | 131 | -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/000-namespace.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Monitoring namespace ------------------- # 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/100-prometheus/100-prometheus-operator-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Prometheus Operator ServiceAccount ------------------- # 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: prometheus-operator 6 | namespace: monitoring 7 | --- 8 | 9 | # ------------------- Prometheus Operator ClusterRole ------------------- # 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRole 12 | metadata: 13 | name: prometheus-operator 14 | namespace: monitoring 15 | rules: 16 | - apiGroups: 17 | - apiextensions.k8s.io 18 | resources: 19 | - customresourcedefinitions 20 | verbs: 21 | - '*' 22 | - apiGroups: 23 | - monitoring.coreos.com 24 | resources: 25 | - alertmanagers 26 | - prometheuses 27 | - 
prometheuses/finalizers 28 | - alertmanagers/finalizers 29 | - servicemonitors 30 | - prometheusrules 31 | verbs: 32 | - '*' 33 | - apiGroups: 34 | - apps 35 | resources: 36 | - statefulsets 37 | verbs: 38 | - '*' 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - configmaps 43 | - secrets 44 | verbs: 45 | - '*' 46 | - apiGroups: 47 | - "" 48 | resources: 49 | - pods 50 | verbs: 51 | - list 52 | - delete 53 | - apiGroups: 54 | - "" 55 | resources: 56 | - services 57 | - endpoints 58 | verbs: 59 | - get 60 | - create 61 | - update 62 | - apiGroups: 63 | - "" 64 | resources: 65 | - nodes 66 | verbs: 67 | - list 68 | - watch 69 | - apiGroups: 70 | - "" 71 | resources: 72 | - namespaces 73 | verbs: 74 | - get 75 | - list 76 | - watch 77 | --- 78 | 79 | # ------------------- Prometheus Operator ClusterRoleBinding ------------------- # 80 | apiVersion: rbac.authorization.k8s.io/v1 81 | kind: ClusterRoleBinding 82 | metadata: 83 | name: prometheus-operator 84 | roleRef: 85 | apiGroup: rbac.authorization.k8s.io 86 | kind: ClusterRole 87 | name: prometheus-operator 88 | subjects: 89 | - kind: ServiceAccount 90 | name: prometheus-operator 91 | namespace: monitoring 92 | --- 93 | 94 | # ------------------- Prometheus Operator Service ------------------- # 95 | apiVersion: v1 96 | kind: Service 97 | metadata: 98 | labels: 99 | app: prometheus-operator 100 | name: prometheus-operator 101 | namespace: monitoring 102 | spec: 103 | clusterIP: None 104 | ports: 105 | - name: http 106 | port: 8080 107 | targetPort: http 108 | selector: 109 | app: prometheus-operator 110 | --- 111 | 112 | # ------------------- Prometheus Operator Deployment ------------------- # 113 | apiVersion: apps/v1beta2 114 | kind: Deployment 115 | metadata: 116 | labels: 117 | app: prometheus-operator 118 | name: prometheus-operator 119 | namespace: monitoring 120 | spec: 121 | replicas: 1 122 | selector: 123 | matchLabels: 124 | app: prometheus-operator 125 | template: 126 | metadata: 127 | labels: 128 | app: 
prometheus-operator 129 | spec: 130 | containers: 131 | - args: 132 | - --kubelet-service=kube-system/kubelet 133 | - --logtostderr=true 134 | - --config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1 135 | - --prometheus-config-reloader=quay.io/coreos/prometheus-config-reloader:v0.26.0 136 | image: quay.io/coreos/prometheus-operator:v0.26.0 137 | name: prometheus-operator 138 | ports: 139 | - containerPort: 8080 140 | name: http 141 | resources: 142 | limits: 143 | cpu: 200m 144 | memory: 200Mi 145 | requests: 146 | cpu: 100m 147 | memory: 100Mi 148 | securityContext: 149 | allowPrivilegeEscalation: false 150 | readOnlyRootFilesystem: true 151 | nodeSelector: 152 | beta.kubernetes.io/os: linux 153 | securityContext: 154 | runAsNonRoot: true 155 | runAsUser: 65534 156 | serviceAccountName: prometheus-operator 157 | --- 158 | 159 | # ------------------- Prometheus Operator Service Monitor ------------------- # 160 | apiVersion: monitoring.coreos.com/v1 161 | kind: ServiceMonitor 162 | metadata: 163 | labels: 164 | app: prometheus-operator 165 | name: prometheus-operator 166 | namespace: monitoring 167 | spec: 168 | endpoints: 169 | - honorLabels: true 170 | port: http 171 | selector: 172 | matchLabels: 173 | app: prometheus-operator -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/100-prometheus/110-prometheus-core-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Prometheus ServiceAccount ------------------- # 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: prometheus-core 6 | namespace: monitoring 7 | --- 8 | 9 | # ------------------- Prometheus ClusterRole ------------------- # 10 | apiVersion: rbac.authorization.k8s.io/v1beta1 11 | kind: ClusterRole 12 | metadata: 13 | name: prometheus-core 14 | rules: 15 | - apiGroups: [""] 16 | resources: 17 | - nodes 18 | - services 19 | - endpoints 20 | - pods 
21 | verbs: ["get", "list", "watch"] 22 | - apiGroups: [""] 23 | resources: 24 | - configmaps 25 | verbs: ["get"] 26 | - nonResourceURLs: ["/metrics"] 27 | verbs: ["get"] 28 | --- 29 | 30 | # ------------------- Prometheus ClusterRoleBinding ------------------- # 31 | apiVersion: rbac.authorization.k8s.io/v1beta1 32 | kind: ClusterRoleBinding 33 | metadata: 34 | name: prometheus-core 35 | roleRef: 36 | apiGroup: rbac.authorization.k8s.io 37 | kind: ClusterRole 38 | name: prometheus-core 39 | subjects: 40 | - kind: ServiceAccount 41 | name: prometheus-core 42 | namespace: monitoring 43 | --- 44 | 45 | # ------------------- Prometheus Service ------------------- # 46 | apiVersion: v1 47 | kind: Service 48 | metadata: 49 | name: prometheus-core 50 | namespace: monitoring 51 | labels: 52 | app: prometheus-core 53 | spec: 54 | type: NodePort 55 | ports: 56 | - name: web 57 | nodePort: 30900 58 | port: 9090 59 | protocol: TCP 60 | targetPort: web 61 | selector: 62 | prometheus: core 63 | sessionAffinity: ClientIP 64 | --- 65 | 66 | # ------------------- Prometheus Instance ------------------- # 67 | apiVersion: monitoring.coreos.com/v1 68 | kind: Prometheus 69 | metadata: 70 | name: core 71 | namespace: monitoring 72 | labels: 73 | prometheus: core 74 | app: prometheus-core 75 | spec: 76 | replicas: 1 77 | serviceAccountName: prometheus-core 78 | serviceMonitorNamespaceSelector: {} 79 | serviceMonitorSelector: {} 80 | ruleSelector: 81 | matchLabels: 82 | role: alert-rules 83 | prometheus: core 84 | alerting: 85 | alertmanagers: 86 | - namespace: monitoring 87 | name: alertmanager-main 88 | port: web 89 | --- 90 | 91 | # ------------------- Prometheus Service Monitor ------------------- # 92 | apiVersion: monitoring.coreos.com/v1 93 | kind: ServiceMonitor 94 | metadata: 95 | name: prometheus-core 96 | namespace: monitoring 97 | spec: 98 | selector: 99 | matchLabels: 100 | app: prometheus-core 101 | endpoints: 102 | - port: web 
-------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/100-prometheus/130-alertmanager-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Alertmanager Secret ------------------- # 2 | apiVersion: v1 3 | kind: Secret 4 | metadata: 5 | name: alertmanager-main 6 | namespace: monitoring 7 | type: Opaque 8 | data: 9 | alertmanager.yaml: Z2xvYmFsOgogIHNtdHBfc21hcnRob3N0OiAnbWFpbGhvZy5tb25pdG9yaW5nLnN2Yy5jbHVzdGVyLmxvY2FsOjEwMjUnCiAgc210cF9mcm9tOiAnYWxlcnRtYW5hZ2VyQGs4cy1leGFtcGxlLmNvbScKICBzbXRwX3JlcXVpcmVfdGxzOiBmYWxzZQogIHNtdHBfaGVsbG86ICdrOHMtYWxlcnRtYW5hZ2VyJwogIAojIERlZmF1bHQgcm91dGUKcm91dGU6CiAgZ3JvdXBfYnk6IFtjbHVzdGVyLCBhbGVydG5hbWUsIHJlc291cmNlLCBzZXZlcml0eV0KCiAgIyBEZWZhdWx0IHJlY2VpdmVyCiAgcmVjZWl2ZXI6IHRlc3QtZW1haWxzCgogICMgQ2hpbGQgcm91dGVzCiAgcm91dGVzOgogICAgCiAgIyBDcmVhdGluZyBhIHJ1bGUgZm9yIENQVSByZWxhdGVkIGFsZXJ0cwogIC0gbWF0Y2hfcmU6CiAgICAgIHJlc291cmNlOiBeY3B1JAogICAgcmVjZWl2ZXI6IGNwdS1tYWlscwoKICAgICMgU3ViIHJvdXRlIGZvciBjcml0aWNhbCBDUFUgYWxlcnRzCiAgICByb3V0ZXM6CiAgICAtIG1hdGNoOgogICAgICAgIHNldmVyaXR5OiBjcml0aWNhbAogICAgICByZWNlaXZlcjogY3JpdC1tYWlscwoKIyBTZXR0aW5nIGRpZmZlcmVudCByZWNlaXZlcnMKcmVjZWl2ZXJzOgotIG5hbWU6ICd0ZXN0LWVtYWlscycKICBlbWFpbF9jb25maWdzOgogIC0gdG86ICd0ZXN0LWVtYWlsc0BleGFtcGxlLmNvbScKLSBuYW1lOiAnY3B1LW1haWxzJwogIGVtYWlsX2NvbmZpZ3M6CiAgLSB0bzogJ2NwdS1tYWlsc0BleGFtcGxlLmNvbScKLSBuYW1lOiAnY3JpdC1tYWlscycKICBlbWFpbF9jb25maWdzOgogICAgLSB0bzogJ2NyaXQtbWFpbHNAZXhhbXBsZS5jb20n 10 | --- 11 | 12 | # ------------------- Alertmanager ServiceAccount ------------------- # 13 | apiVersion: v1 14 | kind: ServiceAccount 15 | metadata: 16 | name: alertmanager-main 17 | namespace: monitoring 18 | --- 19 | 20 | # ------------------- Alertmanager Service ------------------- # 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: alertmanager-main 25 | namespace: monitoring 26 | labels: 27 | app: alertmanager-main 28 | 
spec: 29 | type: NodePort 30 | ports: 31 | - name: web 32 | nodePort: 30903 33 | port: 9093 34 | protocol: TCP 35 | targetPort: web 36 | selector: 37 | alertmanager: main 38 | --- 39 | 40 | # ------------------- Alertmanager Cluster ------------------- # 41 | apiVersion: monitoring.coreos.com/v1 42 | kind: Alertmanager 43 | metadata: 44 | name: main 45 | namespace: monitoring 46 | labels: 47 | app: alertmanager-main 48 | spec: 49 | replicas: 1 50 | baseImage: quay.io/prometheus/alertmanager 51 | version: v0.15.2 52 | serviceAccountName: alertmanager-main 53 | --- 54 | 55 | # ------------------- Alertmanager Service Monitor ------------------- # 56 | apiVersion: monitoring.coreos.com/v1 57 | kind: ServiceMonitor 58 | metadata: 59 | name: alertmanager-main 60 | namespace: monitoring 61 | spec: 62 | selector: 63 | matchLabels: 64 | app: alertmanager-main 65 | endpoints: 66 | - port: web -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/etcd-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- etcd Service ------------------- # 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | namespace: kube-system 6 | name: etcd-prometheus-discovery 7 | labels: 8 | component: etcd 9 | jobname: kube-etcd 10 | spec: 11 | selector: 12 | component: etcd 13 | type: ClusterIP 14 | ports: 15 | - name: http-metrics 16 | port: 10255 17 | targetPort: 10255 18 | protocol: TCP 19 | --- 20 | 21 | # ------------------- etcd Service Monitor ------------------- # 22 | apiVersion: monitoring.coreos.com/v1 23 | kind: ServiceMonitor 24 | metadata: 25 | labels: 26 | component: etcd 27 | name: kube-etcd 28 | namespace: monitoring 29 | spec: 30 | endpoints: 31 | - interval: 30s 32 | port: http-metrics 33 | jobLabel: jobname 34 | namespaceSelector: 35 | matchNames: 36 | - kube-system 37 | selector: 38 | matchLabels: 39 | component: etcd 
-------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/kube-apiserver-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- API Server Service Monitor ------------------- # 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | labels: 6 | k8s-app: apiserver 7 | name: kube-apiserver 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | interval: 30s 13 | metricRelabelings: 14 | - action: drop 15 | regex: etcd_(debugging|disk|request|server).* 16 | sourceLabels: 17 | - __name__ 18 | port: https 19 | scheme: https 20 | tlsConfig: 21 | caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt 22 | serverName: kubernetes 23 | jobLabel: component 24 | namespaceSelector: 25 | matchNames: 26 | - default 27 | selector: 28 | matchLabels: 29 | component: apiserver 30 | provider: kubernetes -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/kube-controller-manager.yml: -------------------------------------------------------------------------------- 1 | # # ------------------- Kube Controller Manager Service ------------------- # 2 | # apiVersion: v1 3 | # kind: Service 4 | # metadata: 5 | # namespace: kube-system 6 | # name: kube-controller-manager-prometheus-discovery 7 | # labels: 8 | # component: kube-controller-manager 9 | # spec: 10 | # selector: 11 | # component: kube-controller-manager 12 | # type: ClusterIP 13 | # ports: 14 | # - name: http-metrics 15 | # port: 10252 16 | # targetPort: 10252 17 | # protocol: TCP 18 | # --- 19 | 20 | # # ------------------- Kube Controller Service Monitor ------------------- # 21 | # apiVersion: monitoring.coreos.com/v1 22 | # kind: ServiceMonitor 23 | # metadata: 24 | # labels: 25 | # k8s-app: 
kube-controller-manager 26 | # name: kube-controller-manager 27 | # namespace: monitoring 28 | # spec: 29 | # endpoints: 30 | # - interval: 30s 31 | # metricRelabelings: 32 | # - action: drop 33 | # regex: etcd_(debugging|disk|request|server).* 34 | # sourceLabels: 35 | # - __name__ 36 | # port: http-metrics 37 | # jobLabel: component 38 | # namespaceSelector: 39 | # matchNames: 40 | # - kube-system 41 | # selector: 42 | # matchLabels: 43 | # component: kube-controller-manager -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/kube-dns-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- kube-dns Service ------------------- # 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | namespace: kube-system 6 | name: kube-dns-prometheus-discovery 7 | labels: 8 | k8s-app: kube-dns 9 | spec: 10 | selector: 11 | k8s-app: kube-dns 12 | type: ClusterIP 13 | ports: 14 | - name: http-metrics 15 | port: 10054 16 | targetPort: 10054 17 | protocol: TCP 18 | --- 19 | 20 | # ------------------- etcd Service Monitor ------------------- # 21 | apiVersion: monitoring.coreos.com/v1 22 | kind: ServiceMonitor 23 | metadata: 24 | labels: 25 | k8s-app: kube-dns 26 | name: kube-dns 27 | namespace: monitoring 28 | spec: 29 | endpoints: 30 | - interval: 30s 31 | port: http-metrics 32 | jobLabel: k8s-app 33 | namespaceSelector: 34 | matchNames: 35 | - kube-system 36 | selector: 37 | matchLabels: 38 | k8s-app: kube-dns -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/kube-scheduler-full.yml: -------------------------------------------------------------------------------- 1 | # # ------------------- Kube Scheduler Service ------------------- # 2 | # apiVersion: v1 3 | # kind: Service 4 | # metadata: 5 | # namespace: kube-system 6 | # name: 
kube-scheduler-prometheus-discovery 7 | # labels: 8 | # component: kube-scheduler 9 | # spec: 10 | # selector: 11 | # component: kube-scheduler 12 | # type: ClusterIP 13 | # ports: 14 | # - name: http-metrics 15 | # port: 10251 16 | # targetPort: 10251 17 | # protocol: TCP 18 | # --- 19 | 20 | # # ------------------- Kube Scheduler Service Monitor ------------------- # 21 | # apiVersion: monitoring.coreos.com/v1 22 | # kind: ServiceMonitor 23 | # metadata: 24 | # labels: 25 | # component: kube-scheduler 26 | # name: kube-scheduler 27 | # namespace: monitoring 28 | # spec: 29 | # endpoints: 30 | # - interval: 30s 31 | # port: http-metrics 32 | # jobLabel: component 33 | # namespaceSelector: 34 | # matchNames: 35 | # - kube-system 36 | # selector: 37 | # matchLabels: 38 | # component: kube-scheduler -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/kube-state-metrics-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- KSM ServiceAccount ------------------- # 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: kube-state-metrics 6 | namespace: monitoring 7 | --- 8 | 9 | # ------------------- KSM ClusterRole ------------------- # 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRole 12 | metadata: 13 | name: kube-state-metrics 14 | rules: 15 | - apiGroups: 16 | - "" 17 | resources: 18 | - configmaps 19 | - secrets 20 | - nodes 21 | - pods 22 | - services 23 | - resourcequotas 24 | - replicationcontrollers 25 | - limitranges 26 | - persistentvolumeclaims 27 | - persistentvolumes 28 | - namespaces 29 | - endpoints 30 | verbs: 31 | - list 32 | - watch 33 | - apiGroups: 34 | - extensions 35 | resources: 36 | - daemonsets 37 | - deployments 38 | - replicasets 39 | verbs: 40 | - list 41 | - watch 42 | - apiGroups: 43 | - apps 44 | resources: 45 | - statefulsets 46 | - daemonsets 47 | - 
deployments 48 | - replicasets 49 | verbs: 50 | - list 51 | - watch 52 | - apiGroups: 53 | - batch 54 | resources: 55 | - cronjobs 56 | - jobs 57 | verbs: 58 | - list 59 | - watch 60 | - apiGroups: 61 | - autoscaling 62 | resources: 63 | - horizontalpodautoscalers 64 | verbs: 65 | - list 66 | - watch 67 | - apiGroups: 68 | - authentication.k8s.io 69 | resources: 70 | - tokenreviews 71 | verbs: 72 | - create 73 | - apiGroups: 74 | - authorization.k8s.io 75 | resources: 76 | - subjectaccessreviews 77 | verbs: 78 | - create 79 | --- 80 | 81 | # ------------------- KSM ClusterRoleBinding ------------------- # 82 | apiVersion: rbac.authorization.k8s.io/v1 83 | kind: ClusterRoleBinding 84 | metadata: 85 | name: kube-state-metrics 86 | roleRef: 87 | apiGroup: rbac.authorization.k8s.io 88 | kind: ClusterRole 89 | name: kube-state-metrics 90 | subjects: 91 | - kind: ServiceAccount 92 | name: kube-state-metrics 93 | namespace: monitoring 94 | --- 95 | 96 | # ------------------- KSM Service ------------------- # 97 | apiVersion: v1 98 | kind: Service 99 | metadata: 100 | labels: 101 | k8s-app: kube-state-metrics 102 | name: kube-state-metrics 103 | namespace: monitoring 104 | spec: 105 | clusterIP: None 106 | ports: 107 | - name: https-main 108 | port: 8443 109 | targetPort: https-main 110 | - name: https-self 111 | port: 9443 112 | targetPort: https-self 113 | selector: 114 | app: kube-state-metrics 115 | --- 116 | 117 | # ------------------- KSM Deployment ------------------- # 118 | apiVersion: apps/v1beta2 119 | kind: Deployment 120 | metadata: 121 | labels: 122 | app: kube-state-metrics 123 | name: kube-state-metrics 124 | namespace: monitoring 125 | spec: 126 | replicas: 1 127 | selector: 128 | matchLabels: 129 | app: kube-state-metrics 130 | template: 131 | metadata: 132 | labels: 133 | app: kube-state-metrics 134 | spec: 135 | containers: 136 | - args: 137 | - --secure-listen-address=:8443 138 | - --upstream=http://127.0.0.1:8081/ 139 | image: 
quay.io/coreos/kube-rbac-proxy:v0.3.1 140 | name: kube-rbac-proxy-main 141 | ports: 142 | - containerPort: 8443 143 | name: https-main 144 | resources: 145 | limits: 146 | cpu: 20m 147 | memory: 40Mi 148 | requests: 149 | cpu: 10m 150 | memory: 20Mi 151 | - args: 152 | - --secure-listen-address=:9443 153 | - --upstream=http://127.0.0.1:8082/ 154 | image: quay.io/coreos/kube-rbac-proxy:v0.3.1 155 | name: kube-rbac-proxy-self 156 | ports: 157 | - containerPort: 9443 158 | name: https-self 159 | resources: 160 | limits: 161 | cpu: 20m 162 | memory: 40Mi 163 | requests: 164 | cpu: 10m 165 | memory: 20Mi 166 | - args: 167 | - --host=127.0.0.1 168 | - --port=8081 169 | - --telemetry-host=127.0.0.1 170 | - --telemetry-port=8082 171 | image: quay.io/coreos/kube-state-metrics:v1.3.1 172 | name: kube-state-metrics 173 | resources: 174 | limits: 175 | cpu: 100m 176 | memory: 150Mi 177 | requests: 178 | cpu: 100m 179 | memory: 150Mi 180 | - command: 181 | - /pod_nanny 182 | - --container=kube-state-metrics 183 | - --cpu=100m 184 | - --extra-cpu=2m 185 | - --memory=150Mi 186 | - --extra-memory=30Mi 187 | - --threshold=5 188 | - --deployment=kube-state-metrics 189 | env: 190 | - name: MY_POD_NAME 191 | valueFrom: 192 | fieldRef: 193 | apiVersion: v1 194 | fieldPath: metadata.name 195 | - name: MY_POD_NAMESPACE 196 | valueFrom: 197 | fieldRef: 198 | apiVersion: v1 199 | fieldPath: metadata.namespace 200 | image: quay.io/coreos/addon-resizer:1.0 201 | name: addon-resizer 202 | resources: 203 | limits: 204 | cpu: 10m 205 | memory: 30Mi 206 | requests: 207 | cpu: 10m 208 | memory: 30Mi 209 | nodeSelector: 210 | beta.kubernetes.io/os: linux 211 | securityContext: 212 | runAsNonRoot: true 213 | runAsUser: 65534 214 | serviceAccountName: kube-state-metrics 215 | --- 216 | 217 | # ------------------- KSM Service Monitor ------------------- # 218 | apiVersion: monitoring.coreos.com/v1 219 | kind: ServiceMonitor 220 | metadata: 221 | labels: 222 | k8s-app: kube-state-metrics 223 | name: 
kube-state-metrics 224 | namespace: monitoring 225 | spec: 226 | endpoints: 227 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 228 | honorLabels: true 229 | interval: 30s 230 | port: https-main 231 | scheme: https 232 | scrapeTimeout: 30s 233 | tlsConfig: 234 | insecureSkipVerify: true 235 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 236 | interval: 30s 237 | port: https-self 238 | scheme: https 239 | tlsConfig: 240 | insecureSkipVerify: true 241 | jobLabel: k8s-app 242 | selector: 243 | matchLabels: 244 | k8s-app: kube-state-metrics -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/kubelet-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Kubelet Service Monitor ------------------- # 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | labels: 6 | k8s-app: kubelet 7 | name: kubelet 8 | namespace: monitoring 9 | spec: 10 | endpoints: 11 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 12 | honorLabels: true 13 | interval: 30s 14 | port: https-metrics 15 | scheme: https 16 | tlsConfig: 17 | insecureSkipVerify: true 18 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 19 | honorLabels: true 20 | interval: 30s 21 | path: /metrics/cadvisor 22 | port: https-metrics 23 | scheme: https 24 | tlsConfig: 25 | insecureSkipVerify: true 26 | jobLabel: k8s-app 27 | namespaceSelector: 28 | matchNames: 29 | - kube-system 30 | selector: 31 | matchLabels: 32 | k8s-app: kubelet -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/200-internal/node-exporter-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Node Exporter Service Account ------------------- # 2 | 
apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: node-exporter 6 | namespace: monitoring 7 | --- 8 | 9 | # ------------------- Node Exporter ClusterRole ------------------- # 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | kind: ClusterRole 12 | metadata: 13 | name: node-exporter 14 | rules: 15 | - apiGroups: 16 | - authentication.k8s.io 17 | resources: 18 | - tokenreviews 19 | verbs: 20 | - create 21 | - apiGroups: 22 | - authorization.k8s.io 23 | resources: 24 | - subjectaccessreviews 25 | verbs: 26 | - create 27 | --- 28 | 29 | # ------------------- Node Exporter ClusterRoleBinding ------------------- # 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: ClusterRoleBinding 32 | metadata: 33 | name: node-exporter 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: node-exporter 38 | subjects: 39 | - kind: ServiceAccount 40 | name: node-exporter 41 | namespace: monitoring 42 | --- 43 | 44 | # ------------------- Node Exporter Service ------------------- # 45 | apiVersion: v1 46 | kind: Service 47 | metadata: 48 | labels: 49 | app: node-exporter 50 | name: node-exporter 51 | namespace: monitoring 52 | spec: 53 | clusterIP: None 54 | ports: 55 | - name: https 56 | port: 9100 57 | targetPort: https 58 | selector: 59 | app: node-exporter 60 | --- 61 | 62 | # ------------------- Node Exporter DaemonSet ------------------- # 63 | apiVersion: apps/v1beta2 64 | kind: DaemonSet 65 | metadata: 66 | labels: 67 | app: node-exporter 68 | name: node-exporter 69 | namespace: monitoring 70 | spec: 71 | selector: 72 | matchLabels: 73 | app: node-exporter 74 | template: 75 | metadata: 76 | labels: 77 | app: node-exporter 78 | spec: 79 | containers: 80 | - args: 81 | - --web.listen-address 82 | - 127.0.0.1:9101 83 | - --path.procfs 84 | - /host/proc 85 | - --path.sysfs 86 | - /host/sys 87 | - --collector.filesystem.ignored-mount-points 88 | - '"^/(sys|proc|dev|host|etc)($|/)"' 89 | name: node-exporter 90 | volumeMounts: 91 
| - name: dev 92 | mountPath: /host/dev 93 | - name: proc 94 | mountPath: /host/proc 95 | - name: sys 96 | mountPath: /host/sys 97 | - name: rootfs 98 | mountPath: /rootfs 99 | image: quay.io/prometheus/node-exporter:v0.16.0 100 | resources: 101 | limits: 102 | cpu: 102m 103 | memory: 180Mi 104 | requests: 105 | cpu: 102m 106 | memory: 180Mi 107 | - args: 108 | - --secure-listen-address=:9100 109 | - --upstream=http://127.0.0.1:9101/ 110 | image: quay.io/coreos/kube-rbac-proxy:v0.3.1 111 | name: kube-rbac-proxy 112 | ports: 113 | - containerPort: 9100 114 | hostPort: 9100 115 | name: https 116 | resources: 117 | limits: 118 | cpu: 20m 119 | memory: 40Mi 120 | requests: 121 | cpu: 10m 122 | memory: 20Mi 123 | hostNetwork: true 124 | hostPID: true 125 | nodeSelector: 126 | beta.kubernetes.io/os: linux 127 | securityContext: 128 | runAsNonRoot: true 129 | runAsUser: 65534 130 | serviceAccountName: node-exporter 131 | tolerations: 132 | - effect: NoSchedule 133 | key: node-role.kubernetes.io/master 134 | volumes: 135 | - name: proc 136 | hostPath: 137 | path: /proc 138 | - name: dev 139 | hostPath: 140 | path: /dev 141 | - name: sys 142 | hostPath: 143 | path: /sys 144 | - name: rootfs 145 | hostPath: 146 | path: / 147 | --- 148 | 149 | # ------------------- Node Exporter Service Monitor ------------------- # 150 | apiVersion: monitoring.coreos.com/v1 151 | kind: ServiceMonitor 152 | metadata: 153 | labels: 154 | app: node-exporter 155 | name: node-exporter 156 | namespace: monitoring 157 | spec: 158 | endpoints: 159 | - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token 160 | interval: 30s 161 | port: https 162 | scheme: https 163 | tlsConfig: 164 | insecureSkipVerify: true 165 | jobLabel: app 166 | selector: 167 | matchLabels: 168 | app: node-exporter -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/300-grafana/100-grafana-datasources.yml: 
-------------------------------------------------------------------------------- 1 | # ------------------- Grafana Data Sources ConfigMap ------------------- # 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: grafana-datasources 6 | namespace: monitoring 7 | labels: 8 | app: grafana 9 | data: 10 | prometheus-datasource.yml: | 11 | apiVersion: 1 12 | datasources: 13 | - name: prometheus 14 | type: prometheus 15 | access: proxy 16 | url: http://prometheus-core.monitoring.svc.cluster.local:9090 17 | isDefault: true 18 | version: 1 19 | editable: true -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/100-prometheus/300-grafana/200-grafana-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Grafana Service Account ------------------- # 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: grafana 6 | namespace: monitoring 7 | --- 8 | 9 | # ------------------- Grafana Service ------------------- # 10 | apiVersion: v1 11 | kind: Service 12 | metadata: 13 | name: grafana 14 | labels: 15 | app: grafana 16 | namespace: monitoring 17 | spec: 18 | type: NodePort 19 | ports: 20 | - name: http 21 | nodePort: 30300 22 | port: 3000 23 | targetPort: http 24 | selector: 25 | app: grafana 26 | sessionAffinity: ClientIP 27 | --- 28 | 29 | # ------------------- Grafana Deployment ------------------- # 30 | apiVersion: apps/v1beta2 31 | kind: Deployment 32 | metadata: 33 | labels: 34 | app: grafana 35 | name: grafana 36 | namespace: monitoring 37 | spec: 38 | replicas: 1 39 | selector: 40 | matchLabels: 41 | app: grafana 42 | template: 43 | metadata: 44 | labels: 45 | app: grafana 46 | spec: 47 | containers: 48 | - image: grafana/grafana:5.2.4 49 | name: grafana 50 | ports: 51 | - containerPort: 3000 52 | name: http 53 | resources: 54 | limits: 55 | cpu: 200m 56 | memory: 200Mi 57 | requests: 58 | cpu: 100m 59 | memory: 100Mi 60 | env: 61 
| # The following env variables set up basic auth with the default admin user and admin password. 62 | - name: GF_AUTH_BASIC_ENABLED 63 | value: "true" 64 | - name: GF_AUTH_ANONYMOUS_ENABLED 65 | value: "false" 66 | - name: GF_PATHS_PROVISIONING 67 | value: "/srv/provisioning" 68 | readinessProbe: 69 | httpGet: 70 | path: /login 71 | port: 3000 72 | volumeMounts: 73 | - name: grafana-persistent-storage 74 | mountPath: /var/lib/grafana 75 | - name: grafana-datasources 76 | mountPath: /srv/provisioning/datasources 77 | - name: grafana-dashboards-config 78 | mountPath: /srv/provisioning/dashboards 79 | - name: grafana-dashboards-kubernetes 80 | mountPath: /srv/provisioning/import/dashboards/grafana-dashboards-kubernetes.json 81 | subPath: grafana-dashboards-kubernetes.json 82 | - name: grafana-dashboards-node-exporter 83 | mountPath: /srv/provisioning/import/dashboards/grafana-dashboards-node-exporter.json 84 | subPath: grafana-dashboards-node-exporter.json 85 | - name: grafana-dashboards-redis 86 | mountPath: /srv/provisioning/import/dashboards/grafana-dashboards-redis.json 87 | subPath: grafana-dashboards-redis.json 88 | - name: grafana-dashboards-prometheus 89 | mountPath: /srv/provisioning/import/dashboards/grafana-dashboards-prometheus.json 90 | subPath: grafana-dashboards-prometheus.json 91 | - name: grafana-dashboards-grafana 92 | mountPath: /srv/provisioning/import/dashboards/grafana-dashboards-grafana.json 93 | subPath: grafana-dashboards-grafana.json 94 | securityContext: 95 | runAsNonRoot: true 96 | runAsUser: 65534 97 | serviceAccountName: grafana 98 | volumes: 99 | - name: grafana-persistent-storage 100 | emptyDir: {} 101 | - name: grafana-datasources 102 | configMap: 103 | name: grafana-datasources 104 | - name: grafana-dashboards-config 105 | configMap: 106 | name: grafana-dashboards-config 107 | - name: grafana-dashboards-kubernetes 108 | configMap: 109 | name: grafana-dashboards-kubernetes 110 | - name: grafana-dashboards-node-exporter 111 | 
configMap: 112 | name: grafana-dashboards-node-exporter 113 | - name: grafana-dashboards-redis 114 | configMap: 115 | name: grafana-dashboards-redis 116 | - name: grafana-dashboards-prometheus 117 | configMap: 118 | name: grafana-dashboards-prometheus 119 | - name: grafana-dashboards-grafana 120 | configMap: 121 | name: grafana-dashboards-grafana 122 | --- 123 | 124 | # ------------------- Grafana ServiceMonitor ------------------- # 125 | apiVersion: monitoring.coreos.com/v1 126 | kind: ServiceMonitor 127 | metadata: 128 | name: grafana 129 | namespace: monitoring 130 | spec: 131 | selector: 132 | matchLabels: 133 | app: grafana 134 | endpoints: 135 | - port: http 136 | -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/200-elk/100-elasticsearch-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- ElasticSearch Service ------------------- # 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: elasticsearch 6 | namespace: monitoring 7 | labels: 8 | app: elasticsearch 9 | prom-job: elk-elasticsearch 10 | spec: 11 | selector: 12 | app: elasticsearch 13 | ports: 14 | - name: es 15 | port: 9200 16 | - name: exporter 17 | port: 9108 18 | --- 19 | 20 | # ------------------- ElasticSearch Deployment ------------------- # 21 | apiVersion: apps/v1 22 | kind: Deployment 23 | metadata: 24 | name: elasticsearch 25 | namespace: monitoring 26 | labels: 27 | app: elasticsearch 28 | spec: 29 | selector: 30 | matchLabels: 31 | app: elasticsearch 32 | template: 33 | metadata: 34 | labels: 35 | app: elasticsearch 36 | spec: 37 | containers: 38 | - name: elasticsearch 39 | image: docker.elastic.co/elasticsearch/elasticsearch:6.4.0 40 | env: 41 | - name: xpack.security.enabled 42 | value: "false" 43 | - name: xpack.watcher.enabled 44 | value: "false" 45 | - name: xpack.graph.enabled 46 | value: "false" 47 | - name: xpack.monitoring.enabled 48 | value: "false" 
49 | ports: 50 | - name: es 51 | containerPort: 9200 52 | - name: elasticsearch-exporter 53 | image: justwatch/elasticsearch_exporter:1.0.4rc1 54 | resources: 55 | requests: 56 | cpu: 100m 57 | memory: 100Mi 58 | ports: 59 | - name: exporter 60 | containerPort: 9108 61 | command: 62 | - "elasticsearch_exporter" 63 | - "-es.uri=localhost:9200" 64 | --- 65 | 66 | # ------------------- ElasticSearch ServiceMonitor ------------------- # 67 | apiVersion: monitoring.coreos.com/v1 68 | kind: ServiceMonitor 69 | metadata: 70 | name: elasticsearch 71 | namespace: monitoring 72 | spec: 73 | selector: 74 | matchLabels: 75 | app: elasticsearch 76 | prom-job: elk-elasticsearch 77 | jobLabel: prom-job 78 | endpoints: 79 | - port: exporter -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/200-elk/200-logstash-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Logstash Pipeline ConfigMap ------------------- # 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: logstash-pipeline 6 | namespace: monitoring 7 | labels: 8 | app: logstash 9 | data: 10 | logstash.conf: | 11 | input { 12 | beats { 13 | port => 5000 14 | } 15 | } 16 | 17 | filter { 18 | mutate { 19 | rename => ["log", "message"] 20 | } 21 | 22 | date { 23 | match => ["time", "ISO8601"] 24 | remove_field => ["time"] 25 | } 26 | 27 | grok { 28 | match => { "source" => "/var/log/containers/%{DATA:pod_name}_%{DATA:namespace}_%{GREEDYDATA:container_name}-%{DATA:container_id}.log" } 29 | remove_field => ["source"] 30 | } 31 | } 32 | 33 | output { 34 | elasticsearch { 35 | hosts => "${ES_HOST}" 36 | } 37 | } 38 | --- 39 | 40 | # ------------------- Logstash Config ConfigMap ------------------- # 41 | apiVersion: v1 42 | kind: ConfigMap 43 | metadata: 44 | name: logstash-config 45 | namespace: monitoring 46 | labels: 47 | app: logstash 48 | data: 49 | logstash.yml: | 50 | http.host: "0.0.0.0" 51 | 
xpack.monitoring.enabled: false 52 | --- 53 | 54 | # ------------------- Logstash Service ------------------- # 55 | kind: Service 56 | apiVersion: v1 57 | metadata: 58 | name: logstash 59 | namespace: monitoring 60 | labels: 61 | app: logstash 62 | spec: 63 | selector: 64 | app: logstash 65 | ports: 66 | - name: http 67 | port: 5000 68 | targetPort: 5000 69 | --- 70 | 71 | # ------------------- Logstash Deployment ------------------- # 72 | apiVersion: apps/v1 73 | kind: Deployment 74 | metadata: 75 | name: logstash 76 | namespace: monitoring 77 | labels: 78 | app: logstash 79 | spec: 80 | selector: 81 | matchLabels: 82 | app: logstash 83 | template: 84 | metadata: 85 | name: logstash 86 | labels: 87 | app: logstash 88 | spec: 89 | containers: 90 | - name: logstash 91 | image: docker.elastic.co/logstash/logstash:6.4.2 92 | env: 93 | - name: ES_HOST 94 | value: elasticsearch.monitoring.svc.cluster.local:9200 95 | securityContext: 96 | runAsUser: 0 97 | ports: 98 | - name: http 99 | containerPort: 5000 100 | volumeMounts: 101 | - name: pipeline 102 | mountPath: /usr/share/logstash/pipeline 103 | - name: config 104 | mountPath: /usr/share/logstash/config/logstash.yml 105 | subPath: logstash.yml 106 | volumes: 107 | - name: pipeline 108 | configMap: 109 | name: logstash-pipeline 110 | - name: config 111 | configMap: 112 | name: logstash-config 113 | 114 | -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/200-elk/300-filebeat-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Filebeat Config ConfigMap ------------------- # 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: filebeat-config 6 | namespace: monitoring 7 | labels: 8 | app: filebeat 9 | data: 10 | filebeat.yml: | 11 | # Stores the state and location info Filebeat uses to track where it was last reading 12 | # filebeat.registry_file: /var/log/containers/filebeat_registry 13 | 
14 | # Specify inputs to read logs from 15 | filebeat.inputs: 16 | - type: log 17 | enabled: true 18 | paths: 19 | - /var/log/containers/*.log 20 | symlinks: true 21 | 22 | # Skip Filebeat & kube logs 23 | # exclude_files: ['filebeat.*log', 'kube.*log'] 24 | 25 | # Set key to 'log' 26 | json.message_key: log 27 | 28 | # Copy keys to top level of document 29 | json.keys_under_root: true 30 | 31 | # Add error key if unmarshaling fails 32 | json.add_error_key: true 33 | 34 | # Match multiple lines starting with whitespace 35 | multiline.pattern: ^\s 36 | 37 | # Append lines to pattern 38 | multiline.match: after 39 | 40 | http: 41 | enabled: true 42 | host: localhost 43 | port: 5066 44 | 45 | output.logstash: 46 | hosts: ["${LOGSTASH_HOSTS}"] 47 | index: ${INDEX_PREFIX} 48 | timeout: 15 49 | 50 | logging.level: ${LOG_LEVEL} 51 | --- 52 | 53 | # ------------------- Filebeat Service ------------------- # 54 | apiVersion: v1 55 | kind: Service 56 | metadata: 57 | labels: 58 | app: filebeat 59 | prom-job: elk-filebeat 60 | name: filebeat 61 | namespace: monitoring 62 | spec: 63 | clusterIP: None 64 | ports: 65 | - name: metrics 66 | port: 5066 67 | - name: exporter 68 | port: 9479 69 | selector: 70 | app: filebeat 71 | --- 72 | 73 | # ------------------- Filebeat DaemonSet ------------------- # 74 | apiVersion: apps/v1 75 | kind: DaemonSet 76 | metadata: 77 | name: filebeat 78 | namespace: monitoring 79 | labels: 80 | app: filebeat 81 | spec: 82 | selector: 83 | matchLabels: 84 | app: filebeat 85 | template: 86 | metadata: 87 | name: filebeat 88 | labels: 89 | namespace: monitoring 90 | app: filebeat 91 | spec: 92 | containers: 93 | - name: filebeat 94 | image: docker.elastic.co/beats/filebeat:6.4.2 95 | securityContext: 96 | runAsUser: 0 97 | resources: 98 | limits: 99 | cpu: 50m 100 | memory: 50Mi 101 | ports: 102 | - name: metrics 103 | containerPort: 5066 104 | env: 105 | - name: LOGSTASH_HOSTS 106 | value: logstash.monitoring.svc.cluster.local:5000 107 | - name: 
LOG_LEVEL 108 | value: info 109 | - name: INDEX_PREFIX 110 | value: "k8s-" 111 | volumeMounts: 112 | - name: config 113 | mountPath: /usr/share/filebeat/filebeat.yml 114 | subPath: filebeat.yml 115 | - name: varlogcontainers 116 | mountPath: /var/log/containers 117 | - name: varlogpods 118 | mountPath: /var/log/pods 119 | readOnly: true 120 | - name: varlibdockercontainers 121 | mountPath: /var/lib/docker/containers 122 | readOnly: true 123 | - name: beat-exporter 124 | image: trustpilot/beat-exporter:0.1.1 125 | ports: 126 | - name: exporter 127 | containerPort: 9479 128 | resources: 129 | limits: 130 | cpu: 50m 131 | memory: 100Mi 132 | volumes: 133 | - name: config 134 | configMap: 135 | name: filebeat-config 136 | - name: varlogcontainers 137 | hostPath: 138 | path: /var/log/containers 139 | - name: varlogpods 140 | hostPath: 141 | path: /var/log/pods 142 | - name: varlibdockercontainers 143 | hostPath: 144 | path: /var/lib/docker/containers 145 | --- 146 | 147 | # ------------------- Filebeat ServiceMonitor ------------------- # 148 | apiVersion: monitoring.coreos.com/v1 149 | kind: ServiceMonitor 150 | metadata: 151 | name: filebeat 152 | namespace: monitoring 153 | spec: 154 | selector: 155 | matchLabels: 156 | app: filebeat 157 | prom-job: elk-filebeat 158 | jobLabel: prom-job 159 | endpoints: 160 | - port: exporter -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/200-elk/400-kibana-full.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: kibana-config 5 | namespace: monitoring 6 | labels: 7 | app: kibana 8 | data: 9 | kibana.yml: | 10 | server.name: ${KIBANA_HOST} 11 | server.host: "0" 12 | elasticsearch.url: ${ES_HOST} 13 | xpack.security.enabled: false 14 | xpack.monitoring.enabled: false 15 | xpack.graph.enabled: false 16 | xpack.reporting.enabled: false 17 | --- 18 | 19 | apiVersion: v1 20 | kind: 
Service 21 | metadata: 22 | name: kibana 23 | namespace: monitoring 24 | labels: 25 | app: kibana 26 | spec: 27 | selector: 28 | app: kibana 29 | type: NodePort 30 | ports: 31 | - name: http 32 | port: 5601 33 | targetPort: 5601 34 | nodePort: 30601 35 | --- 36 | 37 | apiVersion: apps/v1 38 | kind: Deployment 39 | metadata: 40 | name: kibana 41 | namespace: monitoring 42 | labels: 43 | app: kibana 44 | spec: 45 | replicas: 1 46 | selector: 47 | matchLabels: 48 | app: kibana 49 | template: 50 | metadata: 51 | name: kibana 52 | labels: 53 | app: kibana 54 | spec: 55 | containers: 56 | - name: kibana 57 | image: docker.elastic.co/kibana/kibana:6.4.2 58 | env: 59 | - name: KIBANA_HOST 60 | value: kibana.monitoring.cluster.local 61 | - name: ES_HOST 62 | value: elasticsearch.monitoring.svc.cluster.local:9200 63 | ports: 64 | - name: http 65 | containerPort: 5601 66 | volumeMounts: 67 | - name: config 68 | mountPath: /usr/share/kibana/kibana.yml 69 | subPath: kibana.yml 70 | volumes: 71 | - name: config 72 | configMap: 73 | name: kibana-config 74 | -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/300-mailhog/100-mailhog-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Mailhog Service ------------------- # 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: mailhog 6 | namespace: monitoring 7 | labels: 8 | app: mailhog 9 | spec: 10 | selector: 11 | app: mailhog 12 | ports: 13 | - name: smtp 14 | nodePort: 31025 15 | port: 1025 16 | targetPort: 1025 17 | - name: http 18 | nodePort: 32025 19 | port: 8025 20 | targetPort: 8025 21 | type: NodePort 22 | --- 23 | 24 | # ------------------- Mailhog Deployment ------------------- # 25 | apiVersion: apps/v1 26 | kind: Deployment 27 | metadata: 28 | name: mailhog 29 | namespace: monitoring 30 | labels: 31 | app: mailhog 32 | spec: 33 | replicas: 1 34 | selector: 35 | matchLabels: 36 | app: mailhog 
37 | template: 38 | metadata: 39 | labels: 40 | app: mailhog 41 | spec: 42 | containers: 43 | - name: mailhog 44 | image: mailhog/mailhog 45 | ports: 46 | - name: smtp 47 | containerPort: 1025 48 | - name: http 49 | containerPort: 8025 50 | -------------------------------------------------------------------------------- /deploy/k8s/000-monitoring/400-jaeger/000-jaeger-dev-full.yml: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2017-2018 The Jaeger Authors 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except 5 | # in compliance with the License. You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software distributed under the License 10 | # is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 11 | # or implied. See the License for the specific language governing permissions and limitations under 12 | # the License. 
13 | # 14 | # See: https://github.com/jaegertracing/jaeger-kubernetes 15 | 16 | # ------------------- Jaeger all-in-one Deployment ------------------- # 17 | apiVersion: v1 18 | kind: List 19 | items: 20 | - apiVersion: extensions/v1beta1 21 | kind: Deployment 22 | metadata: 23 | name: jaeger-deployment 24 | namespace: monitoring 25 | labels: 26 | app: jaeger 27 | jaeger-infra: jaeger-deployment 28 | spec: 29 | replicas: 1 30 | strategy: 31 | type: Recreate 32 | template: 33 | metadata: 34 | labels: 35 | app: jaeger 36 | jaeger-infra: jaeger-pod 37 | spec: 38 | containers: 39 | - env: 40 | - name: COLLECTOR_ZIPKIN_HTTP_PORT 41 | value: "9411" 42 | image: jaegertracing/all-in-one 43 | name: jaeger 44 | ports: 45 | - containerPort: 5775 46 | protocol: UDP 47 | name: agent-zipkint 48 | - containerPort: 6831 49 | protocol: UDP 50 | name: agent-compact 51 | - containerPort: 6832 52 | protocol: UDP 53 | name: agent-binary 54 | - containerPort: 5778 55 | protocol: TCP 56 | name: agent-configs 57 | - containerPort: 16686 58 | protocol: TCP 59 | name: query-http 60 | - containerPort: 9411 61 | protocol: TCP 62 | name: collector-z 63 | readinessProbe: 64 | httpGet: 65 | path: "/" 66 | port: 14269 67 | initialDelaySeconds: 5 68 | 69 | # ------------------- Jaeger Query Service ------------------- # 70 | - apiVersion: v1 71 | kind: Service 72 | metadata: 73 | name: jaeger-query 74 | namespace: monitoring 75 | labels: 76 | app: jaeger 77 | jaeger-infra: query-service 78 | spec: 79 | type: NodePort 80 | ports: 81 | - name: query-http 82 | nodePort: 30686 83 | port: 16686 84 | protocol: TCP 85 | targetPort: query-http 86 | selector: 87 | jaeger-infra: jaeger-pod 88 | 89 | # ------------------- Jaeger Query ServiceMonitor ------------------- # 90 | - apiVersion: monitoring.coreos.com/v1 91 | kind: ServiceMonitor 92 | metadata: 93 | name: jaeger-query 94 | namespace: monitoring 95 | spec: 96 | selector: 97 | matchLabels: 98 | jaeger-infra: query-service 99 | endpoints: 100 | - port: 
query-http 101 | 102 | # ------------------- Jaeger Collector Service ------------------- # 103 | - apiVersion: v1 104 | kind: Service 105 | metadata: 106 | name: jaeger-collector 107 | namespace: monitoring 108 | labels: 109 | app: jaeger 110 | jaeger-infra: collector-service 111 | spec: 112 | type: ClusterIP 113 | ports: 114 | - name: collector-tchannel 115 | port: 14267 116 | protocol: TCP 117 | targetPort: 14267 118 | - name: collector-http 119 | port: 14268 120 | protocol: TCP 121 | targetPort: 14268 122 | - name: collector-z 123 | port: 9411 124 | protocol: TCP 125 | targetPort: collector-z 126 | selector: 127 | jaeger-infra: jaeger-pod 128 | 129 | # ------------------- Jaeger Agent Service ------------------- # 130 | - apiVersion: v1 131 | kind: Service 132 | metadata: 133 | name: jaeger-agent 134 | namespace: monitoring 135 | labels: 136 | app: jaeger 137 | jaeger-infra: agent-service 138 | spec: 139 | clusterIP: None 140 | ports: 141 | - name: agent-zipkint 142 | port: 5775 143 | protocol: UDP 144 | targetPort: agent-zipkint 145 | - name: agent-compact 146 | port: 6831 147 | protocol: UDP 148 | targetPort: agent-compact 149 | - name: agent-binary 150 | port: 6832 151 | protocol: UDP 152 | targetPort: agent-binary 153 | - name: agent-configs 154 | port: 5778 155 | protocol: TCP 156 | targetPort: agent-configs 157 | selector: 158 | jaeger-infra: jaeger-pod 159 | 160 | # ------------------- Jaeger Zipkin Service ------------------- # 161 | - apiVersion: v1 162 | kind: Service 163 | metadata: 164 | name: jaeger-zipkin 165 | namespace: monitoring 166 | labels: 167 | app: jaeger 168 | jaeger-infra: zipkin-service 169 | spec: 170 | clusterIP: None 171 | ports: 172 | - name: collector-z 173 | port: 9411 174 | protocol: TCP 175 | targetPort: collector-z 176 | selector: 177 | jaeger-infra: jaeger-pod 178 | -------------------------------------------------------------------------------- /deploy/k8s/100-micro-obs/000-namespace.yml: 
-------------------------------------------------------------------------------- 1 | # -------------------micro-obs namespace ------------------- # 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: micro-obs -------------------------------------------------------------------------------- /deploy/k8s/100-micro-obs/100-item-redis.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Item Redis Service ------------------- # 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: redis-item 6 | namespace: micro-obs 7 | labels: 8 | app: redis 9 | service: item 10 | spec: 11 | type: ClusterIP 12 | selector: 13 | app: redis 14 | service: item 15 | ports: 16 | - name: redis 17 | protocol: TCP 18 | port: 6379 19 | targetPort: redis 20 | - name: prom 21 | protocol: TCP 22 | port: 9121 23 | --- 24 | 25 | # ------------------- Item Redis Deployment ------------------- # 26 | apiVersion: apps/v1 27 | kind: Deployment 28 | metadata: 29 | name: redis-item 30 | namespace: micro-obs 31 | labels: 32 | app: redis 33 | service: item 34 | spec: 35 | selector: 36 | matchLabels: 37 | app: redis 38 | service: item 39 | replicas: 1 40 | template: 41 | metadata: 42 | labels: 43 | app: redis 44 | service: item 45 | spec: 46 | containers: 47 | - name: redis-master 48 | image: redis:5 49 | resources: 50 | requests: 51 | cpu: 100m 52 | memory: 100Mi 53 | ports: 54 | - name: redis 55 | protocol: TCP 56 | containerPort: 6379 57 | - name: redis-exporter 58 | image: oliver006/redis_exporter:latest 59 | resources: 60 | requests: 61 | cpu: 100m 62 | memory: 100Mi 63 | ports: 64 | - name: prom 65 | containerPort: 9121 66 | --- 67 | 68 | # ------------------- Item Redis ServiceMonitor ------------------- # 69 | apiVersion: monitoring.coreos.com/v1 70 | kind: ServiceMonitor 71 | metadata: 72 | name: redis-item 73 | namespace: micro-obs 74 | labels: 75 | app: redis 76 | service: item 77 | spec: 78 | selector: 79 | matchLabels: 80 
| app: redis 81 | service: item 82 | endpoints: 83 | - port: prom 84 | -------------------------------------------------------------------------------- /deploy/k8s/100-micro-obs/150-item-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Item Service ------------------- # 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: item 6 | namespace: micro-obs 7 | labels: 8 | app: item 9 | service: item 10 | spec: 11 | type: NodePort 12 | selector: 13 | app: item 14 | service: item 15 | ports: 16 | - name: http 17 | protocol: TCP 18 | port: 8080 19 | targetPort: http 20 | nodePort: 30808 21 | --- 22 | 23 | # ------------------- Item Deployment ------------------- # 24 | apiVersion: extensions/v1beta1 25 | kind: Deployment 26 | metadata: 27 | name: item 28 | namespace: micro-obs 29 | labels: 30 | app: item 31 | service: item 32 | spec: 33 | replicas: 1 34 | selector: 35 | matchLabels: 36 | app: item 37 | service: item 38 | template: 39 | metadata: 40 | labels: 41 | app: item 42 | service: item 43 | spec: 44 | containers: 45 | - name: item 46 | image: obitech/micro-obs:v0.2.2 47 | env: 48 | - name: JAEGER_AGENT_HOST 49 | value: jaeger-agent.monitoring.svc.cluster.local 50 | - name: JAEGER_AGENT_PORT 51 | value: "6831" 52 | command: ["item"] 53 | args: ["-r", "redis://redis-item:6379/0"] 54 | ports: 55 | - name: http 56 | containerPort: 8080 57 | --- 58 | 59 | # ------------------- Item ServiceMonitor ------------------- # 60 | apiVersion: monitoring.coreos.com/v1 61 | kind: ServiceMonitor 62 | metadata: 63 | name: item 64 | namespace: micro-obs 65 | labels: 66 | app: item 67 | service: item 68 | spec: 69 | selector: 70 | matchLabels: 71 | app: item 72 | service: item 73 | endpoints: 74 | - port: http -------------------------------------------------------------------------------- /deploy/k8s/100-micro-obs/200-order-redis.yml: 
-------------------------------------------------------------------------------- 1 | # ------------------- Order Redis Service ------------------- # 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: redis-order 6 | namespace: micro-obs 7 | labels: 8 | app: redis 9 | service: order 10 | spec: 11 | type: ClusterIP 12 | selector: 13 | app: redis 14 | service: order 15 | ports: 16 | - name: redis 17 | protocol: TCP 18 | port: 6379 19 | targetPort: redis 20 | - name: prom 21 | protocol: TCP 22 | port: 9121 23 | --- 24 | 25 | # ------------------- Order Redis Deployment ------------------- # 26 | apiVersion: apps/v1 27 | kind: Deployment 28 | metadata: 29 | name: redis-order 30 | namespace: micro-obs 31 | labels: 32 | app: redis 33 | service: order 34 | spec: 35 | selector: 36 | matchLabels: 37 | app: redis 38 | service: order 39 | replicas: 1 40 | template: 41 | metadata: 42 | labels: 43 | app: redis 44 | service: order 45 | spec: 46 | containers: 47 | - name: redis-master 48 | image: redis:5 49 | resources: 50 | requests: 51 | cpu: 100m 52 | memory: 100Mi 53 | ports: 54 | - name: redis 55 | protocol: TCP 56 | containerPort: 6379 57 | - name: redis-exporter 58 | image: oliver006/redis_exporter:latest 59 | resources: 60 | requests: 61 | cpu: 100m 62 | memory: 100Mi 63 | ports: 64 | - name: prom 65 | containerPort: 9121 66 | --- 67 | 68 | # ------------------- Order Redis ServiceMonitor ------------------- # 69 | apiVersion: monitoring.coreos.com/v1 70 | kind: ServiceMonitor 71 | metadata: 72 | name: redis-order 73 | namespace: micro-obs 74 | labels: 75 | app: redis 76 | service: order 77 | spec: 78 | selector: 79 | matchLabels: 80 | app: redis 81 | service: order 82 | endpoints: 83 | - port: prom 84 | -------------------------------------------------------------------------------- /deploy/k8s/100-micro-obs/250-order-full.yml: -------------------------------------------------------------------------------- 1 | # ------------------- Order Service ------------------- 
# ------------------- Order Service ------------------- #
kind: Service
apiVersion: v1
metadata:
  name: order
  namespace: micro-obs
  labels:
    app: order
    service: order
spec:
  type: NodePort
  selector:
    app: order
    service: order
  ports:
  - name: http
    protocol: TCP
    port: 8090
    targetPort: http
    nodePort: 30809
---

# ------------------- Order Deployment ------------------- #
# FIX: extensions/v1beta1 Deployments are deprecated (removed in k8s 1.16);
# use apps/v1 like the redis-order Deployment in this repo. The required
# spec.selector is already present.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: order
  namespace: micro-obs
  labels:
    app: order
    service: order
spec:
  replicas: 1
  selector:
    matchLabels:
      app: order
      service: order
  template:
    metadata:
      labels:
        app: order
        service: order
    spec:
      containers:
      - name: order
        image: obitech/micro-obs:v0.2.2
        env:
        - name: JAEGER_AGENT_HOST
          value: jaeger-agent.monitoring.svc.cluster.local
        - name: JAEGER_AGENT_PORT
          value: "6831"
        command: ["order"]
        args: [
          "-r", "redis://redis-order:6379/0",
          "-i", "http://item.micro-obs.svc.cluster.local:8080"
        ]
        ports:
        - name: http
          containerPort: 8090
---

# ------------------- Order ServiceMonitor ------------------- #
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: order
  namespace: micro-obs
  labels:
    app: order
    service: order
spec:
  selector:
    matchLabels:
      app: order
      service: order
  endpoints:
  - port: http
github.com/opentracing/opentracing-go v1.1.0 9 | github.com/pkg/errors v0.8.0 10 | github.com/prometheus/client_golang v0.9.1 11 | github.com/speps/go-hashids v2.0.0+incompatible 12 | github.com/spf13/cobra v0.0.3 13 | github.com/uber/jaeger-client-go v2.15.0+incompatible 14 | github.com/uber/jaeger-lib v1.5.0 15 | go.uber.org/zap v1.9.1 16 | ) 17 | 18 | require ( 19 | github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect 20 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 // indirect 21 | github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect 22 | github.com/davecgh/go-spew v1.1.1 // indirect 23 | github.com/gogo/protobuf v1.3.2 // indirect 24 | github.com/golang/protobuf v1.2.0 // indirect 25 | github.com/gomodule/redigo v2.0.0+incompatible // indirect 26 | github.com/gorilla/context v1.1.1 // indirect 27 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 28 | github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect 29 | github.com/onsi/ginkgo v1.7.0 // indirect 30 | github.com/onsi/gomega v1.4.3 // indirect 31 | github.com/pmezard/go-difflib v1.0.0 // indirect 32 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 // indirect 33 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 // indirect 34 | github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a // indirect 35 | github.com/spf13/pflag v1.0.3 // indirect 36 | github.com/stretchr/testify v1.2.2 // indirect 37 | github.com/uber-go/atomic v1.3.2 // indirect 38 | github.com/yuin/gopher-lua v0.0.0-20181109042959-a0dfe84f6227 // indirect 39 | go.uber.org/atomic v1.3.2 // indirect 40 | go.uber.org/multierr v1.1.0 // indirect 41 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 // indirect 42 | ) 43 | -------------------------------------------------------------------------------- /go.sum: -------------------------------------------------------------------------------- 1 | 
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= 2 | github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= 3 | github.com/alicebob/miniredis v2.4.5+incompatible h1:Ml+i18wdJPeO/AqNQYpaDyStCAh7TKwLUiaUaQx0yhs= 4 | github.com/alicebob/miniredis v2.4.5+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= 5 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= 6 | github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 7 | github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= 8 | github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= 9 | github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= 10 | github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= 11 | github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 12 | github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= 13 | github.com/go-redis/redis v6.14.2+incompatible h1:UE9pLhzmWf+xHNmZsoccjXosPicuiNaInPgym8nzfg0= 14 | github.com/go-redis/redis v6.14.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= 15 | github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE= 16 | github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= 17 | github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 18 | github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= 19 | github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= 20 | 
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 21 | github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= 22 | github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= 23 | github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= 24 | github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= 25 | github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= 26 | github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= 27 | github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= 28 | github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= 29 | github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= 30 | github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= 31 | github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 32 | github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= 33 | github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= 34 | github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 35 | github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 36 | github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= 37 | github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= 38 | github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= 39 | github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= 40 | github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= 41 | 
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= 42 | github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= 43 | github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= 44 | github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 45 | github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 46 | github.com/prometheus/client_golang v0.9.1 h1:K47Rk0v/fkEfwfQet2KWhscE0cJzjgCCDBG2KHZoVno= 47 | github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= 48 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= 49 | github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= 50 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= 51 | github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 52 | github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= 53 | github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= 54 | github.com/speps/go-hashids v2.0.0+incompatible h1:kSfxGfESueJKTx0mpER9Y/1XHl+FVQjtCqRyYcviFbw= 55 | github.com/speps/go-hashids v2.0.0+incompatible/go.mod h1:P7hqPzMdnZOfyIk+xrlG1QaSMw+gCBdHKsBDnhpaZvc= 56 | github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8= 57 | github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= 58 | github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= 59 | github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= 60 | github.com/stretchr/testify v1.2.2 
h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= 61 | github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= 62 | github.com/uber-go/atomic v1.3.2 h1:Azu9lPBWRNKzYXSIwRfgRuDuS0YKsK4NFhiQv98gkxo= 63 | github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g= 64 | github.com/uber/jaeger-client-go v2.15.0+incompatible h1:NP3qsSqNxh8VYr956ur1N/1C1PjvOJnJykCzcD5QHbk= 65 | github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= 66 | github.com/uber/jaeger-lib v1.5.0 h1:OHbgr8l656Ub3Fw5k9SWnBfIEwvoHQ+W2y+Aa9D1Uyo= 67 | github.com/uber/jaeger-lib v1.5.0/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= 68 | github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 69 | github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= 70 | github.com/yuin/gopher-lua v0.0.0-20181109042959-a0dfe84f6227 h1:GRy+0tGtORsCA+CJUMfhLuN71eQ0LtsQRDBQKbzESdc= 71 | github.com/yuin/gopher-lua v0.0.0-20181109042959-a0dfe84f6227/go.mod h1:fFiAh+CowNFr0NK5VASokuwKwkbacRmHsVA7Yb1Tqac= 72 | go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= 73 | go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= 74 | go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= 75 | go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= 76 | go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= 77 | go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= 78 | golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 79 | golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= 80 | golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 81 | 
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 82 | golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 83 | golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 84 | golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 85 | golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 86 | golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 87 | golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= 88 | golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= 89 | golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 90 | golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 91 | golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 92 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= 93 | golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 94 | golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 95 | golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= 96 | golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 97 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= 98 | golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 99 | golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 100 | golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= 101 | golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 102 | golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 103 | golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 104 | golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 105 | golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= 106 | golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 107 | golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 108 | golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 109 | golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= 110 | gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 111 | gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= 112 | gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 113 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= 114 | gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= 115 | gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= 116 | gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= 117 | -------------------------------------------------------------------------------- /item/handlers.go: -------------------------------------------------------------------------------- 1 | 
package item 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "math/rand" 9 | "net/http" 10 | "time" 11 | 12 | "github.com/gorilla/mux" 13 | "github.com/obitech/micro-obs/util" 14 | ot "github.com/opentracing/opentracing-go" 15 | "github.com/pkg/errors" 16 | ) 17 | 18 | // NotFound is a 404 Message according to the Response type 19 | func (s *Server) notFound() http.HandlerFunc { 20 | return func(w http.ResponseWriter, r *http.Request) { 21 | span, ctx := ot.StartSpanFromContext(r.Context(), "notFound") 22 | defer span.Finish() 23 | s.Respond(ctx, http.StatusNotFound, "resource not found", 0, nil, w) 24 | } 25 | } 26 | 27 | // pong sends a simple JSON response. 28 | func (s *Server) pong() http.HandlerFunc { 29 | return func(w http.ResponseWriter, r *http.Request) { 30 | span, ctx := ot.StartSpanFromContext(r.Context(), "pong") 31 | defer span.Finish() 32 | s.Respond(ctx, http.StatusOK, "pong", 0, nil, w) 33 | } 34 | } 35 | 36 | // getAllItems retrieves all items from Redis. 
37 | func (s *Server) getAllItems() http.HandlerFunc { 38 | return func(w http.ResponseWriter, r *http.Request) { 39 | span, ctx := ot.StartSpanFromContext(r.Context(), "getAllItems") 40 | defer span.Finish() 41 | log := util.RequestIDLogger(s.logger, r) 42 | 43 | defaultErrMsg := "unable to retrieve items" 44 | keys, err := s.RedisScanKeys(ctx) 45 | if err != nil { 46 | log.Errorw("unable to SCAN redis for keys", 47 | "error", err, 48 | ) 49 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 50 | return 51 | } 52 | 53 | var items = []*Item{} 54 | for _, k := range keys { 55 | i, err := s.RedisGetItem(ctx, k) 56 | if err != nil { 57 | log.Errorw("unable to retrieve item", 58 | "key", k, 59 | "error", err, 60 | ) 61 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 62 | return 63 | } 64 | items = append(items, i) 65 | } 66 | 67 | l := len(items) 68 | if l == 0 { 69 | s.Respond(ctx, http.StatusNotFound, "no items present", 0, nil, w) 70 | return 71 | } 72 | 73 | s.Respond(ctx, http.StatusOK, "items retrieved", l, items, w) 74 | } 75 | } 76 | 77 | // setItem sets an Item as a hash in Redis with URL parameters. 
78 | func (s *Server) setItem(update bool) http.HandlerFunc { 79 | return func(w http.ResponseWriter, r *http.Request) { 80 | span, ctx := ot.StartSpanFromContext(r.Context(), "setItem") 81 | defer span.Finish() 82 | log := util.RequestIDLogger(s.logger, r) 83 | 84 | var ( 85 | defaultErrMsg = "unable to create items" 86 | items []*Item 87 | ) 88 | 89 | // Accept payload 90 | body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576)) 91 | if err != nil { 92 | log.Errorw("unable to read request body", 93 | "error", err, 94 | ) 95 | r.Body.Close() 96 | s.Respond(ctx, http.StatusInternalServerError, "unable to read payload", 0, nil, w) 97 | return 98 | } 99 | defer r.Body.Close() 100 | 101 | // Parse payload 102 | if err := json.Unmarshal(body, &items); err != nil { 103 | log.Errorw("unable to parse payload", 104 | "error", err, 105 | ) 106 | s.Respond(ctx, http.StatusBadRequest, "unable to parse payload", 0, nil, w) 107 | return 108 | } 109 | 110 | if items == nil || len(items) == 0 { 111 | s.Respond(ctx, http.StatusUnprocessableEntity, "items can't be empty", 0, nil, w) 112 | } 113 | 114 | // Verify sent items 115 | for _, item := range items { 116 | // Catch empty response 117 | if item.Name == "" { 118 | s.Respond(ctx, http.StatusUnprocessableEntity, "item needs name", 0, nil, w) 119 | return 120 | } 121 | 122 | if err := item.SetID(ctx); err != nil { 123 | log.Errorw("unable to set item ID", 124 | "error", err, 125 | ) 126 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 127 | return 128 | } 129 | 130 | log.Debugw("item struct created", 131 | "id", item.ID, 132 | "name", item.Name, 133 | "desc", item.Desc, 134 | "qty", item.Qty, 135 | ) 136 | 137 | // Check for existence 138 | _, err := s.RedisGetItem(ctx, item.ID) 139 | if err != nil { 140 | log.Errorw("unable to retrieve Item from Redis", 141 | "key", item.ID, 142 | "error", err, 143 | ) 144 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 145 | return 146 | 
} 147 | } 148 | 149 | var itemsCreatedMsg string 150 | var itemsCreatedData []*Item 151 | var itemsFailedMsg string 152 | 153 | for _, item := range items { 154 | // Check for existence 155 | i, err := s.RedisGetItem(ctx, item.ID) 156 | if err != nil { 157 | log.Errorw("unable to retrieve Item from Redis", 158 | "key", item.ID, 159 | "error", err, 160 | ) 161 | s.Respond(ctx, http.StatusInternalServerError, "unable to create items", 0, nil, w) 162 | return 163 | } 164 | if i != nil { 165 | if !update { 166 | log.Debugw("item already exists", 167 | "key", item.ID, 168 | ) 169 | itemsFailedMsg += fmt.Sprintf("%s already exists, ", item.ID) 170 | continue 171 | } 172 | } 173 | 174 | // Create Item in Redis 175 | err = s.RedisSetItem(ctx, item) 176 | if err != nil { 177 | log.Errorw("unable to create item in redis", 178 | "key", item.ID, 179 | "error", err, 180 | ) 181 | itemsFailedMsg += fmt.Sprintf("%s, ", item.ID) 182 | continue 183 | } 184 | itemsCreatedMsg += fmt.Sprintf("%s, ", item.ID) 185 | itemsCreatedData = append(itemsCreatedData, item) 186 | } 187 | 188 | switch { 189 | // All failed 190 | case len(itemsFailedMsg) != 0 && len(itemsCreatedData) == 0: 191 | msg := fmt.Sprintf("unable to create items: %s", itemsFailedMsg) 192 | s.Respond(ctx, http.StatusUnprocessableEntity, msg, 0, nil, w) 193 | return 194 | 195 | // Some created, some failed 196 | case len(itemsFailedMsg) != 0 && len(itemsCreatedData) > 0: 197 | msg := fmt.Sprintf("items %s created but some failed: %s", itemsCreatedMsg, itemsFailedMsg) 198 | s.Respond(ctx, http.StatusOK, msg, len(itemsCreatedData), itemsCreatedData, w) 199 | return 200 | } 201 | 202 | // All created, none failed 203 | s.Respond(ctx, http.StatusCreated, fmt.Sprintf("items %s created", itemsCreatedMsg), len(itemsCreatedData), itemsCreatedData, w) 204 | } 205 | } 206 | 207 | // getItem retrieves a single Item by ID from Redis. 
208 | func (s *Server) getItem() http.HandlerFunc { 209 | return func(w http.ResponseWriter, r *http.Request) { 210 | span, ctx := ot.StartSpanFromContext(r.Context(), "getItem") 211 | defer span.Finish() 212 | log := util.RequestIDLogger(s.logger, r) 213 | 214 | pr := mux.Vars(r) 215 | key := pr["id"] 216 | 217 | item, err := s.RedisGetItem(ctx, key) 218 | if err != nil { 219 | log.Errorw("unable to get key from redis", 220 | "key", key, 221 | "error", err, 222 | ) 223 | s.Respond(ctx, http.StatusInternalServerError, "unable to retreive item", 0, nil, w) 224 | return 225 | } 226 | if item == nil { 227 | s.Respond(ctx, http.StatusNotFound, fmt.Sprintf("item with ID %s doesn't exist", key), 0, nil, w) 228 | return 229 | } 230 | s.Respond(ctx, http.StatusOK, "item retrieved", 1, []*Item{item}, w) 231 | } 232 | } 233 | 234 | // delItem deletes a single item by ID. 235 | func (s *Server) delItem() http.HandlerFunc { 236 | return func(w http.ResponseWriter, r *http.Request) { 237 | span, ctx := ot.StartSpanFromContext(r.Context(), "delItem") 238 | defer span.Finish() 239 | log := util.RequestIDLogger(s.logger, r) 240 | 241 | pr := mux.Vars(r) 242 | key := pr["id"] 243 | 244 | err := s.RedisDelItem(ctx, key) 245 | if err != nil { 246 | log.Errorw("unable to delete key from redis", 247 | "key", key, 248 | "error", err, 249 | ) 250 | s.Respond(ctx, http.StatusInternalServerError, "an error occured while tring to delete item", 0, nil, w) 251 | return 252 | } 253 | s.Respond(ctx, http.StatusOK, "item deleted", 0, nil, w) 254 | } 255 | } 256 | 257 | // delay returns after a random period to simulate reequest delay. 
258 | func (s *Server) delay() http.HandlerFunc { 259 | return func(w http.ResponseWriter, r *http.Request) { 260 | span, ctx := ot.StartSpanFromContext(r.Context(), "delay") 261 | defer span.Finish() 262 | log := util.RequestIDLogger(s.logger, r) 263 | 264 | // Simulate delay between 10ms - 500ms 265 | t := time.Duration((rand.Float64()*500)+10) * time.Millisecond 266 | 267 | span.SetTag("wait", t) 268 | log.Debugw("Waiting", 269 | "time", t, 270 | ) 271 | 272 | time.Sleep(t) 273 | 274 | s.Respond(ctx, http.StatusOK, fmt.Sprintf("waited %v", t), 0, nil, w) 275 | } 276 | } 277 | 278 | // simulateError returns a 500 279 | func (s *Server) simulateError() http.HandlerFunc { 280 | return func(w http.ResponseWriter, r *http.Request) { 281 | span, ctx := ot.StartSpanFromContext(r.Context(), "simulateError") 282 | defer span.Finish() 283 | log := util.RequestIDLogger(s.logger, r) 284 | 285 | log.Errorw("Error occured", 286 | "error", errors.New("nasty error message"), 287 | ) 288 | 289 | s.Respond(ctx, http.StatusInternalServerError, "simulated error", 0, nil, w) 290 | } 291 | } 292 | -------------------------------------------------------------------------------- /item/item.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "strconv" 8 | "strings" 9 | 10 | "github.com/obitech/micro-obs/util" 11 | ot "github.com/opentracing/opentracing-go" 12 | "github.com/pkg/errors" 13 | ) 14 | 15 | // Item defines a shop item with attributes. ID should be a HashID of the name. 16 | // // See https://hashids.org for more info. 
17 | type Item struct { 18 | Name string `json:"name"` 19 | ID string `json:"id"` 20 | Desc string `json:"desc"` 21 | Qty int `json:"qty"` 22 | } 23 | 24 | func (i *Item) String() string { 25 | return fmt.Sprintf("Name:%s ID:%s Desc:%s Qty:%d", i.Name, i.ID, i.Desc, i.Qty) 26 | } 27 | 28 | // NewItem creates a new item where the ID becomes the HashID of the lowercase name. 29 | func NewItem(name, desc string, qty int) (*Item, error) { 30 | id, err := util.StringToHashID(strings.ToLower(name)) 31 | if err != nil { 32 | return nil, err 33 | } 34 | 35 | return &Item{ 36 | ID: id, 37 | Name: name, 38 | Desc: desc, 39 | Qty: qty, 40 | }, nil 41 | } 42 | 43 | // DataToItems takes a JSON-encoded byte array and marshals it into a list of item.Items 44 | func DataToItems(data []byte) ([]*Item, error) { 45 | items := []*Item{} 46 | 47 | err := json.Unmarshal(data, &items) 48 | if err != nil { 49 | return nil, errors.Wrapf(err, "unable to marshal %s", data) 50 | } 51 | 52 | if len(items) == 0 { 53 | return nil, errors.New("data can't be empty") 54 | } 55 | 56 | for _, i := range items { 57 | if err = i.SetID(context.Background()); err != nil { 58 | return nil, errors.Wrapf(err, "unable to set HashID of %s", i.Name) 59 | } 60 | } 61 | 62 | return items, nil 63 | } 64 | 65 | // SetID creates a HashID from an already set Name field. 66 | func (i *Item) SetID(ctx context.Context) error { 67 | span, _ := ot.StartSpanFromContext(ctx, "SetID") 68 | defer span.Finish() 69 | 70 | id, err := util.StringToHashID(strings.ToLower(i.Name)) 71 | if err != nil { 72 | return err 73 | } 74 | i.ID = id 75 | return nil 76 | } 77 | 78 | // MarshalRedis marshalls and Item to hand over to go-redis. 79 | // Item.ID will be the key (as string), where the other fields will be a map[string]string. 
80 | func (i *Item) MarshalRedis() (string, map[string]string) { 81 | return i.ID, map[string]string{ 82 | "name": i.Name, 83 | "desc": i.Desc, 84 | "qty": strconv.Itoa(i.Qty), 85 | } 86 | } 87 | 88 | // UnmarshalRedis parses a passed string and map into an Item. 89 | func UnmarshalRedis(key string, data map[string]string, i *Item) error { 90 | // Check for key existance 91 | ks := []string{"name", "desc", "qty"} 92 | for _, k := range ks { 93 | if _, prs := data[k]; !prs { 94 | return errors.Errorf("key %s not present", k) 95 | } 96 | } 97 | 98 | t, err := strconv.Atoi(data["qty"]) 99 | if err != nil { 100 | return err 101 | } 102 | 103 | // Populate 104 | i.Name = data["name"] 105 | i.ID = key 106 | i.Desc = data["desc"] 107 | i.Qty = t 108 | 109 | return nil 110 | } 111 | -------------------------------------------------------------------------------- /item/item_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "strconv" 7 | "testing" 8 | 9 | "github.com/obitech/micro-obs/util" 10 | ) 11 | 12 | type redisMarshal struct { 13 | key string 14 | fv map[string]string 15 | } 16 | 17 | var ( 18 | sampleItems = []struct { 19 | name string 20 | desc string 21 | qty int 22 | }{ 23 | {"test", "test", 0}, 24 | {"orange", "a juicy fruit", 100}, 25 | {"😍", "lovely smily", 999}, 26 | {" ", "﷽", 249093419}, 27 | {" 123asd🙆 🙋 asdlloqwe", "test", 0}, 28 | } 29 | 30 | itemsJSON = []struct { 31 | js string 32 | shouldSucceed bool 33 | }{ 34 | {`[{"name": "test", "desc": "test", "qty": 100}]`, true}, 35 | {`[{"name": "abc2", "desc": "test", "qty": 100}, {"name": "abc", "desc": "test", "qty": 100}]`, true}, 36 | {`[{"name": "😍", "desc": "test", "qty": 100}, {"name": "abc", "desc": "test", "qty": 100},{"name": "123", "desc": " ", "qty": 42}]`, true}, 37 | {`[{}]`, false}, 38 | {`{`, false}, 39 | {`{}`, false}, 40 | {`[{"unknown": "key"}]`, false}, 41 | } 42 | ) 43 | 44 | func 
TestItem(t *testing.T) { 45 | var items []*Item 46 | var marshalledItems []redisMarshal 47 | 48 | t.Run("Create new item", func(t *testing.T) { 49 | for _, tt := range sampleItems { 50 | i, err := NewItem(tt.name, tt.desc, tt.qty) 51 | if err != nil { 52 | t.Errorf("failed to create new item: %#v", err) 53 | } 54 | items = append(items, i) 55 | } 56 | }) 57 | 58 | t.Run("Confirm hash conversions", func(t *testing.T) { 59 | for _, i := range items { 60 | s, err := util.HashIDToString(i.ID) 61 | if err != nil { 62 | t.Errorf("unable to decode %#v to string: %#v", i.ID, err) 63 | } 64 | if s != i.Name { 65 | t.Errorf("HashIDToString(%#v), expected: %#v, got: %#v", i.ID, i.Name, s) 66 | } 67 | } 68 | }) 69 | 70 | t.Run("SetID", func(t *testing.T) { 71 | i := &Item{ 72 | Name: "testing", 73 | Desc: "test", 74 | Qty: 0, 75 | } 76 | if err := i.SetID(context.Background()); err != nil { 77 | t.Errorf("unable to set HashID: %s", err) 78 | } 79 | s, err := util.HashIDToString(i.ID) 80 | if err != nil { 81 | t.Errorf("unable to decode %#v to string: %#v", i.ID, err) 82 | } 83 | if s != i.Name { 84 | t.Errorf("HashIDToString(%#v), expected: %#v, got: %#v", i.ID, i.Name, s) 85 | } 86 | }) 87 | 88 | t.Run("Redis marshalling", func(t *testing.T) { 89 | prsKeys := []string{"name", "desc", "qty"} 90 | for _, i := range items { 91 | key, fv := i.MarshalRedis() 92 | if key != i.ID { 93 | t.Errorf("marshaling unsuccesful, expected key = %#v, got key = %#v", i.ID, key) 94 | } 95 | for _, k := range prsKeys { 96 | if _, prs := fv[k]; !prs { 97 | t.Errorf("key %s not present in marshalled map.", k) 98 | } 99 | } 100 | if fv["name"] != i.Name { 101 | t.Errorf("marshaling unsuccesful, expected name = %#v, got name = %#v", i.Name, fv["name"]) 102 | } 103 | if fv["desc"] != i.Desc { 104 | t.Errorf("marshaling unsuccesful, expected desc = %#v, got desc = %#v", i.Name, fv["desc"]) 105 | } 106 | if fv["qty"] != strconv.Itoa(i.Qty) { 107 | t.Errorf("marshaling unsuccesful, expected qty = %#v, 
got qty = %#v", i.Qty, fv["qty"]) 108 | } 109 | rm := redisMarshal{ 110 | key: key, 111 | fv: fv, 112 | } 113 | marshalledItems = append(marshalledItems, rm) 114 | } 115 | }) 116 | 117 | t.Run("Redis unmarshalling", func(t *testing.T) { 118 | var c int 119 | var i = &Item{} 120 | 121 | for _, rm := range marshalledItems { 122 | err := UnmarshalRedis(rm.key, rm.fv, i) 123 | if err != nil { 124 | t.Errorf("unable to unmarshal %#v: %s", rm, err) 125 | } 126 | if !reflect.DeepEqual(i, items[c]) { 127 | t.Errorf("%#v != %#v", i, items[c]) 128 | } 129 | c++ 130 | } 131 | }) 132 | } 133 | 134 | func TestDataToItems(t *testing.T) { 135 | t.Run("Sample JSON", func(t *testing.T) { 136 | for _, tt := range itemsJSON { 137 | _, err := DataToItems([]byte(tt.js)) 138 | 139 | if tt.shouldSucceed { 140 | if err != nil { 141 | t.Errorf("unable to create items: %s", err) 142 | } 143 | } else { 144 | if err == nil { 145 | t.Errorf("should throw error with %s", tt.js) 146 | } 147 | } 148 | 149 | } 150 | }) 151 | } 152 | -------------------------------------------------------------------------------- /item/redis.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | 6 | ot "github.com/opentracing/opentracing-go" 7 | "github.com/pkg/errors" 8 | ) 9 | 10 | // RedisScanKeys retrieves all keys from a redis instance. 11 | // This uses the SCAN command so it's save to use on large database & in production. 12 | func (s *Server) RedisScanKeys(ctx context.Context) ([]string, error) { 13 | span, _ := ot.StartSpanFromContext(ctx, "RedisScanKeys") 14 | defer span.Finish() 15 | 16 | var cursor uint64 17 | var keys []string 18 | var err error 19 | 20 | for { 21 | var k []string 22 | k, cursor, err = s.redis.Scan(cursor, "", 10).Result() 23 | if err != nil { 24 | return nil, err 25 | } 26 | keys = append(keys, k...) 
27 | 28 | if cursor == 0 { 29 | break 30 | } 31 | } 32 | return keys, err 33 | } 34 | 35 | // RedisGetItem retrieves an Item from Redis. 36 | func (s *Server) RedisGetItem(ctx context.Context, k string) (*Item, error) { 37 | span, _ := ot.StartSpanFromContext(ctx, "RedisGetItem") 38 | defer span.Finish() 39 | 40 | r, err := s.redis.HGetAll(k).Result() 41 | if err != nil { 42 | return nil, err 43 | } 44 | 45 | if len(r) == 0 { 46 | return nil, nil 47 | } 48 | 49 | var i = &Item{} 50 | err = UnmarshalRedis(k, r, i) 51 | 52 | return i, err 53 | } 54 | 55 | // RedisSetItem sets an Item as a hash in Redis. 56 | func (s *Server) RedisSetItem(ctx context.Context, i *Item) error { 57 | span, _ := ot.StartSpanFromContext(ctx, "RedisSetItem") 58 | defer span.Finish() 59 | 60 | k, fv := i.MarshalRedis() 61 | for f, v := range fv { 62 | _, err := s.redis.HSet(k, f, v).Result() 63 | if err != nil { 64 | return errors.Errorf("unable to HSET %s %s %s", k, f, v) 65 | } 66 | } 67 | 68 | return nil 69 | } 70 | 71 | // RedisDelItems deletes one or more Items from Redis. 72 | func (s *Server) RedisDelItems(ctx context.Context, items []*Item) error { 73 | span, _ := ot.StartSpanFromContext(ctx, "RedisDelItems") 74 | defer span.Finish() 75 | 76 | var keys = make([]string, len(items)) 77 | for i, v := range items { 78 | keys[i] = v.ID 79 | } 80 | 81 | err := s.redis.Del(keys...).Err() 82 | return err 83 | } 84 | 85 | // RedisDelItem deletes a single Item by ID. 
86 | func (s *Server) RedisDelItem(ctx context.Context, id string) error { 87 | span, _ := ot.StartSpanFromContext(ctx, "RedisDelItems") 88 | defer span.Finish() 89 | 90 | return s.redis.Del(id).Err() 91 | } 92 | -------------------------------------------------------------------------------- /item/redis_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/alicebob/miniredis" 10 | "github.com/go-redis/redis" 11 | ) 12 | 13 | func helperPrepareMiniredis(t *testing.T) (*redis.Client, *miniredis.Miniredis) { 14 | mr, _ := miniredis.Run() 15 | c := redis.NewClient(&redis.Options{ 16 | Addr: mr.Addr(), 17 | }) 18 | 19 | return c, mr 20 | } 21 | 22 | func TestItemRedis(t *testing.T) { 23 | // Setup miniredis 24 | _, mr := helperPrepareMiniredis(t) 25 | defer mr.Close() 26 | 27 | addr := strings.Join([]string{"redis://", mr.Addr()}, "") 28 | s, err := NewServer( 29 | SetRedisAddress(addr), 30 | ) 31 | if err != nil { 32 | t.Errorf("unable to create server: %s", err) 33 | } 34 | 35 | var sampleKeys []string 36 | t.Run("Pinging miniredis with Server", func(t *testing.T) { 37 | if _, err := s.redis.Ping().Result(); err != nil { 38 | t.Errorf("unable to ping miniredis: %s", err) 39 | } 40 | }) 41 | 42 | t.Run("SetItems with sampleItems", func(t *testing.T) { 43 | for _, tt := range sampleItems { 44 | i, err := NewItem(tt.name, tt.desc, tt.qty) 45 | if err != nil { 46 | t.Errorf("unable to create new item: %s", err) 47 | } 48 | 49 | if err := s.RedisSetItem(context.Background(), i); err != nil { 50 | t.Error(err) 51 | } 52 | sampleKeys = append(sampleKeys, i.ID) 53 | } 54 | }) 55 | 56 | t.Run("ScanKeys function", func(t *testing.T) { 57 | keys, err := s.RedisScanKeys(context.Background()) 58 | if err != nil { 59 | t.Errorf("unable to SCAN redis for keys: %s", err) 60 | } 61 | 62 | if !reflect.DeepEqual(keys, sampleKeys) { 63 | 
t.Errorf("%#v != %#v", keys, sampleKeys) 64 | } 65 | }) 66 | 67 | t.Run("GetItem function", func(t *testing.T) { 68 | var c int 69 | for _, k := range sampleKeys { 70 | i, err := s.RedisGetItem(context.Background(), k) 71 | if err != nil { 72 | t.Errorf("unable to retrieve item with key %s: %s", k, err) 73 | } 74 | 75 | v, _ := NewItem(sampleItems[c].name, sampleItems[c].desc, sampleItems[c].qty) 76 | if !reflect.DeepEqual(i, v) { 77 | t.Errorf("%#v != %#v", i, v) 78 | } 79 | c++ 80 | } 81 | }) 82 | 83 | t.Run("DelItems function", func(t *testing.T) { 84 | var items = make([]*Item, len(sampleItems)) 85 | for i, v := range sampleItems { 86 | items[i], _ = NewItem(v.name, v.desc, v.qty) 87 | } 88 | 89 | err := s.RedisDelItems(context.Background(), items) 90 | if err != nil { 91 | t.Errorf("unable to delete itemes: %s", err) 92 | } 93 | }) 94 | 95 | t.Run("Recreting items", func(t *testing.T) { 96 | for _, tt := range sampleItems { 97 | i, err := NewItem(tt.name, tt.desc, tt.qty) 98 | if err != nil { 99 | t.Errorf("unable to create new item: %s", err) 100 | } 101 | 102 | if err := s.RedisSetItem(context.Background(), i); err != nil { 103 | t.Error(err) 104 | } 105 | sampleKeys = append(sampleKeys, i.ID) 106 | } 107 | }) 108 | 109 | t.Run("DelItem function", func(t *testing.T) { 110 | for _, k := range sampleKeys { 111 | err := s.RedisDelItem(context.Background(), k) 112 | if err != nil { 113 | t.Errorf("unable to delete key %#v: %s", k, err) 114 | } 115 | } 116 | }) 117 | } 118 | -------------------------------------------------------------------------------- /item/response.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | ) 7 | 8 | // Response defines an API response. 
9 | type Response struct { 10 | Status int `json:"status"` 11 | Message string `json:"message"` 12 | Count int `json:"count"` 13 | Data []*Item `json:"data"` 14 | } 15 | 16 | // NewResponse returns a Response with a passed message string and slice of Data. 17 | // TODO: make d variadic 18 | func NewResponse(s int, m string, c int, d []*Item) (Response, error) { 19 | return Response{ 20 | Status: s, 21 | Message: m, 22 | Count: c, 23 | Data: d, 24 | }, nil 25 | } 26 | 27 | // SendJSON encodes a Response as JSON and sends it on a passed http.ResponseWriter. 28 | func (r Response) SendJSON(w http.ResponseWriter) error { 29 | w.Header().Set("Content-Type", "application/JSON; charset=UTF-8") 30 | w.WriteHeader(r.Status) 31 | err := json.NewEncoder(w).Encode(r) 32 | return err 33 | } 34 | -------------------------------------------------------------------------------- /item/response_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | ) 10 | 11 | func helperSendJSONResponse(rd Response, t *testing.T) { 12 | // Start HTTP Test Server 13 | s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 14 | err := rd.SendJSON(w) 15 | if err != nil { 16 | t.Errorf("error sending JSON: %#v", err) 17 | } 18 | })) 19 | defer s.Close() 20 | 21 | // Send request 22 | res, err := http.Get(s.URL) 23 | if err != nil { 24 | t.Errorf("unable to GET response: %#v", err) 25 | } 26 | 27 | // Retrieve data from response 28 | d, err := ioutil.ReadAll(res.Body) 29 | res.Body.Close() 30 | if err != nil { 31 | t.Errorf("unable to read response body from request %#v", rd) 32 | } 33 | 34 | // Verify response 35 | var v Response 36 | err = json.Unmarshal(d, &v) 37 | if err != nil { 38 | t.Errorf("unable to unmarshal response %#v", d) 39 | } 40 | } 41 | 42 | func TestResponse(t *testing.T) { 43 | var items 
[]*Item 44 | for _, v := range sampleItems { 45 | item, _ := NewItem(v.name, v.desc, v.qty) 46 | items = append(items, item) 47 | } 48 | 49 | var validResponses []Response 50 | t.Run("Test NewResponse", func(t *testing.T) { 51 | r, err := NewResponse(200, "test", 0, nil) 52 | if err != nil { 53 | t.Errorf("unable to create new repsonse: %s", err) 54 | } 55 | validResponses = append(validResponses, r) 56 | 57 | r, err = NewResponse(200, "test", len(items), items) 58 | if err != nil { 59 | t.Errorf("unable to create new repsonse: %s", err) 60 | } 61 | validResponses = append(validResponses, r) 62 | 63 | r, err = NewResponse(200, "test", 1, []*Item{items[0]}) 64 | if err != nil { 65 | t.Errorf("unable to create new response: %s", err) 66 | } 67 | validResponses = append(validResponses, r) 68 | }) 69 | 70 | t.Run("Send Response", func(t *testing.T) { 71 | for _, rd := range validResponses { 72 | helperSendJSONResponse(rd, t) 73 | } 74 | }) 75 | } 76 | -------------------------------------------------------------------------------- /item/routes.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "github.com/obitech/micro-obs/util" 5 | "github.com/prometheus/client_golang/prometheus" 6 | "github.com/prometheus/client_golang/prometheus/promhttp" 7 | ) 8 | 9 | var rm = util.NewRequestMetricHistogram( 10 | append([]float64{.001, .003}, prometheus.DefBuckets...), 11 | []float64{1, 5, 10, 50, 100}, 12 | ) 13 | 14 | // Routes defines all HTTP routes, hanging off the main Server struct. 15 | // Like that, all routes have access to the Server's dependencies. 
16 | func (s *Server) createRoutes() { 17 | var routes = util.Routes{ 18 | util.Route{ 19 | Name: "pong", 20 | Method: "GET", 21 | Pattern: "/", 22 | HandlerFunc: s.pong(), 23 | }, 24 | util.Route{ 25 | Name: "healthz", 26 | Method: "GET", 27 | Pattern: "/healthz", 28 | HandlerFunc: util.Healthz(), 29 | }, 30 | util.Route{ 31 | Name: "getAllItems", 32 | Method: "GET", 33 | Pattern: "/items", 34 | HandlerFunc: s.getAllItems(), 35 | }, 36 | util.Route{ 37 | Name: "setItemsPOST", 38 | Method: "POST", 39 | Pattern: "/items", 40 | HandlerFunc: s.setItem(false), 41 | }, 42 | util.Route{ 43 | Name: "setItemsPUT", 44 | Method: "PUT", 45 | Pattern: "/items", 46 | HandlerFunc: s.setItem(true), 47 | }, 48 | util.Route{ 49 | Name: "getItem", 50 | Method: "GET", 51 | Pattern: "/items/{id:[a-zA-Z0-9]+}", 52 | HandlerFunc: s.getItem(), 53 | }, 54 | util.Route{ 55 | Name: "delItem", 56 | Method: "DELETE", 57 | Pattern: "/items/{id:[a-zA-Z0-9]+}", 58 | HandlerFunc: s.delItem(), 59 | }, 60 | util.Route{ 61 | Name: "delay", 62 | Method: "GET", 63 | Pattern: "/delay", 64 | HandlerFunc: s.delay(), 65 | }, 66 | util.Route{ 67 | Name: "simulateError", 68 | Method: "GET", 69 | Pattern: "/error", 70 | HandlerFunc: s.simulateError(), 71 | }, 72 | } 73 | 74 | for _, route := range routes { 75 | h := route.HandlerFunc 76 | 77 | // Tracing each request 78 | h = util.TracerMiddleware(h, route) 79 | 80 | // Logging each request 81 | h = util.LoggerMiddleware(h, s.logger) 82 | 83 | // Assign requestID to each request 84 | h = util.AssignRequestID(h, s.logger) 85 | 86 | // Monitoring each request 87 | // TODO: pass proper handler 88 | promHandler := util.PrometheusMiddleware(h, route.Pattern, rm) 89 | 90 | s.router. 91 | Methods(route.Method). 92 | Path(route.Pattern). 93 | Name(route.Name). 
94 | Handler(promHandler) 95 | } 96 | 97 | // Prometheus endpoint 98 | route := util.Route{ 99 | Name: "metrics", 100 | Method: "GET", 101 | Pattern: "/metrics", 102 | HandlerFunc: nil, 103 | } 104 | 105 | promHandler := promhttp.HandlerFor(s.promReg, promhttp.HandlerOpts{}) 106 | promHandler = promhttp.InstrumentMetricHandler(s.promReg, promHandler) 107 | s.router. 108 | Methods(route.Method). 109 | Path(route.Pattern). 110 | Name(route.Name). 111 | Handler(promHandler) 112 | 113 | // 404 handler 114 | notFound := util.PrometheusMiddleware(s.notFound(), "metrics", rm) 115 | s.router.NotFoundHandler = notFound 116 | } 117 | -------------------------------------------------------------------------------- /item/server.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net" 8 | "net/http" 9 | "os" 10 | "os/signal" 11 | "sync/atomic" 12 | "syscall" 13 | "time" 14 | 15 | "github.com/go-redis/redis" 16 | "github.com/gorilla/mux" 17 | "github.com/obitech/micro-obs/util" 18 | ot "github.com/opentracing/opentracing-go" 19 | "github.com/pkg/errors" 20 | "github.com/prometheus/client_golang/prometheus" 21 | ) 22 | 23 | const ( 24 | serviceName = "item" 25 | ) 26 | 27 | // Server is a wrapper for a HTTP server, with dependencies attached. 28 | type Server struct { 29 | address string 30 | endpoint string 31 | redis *redis.Client 32 | redisOps uint64 33 | server *http.Server 34 | router *mux.Router 35 | logger *util.Logger 36 | promReg *prometheus.Registry 37 | } 38 | 39 | // ServerOptions sets options when creating a new server. 40 | type ServerOptions func(*Server) error 41 | 42 | // NewServer creates a new Server according to options. 
43 | func NewServer(options ...ServerOptions) (*Server, error) { 44 | // Create default logger 45 | logger, err := util.NewLogger("info", serviceName) 46 | if err != nil { 47 | return nil, errors.Wrap(err, "unable to create Logger") 48 | } 49 | 50 | // Sane defaults 51 | rc, _ := NewRedisClient("redis://127.0.0.1:6379/0") 52 | s := &Server{ 53 | address: ":8080", 54 | endpoint: "127.0.0.1:8081", 55 | redis: rc, 56 | logger: logger, 57 | router: util.NewRouter(), 58 | promReg: prometheus.NewRegistry(), 59 | } 60 | 61 | // Applying custom settings 62 | for _, fn := range options { 63 | if err := fn(s); err != nil { 64 | return nil, errors.Wrap(err, "Failed to set server options") 65 | } 66 | } 67 | 68 | // Instrumenting redis 69 | s.redis.WrapProcess(func(old func(cmd redis.Cmder) error) func(cmd redis.Cmder) error { 70 | return func(cmd redis.Cmder) error { 71 | atomic.AddUint64(&s.redisOps, 1) 72 | ops := atomic.LoadUint64(&s.redisOps) 73 | s.logger.Debugw("redis sent", 74 | "count", ops, 75 | "cmd", cmd, 76 | ) 77 | err := old(cmd) 78 | s.logger.Debugw("redis received", 79 | "count", ops, 80 | "cmd", cmd, 81 | ) 82 | return err 83 | } 84 | }) 85 | 86 | s.logger.Debugw("Creating new server", 87 | "address", s.address, 88 | "endpoint", s.endpoint, 89 | ) 90 | 91 | // Setting routes 92 | s.createRoutes() 93 | 94 | return s, nil 95 | } 96 | 97 | // InitPromReg initializes a custom Prometheus registry with Collectors. 98 | func (s *Server) InitPromReg() { 99 | s.promReg.MustRegister( 100 | prometheus.NewGoCollector(), 101 | prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), 102 | rm.InFlightGauge, rm.Counter, rm.Duration, rm.ResponseSize, 103 | ) 104 | } 105 | 106 | // Run starts a Server and shuts it down properly on a SIGINT and SIGTERM. 
107 | func (s *Server) Run() error { 108 | defer s.logger.Sync() 109 | defer s.redis.Close() 110 | 111 | // Create TCP listener 112 | l, err := net.Listen("tcp", s.address) 113 | if err != nil { 114 | return errors.Wrapf(err, "Failed creating listener on %s", s.address) 115 | } 116 | 117 | // Create HTTP Server 118 | s.server = &http.Server{ 119 | Handler: s.router, 120 | ReadTimeout: 10 * time.Second, 121 | WriteTimeout: 10 * time.Second, 122 | MaxHeaderBytes: 1 << 20, 123 | } 124 | 125 | // Creating tracer 126 | var tracer ot.Tracer 127 | var closer io.Closer 128 | tracer, closer, err = util.InitTracer("item", s.logger) 129 | if err != nil { 130 | s.logger.Warnw("unable to initialize tracer", 131 | "error", err, 132 | ) 133 | } else { 134 | defer closer.Close() 135 | ot.SetGlobalTracer(tracer) 136 | } 137 | 138 | // Listening 139 | go func() { 140 | s.logger.Infow("Server listening", 141 | "address", s.address, 142 | "endpoint", s.endpoint, 143 | ) 144 | s.logger.Fatal(s.server.Serve(l)) 145 | }() 146 | 147 | // Buffered channel to receive a single os.Signal 148 | stop := make(chan os.Signal, 1) 149 | signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM) 150 | 151 | // Blocking channel until interrupt occurs 152 | <-stop 153 | s.Stop() 154 | 155 | return nil 156 | } 157 | 158 | // Stop will stop the server 159 | func (s *Server) Stop() { 160 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 161 | defer cancel() 162 | 163 | s.logger.Info("Shutting down") 164 | if err := s.server.Shutdown(ctx); err != nil { 165 | s.logger.Errorw("HTTP server shutdown", 166 | "error", err, 167 | ) 168 | } 169 | } 170 | 171 | // ServeHTTP dispatches the request to the matching mux handler. 172 | // This function is mainly intended for testing purposes. 
173 | func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { 174 | s.router.ServeHTTP(w, r) 175 | } 176 | 177 | func (s *Server) internalError(ctx context.Context, w http.ResponseWriter) { 178 | status := http.StatusInternalServerError 179 | parent := ot.SpanFromContext(ctx) 180 | if parent != nil { 181 | parent.SetTag("status", status) 182 | } 183 | 184 | w.Header().Del("Content-Type") 185 | w.WriteHeader(status) 186 | if _, err := io.WriteString(w, "Internal Server Error\n"); err != nil { 187 | log := util.RequestIDLoggerFromContext(ctx, s.logger) 188 | log.Panicw("unable to send response", 189 | "error", err, 190 | ) 191 | } 192 | } 193 | 194 | // Respond sends a JSON-encoded response. 195 | func (s *Server) Respond(ctx context.Context, status int, m string, c int, data []*Item, w http.ResponseWriter) { 196 | span, ctx := ot.StartSpanFromContext(ctx, "Respond") 197 | defer span.Finish() 198 | span.SetTag("status", status) 199 | log := util.RequestIDLoggerFromContext(ctx, s.logger) 200 | 201 | res, err := NewResponse(status, m, c, data) 202 | if err != nil { 203 | s.internalError(ctx, w) 204 | log.Panicw("unable to create JSON response", 205 | "error", err, 206 | ) 207 | } 208 | 209 | err = res.SendJSON(w) 210 | if err != nil { 211 | s.internalError(ctx, w) 212 | log.Panicw("sending JSON response failed", 213 | "error", err, 214 | "response", res, 215 | ) 216 | } 217 | 218 | // Tracing information 219 | for k, v := range w.Header() { 220 | span.SetTag(fmt.Sprintf("header.%s", k), v) 221 | } 222 | span.LogKV( 223 | "message", m, 224 | "count", c, 225 | "data", data, 226 | ) 227 | } 228 | 229 | // NewRedisClient creates a new go-redis/redis client according to passed options. 230 | // Address needs to be a valid redis URL, e.g. 
redis://127.0.0.1:6379/0 or redis://:qwerty@localhost:6379/1 231 | func NewRedisClient(addr string) (*redis.Client, error) { 232 | opt, err := redis.ParseURL(addr) 233 | if err != nil { 234 | return nil, err 235 | } 236 | 237 | c := redis.NewClient(&redis.Options{ 238 | Addr: opt.Addr, 239 | Password: opt.Password, 240 | DB: opt.DB, 241 | }) 242 | 243 | return c, nil 244 | } 245 | 246 | // SetServerAddress sets the server address. 247 | func SetServerAddress(address string) ServerOptions { 248 | return func(s *Server) error { 249 | if err := util.CheckTCPAddress(address); err != nil { 250 | return err 251 | } 252 | 253 | s.address = address 254 | return nil 255 | } 256 | } 257 | 258 | // SetServerEndpoint sets the server endpoint address for other services to call it. 259 | func SetServerEndpoint(address string) ServerOptions { 260 | return func(s *Server) error { 261 | s.endpoint = address 262 | return nil 263 | } 264 | } 265 | 266 | // SetLogLevel sets the log level to either debug, warn, error or info. Info is default. 267 | func SetLogLevel(level string) ServerOptions { 268 | return func(s *Server) error { 269 | l, err := util.NewLogger(level, serviceName) 270 | if err != nil { 271 | return err 272 | } 273 | s.logger = l 274 | return nil 275 | } 276 | } 277 | 278 | // SetRedisAddress sets a custom address for the redis connection. 
279 | func SetRedisAddress(address string) ServerOptions { 280 | return func(s *Server) error { 281 | // Close old client 282 | err := s.redis.Close() 283 | if err != nil { 284 | s.logger.Warnw("Error while closing old redis client", 285 | "error", err, 286 | ) 287 | } 288 | 289 | rc, err := NewRedisClient(address) 290 | if err != nil { 291 | return err 292 | } 293 | s.redis = rc 294 | return nil 295 | } 296 | } 297 | -------------------------------------------------------------------------------- /item/server_test.go: -------------------------------------------------------------------------------- 1 | package item 2 | 3 | import ( 4 | "bytes" 5 | "context" 6 | "encoding/json" 7 | "fmt" 8 | "io/ioutil" 9 | "net/http" 10 | "net/http/httptest" 11 | "reflect" 12 | "strings" 13 | "testing" 14 | 15 | "github.com/alicebob/miniredis" 16 | ) 17 | 18 | var ( 19 | validListeningAddr = []string{ 20 | "127.0.0.1:8080", 21 | ":80", 22 | ":8080", 23 | "127.0.0.1:80", 24 | "192.0.2.1:http", 25 | } 26 | 27 | invalidListeningAddr = []string{ 28 | ":9999999", 29 | ":-1", 30 | "asokdklasd", 31 | "0.0.0.0.0.0:80", 32 | "256.0.0.1:80", 33 | } 34 | 35 | logLevels = []struct { 36 | level string 37 | want error 38 | }{ 39 | {"warn", nil}, 40 | {"info", nil}, 41 | {"debug", nil}, 42 | {"error", nil}, 43 | {"", nil}, 44 | {"asdo1293", nil}, 45 | {"😍", nil}, 46 | {"👾 🙇 💁 🙅 🙆 🙋 🙎 🙍", nil}, 47 | {"﷽", nil}, 48 | } 49 | 50 | validRedisAddr = []string{ 51 | "redis://127.0.0.1:6379/0", 52 | "redis://:qwerty@localhost:6379/1", 53 | "redis://test:55550", 54 | } 55 | 56 | invalidRedisAddr = []string{ 57 | "http://localhost:6379", 58 | "http://google.com", 59 | "https://google.com", 60 | "ftp://ftp.fu-berlin.de:21", 61 | } 62 | 63 | validEndpointAddr = append(validListeningAddr, []string{ 64 | "golang.org:80", 65 | "golang.org:http", 66 | }...) 
67 | 68 | basicEndpoints = []struct { 69 | method string 70 | path string 71 | wantStatus int 72 | }{ 73 | {"GET", "/", http.StatusOK}, 74 | {"GET", "/healthz", http.StatusOK}, 75 | {"GET", "/asdasd", http.StatusNotFound}, 76 | {"GET", "/metrics", http.StatusOK}, 77 | {"GET", "/items", http.StatusNotFound}, 78 | {"GET", "/delay", http.StatusOK}, 79 | {"POST", "/items", http.StatusBadRequest}, 80 | {"PUT", "/items", http.StatusBadRequest}, 81 | {"DELETE", "/", http.StatusMethodNotAllowed}, 82 | {"DELETE", "/items", http.StatusMethodNotAllowed}, 83 | {"GET", "/error", http.StatusInternalServerError}, 84 | } 85 | 86 | validJSON = []string{ 87 | 88 | `[{"name": "😍aa", "qty": 42, "desc": "yes"}]`, 89 | `[{"name": "😍aabasd", "qty": 42, "desc": "yes"}, {"name": "bread", "desc": "love it", "qty": 15}]`, 90 | } 91 | 92 | invalidJSON = []struct { 93 | js string 94 | want int 95 | }{ 96 | {`test`, http.StatusBadRequest}, 97 | {`{}`, http.StatusBadRequest}, 98 | {`{"cat": "dog"}`, http.StatusBadRequest}, 99 | {`[`, http.StatusBadRequest}, 100 | {`{"name": "orange", "desc": "test", "qty": 1}`, http.StatusBadRequest}, 101 | {`{"name": "😍", "qty": 42, "desc": "yes"}`, http.StatusBadRequest}, 102 | {`[{}]`, http.StatusUnprocessableEntity}, 103 | {`[]`, http.StatusUnprocessableEntity}, 104 | } 105 | ) 106 | 107 | func helperSendJSON(js string, s *Server, method, path string, want int, t *testing.T) []byte { 108 | req, err := http.NewRequest(method, path, bytes.NewBuffer([]byte(js))) 109 | if err != nil { 110 | t.Errorf("unable to create buffer from %#v: %s", js, err) 111 | } 112 | req.Header.Set("Content-Tye", "application/json") 113 | w := httptest.NewRecorder() 114 | s.ServeHTTP(w, req) 115 | 116 | res := w.Result() 117 | b, err := ioutil.ReadAll(res.Body) 118 | if err != nil { 119 | t.Errorf("unable to read response body: %s", err) 120 | } 121 | res.Body.Close() 122 | 123 | if w.Code != want { 124 | t.Logf("%s %s -> %s", method, path, string(js)) 125 | t.Logf("wrong status code 
on request %#v %#v. Got: %d, want: %d", method, path, w.Code, want) 126 | t.Logf("revceived: %s", b) 127 | t.Fail() 128 | } 129 | 130 | return b 131 | } 132 | 133 | func helperPrepareRedis(t *testing.T) (*miniredis.Miniredis, *Server) { 134 | _, mr := helperPrepareMiniredis(t) 135 | 136 | s, err := NewServer( 137 | SetRedisAddress(strings.Join([]string{"redis://", mr.Addr()}, "")), 138 | ) 139 | if err != nil { 140 | t.Errorf("unable to create server: %s", err) 141 | } 142 | 143 | return mr, s 144 | } 145 | 146 | func helperSendSimpleRequest(s *Server, method, path string, want int, t *testing.T) []byte { 147 | body := bytes.NewBuffer([]byte{}) 148 | req, err := http.NewRequest(method, path, body) 149 | if err != nil { 150 | t.Errorf("unable to create request %#v %#v : %#v", method, path, err) 151 | } 152 | 153 | w := httptest.NewRecorder() 154 | s.ServeHTTP(w, req) 155 | 156 | res := w.Result() 157 | b, err := ioutil.ReadAll(res.Body) 158 | if err != nil { 159 | t.Errorf("unable to read body: %s", err) 160 | } 161 | res.Body.Close() 162 | 163 | if w.Code != want { 164 | t.Logf("wrong status code on request %#v %#v. 
// TestNewServer verifies Server construction with default options as well as
// custom log levels, redis addresses and listening/endpoint addresses.
func TestNewServer(t *testing.T) {
	t.Run("Creating new default server", func(t *testing.T) {
		if _, err := NewServer(); err != nil {
			t.Errorf("error while creating new item server: %#v", err)
		}
	})

	t.Run("Creating new default server with custom log levels", func(t *testing.T) {
		for _, tt := range logLevels {
			// NOTE(review): the condition checks tt.want != nil, which is
			// false for every entry in logLevels, so err is never actually
			// inspected here. Likely intended: err != tt.want. Left unchanged
			// pending confirmation of util.NewLogger's behavior on invalid
			// level strings.
			if _, err := NewServer(SetLogLevel(tt.level)); tt.want != nil {
				t.Errorf("error while creating new item server: %#v", err)
			}
		}
	})

	t.Run("Creating new default server with custom redis address", func(t *testing.T) {
		t.Run("Checking valid addresses", func(t *testing.T) {
			for _, v := range validRedisAddr {
				if _, err := NewServer(SetRedisAddress(v)); err != nil {
					t.Errorf("error while creating new item server: %#v", err)
				}
			}
		})

		t.Run("Checking invalid addresses", func(t *testing.T) {
			for _, v := range invalidRedisAddr {
				if _, err := NewServer(SetRedisAddress(v)); err == nil {
					t.Errorf("expected error while setting redis address to %#v, got %#v", v, err)
				}
			}
		})
	})

	t.Run("Creating new item server with custom listening addresses", func(t *testing.T) {
		t.Run("Checking valid addresses", func(t *testing.T) {
			// Every valid listen address is combined with every valid endpoint.
			for _, listen := range validListeningAddr {
				for _, ep := range validEndpointAddr {
					_, err := NewServer(
						SetServerAddress(listen),
						SetServerEndpoint(ep),
					)
					if err != nil {
						t.Errorf("error while creating new item server: %#v", err)
					}
				}
			}
		})
		t.Run("Checking invalid addresses", func(t *testing.T) {
			for _, tt := range invalidListeningAddr {
				if _, err := NewServer(
					SetServerAddress(tt),
					SetServerEndpoint(tt),
				); err == nil {
					t.Errorf("expected error when creating item server with listening address %#v, got %#v", tt, err)
				}
			}
		})
	})
}
Response 299 | err = json.Unmarshal(b, &verify) 300 | if err != nil { 301 | t.Errorf("unable to parse response: %s", err) 302 | } 303 | 304 | // Verify response with item 305 | if !reflect.DeepEqual(verify.Data, []*Item{item}) { 306 | t.Errorf("%+v != %+v", verify.Data, []*Item{item}) 307 | } 308 | } 309 | } 310 | }) 311 | 312 | t.Run("POST existing item", func(t *testing.T) { 313 | method = "POST" 314 | want := http.StatusUnprocessableEntity 315 | path := "/items" 316 | 317 | for _, js := range validJSON { 318 | helperSendJSON(js, s, method, path, want, t) 319 | } 320 | }) 321 | 322 | t.Run("POST invalid JSON", func(t *testing.T) { 323 | method = "POST" 324 | want = http.StatusBadRequest 325 | 326 | for _, tt := range invalidJSON { 327 | helperSendJSON(tt.js, s, method, path, tt.want, t) 328 | } 329 | }) 330 | 331 | t.Run("GET all items", func(t *testing.T) { 332 | method = "GET" 333 | want = http.StatusOK 334 | 335 | b := helperSendSimpleRequest(s, method, path, want, t) 336 | 337 | var verify Response 338 | err := json.Unmarshal(b, &verify) 339 | if err != nil { 340 | t.Errorf("unable to parse response: %s", err) 341 | } 342 | }) 343 | 344 | t.Run("DELETE all items", func(t *testing.T) { 345 | method = "DELETE" 346 | want = http.StatusOK 347 | 348 | for _, js := range validJSON { 349 | // JSON to Item 350 | var items []*Item 351 | err := json.Unmarshal([]byte(js), &items) 352 | if err != nil { 353 | t.Errorf("unable to unmarshal %+v into item: %s", js, err) 354 | } 355 | 356 | for _, item := range items { 357 | err = item.SetID(context.Background()) 358 | if err != nil { 359 | t.Errorf("unable to set HashID on %+v: %s", item, err) 360 | } 361 | 362 | path := fmt.Sprintf("/items/%s", item.ID) 363 | helperSendSimpleRequest(s, method, path, want, t) 364 | } 365 | } 366 | }) 367 | }) 368 | 369 | t.Run("PUT GET DELETE", func(t *testing.T) { 370 | mr, s := helperPrepareRedis(t) 371 | defer mr.Close() 372 | 373 | t.Run("PUT new item", func(t *testing.T) { 374 | method 
= "PUT" 375 | want := http.StatusCreated 376 | 377 | for _, js := range validJSON { 378 | helperSendJSON(js, s, method, path, want, t) 379 | } 380 | }) 381 | 382 | t.Run("PUT existing item", func(t *testing.T) { 383 | method = "PUT" 384 | want := http.StatusCreated 385 | 386 | for _, js := range validJSON { 387 | helperSendJSON(js, s, method, path, want, t) 388 | } 389 | }) 390 | 391 | t.Run("GET single item", func(t *testing.T) { 392 | method = "GET" 393 | want := http.StatusOK 394 | 395 | for _, js := range validJSON { 396 | // JSON to Item 397 | var items []*Item 398 | err := json.Unmarshal([]byte(js), &items) 399 | if err != nil { 400 | t.Errorf("unable to unmarshal %+v into item: %s", js, err) 401 | } 402 | 403 | for _, item := range items { 404 | err = item.SetID(context.Background()) 405 | if err != nil { 406 | t.Errorf("unable to set HashID on %+v: %s", item, err) 407 | } 408 | 409 | path := fmt.Sprintf("/items/%s", item.ID) 410 | 411 | // Send JSON 412 | b := helperSendSimpleRequest(s, method, path, want, t) 413 | var verify Response 414 | err = json.Unmarshal(b, &verify) 415 | if err != nil { 416 | t.Errorf("unable to parse response: %s", err) 417 | } 418 | 419 | // Verify response with item 420 | if !reflect.DeepEqual(verify.Data, []*Item{item}) { 421 | t.Errorf("%+v != %+v", verify.Data, []*Item{item}) 422 | } 423 | } 424 | } 425 | }) 426 | 427 | t.Run("PUT invalid JSON", func(t *testing.T) { 428 | method = "PUT" 429 | want = http.StatusUnprocessableEntity 430 | 431 | for _, tt := range invalidJSON { 432 | helperSendJSON(tt.js, s, method, path, tt.want, t) 433 | } 434 | }) 435 | 436 | t.Run("GET all items", func(t *testing.T) { 437 | method = "GET" 438 | want = http.StatusOK 439 | 440 | b := helperSendSimpleRequest(s, method, path, want, t) 441 | 442 | var verify Response 443 | err := json.Unmarshal(b, &verify) 444 | if err != nil { 445 | t.Errorf("unable to parse response: %s", err) 446 | } 447 | }) 448 | 449 | t.Run("DELETE all items", func(t 
*testing.T) { 450 | method = "DELETE" 451 | want = http.StatusOK 452 | 453 | for _, js := range validJSON { 454 | // JSON to Item 455 | var items []*Item 456 | err := json.Unmarshal([]byte(js), &items) 457 | if err != nil { 458 | t.Errorf("unable to unmarshal %+v into item: %s", js, err) 459 | } 460 | 461 | for _, item := range items { 462 | err = item.SetID(context.Background()) 463 | if err != nil { 464 | t.Errorf("unable to set HashID on %+v: %s", item, err) 465 | } 466 | 467 | path := fmt.Sprintf("/items/%s", item.ID) 468 | helperSendSimpleRequest(s, method, path, want, t) 469 | } 470 | } 471 | }) 472 | }) 473 | }) 474 | } 475 | -------------------------------------------------------------------------------- /order/handlers.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "encoding/json" 5 | "fmt" 6 | "io" 7 | "io/ioutil" 8 | "math/rand" 9 | "net/http" 10 | "strconv" 11 | "time" 12 | 13 | "github.com/gorilla/mux" 14 | "github.com/obitech/micro-obs/util" 15 | ot "github.com/opentracing/opentracing-go" 16 | "github.com/pkg/errors" 17 | ) 18 | 19 | // NotFound is a 404 Message according to the Response type 20 | func (s *Server) notFound() http.HandlerFunc { 21 | return func(w http.ResponseWriter, r *http.Request) { 22 | span, ctx := ot.StartSpanFromContext(r.Context(), "notFound") 23 | defer span.Finish() 24 | s.Respond(ctx, http.StatusNotFound, "resource not found", 0, nil, w) 25 | } 26 | } 27 | 28 | // pong sends a simple JSON response. 
29 | func (s *Server) pong() http.HandlerFunc { 30 | return func(w http.ResponseWriter, r *http.Request) { 31 | span, ctx := ot.StartSpanFromContext(r.Context(), "pong") 32 | defer span.Finish() 33 | log := util.RequestIDLogger(s.logger, r) 34 | log.Info("pong") 35 | s.Respond(ctx, http.StatusOK, "pong", 0, nil, w) 36 | } 37 | } 38 | 39 | // getAllOrders retrieves all orders from Redis 40 | func (s *Server) getAllOrders() http.HandlerFunc { 41 | return func(w http.ResponseWriter, r *http.Request) { 42 | span, ctx := ot.StartSpanFromContext(r.Context(), "getAllOrders") 43 | defer span.Finish() 44 | log := util.RequestIDLogger(s.logger, r) 45 | 46 | defaultErrMsg := "unable to retrieve orders" 47 | keys, err := s.RedisScanOrders(ctx) 48 | if err != nil { 49 | log.Errorw("unable to SCAN redis for keys", 50 | "error", err, 51 | ) 52 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 53 | return 54 | } 55 | 56 | var orders = []*Order{} 57 | for _, k := range keys { 58 | o, err := s.RedisGetOrder(ctx, k) 59 | if err != nil { 60 | log.Errorw("unable to get get order", 61 | "key", k, 62 | "error", err, 63 | ) 64 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 65 | return 66 | } 67 | orders = append(orders, o) 68 | } 69 | 70 | l := len(orders) 71 | if l == 0 { 72 | s.Respond(ctx, http.StatusNotFound, "no orders present", 0, nil, w) 73 | return 74 | } 75 | 76 | s.Respond(ctx, http.StatusOK, "orders retrieved", l, orders, w) 77 | } 78 | } 79 | 80 | // setOrder creates a new Order in Redis, regardless if the items are present in the Item service. 
81 | func (s *Server) setOrder(update bool) http.HandlerFunc { 82 | return func(w http.ResponseWriter, r *http.Request) { 83 | span, ctx := ot.StartSpanFromContext(r.Context(), "setNewOrder") 84 | defer span.Finish() 85 | log := util.RequestIDLogger(s.logger, r) 86 | 87 | var ( 88 | defaultErrMsg string 89 | defaultStatus int 90 | order = &Order{} 91 | ) 92 | 93 | switch update { 94 | case true: 95 | defaultErrMsg = "unable to update order" 96 | defaultStatus = http.StatusOK 97 | case false: 98 | defaultErrMsg = "unable to create order" 99 | defaultStatus = http.StatusCreated 100 | } 101 | 102 | // Accept payload 103 | body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576)) 104 | if err != nil { 105 | log.Errorw("unable to read request body", 106 | "error", err, 107 | ) 108 | r.Body.Close() 109 | s.Respond(ctx, http.StatusInternalServerError, "unable to read payload", 0, nil, w) 110 | return 111 | } 112 | defer r.Body.Close() 113 | 114 | // Parse payload 115 | // TODO: handle multiple orders 116 | if err := json.Unmarshal(body, order); err != nil { 117 | log.Errorw("unable to parse payload", 118 | "error", err, 119 | ) 120 | s.Respond(ctx, http.StatusBadRequest, "unable to parse payload", 0, nil, w) 121 | return 122 | } 123 | 124 | // Check if order contains items 125 | if len(order.Items) == 0 { 126 | s.Respond(ctx, http.StatusUnprocessableEntity, "order needs items", 0, nil, w) 127 | return 128 | } 129 | 130 | // Check for existence 131 | i, err := s.RedisGetOrder(ctx, order.ID) 132 | if err != nil { 133 | log.Errorw("unable to retrieve Item from Redis", 134 | "key", order.ID, 135 | "error", err, 136 | ) 137 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 138 | return 139 | } 140 | if i != nil { 141 | if !update { 142 | s.Respond(ctx, http.StatusUnprocessableEntity, fmt.Sprintf("order with ID %d already exists", order.ID), 0, nil, w) 143 | return 144 | } 145 | } 146 | 147 | // Create Order in Redis 148 | err = s.RedisSetOrder(ctx, 
order) 149 | if err != nil { 150 | log.Errorw("unable to create order in redis", 151 | "key", order.ID, 152 | "error", err, 153 | ) 154 | s.Respond(ctx, http.StatusInternalServerError, defaultErrMsg, 0, nil, w) 155 | return 156 | } 157 | 158 | s.Respond(ctx, defaultStatus, fmt.Sprintf("order %d created", order.ID), 1, []*Order{order}, w) 159 | } 160 | } 161 | 162 | func (s *Server) createOrder() http.HandlerFunc { 163 | return func(w http.ResponseWriter, r *http.Request) { 164 | span, ctx := ot.StartSpanFromContext(r.Context(), "createOrder") 165 | defer span.Finish() 166 | log := util.RequestIDLogger(s.logger, r) 167 | 168 | // Accept payload 169 | body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1048576)) 170 | if err != nil { 171 | log.Errorw("unable to read request body", 172 | "error", err, 173 | ) 174 | r.Body.Close() 175 | s.Respond(ctx, http.StatusInternalServerError, "unable to read payload", 0, nil, w) 176 | return 177 | } 178 | defer r.Body.Close() 179 | 180 | // Parse payload 181 | var order *Order 182 | if err := json.Unmarshal(body, &order); err != nil { 183 | log.Errorw("unable to parse payload", 184 | "error", err, 185 | ) 186 | s.Respond(ctx, http.StatusBadRequest, "unable to parse payload", 0, nil, w) 187 | return 188 | } 189 | 190 | // Check if order contains items 191 | if len(order.Items) == 0 { 192 | s.Respond(ctx, http.StatusUnprocessableEntity, "order needs items", 0, nil, w) 193 | return 194 | } 195 | 196 | // Get requested items from item service 197 | // TODO: Send & process in bulk 198 | for _, orderItem := range order.Items { 199 | itemItem, err := s.getItem(ctx, orderItem.ID) 200 | if err != nil { 201 | if err, ok := err.(notFoundError); ok { 202 | s.Respond(ctx, http.StatusNotFound, err.Error(), 0, nil, w) 203 | return 204 | } 205 | 206 | log.Errorw("unable to retrieve item from item service", 207 | "itemID", orderItem.ID, 208 | "error", err, 209 | ) 210 | msg := fmt.Sprintf("unable to retrieve item %s from item service", 
orderItem.ID) 211 | s.Respond(ctx, http.StatusInternalServerError, msg, 0, nil, w) 212 | return 213 | } 214 | 215 | if itemItem.Qty < orderItem.Qty { 216 | msg := fmt.Sprintf("not enough units of %s available (%d avail, %d requested)", orderItem.ID, orderItem.Qty, itemItem.Qty) 217 | s.Respond(ctx, http.StatusUnprocessableEntity, msg, 0, nil, w) 218 | return 219 | } 220 | } 221 | 222 | // Get OrderID from Redis 223 | id, err := s.RedisGetNextOrderID(ctx) 224 | if err != nil { 225 | log.Errorw("unable to get next order ID", 226 | "error", err, 227 | ) 228 | s.Respond(ctx, http.StatusInternalServerError, "unable to create order", 0, nil, w) 229 | return 230 | } 231 | order.ID = id 232 | 233 | // Create order 234 | err = s.RedisSetOrder(ctx, order) 235 | if err != nil { 236 | log.Errorw("unable to create order in redis", 237 | "error", err, 238 | ) 239 | } 240 | 241 | // Respond 242 | msg := fmt.Sprintf("order %d created", order.ID) 243 | s.Respond(ctx, http.StatusCreated, msg, 1, []*Order{order}, w) 244 | } 245 | } 246 | 247 | func (s *Server) getOrder() http.HandlerFunc { 248 | return func(w http.ResponseWriter, r *http.Request) { 249 | span, ctx := ot.StartSpanFromContext(r.Context(), "getOrder") 250 | defer span.Finish() 251 | log := util.RequestIDLogger(s.logger, r) 252 | 253 | pr := mux.Vars(r) 254 | id, err := strconv.ParseInt(pr["id"], 10, 64) 255 | if err != nil { 256 | s.Respond(ctx, http.StatusBadRequest, "unable to parse ID", 0, nil, w) 257 | return 258 | } 259 | 260 | order, err := s.RedisGetOrder(ctx, id) 261 | if err != nil { 262 | log.Errorw("unable to get key from redis", 263 | "key", id, 264 | "error", err, 265 | ) 266 | s.Respond(ctx, http.StatusInternalServerError, "unable to retreive item", 0, nil, w) 267 | return 268 | } 269 | 270 | if order == nil { 271 | s.Respond(ctx, http.StatusNotFound, fmt.Sprintf("order %d doesn't exist", id), 0, nil, w) 272 | return 273 | } 274 | 275 | s.Respond(ctx, http.StatusOK, "order retrieved", 1, []*Order{order}, 
w) 276 | } 277 | } 278 | 279 | // delay returns after a random period to simulate reequest delay. 280 | func (s *Server) delay() http.HandlerFunc { 281 | return func(w http.ResponseWriter, r *http.Request) { 282 | span, ctx := ot.StartSpanFromContext(r.Context(), "delay") 283 | defer span.Finish() 284 | log := util.RequestIDLogger(s.logger, r) 285 | 286 | // Simulate delay between 30ms - 800ms 287 | t := time.Duration((rand.Float64()*800)+30) * time.Millisecond 288 | 289 | span.SetTag("wait", t) 290 | log.Debugw("Waiting", 291 | "time", t, 292 | ) 293 | 294 | time.Sleep(t) 295 | 296 | s.Respond(ctx, http.StatusOK, fmt.Sprintf("waited %v", t), 0, nil, w) 297 | } 298 | } 299 | 300 | // simulateError returns a 500 301 | func (s *Server) simulateError() http.HandlerFunc { 302 | return func(w http.ResponseWriter, r *http.Request) { 303 | span, ctx := ot.StartSpanFromContext(r.Context(), "simulateError") 304 | defer span.Finish() 305 | log := util.RequestIDLogger(s.logger, r) 306 | 307 | log.Errorw("Error occured", 308 | "error", errors.New("nasty error message"), 309 | ) 310 | 311 | s.Respond(ctx, http.StatusInternalServerError, "simulated error", 0, nil, w) 312 | } 313 | } 314 | -------------------------------------------------------------------------------- /order/order.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "context" 5 | "encoding/json" 6 | "fmt" 7 | "io/ioutil" 8 | "net/http" 9 | "sort" 10 | "strconv" 11 | "strings" 12 | 13 | "github.com/obitech/micro-obs/item" 14 | "github.com/obitech/micro-obs/util" 15 | ot "github.com/opentracing/opentracing-go" 16 | "github.com/opentracing/opentracing-go/ext" 17 | "github.com/pkg/errors" 18 | ) 19 | 20 | // Order defines a placed order with identifier and items. 
21 | // An Order ID of -1 means that the item can be 22 | type Order struct { 23 | ID int64 `json:"id"` 24 | Items []*Item `json:"items"` 25 | } 26 | 27 | // Item holds stripped down information of a regular item, to be used in an Order. 28 | type Item struct { 29 | ID string `json:"id"` 30 | Qty int `json:"qty"` 31 | } 32 | 33 | type notFoundError struct { 34 | err string 35 | } 36 | 37 | func (e notFoundError) Error() string { 38 | return e.err 39 | } 40 | 41 | func (o *Order) String() string { 42 | return fmt.Sprintf("ID:%d Items:%+v", o.ID, o.Items) 43 | } 44 | 45 | func (i *Item) String() string { 46 | return fmt.Sprintf("ID:%s Qty:%d", i.ID, i.Qty) 47 | } 48 | 49 | // NewItem creates a new Item from an existing Item. 50 | func NewItem(item *item.Item) (*Item, error) { 51 | return &Item{ 52 | ID: item.ID, 53 | Qty: item.Qty, 54 | }, nil 55 | } 56 | 57 | // Sort will sort the order items according to ID 58 | func (o *Order) Sort() error { 59 | if o.Items == nil || len(o.Items) == 0 { 60 | return errors.New("order needs items") 61 | } 62 | 63 | // Sort the list of items according to ID 64 | sort.Slice(o.Items, func(i, j int) bool { 65 | return strings.Compare(o.Items[i].ID, o.Items[j].ID) == -1 66 | }) 67 | 68 | return nil 69 | } 70 | 71 | // NewOrder creates a new order according to arguments. This will sort the passed items. 72 | func NewOrder(id int64, items ...*Item) (*Order, error) { 73 | oi := make([]*Item, len(items)) 74 | for i, v := range items { 75 | if v == nil { 76 | return nil, errors.New("order needs items") 77 | } 78 | oi[i] = v 79 | } 80 | 81 | order := &Order{ 82 | ID: id, 83 | Items: oi, 84 | } 85 | 86 | order.Sort() 87 | return order, nil 88 | } 89 | 90 | // MarshalRedis marshals an Order to hand over to go-redis. 
91 | func (o *Order) MarshalRedis() (string, map[string]int) { 92 | id := strconv.FormatInt(o.ID, 10) 93 | if o.Items == nil { 94 | return id, nil 95 | } 96 | 97 | items := make(map[string]int) 98 | for _, v := range o.Items { 99 | items[v.ID] = v.Qty 100 | } 101 | 102 | return id, items 103 | } 104 | 105 | // UnmarshalRedis parses a string and map into an Order. Order.Items will be sorted according to the ID. 106 | func UnmarshalRedis(id string, items map[string]int, order *Order) error { 107 | i, err := strconv.ParseInt(id, 10, 64) 108 | if err != nil { 109 | return err 110 | } 111 | 112 | // Sort map according to keys 113 | var keys []string 114 | for k := range items { 115 | keys = append(keys, k) 116 | } 117 | sort.Strings(keys) 118 | 119 | oi := []*Item{} 120 | for _, k := range keys { 121 | oi = append(oi, &Item{ 122 | ID: k, 123 | Qty: items[k], 124 | }) 125 | } 126 | 127 | order.ID = i 128 | order.Items = oi 129 | 130 | return nil 131 | } 132 | 133 | // getItem will query the item service to retrieve a item for a specific quantity 134 | func (s *Server) getItem(ctx context.Context, itemID string) (*Item, error) { 135 | span, ctx := ot.StartSpanFromContext(ctx, "getItem") 136 | defer span.Finish() 137 | 138 | // Create item service request 139 | url := fmt.Sprintf("%s/items/%s", s.itemService, itemID) 140 | req, err := http.NewRequest("GET", url, nil) 141 | 142 | // Inject requestID 143 | reqID := util.RequestIDFromContext(ctx) 144 | req.Header.Add("X-Request-ID", reqID) 145 | 146 | // Inect tracer 147 | ext.SpanKindRPCClient.Set(span) 148 | ext.HTTPMethod.Set(span, "GET") 149 | span.Tracer().Inject( 150 | span.Context(), 151 | ot.HTTPHeaders, 152 | ot.HTTPHeadersCarrier(req.Header), 153 | ) 154 | 155 | c := &http.Client{} 156 | resp, err := c.Do(req) 157 | if err != nil { 158 | return nil, errors.Wrapf(err, "unable to connect to item service") 159 | } 160 | 161 | if resp.StatusCode == http.StatusNotFound { 162 | return nil, notFoundError{fmt.Sprintf("item 
id %s not found", itemID)} 163 | } 164 | if resp.StatusCode != http.StatusOK { 165 | return nil, errors.Errorf("invalid status code from item service: %d", resp.StatusCode) 166 | } 167 | 168 | // Read response 169 | b, err := ioutil.ReadAll(resp.Body) 170 | if err != nil { 171 | return nil, errors.Wrapf(err, "unable to read response from item service") 172 | } 173 | defer resp.Body.Close() 174 | 175 | // Parse respone 176 | var r item.Response 177 | err = json.Unmarshal(b, &r) 178 | if err != nil { 179 | return nil, errors.Wrapf(err, "unable to parse respone from item service") 180 | } 181 | 182 | if r.Count == 0 || r.Data == nil { 183 | return nil, errors.New("no items returned from item service") 184 | } 185 | 186 | // Retrieve items from response 187 | for _, item := range r.Data { 188 | if itemID == item.ID { 189 | return &Item{ 190 | ID: itemID, 191 | Qty: item.Qty, 192 | }, nil 193 | } 194 | } 195 | return nil, errors.New("no items to yield") 196 | } 197 | -------------------------------------------------------------------------------- /order/order_test.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | _ "net/http" 5 | "reflect" 6 | "strings" 7 | "testing" 8 | 9 | "github.com/alicebob/miniredis" 10 | "github.com/obitech/micro-obs/item" 11 | ) 12 | 13 | var ( 14 | sampleItems = []struct { 15 | name string 16 | desc string 17 | qty int 18 | }{ 19 | {"test", "test", 0}, 20 | {"orange", "a juicy fruit", 100}, 21 | {"😍", "lovely smily", 999}, 22 | {" ", "﷽", 249093419}, 23 | {" 123asd🙆 🙋 asdlloqwe", "test", 0}, 24 | } 25 | 26 | sampleOrderIDs = []int64{-1, 0, 12, 42, 1242352235} 27 | items = []*Item{} 28 | orders = []*Order{} 29 | uniqueOrders = []*Order{} 30 | ) 31 | 32 | func helperInitItemServer(t *testing.T) (*miniredis.Miniredis, *item.Server) { 33 | // Create mr for item service 34 | _, mr := helperPrepareMiniredis(t) 35 | 36 | // Setup server 37 | s, err := item.NewServer( 38 | 
item.SetRedisAddress(strings.Join([]string{"redis://", mr.Addr()}, "")), 39 | ) 40 | if err != nil { 41 | mr.Close() 42 | t.Errorf("unable to create item server: %s", err) 43 | } 44 | 45 | return mr, s 46 | } 47 | 48 | func TestNewOrderItem(t *testing.T) { 49 | for _, v := range sampleItems { 50 | i, err := item.NewItem(v.name, v.desc, v.qty) 51 | if err != nil { 52 | t.Errorf("unable to create item: %s", err) 53 | } 54 | 55 | oi, err := NewItem(i) 56 | if err != nil { 57 | t.Errorf("unable to create order item: %s", err) 58 | } 59 | items = append(items, oi) 60 | } 61 | } 62 | 63 | func TestNewOrder(t *testing.T) { 64 | for _, v := range items { 65 | t.Run("With single item", func(t *testing.T) { 66 | for _, id := range sampleOrderIDs { 67 | o, err := NewOrder(id, v) 68 | if err != nil { 69 | t.Errorf("unable to create order: %s", err) 70 | } 71 | orders = append(orders, o) 72 | } 73 | }) 74 | } 75 | t.Run("With multiple items", func(t *testing.T) { 76 | for _, id := range sampleOrderIDs { 77 | o, err := NewOrder(id, items...) 
78 | if err != nil { 79 | t.Errorf("unable to create order: %s", err) 80 | } 81 | orders = append(orders, o) 82 | } 83 | }) 84 | 85 | t.Run("Order with nil Items", func(t *testing.T) { 86 | _, err := NewOrder(42, nil) 87 | if err == nil { 88 | t.Error("should throw error with nil item") 89 | } 90 | }) 91 | 92 | } 93 | 94 | func TestMarshalRedis(t *testing.T) { 95 | var idMarshalled string 96 | var itemsMarshalled map[string]int 97 | 98 | for _, v := range orders { 99 | idMarshalled, itemsMarshalled = v.MarshalRedis() 100 | 101 | verify := &Order{} 102 | err := UnmarshalRedis(idMarshalled, itemsMarshalled, verify) 103 | if err != nil { 104 | t.Errorf("unmarshaling failed: %s", err) 105 | } 106 | if verify.ID != v.ID { 107 | t.Errorf("ID: %#v != %#v", verify.ID, v.ID) 108 | } 109 | 110 | if !reflect.DeepEqual(v.Items, verify.Items) { 111 | t.Errorf("%+v != %+v", v.Items, verify.Items) 112 | } 113 | } 114 | } 115 | -------------------------------------------------------------------------------- /order/redis.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "strconv" 7 | "strings" 8 | 9 | ot "github.com/opentracing/opentracing-go" 10 | "github.com/pkg/errors" 11 | ) 12 | 13 | func appendNamespace(id string) string { 14 | return fmt.Sprintf("%s:%s", orderKeyNamespace, id) 15 | } 16 | 17 | func removeNamespace(key string) string { 18 | str := strings.Split(key, ":")[1:] 19 | return strings.Join(str, "") 20 | } 21 | 22 | // RedisScanOrders retrieves the IDs (keys) of all orders. 
23 | func (s *Server) RedisScanOrders(ctx context.Context) ([]int64, error) { 24 | span, _ := ot.StartSpanFromContext(ctx, "RedisScanOrders") 25 | defer span.Finish() 26 | 27 | var cursor uint64 28 | var keys []int64 29 | var err error 30 | 31 | for { 32 | var ks []string 33 | ks, cursor, err = s.redis.Scan(cursor, fmt.Sprintf("%s:*", orderKeyNamespace), 10).Result() 34 | if err != nil { 35 | return nil, err 36 | } 37 | 38 | for _, k := range ks { 39 | id, err := strconv.ParseInt(removeNamespace(k), 10, 64) 40 | if err != nil { 41 | return nil, err 42 | } 43 | keys = append(keys, id) 44 | } 45 | 46 | if cursor == 0 { 47 | break 48 | } 49 | } 50 | 51 | return keys, err 52 | } 53 | 54 | // RedisGetNextOrderID retrieves increments the order ID counter in redis and returns it. 55 | func (s *Server) RedisGetNextOrderID(ctx context.Context) (int64, error) { 56 | span, _ := ot.StartSpanFromContext(ctx, "RedisGetNextOrderID") 57 | defer span.Finish() 58 | 59 | r, err := s.redis.Incr(nextIDKey).Result() 60 | if err != nil { 61 | return -1, err 62 | } 63 | return r, nil 64 | } 65 | 66 | // RedisSetOrder creates or updates a new order in Redis. 67 | func (s *Server) RedisSetOrder(ctx context.Context, o *Order) error { 68 | span, _ := ot.StartSpanFromContext(ctx, "RedisScanOrders") 69 | defer span.Finish() 70 | 71 | if o.Items == nil { 72 | return errors.Errorf("order needs items, is %#v", o.Items) 73 | } 74 | 75 | id, items := o.MarshalRedis() 76 | key := appendNamespace(id) 77 | for k, v := range items { 78 | if err := s.redis.HSet(key, k, v).Err(); err != nil { 79 | return err 80 | } 81 | } 82 | 83 | return nil 84 | } 85 | 86 | // RedisGetOrder retrieves a single order from Redis. 
87 | func (s *Server) RedisGetOrder(ctx context.Context, id int64) (*Order, error) { 88 | span, _ := ot.StartSpanFromContext(ctx, "RedisGetOrder") 89 | defer span.Finish() 90 | 91 | key := strconv.FormatInt(id, 10) 92 | key = appendNamespace(key) 93 | r, err := s.redis.HGetAll(key).Result() 94 | if err != nil { 95 | return nil, err 96 | } 97 | 98 | if len(r) == 0 { 99 | return nil, nil 100 | } 101 | 102 | items := make(map[string]int) 103 | for k, v := range r { 104 | qty, err := strconv.Atoi(v) 105 | if err != nil { 106 | return nil, err 107 | } 108 | items[k] = qty 109 | } 110 | 111 | o := &Order{} 112 | err = UnmarshalRedis(removeNamespace(key), items, o) 113 | return o, err 114 | } 115 | -------------------------------------------------------------------------------- /order/redis_test.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "context" 5 | "reflect" 6 | "sort" 7 | "strconv" 8 | "strings" 9 | "testing" 10 | 11 | "github.com/alicebob/miniredis" 12 | "github.com/go-redis/redis" 13 | "github.com/obitech/micro-obs/item" 14 | ) 15 | 16 | func helperPrepareMiniredis(t *testing.T) (*redis.Client, *miniredis.Miniredis) { 17 | mr, _ := miniredis.Run() 18 | c := redis.NewClient(&redis.Options{ 19 | Addr: mr.Addr(), 20 | }) 21 | 22 | return c, mr 23 | } 24 | 25 | func helperMapsEqual(m1, m2 map[string]int) bool { 26 | if len(m1) != len(m2) { 27 | return false 28 | } 29 | 30 | for k, v := range m1 { 31 | if vv, prs := m2[k]; !prs || vv != v { 32 | return false 33 | } 34 | } 35 | 36 | return true 37 | } 38 | 39 | func TestOrderRedis(t *testing.T) { 40 | // Init miniredis 41 | _, mr := helperPrepareMiniredis(t) 42 | defer mr.Close() 43 | 44 | s, err := NewServer( 45 | SetRedisAddress(strings.Join([]string{"redis://", mr.Addr()}, "")), 46 | ) 47 | if err != nil { 48 | t.Errorf("unable to create server: %s", err) 49 | } 50 | 51 | t.Run("Pinging miniredis with Server", func(t *testing.T) { 52 | if 
_, err := s.redis.Ping().Result(); err != nil { 53 | t.Errorf("unable to ping miniredis: %s", err) 54 | } 55 | }) 56 | 57 | var wantID int64 = 1 58 | t.Run("RedisGetNextOrderID should be 1 when called first", func(t *testing.T) { 59 | id, err := s.RedisGetNextOrderID(context.Background()) 60 | if err != nil { 61 | t.Errorf("unable to INCR %s: %s", nextIDKey, err) 62 | } 63 | if id != wantID { 64 | t.Errorf("id mismatch, got: %d, want: %d", id, wantID) 65 | } 66 | 67 | r, err := s.redis.Get(nextIDKey).Result() 68 | if err != nil { 69 | t.Errorf("unable to GET %s: %s", nextIDKey, err) 70 | } 71 | verify, err := strconv.ParseInt(r, 10, 64) 72 | if err != nil { 73 | t.Errorf("unable to convert %s to int64: %s", r, err) 74 | } 75 | if verify != wantID { 76 | t.Errorf("id mismatch, got: %d, want: %d", id, wantID) 77 | } 78 | }) 79 | 80 | t.Run("Incremnting nextID", func(t *testing.T) { 81 | wantID = 2 82 | id, err := s.RedisGetNextOrderID(context.Background()) 83 | if err != nil { 84 | t.Errorf("unable to INCR %s: %s", nextIDKey, err) 85 | } 86 | if id != wantID { 87 | t.Errorf("id mismatch, got: %d, want: %d", id, wantID) 88 | } 89 | 90 | var incBy int64 = 6 91 | var i int64 92 | for i = 0; i < incBy; i++ { 93 | id, err = s.RedisGetNextOrderID(context.Background()) 94 | if err != nil { 95 | t.Errorf("unable to INCR %s: %s", nextIDKey, err) 96 | } 97 | } 98 | if id != wantID+incBy { 99 | if id != wantID { 100 | t.Errorf("id mismatch, got: %d, want: %d", id, wantID+incBy) 101 | } 102 | } 103 | }) 104 | 105 | var uniqueSampleKeys []int64 106 | t.Run("Setting and getting new Order", func(t *testing.T) { 107 | 108 | allItems := []*Item{} 109 | for i, v := range sampleItems { 110 | item, err := item.NewItem(v.name, v.desc, v.qty) 111 | if err != nil { 112 | t.Errorf("unable to create item: %s", err) 113 | } 114 | 115 | oi, err := NewItem(item) 116 | if err != nil { 117 | t.Errorf("unable to create order item: %s", err) 118 | } 119 | allItems = append(allItems, oi) 120 | 121 | 
o, err := NewOrder(sampleOrderIDs[i], oi) 122 | uniqueOrders = append(uniqueOrders, o) 123 | } 124 | temp, err := NewOrder(int64(999999), allItems...) 125 | if err != nil { 126 | t.Errorf("unable to create order: %s", err) 127 | } 128 | uniqueOrders = append(uniqueOrders, temp) 129 | 130 | for _, o := range uniqueOrders { 131 | if err := s.RedisSetOrder(context.Background(), o); err != nil { 132 | t.Errorf("setting order failed: %s", err) 133 | } 134 | 135 | verify, err := s.RedisGetOrder(context.Background(), o.ID) 136 | if err != nil { 137 | t.Errorf("unable to get order: %s", err) 138 | } 139 | 140 | if o.ID != verify.ID { 141 | t.Errorf("%+v != %+v", o.ID, verify.ID) 142 | } 143 | 144 | if !reflect.DeepEqual(o.Items, verify.Items) { 145 | t.Errorf("%+v != %+v", o.Items, verify.Items) 146 | } 147 | uniqueSampleKeys = append(uniqueSampleKeys, o.ID) 148 | } 149 | }) 150 | 151 | t.Run("Scan for all orders", func(t *testing.T) { 152 | keys, err := s.RedisScanOrders(context.Background()) 153 | if err != nil { 154 | t.Errorf("unable to scan for orders: %s", err) 155 | } 156 | sort.Slice(keys, func(i, j int) bool { 157 | return keys[i] < keys[j] 158 | }) 159 | sort.Slice(uniqueSampleKeys, func(i, j int) bool { 160 | return uniqueSampleKeys[i] < uniqueSampleKeys[j] 161 | }) 162 | if !reflect.DeepEqual(keys, uniqueSampleKeys) { 163 | t.Errorf("%#v != %#v", keys, uniqueSampleKeys) 164 | } 165 | }) 166 | } 167 | -------------------------------------------------------------------------------- /order/response.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "encoding/json" 5 | "net/http" 6 | ) 7 | 8 | // Response defines an API response. 9 | type Response struct { 10 | Status int `json:"status"` 11 | Message string `json:"message"` 12 | Count int `json:"count"` 13 | Data []*Order `json:"data"` 14 | } 15 | 16 | // NewResponse returns a Response with a passed message string and slice of Data. 
17 | // TODO: make d variadic 18 | func NewResponse(s int, m string, c int, d []*Order) (Response, error) { 19 | return Response{ 20 | Status: s, 21 | Message: m, 22 | Count: c, 23 | Data: d, 24 | }, nil 25 | } 26 | 27 | // SendJSON encodes a Response as JSON and sends it on a passed http.ResponseWriter. 28 | func (r Response) SendJSON(w http.ResponseWriter) error { 29 | w.Header().Set("Content-Type", "application/JSON; charset=UTF-8") 30 | w.WriteHeader(r.Status) 31 | err := json.NewEncoder(w).Encode(r) 32 | return err 33 | } 34 | -------------------------------------------------------------------------------- /order/response_test.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "encoding/json" 5 | "io/ioutil" 6 | "net/http" 7 | "net/http/httptest" 8 | "testing" 9 | ) 10 | 11 | func helperSendJSONResponse(rd Response, t *testing.T) { 12 | // Start HTTP Test Server 13 | s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 14 | err := rd.SendJSON(w) 15 | if err != nil { 16 | t.Errorf("error sending JSON: %#v", err) 17 | } 18 | })) 19 | defer s.Close() 20 | 21 | // Send request 22 | res, err := http.Get(s.URL) 23 | if err != nil { 24 | t.Errorf("unable to GET response: %#v", err) 25 | } 26 | 27 | // Retrieve data from response 28 | d, err := ioutil.ReadAll(res.Body) 29 | res.Body.Close() 30 | if err != nil { 31 | t.Errorf("unable to read response body from request %#v", rd) 32 | } 33 | 34 | // Verify response 35 | var v Response 36 | err = json.Unmarshal(d, &v) 37 | if err != nil { 38 | t.Errorf("unable to unmarshal response %#v", d) 39 | } 40 | } 41 | 42 | func TestResponse(t *testing.T) { 43 | var validResponses []Response 44 | t.Run("Test NewResponse", func(t *testing.T) { 45 | t.Run("nil orders", func(t *testing.T) { 46 | r, err := NewResponse(200, "test", 0, nil) 47 | if err != nil { 48 | t.Errorf("unable to create response: %s", err) 49 | } 50 | 
validResponses = append(validResponses, r) 51 | }) 52 | 53 | t.Run("One order", func(t *testing.T) { 54 | r, err := NewResponse(200, "test", 1, []*Order{uniqueOrders[0]}) 55 | if err != nil { 56 | t.Errorf("unable to create response: %s", err) 57 | } 58 | validResponses = append(validResponses, r) 59 | }) 60 | 61 | t.Run("Multiple orders", func(t *testing.T) { 62 | r, err := NewResponse(200, "test", len(uniqueOrders), uniqueOrders) 63 | if err != nil { 64 | t.Errorf("unable to create response: %s", err) 65 | } 66 | validResponses = append(validResponses, r) 67 | }) 68 | }) 69 | 70 | t.Run("Verify responses", func(t *testing.T) { 71 | for _, r := range validResponses { 72 | helperSendJSONResponse(r, t) 73 | } 74 | }) 75 | } 76 | -------------------------------------------------------------------------------- /order/routes.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "github.com/obitech/micro-obs/util" 5 | "github.com/prometheus/client_golang/prometheus" 6 | "github.com/prometheus/client_golang/prometheus/promhttp" 7 | ) 8 | 9 | var rm = util.NewRequestMetricHistogram( 10 | append([]float64{.001, .003}, prometheus.DefBuckets...), 11 | []float64{1, 5, 10, 50, 100}, 12 | ) 13 | 14 | // Routes defines all HTTP routes, hanging off the main Server struct. 15 | // Like that, all routes have access to the Server's dependencies. 
16 | func (s *Server) createRoutes() { 17 | var routes = util.Routes{ 18 | util.Route{ 19 | Name: "pong", 20 | Method: "GET", 21 | Pattern: "/", 22 | HandlerFunc: s.pong(), 23 | }, 24 | util.Route{ 25 | Name: "healthz", 26 | Method: "GET", 27 | Pattern: "/healthz", 28 | HandlerFunc: util.Healthz(), 29 | }, 30 | util.Route{ 31 | Name: "getAllOrders", 32 | Method: "GET", 33 | Pattern: "/orders", 34 | HandlerFunc: s.getAllOrders(), 35 | }, 36 | util.Route{ 37 | Name: "setOrder", 38 | Method: "POST", 39 | Pattern: "/orders", 40 | HandlerFunc: s.setOrder(false), 41 | }, 42 | util.Route{ 43 | Name: "setOrder", 44 | Method: "PUT", 45 | Pattern: "/orders", 46 | HandlerFunc: s.setOrder(true), 47 | }, 48 | util.Route{ 49 | Name: "getOrder", 50 | Method: "GET", 51 | Pattern: "/orders/{id:-?[0-9]+}", 52 | HandlerFunc: s.getOrder(), 53 | }, 54 | util.Route{ 55 | Name: "createOrder", 56 | Method: "POST", 57 | Pattern: "/orders/create", 58 | HandlerFunc: s.createOrder(), 59 | }, 60 | util.Route{ 61 | Name: "delay", 62 | Method: "GET", 63 | Pattern: "/delay", 64 | HandlerFunc: s.delay(), 65 | }, 66 | util.Route{ 67 | Name: "simulateError", 68 | Method: "GET", 69 | Pattern: "/error", 70 | HandlerFunc: s.simulateError(), 71 | }, 72 | } 73 | 74 | for _, route := range routes { 75 | h := route.HandlerFunc 76 | 77 | // Tracing each request 78 | h = util.TracerMiddleware(h, route) 79 | 80 | // Logging each request 81 | h = util.LoggerMiddleware(h, s.logger) 82 | 83 | // Assign requestID to each request 84 | h = util.AssignRequestID(h, s.logger) 85 | 86 | // Monitoring each request 87 | // TODO: pass proper handler 88 | promHandler := util.PrometheusMiddleware(h, route.Pattern, rm) 89 | 90 | s.router. 91 | Methods(route.Method). 92 | Path(route.Pattern). 93 | Name(route.Name). 
94 | Handler(promHandler) 95 | } 96 | 97 | // Prometheus endpoint 98 | route := util.Route{ 99 | Name: "metrics", 100 | Method: "GET", 101 | Pattern: "/metrics", 102 | HandlerFunc: nil, 103 | } 104 | promHandler := promhttp.HandlerFor(s.promReg, promhttp.HandlerOpts{}) 105 | promHandler = promhttp.InstrumentMetricHandler(s.promReg, promHandler) 106 | s.router. 107 | Methods(route.Method). 108 | Path(route.Pattern). 109 | Name(route.Name). 110 | Handler(promHandler) 111 | 112 | // 404 handler 113 | notFound := util.PrometheusMiddleware(s.notFound(), "metrics", rm) 114 | s.router.NotFoundHandler = notFound 115 | } 116 | -------------------------------------------------------------------------------- /order/server.go: -------------------------------------------------------------------------------- 1 | package order 2 | 3 | import ( 4 | "context" 5 | "fmt" 6 | "io" 7 | "net" 8 | "net/http" 9 | "net/url" 10 | "os" 11 | "os/signal" 12 | "sync/atomic" 13 | "syscall" 14 | "time" 15 | 16 | "github.com/go-redis/redis" 17 | "github.com/gorilla/mux" 18 | "github.com/obitech/micro-obs/util" 19 | ot "github.com/opentracing/opentracing-go" 20 | "github.com/pkg/errors" 21 | "github.com/prometheus/client_golang/prometheus" 22 | ) 23 | 24 | const ( 25 | serviceName = "order" 26 | nextIDKey = "nextID" 27 | orderKeyNamespace = serviceName 28 | ) 29 | 30 | // Server is a wrapper for a HTTP server, with dependencies attached. 31 | type Server struct { 32 | address string 33 | endpoint string 34 | itemService string 35 | redis *redis.Client 36 | redisOps uint64 37 | server *http.Server 38 | router *mux.Router 39 | logger *util.Logger 40 | promReg *prometheus.Registry 41 | } 42 | 43 | // ServerOptions sets options when creating a new server. 44 | type ServerOptions func(*Server) error 45 | 46 | // NewServer creates a new Server according to options. 
47 | func NewServer(options ...ServerOptions) (*Server, error) { 48 | // Create default logger 49 | logger, err := util.NewLogger("info", serviceName) 50 | if err != nil { 51 | return nil, errors.Wrap(err, "unable to create Logger") 52 | } 53 | 54 | // Sane defaults 55 | rc, _ := NewRedisClient("redis://127.0.0.1:6380/0") 56 | s := &Server{ 57 | address: ":8090", 58 | endpoint: "http://127.0.0.1:8091", 59 | itemService: "http://127.0.0.1:8080", 60 | redis: rc, 61 | logger: logger, 62 | router: util.NewRouter(), 63 | promReg: prometheus.NewRegistry(), 64 | } 65 | 66 | // Applying custom settings 67 | for _, fn := range options { 68 | if err := fn(s); err != nil { 69 | return nil, errors.Wrap(err, "failed to set server options") 70 | } 71 | } 72 | 73 | // Instrumenting redis 74 | s.redis.WrapProcess(func(old func(cmd redis.Cmder) error) func(cmd redis.Cmder) error { 75 | return func(cmd redis.Cmder) error { 76 | atomic.AddUint64(&s.redisOps, 1) 77 | ops := atomic.LoadUint64(&s.redisOps) 78 | s.logger.Debugw("redis sent", 79 | "count", ops, 80 | "cmd", cmd, 81 | ) 82 | err := old(cmd) 83 | s.logger.Debugw("redis received", 84 | "count", ops, 85 | "cmd", cmd, 86 | ) 87 | return err 88 | } 89 | }) 90 | 91 | s.logger.Debugw("Creating new server", 92 | "address", s.address, 93 | "endpoint", s.endpoint, 94 | ) 95 | 96 | // Setting routes 97 | s.createRoutes() 98 | 99 | return s, nil 100 | } 101 | 102 | // InitPromReg initializes a custom Prometheus registry with Collectors. 103 | func (s *Server) InitPromReg() { 104 | s.promReg.MustRegister( 105 | prometheus.NewGoCollector(), 106 | prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}), 107 | rm.InFlightGauge, rm.Counter, rm.Duration, rm.ResponseSize, 108 | ) 109 | } 110 | 111 | // Run starts a Server and shuts it down properly on a SIGINT and SIGTERM. 
112 | func (s *Server) Run() error { 113 | defer s.logger.Sync() 114 | defer s.redis.Close() 115 | 116 | // Create TCP listener 117 | l, err := net.Listen("tcp", s.address) 118 | if err != nil { 119 | return errors.Wrapf(err, "Failed creating listener on %s", s.address) 120 | } 121 | 122 | // Create HTTP Server 123 | s.server = &http.Server{ 124 | Handler: s.router, 125 | ReadTimeout: 10 * time.Second, 126 | WriteTimeout: 10 * time.Second, 127 | MaxHeaderBytes: 1 << 20, 128 | } 129 | 130 | // Creating tracer 131 | var tracer ot.Tracer 132 | var closer io.Closer 133 | tracer, closer, err = util.InitTracer("order", s.logger) 134 | if err != nil { 135 | s.logger.Warnw("unable to initialize tracer", 136 | "error", err, 137 | ) 138 | } else { 139 | defer closer.Close() 140 | ot.SetGlobalTracer(tracer) 141 | } 142 | 143 | // Listening 144 | go func() { 145 | s.logger.Infow("Server listening", 146 | "address", s.address, 147 | "endpoint", s.endpoint, 148 | ) 149 | s.logger.Fatal(s.server.Serve(l)) 150 | }() 151 | 152 | // Buffered channel to receive a single os.Signal 153 | stop := make(chan os.Signal, 1) 154 | signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM) 155 | 156 | // Blocking channel until interrupt occurs 157 | <-stop 158 | s.Stop() 159 | 160 | return nil 161 | } 162 | 163 | // Stop will stop the server 164 | func (s *Server) Stop() { 165 | ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) 166 | defer cancel() 167 | 168 | s.logger.Info("Shutting down") 169 | if err := s.server.Shutdown(ctx); err != nil { 170 | s.logger.Errorw("HTTP server shutdown", 171 | "error", err, 172 | ) 173 | } 174 | } 175 | 176 | // ServeHTTP dispatches the request to the matching mux handler. 177 | // This function is mainly intended for testing purposes. 
// ServeHTTP dispatches the request to the matching mux handler.
// This function is mainly intended for testing purposes.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	s.router.ServeHTTP(w, r)
}

// internalError writes a plain-text 500 response and tags the active span
// (if any) with the status. If even this fallback write fails, it panics via
// the request-scoped logger.
func (s *Server) internalError(ctx context.Context, w http.ResponseWriter) {
	status := http.StatusInternalServerError
	// Tag the parent span so the failure is visible in tracing.
	parent := ot.SpanFromContext(ctx)
	if parent != nil {
		parent.SetTag("status", status)
	}

	// Drop any previously set Content-Type so the plain-text body is not
	// mislabeled (e.g. as application/json).
	w.Header().Del("Content-Type")
	w.WriteHeader(status)
	if _, err := io.WriteString(w, "Internal Server Error\n"); err != nil {
		log := util.RequestIDLoggerFromContext(ctx, s.logger)
		// Nothing sensible left to do if the error response itself fails.
		log.Panicw("unable to send response",
			"error", err,
		)
	}
}

// Respond sends a JSON-encoded response.
// status is the HTTP status code, m a human-readable message, c the count of
// items in data. A child span "Respond" records status, response headers and
// the payload summary. On marshalling/sending failure it falls back to
// internalError and then panics via the logger.
func (s *Server) Respond(ctx context.Context, status int, m string, c int, data []*Order, w http.ResponseWriter) {
	span, ctx := ot.StartSpanFromContext(ctx, "Respond")
	defer span.Finish()
	span.SetTag("status", status)
	log := util.RequestIDLoggerFromContext(ctx, s.logger)

	res, err := NewResponse(status, m, c, data)
	if err != nil {
		s.internalError(ctx, w)
		log.Panicw("unable to create JSON response",
			"error", err,
		)
	}

	err = res.SendJSON(w)
	if err != nil {
		// NOTE(review): headers may already be partially written at this
		// point, so the 500 status from internalError might not reach the
		// client — confirm SendJSON's failure modes.
		s.internalError(ctx, w)
		log.Panicw("sending JSON response failed",
			"error", err,
			"response", res,
		)
	}

	// Tracing information: response headers plus message/count/data.
	for k, v := range w.Header() {
		span.SetTag(fmt.Sprintf("header.%s", k), v)
	}
	span.LogKV(
		"message", m,
		"count", c,
		"data", data,
	)
}
redis://127.0.0.1:6379/0 or redis://:qwerty@localhost:6379/1 236 | func NewRedisClient(addr string) (*redis.Client, error) { 237 | opt, err := redis.ParseURL(addr) 238 | if err != nil { 239 | return nil, err 240 | } 241 | 242 | c := redis.NewClient(&redis.Options{ 243 | Addr: opt.Addr, 244 | Password: opt.Password, 245 | DB: opt.DB, 246 | }) 247 | 248 | return c, nil 249 | } 250 | 251 | // SetServerAddress sets the server address. 252 | func SetServerAddress(address string) ServerOptions { 253 | return func(s *Server) error { 254 | if err := util.CheckTCPAddress(address); err != nil { 255 | return err 256 | } 257 | s.address = address 258 | return nil 259 | } 260 | } 261 | 262 | // SetServerEndpoint sets the server endpoint address for other services to call it. 263 | func SetServerEndpoint(address string) ServerOptions { 264 | return func(s *Server) error { 265 | s.endpoint = address 266 | return nil 267 | } 268 | } 269 | 270 | // SetItemServiceAddress sets the address to reach the Item service. 271 | func SetItemServiceAddress(address string) ServerOptions { 272 | return func(s *Server) error { 273 | if _, err := url.Parse(address); err != nil { 274 | return err 275 | } 276 | s.itemService = address 277 | return nil 278 | } 279 | } 280 | 281 | // SetLogLevel sets the log level to either debug, warn, error or info. Info is default. 282 | func SetLogLevel(level string) ServerOptions { 283 | return func(s *Server) error { 284 | l, err := util.NewLogger(level, serviceName) 285 | if err != nil { 286 | return err 287 | } 288 | s.logger = l 289 | return nil 290 | } 291 | } 292 | 293 | // SetRedisAddress sets a custom address for the redis connection. 
294 | func SetRedisAddress(address string) ServerOptions { 295 | return func(s *Server) error { 296 | // Close old client 297 | err := s.redis.Close() 298 | if err != nil { 299 | s.logger.Warnw("Error while closing old redis client", 300 | "error", err, 301 | ) 302 | } 303 | 304 | rc, err := NewRedisClient(address) 305 | if err != nil { 306 | return err 307 | } 308 | s.redis = rc 309 | return nil 310 | } 311 | } 312 | -------------------------------------------------------------------------------- /static/grafana1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/grafana1.PNG -------------------------------------------------------------------------------- /static/grafana2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/grafana2.png -------------------------------------------------------------------------------- /static/jaeger1.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/jaeger1.PNG -------------------------------------------------------------------------------- /static/jaeger2.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/jaeger2.PNG -------------------------------------------------------------------------------- /static/kibana1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/kibana1.png -------------------------------------------------------------------------------- 
/static/micro-obs-kubernetes.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/micro-obs-kubernetes.png -------------------------------------------------------------------------------- /static/micro-obs-observability.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/micro-obs-observability.png -------------------------------------------------------------------------------- /static/micro-obs-overview.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/obitech/micro-obs/9ec2ec84c7cc0aea4b6aa99283efd0f853faaf0b/static/micro-obs-overview.png -------------------------------------------------------------------------------- /util/handlers.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | ) 7 | 8 | // Healthz responds to a HTTP healthcheck 9 | func Healthz() http.HandlerFunc { 10 | return func(w http.ResponseWriter, r *http.Request) { 11 | fmt.Fprintf(w, "OK\n") 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /util/logger.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import ( 4 | "context" 5 | "net/http" 6 | "time" 7 | 8 | "github.com/pkg/errors" 9 | "go.uber.org/zap" 10 | ) 11 | 12 | // Logger is an adapter type for zap's SugaredLogger 13 | type Logger struct { 14 | logger *zap.SugaredLogger 15 | } 16 | 17 | // NewLogger creates a new Logger. 
// NewLogger creates a new Logger.
// level selects the minimum log level ("debug", "warn", "error"); any other
// value falls back to "info". serviceName is attached to every entry as the
// "service" field. Output is JSON on stdout.
func NewLogger(level, serviceName string) (*Logger, error) {
	atom := zap.NewAtomicLevel()

	switch level {
	case "debug":
		atom.SetLevel(zap.DebugLevel)
	case "warn":
		atom.SetLevel(zap.WarnLevel)
	case "error":
		atom.SetLevel(zap.ErrorLevel)
	default:
		// Unknown (or "info") levels default to info; normalize the name for
		// the debug message below.
		level = "info"
		atom.SetLevel(zap.InfoLevel)
	}

	cfg := zap.Config{
		Development:       false,
		DisableCaller:     true,
		DisableStacktrace: false,
		EncoderConfig:     zap.NewProductionEncoderConfig(),
		Encoding:          "json",
		// NOTE(review): internal zap errors also go to stdout, presumably so
		// a log shipper picks up a single stream — confirm before changing
		// this to stderr.
		ErrorOutputPaths: []string{"stdout"},
		Level:            atom,
		OutputPaths:      []string{"stdout"},
	}

	if cfg.InitialFields == nil {
		cfg.InitialFields = make(map[string]interface{})
	}
	cfg.InitialFields["service"] = serviceName

	l, err := cfg.Build()
	if err != nil {
		return nil, errors.Wrap(err, "Unable to initialize zap Logger")
	}

	l.Debug("Logger created",
		zap.String("level", level),
	)

	return &Logger{logger: l.Sugar()}, nil
}

// LoggerMiddleware is a decorator for a HTTP Request, adding structured logging functionality.
// Receipt of a request is logged at debug level; completion (with duration)
// at info level. Both entries carry the request ID if one has been assigned.
func LoggerMiddleware(inner http.Handler, logger *Logger) http.HandlerFunc {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		start := time.Now()
		// Child logger carrying the requestID from the request context.
		log := RequestIDLogger(logger, r)
		log.Debugw("request received",
			"address", r.RemoteAddr,
			"method", r.Method,
			"path", r.RequestURI,
		)
		inner.ServeHTTP(w, r)
		log.Infow("request completed",
			"address", r.RemoteAddr,
			"method", r.Method,
			"path", r.RequestURI,
			"duration", time.Since(start),
		)
	})
}
83 | func RequestIDLogger(l *Logger, r *http.Request) *Logger { 84 | reqID := RequestIDFromContext(r.Context()) 85 | log := l.logger.With("requestID", reqID) 86 | return &Logger{log} 87 | } 88 | 89 | // RequestIDLoggerFromContext extract the requestID from a passed context, adds 90 | // it to a child logger and then returns the child logger. 91 | func RequestIDLoggerFromContext(ctx context.Context, l *Logger) *Logger { 92 | reqID := RequestIDFromContext(ctx) 93 | log := l.logger.With("requestID", reqID) 94 | return &Logger{log} 95 | } 96 | 97 | // Info uses fmt.Sprint to log a templated message. 98 | func (l *Logger) Info(args ...interface{}) { 99 | l.logger.Info(args...) 100 | } 101 | 102 | // Infof uses fmt.Sprintf to log a templated message. 103 | func (l *Logger) Infof(msg string, args ...interface{}) { 104 | l.logger.Infof(msg, args...) 105 | } 106 | 107 | // Infow logs a message with some additional context. The variadic key-value pairs are treated as they are in With. 108 | func (l *Logger) Infow(msg string, kv ...interface{}) { 109 | l.logger.Infow(msg, kv...) 110 | } 111 | 112 | // Error uses fmt.Sprint to construct and log a message. 113 | func (l *Logger) Error(msg string) { 114 | l.logger.Error(msg) 115 | } 116 | 117 | // Errorw logs a message with some additional context. The variadic key-value 118 | // pairs are treated as they are in With. 119 | func (l *Logger) Errorw(msg string, kv ...interface{}) { 120 | l.logger.Errorw(msg, kv...) 121 | } 122 | 123 | // Warn uses fmt.Sprint to log a templated message. 124 | func (l *Logger) Warn(args ...interface{}) { 125 | l.logger.Warn(args...) 126 | } 127 | 128 | // Warnf uses fmt.Sprintf to log a templated message. 129 | func (l *Logger) Warnf(msg string, args ...interface{}) { 130 | l.logger.Warnf(msg, args...) 131 | } 132 | 133 | // Warnw logs a message with some additional context. The variadic key-value pairs are treated as they are in With. 
// Warnw logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func (l *Logger) Warnw(msg string, kv ...interface{}) {
	l.logger.Warnw(msg, kv...)
}

// Debug uses fmt.Sprint to log a templated message.
func (l *Logger) Debug(args ...interface{}) {
	l.logger.Debug(args...)
}

// Debugf uses fmt.Sprintf to log a templated message.
func (l *Logger) Debugf(msg string, args ...interface{}) {
	l.logger.Debugf(msg, args...)
}

// Debugw logs a message with some additional context. The variadic key-value pairs are treated as they are in With.
func (l *Logger) Debugw(msg string, kv ...interface{}) {
	l.logger.Debugw(msg, kv...)
}

// Panic uses fmt.Sprint to log a templated message, then panics.
func (l *Logger) Panic(args ...interface{}) {
	l.logger.Panic(args...)
}

// Panicf uses fmt.Sprintf to log a templated message, then panics.
func (l *Logger) Panicf(msg string, args ...interface{}) {
	l.logger.Panicf(msg, args...)
}

// Panicw logs a message with some additional context, then panics. The variadic key-value pairs are treated as they are in With.
func (l *Logger) Panicw(msg string, kv ...interface{}) {
	l.logger.Panicw(msg, kv...)
}

// Fatal uses fmt.Sprint to log a templated message, then calls os.Exit.
func (l *Logger) Fatal(args ...interface{}) {
	l.logger.Fatal(args...)
}

// Fatalf uses fmt.Sprintf to log a templated message, then calls os.Exit.
func (l *Logger) Fatalf(msg string, args ...interface{}) {
	l.logger.Fatalf(msg, args...)
}

// Fatalw logs a message with some additional context, then calls os.Exit. The variadic key-value pairs are treated as they are in With.
func (l *Logger) Fatalw(msg string, kv ...interface{}) {
	l.logger.Fatalw(msg, kv...)
}
// Sync flushes any buffered log entries.
// The error from the underlying zap Sync is deliberately dropped —
// presumably because syncing stdout is best-effort; confirm if that ever
// needs to be surfaced.
func (l *Logger) Sync() {
	l.logger.Sync()
}

// RequestMetricHistogram defines a type of used metrics for a specific request, using Histograms for observations
type RequestMetricHistogram struct {
	InFlightGauge prometheus.Gauge         // requests currently being served
	Counter       *prometheus.CounterVec   // total requests, labeled by code and method
	Duration      *prometheus.HistogramVec // request latency, labeled by handler and method
	ResponseSize  *prometheus.HistogramVec // response sizes in bytes (no labels)
}

// NewRequestMetricHistogram creates a RequestMetricHistogram struct with sane defaults.
// durationBuckets and responseSizeBuckets configure the two histograms.
// The returned collectors still have to be registered with a registry by the
// caller (the order service does this in Server.InitPromReg).
func NewRequestMetricHistogram(durationBuckets, responseSizeBuckets []float64) *RequestMetricHistogram {
	return &RequestMetricHistogram{
		InFlightGauge: prometheus.NewGauge(prometheus.GaugeOpts{
			Name: "in_flight_requests",
			Help: "A gauge of requests currently being served.",
		}),
		Counter: prometheus.NewCounterVec(
			prometheus.CounterOpts{
				Name: "api_requests_total",
				Help: "A counter for requests to the wrapped handler.",
			},
			[]string{"code", "method"},
		),
		Duration: prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Name:    "http_request_duration_seconds",
				Help:    "A histogram for latencies for requests.",
				Buckets: durationBuckets,
			},
			[]string{"handler", "method"},
		),
		ResponseSize: prometheus.NewHistogramVec(
			prometheus.HistogramOpts{
				Name:    "http_response_size_bytes",
				Help:    "A histogram of response sizes for requests.",
				Buckets: responseSizeBuckets,
			},
			[]string{},
		),
	}
}
// PrometheusMiddleware wraps a request for monitoring via Prometheus.
// The chain (outermost first) tracks in-flight requests, duration (with the
// given handler label curried in), request count and response size, then
// calls h.
func PrometheusMiddleware(h http.Handler, handler string, rm *RequestMetricHistogram) http.Handler {
	// TODO: capture return code & handler for rm.Counter
	promHandler := promhttp.InstrumentHandlerInFlight(
		rm.InFlightGauge,
		promhttp.InstrumentHandlerDuration(
			rm.Duration.MustCurryWith(prometheus.Labels{"handler": handler}),
			promhttp.InstrumentHandlerCounter(
				rm.Counter,
				promhttp.InstrumentHandlerResponseSize(
					rm.ResponseSize,
					h,
				),
			),
		),
	)

	return promHandler
}

// key is an unexported context-key type so entries cannot collide with keys
// from other packages.
type key int

// requestIDKey is the context key under which the request ID is stored.
const requestIDKey key = iota

// RequestIDFromContext extracts a Request ID from a context.
// Returns "" when no request ID is stored in ctx.
func RequestIDFromContext(ctx context.Context) string {
	reqID, ok := ctx.Value(requestIDKey).(string)
	if !ok {
		reqID = ""
	}
	return reqID
}
26 | func AssignRequestID(inner http.Handler, logger *Logger) http.HandlerFunc { 27 | return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 28 | reqID := r.Header.Get("X-Request-ID") 29 | ctx := r.Context() 30 | if reqID == "" { 31 | reqID = uuid.Must(uuid.NewV4()).String() 32 | ctx = context.WithValue(ctx, requestIDKey, reqID) 33 | logger.Debugw("assigned new requestID", 34 | "requestID", reqID, 35 | ) 36 | r.Header.Add("X-Request-ID", reqID) 37 | } else { 38 | logger.Debugw("found existing requestID", 39 | "requestID", reqID, 40 | ) 41 | } 42 | w.Header().Set("X-Request-ID", reqID) 43 | inner.ServeHTTP(w, r.WithContext(ctx)) 44 | }) 45 | } 46 | -------------------------------------------------------------------------------- /util/route.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "net/http" 4 | 5 | //Route defines a specific route 6 | type Route struct { 7 | Name string 8 | Method string 9 | Pattern string 10 | HandlerFunc http.HandlerFunc 11 | } 12 | 13 | // Routes defines a slice of all available API Routes 14 | type Routes []Route 15 | -------------------------------------------------------------------------------- /util/router.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "github.com/gorilla/mux" 4 | 5 | // NewRouter returns a gorilla/mux router 6 | func NewRouter() *mux.Router { 7 | router := mux.NewRouter() 8 | return router 9 | } 10 | -------------------------------------------------------------------------------- /util/server.go: -------------------------------------------------------------------------------- 1 | package util 2 | 3 | import "net/http" 4 | 5 | // Server is an interface for types that implement ServeHTTP. 6 | // For now this is only used for testing but will possibly extended for refactoring at a later point. 
// Server is an interface for types that implement ServeHTTP.
// For now this is only used for testing but will possibly extended for refactoring at a later point.
type Server interface {
	ServeHTTP(w http.ResponseWriter, r *http.Request)
}

// InitTracer returns an instance of Jaeger Tracer that samples 100% of traces.
// Base configuration comes from the environment (config.FromEnv); the sampler
// is then forced to const/1 (sample everything) and span logging is disabled.
// NOTE(review): the previous doc comment claimed spans are logged to stdout,
// but Reporter.LogSpans is explicitly set to false below.
func InitTracer(serviceName string, logger *Logger) (ot.Tracer, io.Closer, error) {
	cfg, err := config.FromEnv()
	if err != nil {
		return nil, nil, err
	}

	// Sample every trace; do not log individual spans.
	cfg.Sampler.Type = "const"
	cfg.Sampler.Param = 1
	cfg.Reporter.LogSpans = false

	tracer, closer, err := cfg.New(
		serviceName,
		config.Logger(logger),
		config.Metrics(prometheus.New()),
	)
	if err != nil {
		return nil, nil, err
	}
	return tracer, closer, nil
}
// CheckTCPAddress checks if a passed TCP address is a valid listening address:
// host is empty or a parsable IP, and port is a valid number or known service
// name. Returns nil when the address is usable.
func CheckTCPAddress(address string) error {
	host, port, err := net.SplitHostPort(address)
	if err != nil {
		return fmt.Errorf("invalid host: %s", address)
	}

	// An empty host (e.g. ":8080") means "listen on all interfaces" and is valid.
	ip := net.ParseIP(host)
	if ip == nil && host != "" {
		return fmt.Errorf("invalid IP address: %#v", host)
	}

	// Delegate port validation; previously this was an if/err dance followed
	// by `return err` on an error known to be nil.
	return CheckPort(port)
}

// CheckPort checks if a port is in the valid 0 <= port <= 65535 range.
// Known service names (currently only "http") are accepted as well.
func CheckPort(port string) error {
	validServices := []string{"http"}
	for _, v := range validServices {
		if port == v {
			return nil
		}
	}

	i, err := strconv.Atoi(port)
	if err != nil {
		// %w keeps the strconv error inspectable via errors.Is/As.
		// NOTE(review): this replaces pkg/errors.Wrapf with the stdlib
		// equivalent (identical message text); drop the pkg/errors import
		// from util.go if nothing else there still uses it.
		return fmt.Errorf("unable to convert %s to int: %w", port, err)
	}

	if i < 0 || i > 65535 {
		return fmt.Errorf("invalid port %s", port)
	}

	return nil
}
"127.0.0.1:80", 14 | "192.0.2.1:http", 15 | } 16 | 17 | invalidAddr = []string{ 18 | ":9999999", 19 | ":-1", 20 | "asokdklasd", 21 | "0.0.0.0.0.0:80", 22 | "256.0.0.1:80", 23 | } 24 | 25 | validPort = []string{ 26 | "0", 27 | "80", 28 | "42", 29 | "8080", 30 | "http", 31 | } 32 | 33 | invalidPort = []string{ 34 | "9999999", 35 | "tcp", 36 | "-1", 37 | } 38 | 39 | toHash = []string{ 40 | "Hello world", 41 | "0", 42 | "-1", 43 | "01234", 44 | "orange", 45 | "test", 46 | "😍", 47 | "👾 🙇 💁 🙅 🙆 🙋 🙎 🙍", 48 | "﷽", 49 | " ", 50 | "1)4_?&$", 51 | } 52 | ) 53 | 54 | func TestCheckTCPAddress(t *testing.T) { 55 | t.Run("Valid addresses", func(t *testing.T) { 56 | for _, tt := range validAddr { 57 | if err := CheckTCPAddress(tt); err != nil { 58 | t.Errorf("CheckTCPAddress(%#v) unsuccessful, expected: %v, got: %#v", tt, nil, err) 59 | } 60 | } 61 | }) 62 | 63 | t.Run("Invalid addresses", func(t *testing.T) { 64 | for _, tt := range invalidAddr { 65 | if err := CheckTCPAddress(tt); err == nil { 66 | t.Errorf("CheckTCPAddress(%#v) should throw error, got: %v", tt, nil) 67 | } 68 | } 69 | }) 70 | } 71 | 72 | func TestCheckPort(t *testing.T) { 73 | t.Run("Valid ports", func(t *testing.T) { 74 | for _, tt := range validPort { 75 | if err := CheckPort(tt); err != nil { 76 | t.Errorf("CheckPort(%#v) unsuccessful, expected: %v, got: %#v", tt, nil, err) 77 | } 78 | } 79 | }) 80 | } 81 | 82 | func TestHashIDConversion(t *testing.T) { 83 | // String -> Encoded 84 | var hashMap = make(map[string]string) 85 | 86 | t.Run("Encoding", func(t *testing.T) { 87 | for _, tt := range toHash { 88 | h, err := StringToHashID(tt) 89 | if err != nil { 90 | t.Errorf("Unable to encode %#v to HashID: %#v", tt, err) 91 | } 92 | hashMap[tt] = h 93 | } 94 | }) 95 | 96 | t.Run("Decoding", func(t *testing.T) { 97 | for k, v := range hashMap { 98 | s, err := HashIDToString(v) 99 | if err != nil { 100 | t.Errorf("Unable to decode %#v to string: %#v", v, err) 101 | } 102 | 103 | if s != k { 104 | 
t.Errorf("HashIDToString(%#v) unsuccsessful, expected: %#v, got: %#v", v, k, s) 105 | } 106 | } 107 | }) 108 | } 109 | --------------------------------------------------------------------------------