├── .gitignore ├── AdvancedCloudNativeGo.postman_collection.json ├── Communication ├── Go-Micro │ ├── README.md │ ├── client │ │ ├── Dockerfile │ │ ├── README.md │ │ └── main.go │ ├── docker-compose.yml │ ├── kubernetes │ │ ├── k8s-deployment.yml │ │ ├── k8s-ingress.yml │ │ └── k8s-service.yml │ ├── proto │ │ ├── greeter.pb.go │ │ └── greeter.proto │ └── server │ │ ├── Dockerfile │ │ └── main.go ├── Kafka │ ├── README.md │ ├── docker-compose.yml │ ├── producer │ │ ├── Dockerfile │ │ └── producer.go │ └── subscriber │ │ ├── Dockerfile │ │ └── subscriber.go └── RabbitMQ │ ├── README.md │ ├── consumer │ ├── Dockerfile │ └── consumer.go │ ├── docker-compose.yml │ └── producer │ ├── Dockerfile │ └── producer.go ├── Configuration └── Consul │ ├── README.md │ └── docker-compose.yml ├── Discovery ├── Consul │ ├── README.md │ └── docker-compose.yml ├── Kubernetes │ ├── README.md │ ├── client │ │ ├── Dockerfile │ │ └── simple-k8s-client.go │ ├── docker-compose.yml │ ├── server │ │ ├── Dockerfile │ │ └── simple-k8s-server.go │ ├── simple-k8s-client-deployment.yaml │ ├── simple-k8s-client-service.yaml │ ├── simple-k8s-configmap.yaml │ ├── simple-k8s-server-deployment.yaml │ └── simple-k8s-server-service.yaml └── Simple │ ├── client │ ├── Dockerfile │ ├── README.md │ └── simple-client.go │ ├── docker-compose.yml │ └── server │ ├── Dockerfile │ ├── README.md │ └── simple-server.go ├── Frameworks ├── Gin-Web │ ├── Dockerfile │ ├── book.go │ ├── docker-compose.yml │ ├── favicon.ico │ ├── kubernetes │ │ ├── k8s-deployment.yml │ │ ├── k8s-ingress.yml │ │ └── k8s-service.yml │ ├── main.go │ └── templates │ │ └── index.html └── README.md ├── Images ├── building_blocks.jpg └── service_and_configmap.jpg └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | 2 | # Mac OS generated files 3 | .DS_Store 4 | 5 | # Visual Studio Code files 6 | .vscode/ 7 | -------------------------------------------------------------------------------- /AdvancedCloudNativeGo.postman_collection.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": [], 3 | "info": { 4 | "name": "AdvancedCloudNativeGo", 5 | "_postman_id": "d3a57dc7-713a-a049-fcb0-c4764de21c92", 6 | "description": "", 7 | "schema": "https://schema.getpostman.com/json/collection/v2.0.0/collection.json" 8 | }, 9 | "item": [ 10 | { 11 | "name": "Session 1", 12 | "description": "", 13 | "item": [ 14 | { 15 | "name": "Request Ping", 16 | "request": { 17 | "url": "http://localhost:8080/ping", 18 | "method": "GET", 19 | "header": [], 20 | "body": {}, 21 | "description": "" 22 | }, 23 | "response": [] 24 | }, 25 | { 26 | "name": "Get All Books", 27 | "request": { 28 | "url": "http://localhost:8080/api/books", 29 | "method": "GET", 30 | "header": [], 31 | "body": {}, 32 | "description": "" 33 | }, 34 | "response": [] 35 | }, 36 | { 37 | "name": "Create New Book", 38 | "request": { 39 | "url": "http://localhost:8080/api/books", 40 | "method": "POST", 41 | "header": [ 42 | { 43 | "key": "Content-Type", 44 | "value": "application/json", 45 | "description": "" 46 | } 47 | ], 48 | "body": { 49 | "mode": "raw", 50 | "raw": "{\n\t\"title\": \"New book\",\n\t\"author\": \"Postname\",\n\t\"isbn\": \"1234567890\"\n}" 51 | }, 52 | "description": "" 53 | }, 54 | "response": [] 55 | }, 56 | { 57 | "name": "Get Book 1234567890", 58 | "request": { 59 | "url": "http://localhost:8080/api/books/1234567890", 60 | "method": "GET", 61 | "header": [], 62 | "body": {}, 63 | "description": "" 
64 | }, 65 | "response": [] 66 | }, 67 | { 68 | "name": "Update Book 1234567890", 69 | "request": { 70 | "url": "http://localhost:8080/api/books/1234567890", 71 | "method": "PUT", 72 | "header": [ 73 | { 74 | "key": "Content-Type", 75 | "value": "application/json", 76 | "description": "" 77 | } 78 | ], 79 | "body": { 80 | "mode": "raw", 81 | "raw": "{\n \"title\": \"New book(Update)\",\n \"author\": \"Postname\",\n \"isbn\": \"1234567890\"\n}" 82 | }, 83 | "description": "" 84 | }, 85 | "response": [] 86 | }, 87 | { 88 | "name": "Delete Book 1234567890", 89 | "request": { 90 | "url": "http://localhost:8080/api/books/1234567890", 91 | "method": "DELETE", 92 | "header": [], 93 | "body": {}, 94 | "description": "" 95 | }, 96 | "response": [] 97 | } 98 | ] 99 | }, 100 | { 101 | "name": "Session 2", 102 | "description": "", 103 | "item": [ 104 | { 105 | "name": "Get Consul Catalog Services", 106 | "request": { 107 | "url": "http://localhost:8500/v1/catalog/services", 108 | "method": "GET", 109 | "header": [], 110 | "body": {}, 111 | "description": "" 112 | }, 113 | "response": [] 114 | }, 115 | { 116 | "name": "Get Consul Agent Services", 117 | "request": { 118 | "url": "http://localhost:8500/v1/agent/services", 119 | "method": "GET", 120 | "header": [], 121 | "body": {}, 122 | "description": "" 123 | }, 124 | "response": [] 125 | }, 126 | { 127 | "name": "Register Consul Agent Gin-Web-01", 128 | "request": { 129 | "url": "http://localhost:8500/v1/agent/service/register", 130 | "method": "PUT", 131 | "header": [ 132 | { 133 | "key": "Content-Type", 134 | "value": "application/json", 135 | "description": "" 136 | } 137 | ], 138 | "body": { 139 | "mode": "raw", 140 | "raw": "{\n\t\"ID\": \"gin-web-01\",\n\t\"Name\": \"gin-web\",\n\t\"Tags\": [\n\t\t\"cloud-native-go\",\n\t\t\"v1\"\n\t],\n\t\"Address\": \"gin-web-01\",\n\t\"Port\": 8080,\n\t\"EnableTagOverride\": false,\n\t\"check\": {\n\t\t\"id\": \"ping\", \n\t\t\"name\": \"HTTP API on port 8080\",\n\t\t\"http\": \"http://gin-web-01:8080/ping\",\n\t\t\"interval\": \"5s\",\n\t\t\"timeout\": \"1s\"\n\t}\n}" 141 | }, 142 | "description": "" 143 | }, 144 | "response": [] 145 | }, 146 | { 147 | "name": "Register Consul Agent Gin-Web-02", 148 | "request": { 149 | "url": "http://localhost:8500/v1/agent/service/register", 150 | "method": "PUT", 151 | "header": [ 152 | { 153 | "key": "Content-Type", 154 | "value": "application/json", 155 | "description": "" 156 | } 157 | ], 158 | "body": { 159 | "mode": "raw", 160 | "raw": "{\n\t\"ID\": \"gin-web-02\",\n\t\"Name\": \"gin-web\",\n\t\"Tags\": [\n\t\t\"cloud-native-go\",\n\t\t\"v1\"\n\t],\n\t\"Address\": \"gin-web-02\",\n\t\"Port\": 9090,\n\t\"EnableTagOverride\": false,\n\t\"check\": {\n\t\t\"id\": \"ping\", \n\t\t\"name\": \"HTTP API on port 9090\",\n\t\t\"http\": \"http://gin-web-02:9090/ping\",\n\t\t\"interval\": \"5s\",\n\t\t\"timeout\": \"1s\"\n\t}\n}" 161 | }, 162 | "description": "" 163 | }, 164 | "response": [] 165 | }, 166 | { 167 | "name": "Get Consul Health Gin-Web All", 168 | "request": { 169 | "url": "http://localhost:8500/v1/health/service/gin-web", 170 | "method": "GET", 171 | "header": [], 172 | "body": {}, 173 | "description": "" 174 | }, 175 | "response": [] 176 | }, 177 | { 178 | "name": "Get Consul Health Gin-Web Passing", 179 | "request": { 180 | "url": { 181 | "raw": "http://localhost:8500/v1/health/service/gin-web?passing", 182 | "protocol": "http", 183 | "host": [ 184 | "localhost" 185 | ], 186 | "port": "8500", 187 | "path": [ 188 | "v1", 189 | "health", 190 | "service", 191 | 
"gin-web" 192 | ], 193 | "query": [ 194 | { 195 | "key": "passing", 196 | "value": "", 197 | "equals": false, 198 | "description": "" 199 | } 200 | ], 201 | "variable": [] 202 | }, 203 | "method": "GET", 204 | "header": [], 205 | "body": {}, 206 | "description": "" 207 | }, 208 | "response": [] 209 | }, 210 | { 211 | "name": "Get Consul Critical Health Checks", 212 | "request": { 213 | "url": "http://localhost:8500/v1/health/state/critical", 214 | "method": "GET", 215 | "header": [], 216 | "body": {}, 217 | "description": "" 218 | }, 219 | "response": [] 220 | }, 221 | { 222 | "name": "Get Gin Web All Keys", 223 | "request": { 224 | "url": { 225 | "raw": "http://localhost:8500/v1/kv/?keys", 226 | "protocol": "http", 227 | "host": [ 228 | "localhost" 229 | ], 230 | "port": "8500", 231 | "path": [ 232 | "v1", 233 | "kv", 234 | "" 235 | ], 236 | "query": [ 237 | { 238 | "key": "keys", 239 | "value": "", 240 | "equals": false, 241 | "description": "" 242 | } 243 | ], 244 | "variable": [] 245 | }, 246 | "method": "GET", 247 | "header": [], 248 | "body": {}, 249 | "description": "" 250 | }, 251 | "response": [] 252 | }, 253 | { 254 | "name": "Create Gin Web Message Key", 255 | "request": { 256 | "url": "http://localhost:8500/v1/kv/gin-web/message", 257 | "method": "PUT", 258 | "header": [], 259 | "body": { 260 | "mode": "raw", 261 | "raw": "Hello Gin Framework." 262 | }, 263 | "description": "" 264 | }, 265 | "response": [] 266 | }, 267 | { 268 | "name": "Get Gin Web All Configs", 269 | "request": { 270 | "url": { 271 | "raw": "http://localhost:8500/v1/kv/gin-web?recurse", 272 | "protocol": "http", 273 | "host": [ 274 | "localhost" 275 | ], 276 | "port": "8500", 277 | "path": [ 278 | "v1", 279 | "kv", 280 | "gin-web" 281 | ], 282 | "query": [ 283 | { 284 | "key": "recurse", 285 | "value": "", 286 | "equals": false, 287 | "description": "" 288 | } 289 | ], 290 | "variable": [] 291 | }, 292 | "method": "GET", 293 | "header": [], 294 | "body": {}, 295 | "description": "" 296 | }, 297 | "response": [] 298 | }, 299 | { 300 | "name": "Get Gin Web Message Config", 301 | "request": { 302 | "url": "http://localhost:8500/v1/kv/gin-web/message", 303 | "method": "GET", 304 | "header": [], 305 | "body": {}, 306 | "description": "" 307 | }, 308 | "response": [] 309 | }, 310 | { 311 | "name": "Get Gin Web Message Config(Raw)", 312 | "request": { 313 | "url": { 314 | "raw": "http://localhost:8500/v1/kv/gin-web/message?raw=true", 315 | "protocol": "http", 316 | "host": [ 317 | "localhost" 318 | ], 319 | "port": "8500", 320 | "path": [ 321 | "v1", 322 | "kv", 323 | "gin-web", 324 | "message" 325 | ], 326 | "query": [ 327 | { 328 | "key": "raw", 329 | "value": "true", 330 | "equals": true, 331 | "description": "" 332 | } 333 | ], 334 | "variable": [] 335 | }, 336 | "method": "GET", 337 | "header": [], 338 | "body": {}, 339 | "description": "" 340 | }, 341 | "response": [] 342 | }, 343 | { 344 | "name": "Create Gin Web Message Key (Update)", 345 | "request": { 346 | "url": "http://localhost:8500/v1/kv/gin-web/message", 347 | "method": "PUT", 348 | "header": [], 349 | "body": { 350 | "mode": "raw", 351 | "raw": "Hello Gin Framework (Updated)." 
352 | }, 353 | "description": "" 354 | }, 355 | "response": [] 356 | }, 357 | { 358 | "name": "Delete Gin Web Configs", 359 | "request": { 360 | "url": "http://localhost:8500/v1/kv/foo", 361 | "method": "DELETE", 362 | "header": [], 363 | "body": {}, 364 | "description": "" 365 | }, 366 | "response": [] 367 | } 368 | ] 369 | } 370 | ] 371 | } -------------------------------------------------------------------------------- /Communication/Go-Micro/README.md: -------------------------------------------------------------------------------- 1 | # Implement Sync RPC calls with Binary Protocols 2 | For this section, we will be using the [Go-Micro](https://github.com/micro/go-micro) framework. 3 | 4 | ## Generating protobuf library 5 | 6 | Install protoc compiler, [here](https://github.com/golang/protobuf#user-content-installation) explains how to do it, and use it to generate go stubs from the `/proto/greeter.proto` file. 7 | 8 | Here is the content of `greeter.proto` file: 9 | 10 | ```proto 11 | syntax = "proto3"; 12 | 13 | service Greeter { 14 | rpc Hello(HelloRequest) returns (HelloResponse) {} 15 | } 16 | 17 | message HelloRequest { 18 | string name = 1; 19 | } 20 | 21 | message HelloResponse { 22 | string greeting = 2; 23 | } 24 | ``` 25 | 26 | and here is how we compile it using `protoc`: 27 | ``` bash 28 | $ cd ./Communication/Go-Micro/proto 29 | $ protoc --go_out=. *.proto 30 | ``` 31 | 32 | this will generate the go file `greeter.pb.go` which will be used by the client and the server. 33 | 34 | ## server 35 | The server is defined in `/server/main.go`. It implements the Greeter API function `Hello` and starts a server using Go-Micro. 36 | ``` go 37 | func (g *Greeter) Hello(ctx context.Context, req *proto.HelloRequest, rsp *proto.HelloResponse) error 38 | ``` 39 | 40 | ## client 41 | The client is defined in `/client/main.go`. It uses Consul for service discovery and creates a Greeter client to be able to call the `Hello` function every 3 seconds. 42 | 43 | ## Build docker images 44 | We will use docker compose to build all docker images: 45 | ```bash 46 | $$ docker-compose build 47 | consul uses an image, skipping 48 | hystrix-dashboard uses an image, skipping 49 | Building go-micro-server 50 | Step 1/9 : FROM golang:1.12.1-alpine 51 | ---> c0e5aac9423b 52 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 53 | ---> Using cache 54 | ---> 0636afd40b6b 55 | Step 3/9 : RUN go get -u github.com/micro/go-micro && go get github.com/micro/protobuf/proto && go get -u github.com/micro/protobuf/protoc-gen-go 56 | ---> Using cache 57 | ---> 00eaf3b8c703 58 | Step 4/9 : ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Communication/Go-Micro/ 59 | ---> Using cache 60 | ---> 6047284e87c7 61 | Step 5/9 : COPY . 
${SOURCES} 62 | ---> bf9870558ebc 63 | Step 6/9 : RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 64 | ---> Running in c5101a94816e 65 | Removing intermediate container c5101a94816e 66 | ---> cd937794fb4c 67 | Step 7/9 : ENV CONSUL_HTTP_ADDR localhost:8500 68 | ---> Running in a923b3d5e52c 69 | Removing intermediate container a923b3d5e52c 70 | ---> c3b5d0ff4621 71 | Step 8/9 : WORKDIR ${SOURCES}server/ 72 | ---> Running in b4341da8b4b6 73 | Removing intermediate container b4341da8b4b6 74 | ---> 57b575341b1d 75 | Step 9/9 : CMD ${SOURCES}server/server 76 | ---> Running in 302cfb63a72d 77 | Removing intermediate container 302cfb63a72d 78 | ---> d640c013ac22 79 | Successfully built d640c013ac22 80 | Successfully tagged go-micro-server:1.0.1 81 | Building go-micro-client 82 | Step 1/9 : FROM golang:1.12.1-alpine 83 | ---> c0e5aac9423b 84 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 85 | ---> Using cache 86 | ---> 0636afd40b6b 87 | Step 3/9 : RUN go get -u github.com/micro/micro && go get github.com/micro/protobuf/proto && go get -u github.com/micro/protobuf/protoc-gen-go && go get github.com/micro/go-plugins/wrapper/breaker/hystrix && go get github.com/afex/hystrix-go/hystrix 88 | ---> Using cache 89 | ---> f5a7d190c0b3 90 | Step 4/9 : ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Communication/Go-Micro/ 91 | ---> Running in cce4421467b1 92 | Removing intermediate container cce4421467b1 93 | ---> f8448deac077 94 | Step 5/9 : COPY . ${SOURCES} 95 | ---> dc0951cede83 96 | Step 6/9 : RUN cd ${SOURCES}client/ && CGO_ENABLED=0 go build 97 | ---> Running in 51cd45b323af 98 | Removing intermediate container 51cd45b323af 99 | ---> 43960c6359ba 100 | Step 7/9 : ENV CONSUL_HTTP_ADDR localhost:8500 101 | ---> Running in e44fcca33793 102 | Removing intermediate container e44fcca33793 103 | ---> 89be72ad2368 104 | Step 8/9 : WORKDIR ${SOURCES}client/ 105 | ---> Running in d6013160d07a 106 | Removing intermediate container d6013160d07a 107 | ---> 67d2fbc74345 108 | Step 9/9 : CMD ${SOURCES}client/client 109 | ---> Running in 74c082821e44 110 | Removing intermediate container 74c082821e44 111 | ---> 78788eef751f 112 | Successfully built 78788eef751f 113 | Successfully tagged go-micro-client:1.0.1 114 | ``` 115 | 116 | ## Run 117 | Start Consul, the server and the client using docker compose: 118 | ```bash 119 | $ docker-compose up 120 | Creating network "go-micro_sky-net" with driver "bridge" 121 | Pulling hystrix-dashboard (mlabouardy/hystrix-dashboard:latest)... 122 | latest: Pulling from mlabouardy/hystrix-dashboard 123 | 03e1855d4f31: Pull complete 124 | a3ed95caeb02: Pull complete 125 | 5b3a2df3e63b: Pull complete 126 | 6ecee6444751: Pull complete 127 | 5b865d39f77d: Pull complete 128 | e7e5c0273866: Pull complete 129 | 6a4effbc4451: Pull complete 130 | 4b6cb08bb4bc: Pull complete 131 | 7b07ad270e2c: Pull complete 132 | f03f44139976: Pull complete 133 | Creating go-micro_hystrix-dashboard_1 ... done 134 | Creating go-micro_consul_1 ... done 135 | Creating go-micro_go-micro-server_1 ... done 136 | Creating go-micro_go-micro-client_1 ... done 137 | Attaching to go-micro_hystrix-dashboard_1, go-micro_consul_1, go-micro_go-micro-server_1, go-micro_go-micro-client_1 138 | consul_1 | ==> Starting Consul agent... 139 | go-micro-server_1 | 2019/03/22 15:03:30 Transport [http] Listening on [::]:38487 140 | consul_1 | ==> Consul agent running! 
141 | consul_1 | Version: 'v0.8.3' 142 | consul_1 | Node ID: '3c5eeeff-e5d6-afae-a297-5058f80a1b93' 143 | consul_1 | Node name: 'dd2139f3b358' 144 | consul_1 | Datacenter: 'dc1' 145 | consul_1 | Server: true (bootstrap: false) 146 | consul_1 | Client Addr: 0.0.0.0 (HTTP: 8500, HTTPS: -1, DNS: 8600) 147 | consul_1 | Cluster Addr: 127.0.0.1 (LAN: 8301, WAN: 8302) 148 | consul_1 | Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false 149 | consul_1 | Atlas: 150 | consul_1 | 151 | consul_1 | ==> Log data will now stream in as it occurs: 152 | consul_1 | 153 | consul_1 | 2019/03/22 15:03:30 [DEBUG] Using unique ID "3c5eeeff-e5d6-afae-a297-5058f80a1b93" from host as node ID 154 | consul_1 | 2019/03/22 15:03:30 [INFO] raft: Initial configuration (index=1): [{Suffrage:Voter ID:127.0.0.1:8300 Address:127.0.0.1:8300}] 155 | consul_1 | 2019/03/22 15:03:30 [INFO] raft: Node at 127.0.0.1:8300 [Follower] entering Follower state (Leader: "") 156 | consul_1 | 2019/03/22 15:03:30 [INFO] serf: EventMemberJoin: dd2139f3b358 127.0.0.1 157 | consul_1 | 2019/03/22 15:03:30 [INFO] serf: EventMemberJoin: dd2139f3b358.dc1 127.0.0.1 158 | consul_1 | 2019/03/22 15:03:30 [INFO] consul: Adding LAN server dd2139f3b358 (Addr: tcp/127.0.0.1:8300) (DC: dc1) 159 | consul_1 | 2019/03/22 15:03:30 [INFO] consul: Handled member-join event for server "dd2139f3b358.dc1" in area "wan" 160 | go-micro-server_1 | 2019/03/22 15:03:30 Broker [http] Connected to [::]:33229 161 | consul_1 | 2019/03/22 15:03:30 [WARN] raft: Heartbeat timeout from "" reached, starting election 162 | consul_1 | 2019/03/22 15:03:30 [INFO] raft: Node at 127.0.0.1:8300 [Candidate] entering Candidate state in term 2 163 | consul_1 | 2019/03/22 15:03:30 [DEBUG] raft: Votes needed: 1 164 | consul_1 | 2019/03/22 15:03:30 [DEBUG] raft: Vote granted from 127.0.0.1:8300 in term 2. Tally: 1 165 | consul_1 | 2019/03/22 15:03:30 [INFO] raft: Election won. 
Tally: 1 166 | consul_1 | 2019/03/22 15:03:30 [INFO] raft: Node at 127.0.0.1:8300 [Leader] entering Leader state 167 | consul_1 | 2019/03/22 15:03:30 [INFO] consul: cluster leadership acquired 168 | consul_1 | 2019/03/22 15:03:30 [DEBUG] consul: reset tombstone GC to index 3 169 | go-micro-server_1 | 2019/03/22 15:03:30 Registry [mdns] Registering node: greeter-129b4606-8739-4df8-a61b-338b366cb270 170 | consul_1 | 2019/03/22 15:03:30 [INFO] consul: member 'dd2139f3b358' joined, marking health alive 171 | consul_1 | 2019/03/22 15:03:30 [INFO] consul: New leader elected: dd2139f3b358 172 | consul_1 | 2019/03/22 15:03:30 [INFO] agent: Synced service 'consul' 173 | consul_1 | 2019/03/22 15:03:30 [DEBUG] agent: Node info in sync 174 | hystrix-dashboard_1 | 2019-03-22 15:03:31.773 INFO 1 --- [ main] c.l.HysterixDashboardApplication : Starting HysterixDashboardApplication v0.0.1-SNAPSHOT on d83874cff53a with PID 1 (/hysterix-dashboard.jar started by root in /) 175 | hystrix-dashboard_1 | 2019-03-22 15:03:31.791 INFO 1 --- [ main] c.l.HysterixDashboardApplication : No active profile set, falling back to default profiles: default 176 | hystrix-dashboard_1 | 2019-03-22 15:03:31.902 INFO 1 --- [ main] s.c.a.AnnotationConfigApplicationContext : Refreshing org.springframework.context.annotation.AnnotationConfigApplicationContext@478267af: startup date [Fri Mar 22 15:03:31 UTC 2019]; root of context hierarchy 177 | hystrix-dashboard_1 | 2019-03-22 15:03:32.518 INFO 1 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'configurationPropertiesRebinderAutoConfiguration' of type [class org.springframework.cloud.autoconfigure.ConfigurationPropertiesRebinderAutoConfiguration$$EnhancerBySpringCGLIB$$658a8629] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying) 178 | hystrix-dashboard_1 | 2019-03-22 15:03:32.895 INFO 1 --- [ main] c.l.HysterixDashboardApplication : Started HysterixDashboardApplication in 2.001 seconds (JVM running for 3.425) 179 | hystrix-dashboard_1 | 180 | hystrix-dashboard_1 | . 
____ _ __ _ _ 181 | hystrix-dashboard_1 | /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ 182 | hystrix-dashboard_1 | ( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ 183 | hystrix-dashboard_1 | \\/ ___)| |_)| | | | | || (_| | ) ) ) ) 184 | hystrix-dashboard_1 | ' |____| .__|_| |_|_| |_\__, | / / / / 185 | hystrix-dashboard_1 | =========|_|==============|___/=/_/_/_/ 186 | hystrix-dashboard_1 | :: Spring Boot :: (v1.3.3.RELEASE) 187 | hystrix-dashboard_1 | 188 | hystrix-dashboard_1 | 2019-03-22 15:03:33.093 INFO 1 --- [ main] c.l.HysterixDashboardApplication : No active profile set, falling back to default profiles: default 189 | hystrix-dashboard_1 | 2019-03-22 15:03:33.118 INFO 1 --- [ main] ationConfigEmbeddedWebApplicationContext : Refreshing org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@51d67bb8: startup date [Fri Mar 22 15:03:33 UTC 2019]; parent: org.springframework.context.annotation.AnnotationConfigApplicationContext@478267af 190 | hystrix-dashboard_1 | 2019-03-22 15:03:34.234 INFO 1 --- [ main] o.s.b.f.s.DefaultListableBeanFactory : Overriding bean definition for bean 'beanNameViewResolver' with a different definition: replacing [Root bean: class [null]; scope=; abstract=false; lazyInit=false; autowireMode=3; dependencyCheck=0; autowireCandidate=true; primary=false; factoryBeanName=org.springframework.boot.autoconfigure.web.ErrorMvcAutoConfiguration$WhitelabelErrorViewConfiguration; factoryMethodName=beanNameViewResolver; initMethodName=null; destroyMethodName=(inferred); defined in class path resource [org/springframework/boot/autoconfigure/web/ErrorMvcAutoConfiguration$WhitelabelErrorViewConfiguration.class]] with [Root bean: class [null]; scope=; abstract=false; lazyInit=false; autowireMode=3; dependencyCheck=0; autowireCandidate=true; primary=false; factoryBeanName=org.springframework.boot.autoconfigure.web.WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter; factoryMethodName=beanNameViewResolver; initMethodName=null; destroyMethodName=(inferred); defined in class path resource [org/springframework/boot/autoconfigure/web/WebMvcAutoConfiguration$WebMvcAutoConfigurationAdapter.class]] 191 | hystrix-dashboard_1 | 2019-03-22 15:03:34.443 INFO 1 --- [ main] o.s.cloud.context.scope.GenericScope : BeanFactory id=90cce959-06d6-3884-bbec-bf3c66a78265 192 | hystrix-dashboard_1 | 2019-03-22 15:03:34.474 INFO 1 --- [ main] trationDelegate$BeanPostProcessorChecker : Bean 'org.springframework.cloud.autoconfigure.ConfigurationPropertiesRebinderAutoConfiguration' of type [class org.springframework.cloud.autoconfigure.ConfigurationPropertiesRebinderAutoConfiguration$$EnhancerBySpringCGLIB$$658a8629] is not eligible for getting processed by all BeanPostProcessors (for example: not eligible for auto-proxying) 193 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:03:34.6488535 +0000 UTC m=+3.074486401 194 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:03:34.6488535 +0000 UTC m=+3.074486401 195 | hystrix-dashboard_1 | 2019-03-22 15:03:34.982 INFO 1 --- [ main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat initialized with port(s): 9002 (http) 196 | hystrix-dashboard_1 | 2019-03-22 15:03:35.009 INFO 1 --- [ main] o.apache.catalina.core.StandardService : Starting service Tomcat 197 | hystrix-dashboard_1 | 2019-03-22 15:03:35.011 INFO 1 --- [ main] org.apache.catalina.core.StandardEngine : Starting Servlet Engine: Apache Tomcat/8.0.32 198 | hystrix-dashboard_1 | 2019-03-22 15:03:35.165 INFO 1 --- [ost-startStop-1] 
o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext 199 | hystrix-dashboard_1 | 2019-03-22 15:03:35.166 INFO 1 --- [ost-startStop-1] o.s.web.context.ContextLoader : Root WebApplicationContext: initialization completed in 2048 ms 200 | hystrix-dashboard_1 | 2019-03-22 15:03:35.791 INFO 1 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'proxyStreamServlet' to [/proxy.stream] 201 | hystrix-dashboard_1 | 2019-03-22 15:03:35.796 INFO 1 --- [ost-startStop-1] o.s.b.c.e.ServletRegistrationBean : Mapping servlet: 'dispatcherServlet' to [/] 202 | hystrix-dashboard_1 | 2019-03-22 15:03:35.803 INFO 1 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'characterEncodingFilter' to: [/*] 203 | hystrix-dashboard_1 | 2019-03-22 15:03:35.804 INFO 1 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'hiddenHttpMethodFilter' to: [/*] 204 | hystrix-dashboard_1 | 2019-03-22 15:03:35.805 INFO 1 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'httpPutFormContentFilter' to: [/*] 205 | hystrix-dashboard_1 | 2019-03-22 15:03:35.806 INFO 1 --- [ost-startStop-1] o.s.b.c.embedded.FilterRegistrationBean : Mapping filter: 'requestContextFilter' to: [/*] 206 | hystrix-dashboard_1 | 2019-03-22 15:03:36.145 INFO 1 --- [ main] o.s.ui.freemarker.SpringTemplateLoader : SpringTemplateLoader for FreeMarker: using resource loader [org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@51d67bb8: startup date [Fri Mar 22 15:03:33 UTC 2019]; parent: org.springframework.context.annotation.AnnotationConfigApplicationContext@478267af] and template loader path [classpath:/templates/] 207 | hystrix-dashboard_1 | 2019-03-22 15:03:36.150 INFO 1 --- [ main] o.s.w.s.v.f.FreeMarkerConfigurer : ClassTemplateLoader for Spring macros added to FreeMarker configuration 208 | hystrix-dashboard_1 | 2019-03-22 15:03:36.780 INFO 1 --- [ main] s.w.s.m.m.a.RequestMappingHandlerAdapter : Looking for @ControllerAdvice: org.springframework.boot.context.embedded.AnnotationConfigEmbeddedWebApplicationContext@51d67bb8: startup date [Fri Mar 22 15:03:33 UTC 2019]; parent: org.springframework.context.annotation.AnnotationConfigApplicationContext@478267af 209 | hystrix-dashboard_1 | 2019-03-22 15:03:36.912 INFO 1 --- [ main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/hystrix]}" onto public java.lang.String org.springframework.cloud.netflix.hystrix.dashboard.HystrixDashboardController.home(org.springframework.ui.Model,org.springframework.web.context.request.WebRequest) 210 | hystrix-dashboard_1 | 2019-03-22 15:03:36.913 INFO 1 --- [ main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/hystrix/{path}]}" onto public java.lang.String org.springframework.cloud.netflix.hystrix.dashboard.HystrixDashboardController.monitor(java.lang.String,org.springframework.ui.Model,org.springframework.web.context.request.WebRequest) 211 | hystrix-dashboard_1 | 2019-03-22 15:03:36.915 INFO 1 --- [ main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/error],produces=[text/html]}" onto public org.springframework.web.servlet.ModelAndView org.springframework.boot.autoconfigure.web.BasicErrorController.errorHtml(javax.servlet.http.HttpServletRequest,javax.servlet.http.HttpServletResponse) 212 | hystrix-dashboard_1 | 2019-03-22 15:03:36.916 INFO 1 --- [ main] s.w.s.m.m.a.RequestMappingHandlerMapping : Mapped "{[/error]}" onto public org.springframework.http.ResponseEntity> 
org.springframework.boot.autoconfigure.web.BasicErrorController.error(javax.servlet.http.HttpServletRequest) 213 | hystrix-dashboard_1 | 2019-03-22 15:03:36.982 INFO 1 --- [ main] o.s.w.s.handler.SimpleUrlHandlerMapping : Mapped URL path [/webjars/**] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler] 214 | hystrix-dashboard_1 | 2019-03-22 15:03:36.982 INFO 1 --- [ main] o.s.w.s.handler.SimpleUrlHandlerMapping : Mapped URL path [/**] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler] 215 | hystrix-dashboard_1 | 2019-03-22 15:03:37.042 INFO 1 --- [ main] o.s.w.s.handler.SimpleUrlHandlerMapping : Mapped URL path [/**/favicon.ico] onto handler of type [class org.springframework.web.servlet.resource.ResourceHttpRequestHandler] 216 | hystrix-dashboard_1 | 2019-03-22 15:03:37.315 WARN 1 --- [ main] o.s.c.n.a.ArchaiusAutoConfiguration : No spring.application.name found, defaulting to 'application' 217 | hystrix-dashboard_1 | 2019-03-22 15:03:37.330 WARN 1 --- [ main] c.n.c.sources.URLConfigurationSource : No URLs will be polled as dynamic configuration sources. 218 | hystrix-dashboard_1 | 2019-03-22 15:03:37.330 INFO 1 --- [ main] c.n.c.sources.URLConfigurationSource : To enable URLs as dynamic configuration sources, define System property archaius.configurationSource.additionalUrls or make config.properties available on classpath. 219 | hystrix-dashboard_1 | 2019-03-22 15:03:37.363 WARN 1 --- [ main] c.n.c.sources.URLConfigurationSource : No URLs will be polled as dynamic configuration sources. 220 | hystrix-dashboard_1 | 2019-03-22 15:03:37.364 INFO 1 --- [ main] c.n.c.sources.URLConfigurationSource : To enable URLs as dynamic configuration sources, define System property archaius.configurationSource.additionalUrls or make config.properties available on classpath. 
221 | hystrix-dashboard_1 | 2019-03-22 15:03:37.588 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Registering beans for JMX exposure on startup 222 | hystrix-dashboard_1 | 2019-03-22 15:03:37.649 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'configurationPropertiesRebinder' has been autodetected for JMX exposure 223 | hystrix-dashboard_1 | 2019-03-22 15:03:37.652 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'refreshScope' has been autodetected for JMX exposure 224 | hystrix-dashboard_1 | 2019-03-22 15:03:37.657 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Bean with name 'environmentManager' has been autodetected for JMX exposure 225 | hystrix-dashboard_1 | 2019-03-22 15:03:37.674 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located managed bean 'environmentManager': registering with JMX server as MBean [org.springframework.cloud.context.environment:name=environmentManager,type=EnvironmentManager] 226 | hystrix-dashboard_1 | 2019-03-22 15:03:37.746 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located managed bean 'refreshScope': registering with JMX server as MBean [org.springframework.cloud.context.scope.refresh:name=refreshScope,type=RefreshScope] 227 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:03:37.6489653 +0000 UTC m=+6.074599601 228 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:03:37.6489653 +0000 UTC m=+6.074599601 229 | hystrix-dashboard_1 | 2019-03-22 15:03:37.791 INFO 1 --- [ main] o.s.j.e.a.AnnotationMBeanExporter : Located managed bean 'configurationPropertiesRebinder': registering with JMX server as MBean [org.springframework.cloud.context.properties:name=configurationPropertiesRebinder,context=51d67bb8,type=ConfigurationPropertiesRebinder] 230 | hystrix-dashboard_1 | 2019-03-22 15:03:38.139 INFO 1 --- [ main] s.b.c.e.t.TomcatEmbeddedServletContainer : Tomcat started on port(s): 9002 (http) 231 | hystrix-dashboard_1 | 2019-03-22 15:03:38.144 INFO 1 --- [ main] c.l.HysterixDashboardApplication : Started HysterixDashboardApplication in 7.469 seconds (JVM running for 8.674) 232 | ``` 233 | we can see the client/server interaction. -------------------------------------------------------------------------------- /Communication/Go-Micro/client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.12.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get -u github.com/micro/micro && \ 6 | go get github.com/micro/protobuf/proto && \ 7 | go get -u github.com/micro/protobuf/protoc-gen-go && \ 8 | go get github.com/micro/go-plugins/wrapper/breaker/hystrix && \ 9 | go get github.com/afex/hystrix-go/hystrix 10 | 11 | ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Communication/Go-Micro/ 12 | COPY . ${SOURCES} 13 | 14 | RUN cd ${SOURCES}client/ && CGO_ENABLED=0 go build 15 | 16 | ENV CONSUL_HTTP_ADDR localhost:8500 17 | 18 | WORKDIR ${SOURCES}client/ 19 | CMD ${SOURCES}client/client 20 | -------------------------------------------------------------------------------- /Communication/Go-Micro/client/README.md: -------------------------------------------------------------------------------- 1 | # Using Circuit Breakers for Resilient Communication 2 | 3 | ## Hystrix 4 | We will configure Hystrix circuit breaker in the client. 
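Before wiring the breaker into Go-Micro, it may help to see the bare `hystrix-go` primitives in isolation. The following sketch is illustrative only and is not the wiring used in this project (that goes through the go-micro client wrapper shown in the next sections); the command name `greeter.Hello` and the simulated failure are made up for the example:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/afex/hystrix-go/hystrix"
)

func main() {
	// Configure a named command; the values mirror the defaults we override below.
	hystrix.ConfigureCommand("greeter.Hello", hystrix.CommandConfig{
		Timeout:                500,  // ms before a call is treated as failed
		RequestVolumeThreshold: 3,    // minimum calls before the breaker may trip
		ErrorPercentThreshold:  75,   // error ratio (%) that opens the circuit
		SleepWindow:            3500, // ms to wait before probing the service again
	})

	// hystrix.Do runs the protected call; the second function is the fallback
	// executed on a timeout, an error, or an open circuit.
	err := hystrix.Do("greeter.Hello", func() error {
		return errors.New("simulated downstream failure")
	}, func(err error) error {
		fmt.Println("fallback:", err)
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}
```

The go-micro breaker wrapper applies the same mechanism around every RPC call, which is why the steps below only tune the global Hystrix defaults.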
5 | 6 | ### Add imports 7 | Add imports of the hystrix and breaker packages to `/client/main.go`: 8 | ``` go 9 | hystrix "github.com/afex/hystrix-go/hystrix" 10 | breaker "github.com/micro/go-plugins/wrapper/breaker/hystrix" 11 | ``` 12 | 13 | ### Enable the Hystrix circuit breaker 14 | Enable the Hystrix circuit breaker within the client by adding these lines to the main function: 15 | ```go 16 | service.Init( 17 | micro.WrapClient(breaker.NewClientWrapper()), 18 | ) 19 | ``` 20 | 21 | ### Configure Hystrix 22 | ```go 23 | // override some default values for the Hystrix breaker 24 | hystrix.DefaultVolumeThreshold = 3 25 | hystrix.DefaultErrorPercentThreshold = 75 26 | hystrix.DefaultTimeout = 500 27 | hystrix.DefaultSleepWindow = 3500 28 | ``` 29 | 30 | ### Stream handler 31 | Add a stream handler so that we can monitor the circuit breaker externally from the dashboard: 32 | ```go 33 | // export Hystrix stream 34 | hystrixStreamHandler := hystrix.NewStreamHandler() 35 | hystrixStreamHandler.Start() 36 | go http.ListenAndServe(net.JoinHostPort("", "8081"), hystrixStreamHandler) 37 | ``` 38 | 39 | ### Error handling 40 | In case an error occurs during the call, we check for a Hystrix timeout by adding this code to the `hello` function: 41 | ```go 42 | if err != nil { 43 | if err.Error() == "hystrix: timeout" { 44 | fmt.Printf("%s. Insert fallback logic here.\n", err.Error()) 45 | } else { 46 | fmt.Println(err.Error()) 47 | } 48 | return 49 | } 50 | ``` 51 | 52 | ## Docker compose 53 | Add the following lines to the `docker-compose.yml` file for the Hystrix dashboard: 54 | ```docker 55 | hystrix-dashboard: 56 | image: mlabouardy/hystrix-dashboard:latest 57 | ports: 58 | - "9002:9002" 59 | networks: 60 | - sky-net 61 | ``` 62 | 63 | 64 | ## Forcing a server timeout 65 | To test the circuit breaker, we force a timeout in the server by adding this condition to the `hello` function of `/server/main.go`: 66 | ```go 67 | if counter > 7 && counter < 15 { 68 | time.Sleep(1000 * time.Millisecond) 69 | } else { 70 | time.Sleep(100 * time.Millisecond) 71 | } 72 | ``` 73 | With this, the client will think that the server is down. 74 | 75 | ## Build docker image 76 | ```bash 77 | $ docker-compose build 78 | ``` 79 | 80 | ## Run 81 | ```bash 82 | $ docker-compose up 83 | ``` 84 | 85 | ## Hystrix dashboard 86 | Once the containers are running, we can open the Hystrix dashboard in a web browser: 87 | ``` 88 | http://localhost:9002/hystrix 89 | ``` 90 | 91 | and add the following URL as the data stream: 92 | ``` 93 | http://go-micro-client:8081/hystrix.stream 94 | ``` 95 | 96 | The dashboard should indicate that the circuit is closed; after a while the server timeouts kick in and the circuit breaker marks the server as down, so it opens the circuit.
These are the logs: 97 | ``` 98 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:44:15.4486301 +0000 UTC m=+9.004001801 99 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:44:15.4486301 +0000 UTC m=+9.004001801 100 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:44:18.4488321 +0000 UTC m=+12.004203801 101 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:44:18.4488321 +0000 UTC m=+12.004203801 102 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:44:21.4149692 +0000 UTC m=+15.005069601 103 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:44:21.4149692 +0000 UTC m=+15.005069601 104 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:44:24.4161592 +0000 UTC m=+18.006258301 105 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:44:24.4161592 +0000 UTC m=+18.006258301 106 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:44:27.4142212 +0000 UTC m=+21.004320801 107 | go-micro-client_1 | Hello Leander, calling at 2019-03-22 15:44:27.4142212 +0000 UTC m=+21.004320801 108 | go-micro-client_1 | hystrix: timeout. Insert fallback logic here. 109 | go-micro-client_1 | hystrix: timeout. Insert fallback logic here. 110 | go-micro-client_1 | hystrix: timeout. Insert fallback logic here. 111 | go-micro-client_1 | hystrix: circuit open 112 | go-micro-server_1 | Responding with Hello Leander, calling at 2019-03-22 15:44:30.4139995 +0000 UTC m=+24.004098501 113 | go-micro-server_1 | 2019/03/22 15:44:40 rpc: unable to write error response: write tcp 172.21.0.4:40831->172.21.0.5:35712: write: broken pipe 114 | go-micro-client_1 | hystrix: timeout. Insert fallback logic here. 115 | go-micro-server_1 | 2019/03/22 15:44:43 rpc: unable to write error response: write tcp 172.21.0.4:40831->172.21.0.5:35730: write: broken pipe 116 | ``` 117 | 118 | The circuit breaker shields the client from non-responding servers. -------------------------------------------------------------------------------- /Communication/Go-Micro/client/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net" 6 | "net/http" 7 | "time" 8 | 9 | proto "github.com/daniel-gil/advanced-go-microservices/Communication/Go-Micro/proto" 10 | micro "github.com/micro/go-micro" 11 | hystrix "github.com/afex/hystrix-go/hystrix" 12 | breaker "github.com/micro/go-plugins/wrapper/breaker/hystrix" 13 | "golang.org/x/net/context" 14 | ) 15 | 16 | // The Greeter API. 17 | type Greeter struct{} 18 | 19 | // Hello is a Greeter API method. 20 | func (g *Greeter) Hello(ctx context.Context, req *proto.HelloRequest, rsp *proto.HelloResponse) error { 21 | rsp.Greeting = "Hello " + req.Name 22 | return nil 23 | } 24 | 25 | func callEvery(d time.Duration, greeter proto.GreeterClient, f func(time.Time, proto.GreeterClient)) { 26 | for x := range time.Tick(d) { 27 | f(x, greeter) 28 | } 29 | } 30 | 31 | func hello(t time.Time, greeter proto.GreeterClient) { 32 | // Call the greeter 33 | rsp, err := greeter.Hello(context.TODO(), &proto.HelloRequest{Name: "Leander, calling at " + t.String()}) 34 | if err != nil { 35 | if err.Error() == "hystrix: timeout" { 36 | fmt.Printf("%s. 
Insert fallback logic here.\n", err.Error()) 37 | } else { 38 | fmt.Println(err.Error()) 39 | } 40 | return 41 | } 42 | 43 | // Print response 44 | fmt.Printf("%s\n", rsp.Greeting) 45 | } 46 | 47 | func main() { 48 | // Create a new service. Optionally include some options here. 49 | service := micro.NewService( 50 | micro.Name("greeter"), 51 | micro.Version("latest"), 52 | micro.Metadata(map[string]string{ 53 | "type": "helloworld", 54 | }), 55 | ) 56 | 57 | // Init will parse the command line flags. Any flags set will 58 | // override the above settings. 59 | // specify a Hystrix breaker client wrapper here 60 | service.Init( 61 | micro.WrapClient(breaker.NewClientWrapper()), 62 | ) 63 | 64 | // override some default values for the Hystrix breaker 65 | hystrix.DefaultVolumeThreshold = 3 66 | hystrix.DefaultErrorPercentThreshold = 75 67 | hystrix.DefaultTimeout = 500 68 | hystrix.DefaultSleepWindow = 3500 69 | 70 | // export Hystrix stream 71 | hystrixStreamHandler := hystrix.NewStreamHandler() 72 | hystrixStreamHandler.Start() 73 | go http.ListenAndServe(net.JoinHostPort("", "8081"), hystrixStreamHandler) 74 | 75 | // Create new greeter client and call hello 76 | greeter := proto.NewGreeterClient("greeter", service.Client()) 77 | callEvery(3*time.Second, greeter, hello) 78 | } 79 | -------------------------------------------------------------------------------- /Communication/Go-Micro/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | consul: 5 | image: consul:0.8.3 6 | ports: 7 | - "8300:8300" 8 | - "8400:8400" 9 | - "8500:8500" 10 | networks: 11 | - sky-net 12 | 13 | go-micro-server: 14 | build: 15 | context: . 16 | dockerfile: server/Dockerfile 17 | image: go-micro-server:1.0.1 18 | environment: 19 | - CONSUL_HTTP_ADDR=consul:8500 20 | depends_on: 21 | - consul 22 | networks: 23 | - sky-net 24 | 25 | go-micro-client: 26 | build: 27 | context: . 
28 | dockerfile: client/Dockerfile 29 | image: go-micro-client:1.0.1 30 | environment: 31 | - CONSUL_HTTP_ADDR=consul:8500 32 | depends_on: 33 | - consul 34 | - go-micro-server 35 | networks: 36 | - sky-net 37 | 38 | hystrix-dashboard: 39 | image: mlabouardy/hystrix-dashboard:latest 40 | ports: 41 | - "9002:9002" 42 | networks: 43 | - sky-net 44 | 45 | networks: 46 | sky-net: 47 | driver: bridge 48 | -------------------------------------------------------------------------------- /Communication/Go-Micro/kubernetes/k8s-deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: go-micro 5 | labels: 6 | app: go-mirco 7 | spec: 8 | replicas: 3 9 | template: 10 | metadata: 11 | labels: 12 | app: go-micro 13 | tier: service 14 | spec: 15 | containers: 16 | - name: go-micro 17 | image: "cyrsis/test5:5.0.1" 18 | ports: 19 | - containerPort: 9090 20 | env: 21 | - name: PORT 22 | value: "9090" 23 | 24 | # define resource requests and limits 25 | resources: 26 | requests: 27 | memory: "64Mi" 28 | cpu: "125m" 29 | limits: 30 | memory: "128Mi" 31 | cpu: "250m" 32 | 33 | # for faster SIGKILL shutdown 34 | 35 | 36 | # check of gin-web is alive and healthy 37 | readinessProbe: 38 | httpGet: 39 | path: /ping 40 | port: 9090 41 | initialDelaySeconds: 5 42 | timeoutSeconds: 5 43 | livenessProbe: 44 | httpGet: 45 | path: /ping 46 | port: 9090 47 | initialDelaySeconds: 5 48 | timeoutSeconds: 5 49 | -------------------------------------------------------------------------------- /Communication/Go-Micro/kubernetes/k8s-ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: gin-web 5 | labels: 6 | app: gin-web 7 | tier: frontend 8 | spec: 9 | backend: 10 | serviceName: gin-web 11 | servicePort: 9090 -------------------------------------------------------------------------------- /Communication/Go-Micro/kubernetes/k8s-service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gin-web 5 | labels: 6 | app: gin-web 7 | tier: service 8 | spec: 9 | # use NodePort here to be able to access a port on each node 10 | type: NodePort 11 | ports: 12 | - port: 9090 13 | selector: 14 | app: gin-web -------------------------------------------------------------------------------- /Communication/Go-Micro/proto/greeter.pb.go: -------------------------------------------------------------------------------- 1 | // Code generated by protoc-gen-go. DO NOT EDIT. 2 | // source: github.com/lreimer/advanced-cloud-native-go/Communication/Go-Micro/proto/greeter.proto 3 | 4 | /* 5 | Package greeter is a generated protocol buffer package. 6 | It is generated from these files: 7 | github.com/lreimer/advanced-cloud-native-go/Communication/Go-Micro/proto/greeter.proto 8 | It has these top-level messages: 9 | HelloRequest 10 | HelloResponse 11 | */ 12 | package greeter 13 | 14 | import proto "github.com/golang/protobuf/proto" 15 | import fmt "fmt" 16 | import math "math" 17 | 18 | import ( 19 | client "github.com/micro/go-micro/client" 20 | server "github.com/micro/go-micro/server" 21 | context "golang.org/x/net/context" 22 | ) 23 | 24 | // Reference imports to suppress errors if they are not otherwise used. 
25 | var _ = proto.Marshal 26 | var _ = fmt.Errorf 27 | var _ = math.Inf 28 | 29 | // This is a compile-time assertion to ensure that this generated file 30 | // is compatible with the proto package it is being compiled against. 31 | // A compilation error at this line likely means your copy of the 32 | // proto package needs to be updated. 33 | const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package 34 | 35 | type HelloRequest struct { 36 | Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` 37 | } 38 | 39 | func (m *HelloRequest) Reset() { *m = HelloRequest{} } 40 | func (m *HelloRequest) String() string { return proto.CompactTextString(m) } 41 | func (*HelloRequest) ProtoMessage() {} 42 | func (*HelloRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } 43 | 44 | func (m *HelloRequest) GetName() string { 45 | if m != nil { 46 | return m.Name 47 | } 48 | return "" 49 | } 50 | 51 | type HelloResponse struct { 52 | Greeting string `protobuf:"bytes,2,opt,name=greeting" json:"greeting,omitempty"` 53 | } 54 | 55 | func (m *HelloResponse) Reset() { *m = HelloResponse{} } 56 | func (m *HelloResponse) String() string { return proto.CompactTextString(m) } 57 | func (*HelloResponse) ProtoMessage() {} 58 | func (*HelloResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } 59 | 60 | func (m *HelloResponse) GetGreeting() string { 61 | if m != nil { 62 | return m.Greeting 63 | } 64 | return "" 65 | } 66 | 67 | func init() { 68 | proto.RegisterType((*HelloRequest)(nil), "HelloRequest") 69 | proto.RegisterType((*HelloResponse)(nil), "HelloResponse") 70 | } 71 | 72 | // Reference imports to suppress errors if they are not otherwise used. 73 | var _ context.Context 74 | var _ client.Option 75 | var _ server.Option 76 | 77 | // Client API for Greeter service 78 | 79 | type GreeterClient interface { 80 | Hello(ctx context.Context, in *HelloRequest, opts ...client.CallOption) (*HelloResponse, error) 81 | } 82 | 83 | type greeterClient struct { 84 | c client.Client 85 | serviceName string 86 | } 87 | 88 | func NewGreeterClient(serviceName string, c client.Client) GreeterClient { 89 | if c == nil { 90 | c = client.NewClient() 91 | } 92 | if len(serviceName) == 0 { 93 | serviceName = "greeter" 94 | } 95 | return &greeterClient{ 96 | c: c, 97 | serviceName: serviceName, 98 | } 99 | } 100 | 101 | func (c *greeterClient) Hello(ctx context.Context, in *HelloRequest, opts ...client.CallOption) (*HelloResponse, error) { 102 | req := c.c.NewRequest(c.serviceName, "Greeter.Hello", in) 103 | out := new(HelloResponse) 104 | err := c.c.Call(ctx, req, out, opts...) 
105 | if err != nil { 106 | return nil, err 107 | } 108 | return out, nil 109 | } 110 | 111 | // Server API for Greeter service 112 | 113 | type GreeterHandler interface { 114 | Hello(context.Context, *HelloRequest, *HelloResponse) error 115 | } 116 | 117 | func RegisterGreeterHandler(s server.Server, hdlr GreeterHandler, opts ...server.HandlerOption) { 118 | s.Handle(s.NewHandler(&Greeter{hdlr}, opts...)) 119 | } 120 | 121 | type Greeter struct { 122 | GreeterHandler 123 | } 124 | 125 | func (h *Greeter) Hello(ctx context.Context, in *HelloRequest, out *HelloResponse) error { 126 | return h.GreeterHandler.Hello(ctx, in, out) 127 | } 128 | 129 | func init() { 130 | proto.RegisterFile("github.com/lreimer/advanced-cloud-native-go/Frameworks/Go-Micro/proto/greeter.proto", fileDescriptor0) 131 | } 132 | 133 | var fileDescriptor0 = []byte{ 134 | // 200 bytes of a gzipped FileDescriptorProto 135 | 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8e, 0x3d, 0x4b, 0xc6, 0x30, 136 | 0x14, 0x85, 0x7d, 0xc5, 0xcf, 0xe0, 0xeb, 0x90, 0xa9, 0x74, 0x92, 0x4c, 0x05, 0x49, 0x02, 0xf6, 137 | 0x17, 0x88, 0x60, 0x5d, 0x94, 0xd2, 0xd5, 0x29, 0x4d, 0x2f, 0x69, 0x68, 0x9a, 0x5b, 0xf3, 0x51, 138 | 0xff, 0xbe, 0x10, 0x45, 0x74, 0xbb, 0x07, 0xce, 0x7d, 0x9e, 0x43, 0xde, 0x8d, 0x4d, 0x73, 0x1e, 139 | 0x85, 0xc6, 0x55, 0xf6, 0x4a, 0x2f, 0xa9, 0xcf, 0xa3, 0xb3, 0x71, 0xb6, 0xde, 0xc8, 0xc7, 0x69, 140 | 0x57, 0x5e, 0xc3, 0xc4, 0x9f, 0x1c, 0xe6, 0x89, 0xbf, 0xa9, 0x64, 0x77, 0xe0, 0x1d, 0xca, 0xe7, 141 | 0xa0, 0x56, 0xf8, 0xc4, 0xb0, 0x44, 0xd9, 0x21, 0x7f, 0xb5, 0x3a, 0xa0, 0xdc, 0x02, 0x26, 0x94, 142 | 0x26, 0x00, 0x24, 0x08, 0xa2, 0x24, 0xc6, 0xc8, 0xcd, 0x0b, 0x38, 0x87, 0x03, 0x7c, 0x64, 0x88, 143 | 0x89, 0x52, 0x72, 0xe6, 0xd5, 0x0a, 0xd5, 0xe1, 0xee, 0xd0, 0x5c, 0x0f, 0xe5, 0x66, 0xf7, 0xe4, 144 | 0xf8, 0xd3, 0x89, 0x1b, 0xfa, 0x08, 0xb4, 0x26, 0x57, 0x85, 0x62, 0xbd, 0xa9, 0x4e, 0x4b, 0xf1, 145 | 0x37, 0x3f, 0xb4, 0xe4, 0xb2, 0xfb, 0x36, 0xd0, 0x86, 0x9c, 0x97, 0x3f, 0x7a, 0x14, 0x7f, 0x1d, 146 | 0xf5, 0xad, 0xf8, 0x87, 0x63, 0x27, 0xe3, 0x45, 0x19, 0xd3, 0x7e, 0x05, 0x00, 0x00, 0xff, 0xff, 147 | 0xb8, 0x0c, 0x3b, 0x06, 0xeb, 0x00, 0x00, 0x00, 148 | } 149 | -------------------------------------------------------------------------------- /Communication/Go-Micro/proto/greeter.proto: -------------------------------------------------------------------------------- 1 | syntax = "proto3"; 2 | 3 | service Greeter { 4 | rpc Hello(HelloRequest) returns (HelloResponse) {} 5 | } 6 | 7 | message HelloRequest { 8 | string name = 1; 9 | } 10 | 11 | message HelloResponse { 12 | string greeting = 2; 13 | } -------------------------------------------------------------------------------- /Communication/Go-Micro/server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.12.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get -u github.com/micro/go-micro && \ 6 | go get github.com/micro/protobuf/proto && \ 7 | go get -u github.com/micro/protobuf/protoc-gen-go 8 | 9 | ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Communication/Go-Micro/ 10 | COPY . 
${SOURCES} 11 | 12 | RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 13 | 14 | ENV CONSUL_HTTP_ADDR localhost:8500 15 | 16 | WORKDIR ${SOURCES}server/ 17 | CMD ${SOURCES}server/server 18 | -------------------------------------------------------------------------------- /Communication/Go-Micro/server/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "time" 6 | 7 | proto "github.com/daniel-gil/advanced-go-microservices/Communication/Go-Micro/proto" 8 | micro "github.com/micro/go-micro" 9 | "golang.org/x/net/context" 10 | ) 11 | 12 | // The Greeter API. 13 | type Greeter struct{} 14 | 15 | var counter int 16 | 17 | // Hello is a Greeter API method. 18 | func (g *Greeter) Hello(ctx context.Context, req *proto.HelloRequest, rsp *proto.HelloResponse) error { 19 | counter++ 20 | if counter > 7 && counter < 15 { 21 | time.Sleep(10000 * time.Millisecond) 22 | } else { 23 | time.Sleep(100 * time.Millisecond) 24 | } 25 | 26 | rsp.Greeting = "Hello " + req.Name 27 | fmt.Printf("Responding with %s\n", rsp.Greeting) 28 | return nil 29 | } 30 | 31 | func main() { 32 | // Create a new service. Optionally include some options here. 33 | service := micro.NewService( 34 | micro.Name("greeter"), 35 | micro.Version("1.0.1"), 36 | micro.Metadata(map[string]string{ 37 | "type": "helloworld", 38 | }), 39 | ) 40 | 41 | // Init will parse the command line flags. Any flags set will 42 | // override the above settings. Options defined here will 43 | // override anything set on the command line. 44 | service.Init() 45 | 46 | // Register handler 47 | proto.RegisterGreeterHandler(service.Server(), new(Greeter)) 48 | 49 | // Run the server 50 | if err := service.Run(); err != nil { 51 | fmt.Println(err) 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /Communication/Kafka/README.md: -------------------------------------------------------------------------------- 1 | # Implement Publish/Subscribe with Apache Kafka 2 | 3 | We will be using the library [sarama](https://github.com/Shopify/sarama) a Go library for Apache Kafka. 4 | 5 | ## Producer 6 | 7 | First we create the configuration for the producer 8 | ``` go 9 | config := sarama.NewConfig() 10 | config.Producer.Return.Successes = true 11 | config.Producer.Return.Errors = true 12 | config.Producer.RequiredAcks = sarama.WaitForAll 13 | config.Producer.Retry.Max = 5 14 | ``` 15 | 16 | Next we create a sync producer for given address (from environment variable `BROKER_ADDR`) and configuration: 17 | ```go 18 | brokers := []string{brokerAddr()} 19 | producer, err := sarama.NewSyncProducer(brokers, config) 20 | if err != nil { 21 | panic(err) 22 | } 23 | 24 | defer func() { 25 | if err := producer.Close(); err != nil { 26 | panic(err) 27 | } 28 | }() 29 | ``` 30 | 31 | After this we can create a simple message: 32 | ```go 33 | msg := &sarama.ProducerMessage{ 34 | Topic: topic, 35 | Value: sarama.StringEncoder(fmt.Sprintf("Hello Kafka %v", msgCount)), 36 | } 37 | ``` 38 | where `topic` is retrieved from the environment variable `TOPIC`. 
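The `brokerAddr()` and `topic()` calls used in these snippets are small helpers that read an environment variable and fall back to a local default; their exact definitions are part of `producer.go` shown further below, but the pattern is roughly this (assuming the `os` package is imported):

```go
// brokerAddr returns the Kafka broker address, defaulting to a local broker
// when BROKER_ADDR is not set (e.g. when running outside docker-compose).
func brokerAddr() string {
	addr := os.Getenv("BROKER_ADDR")
	if len(addr) == 0 {
		addr = "localhost:9092"
	}
	return addr
}

// topic returns the Kafka topic name, defaulting to "default-topic".
func topic() string {
	t := os.Getenv("TOPIC")
	if len(t) == 0 {
		t = "default-topic"
	}
	return t
}
```

The subscriber reuses the same helpers.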
39 | 40 | And in a final step, we will use the producer for sending the message: 41 | ```go 42 | partition, offset, err := producer.SendMessage(msg) 43 | if err != nil { 44 | panic(err) 45 | } 46 | ``` 47 | 48 | ## Consumer 49 | The consumer or subscriber creates its own configuration: 50 | ```go 51 | config := sarama.NewConfig() 52 | config.Consumer.Offsets.Initial = sarama.OffsetOldest 53 | config.Consumer.Offsets.CommitInterval = 5 * time.Second 54 | config.Consumer.Return.Errors = true 55 | ``` 56 | 57 | Next creates a new consumer for given brokers and configuration 58 | ```go 59 | brokers := []string{brokerAddr()} 60 | master, err := sarama.NewConsumer(brokers, config) 61 | if err != nil { 62 | panic(err) 63 | } 64 | 65 | defer func() { 66 | if err := master.Close(); err != nil { 67 | panic(err) 68 | } 69 | }() 70 | ``` 71 | 72 | After this we will create a Kafka partition consumer for given topic: 73 | ```go 74 | consumer, err := master.ConsumePartition(topic, 0, sarama.OffsetOldest) 75 | if err != nil { 76 | panic(err) 77 | } 78 | ``` 79 | 80 | And finally we consume and process the messages: 81 | ```go 82 | // Get signal for finish 83 | doneCh := make(chan struct{}) 84 | go func() { 85 | for { 86 | select { 87 | case err := <-consumer.Errors(): 88 | fmt.Println(err) 89 | case msg := <-consumer.Messages(): 90 | msgCount++ 91 | fmt.Println("Received messages", string(msg.Key), string(msg.Value)) 92 | case <-signals: 93 | fmt.Println("Interrupt is detected") 94 | doneCh <- struct{}{} 95 | } 96 | } 97 | }() 98 | 99 | <-doneCh 100 | ``` 101 | 102 | ## Docker compose 103 | We need to add the docker images of Zookeeper and Kafka from `dockerkafka`. 104 | 105 | ## Run 106 | To fire the producer, subscriber, zookeeper and Kafka: 107 | ```bash 108 | $ docker-compose up 109 | ``` -------------------------------------------------------------------------------- /Communication/Kafka/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | zookeeper: 5 | image: dockerkafka/zookeeper 6 | ports: 7 | - "2181:2181" 8 | - "2888:2888" 9 | - "3888:3888" 10 | - "5000:5000" 11 | networks: 12 | - sky-net 13 | 14 | kafka: 15 | image: dockerkafka/kafka 16 | ports: 17 | - "9092:9092" 18 | depends_on: 19 | - zookeeper 20 | links: 21 | - zookeeper 22 | networks: 23 | - sky-net 24 | 25 | kafka-producer: 26 | build: 27 | context: . 28 | dockerfile: producer/Dockerfile 29 | image: kafka-producer:1.0.1 30 | environment: 31 | - BROKER_ADDR=kafka:9092 32 | depends_on: 33 | - zookeeper 34 | - kafka 35 | links: 36 | - kafka 37 | networks: 38 | - sky-net 39 | 40 | kafka-subscriber: 41 | build: 42 | context: . 43 | dockerfile: subscriber/Dockerfile 44 | image: kafka-subscriber:1.0.1 45 | environment: 46 | - BROKER_ADDR=kafka:9092 47 | depends_on: 48 | - zookeeper 49 | - kafka 50 | links: 51 | - kafka 52 | networks: 53 | - sky-net 54 | 55 | networks: 56 | sky-net: 57 | driver: bridge 58 | -------------------------------------------------------------------------------- /Communication/Kafka/producer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get github.com/Shopify/sarama 6 | 7 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Communication/Kafka/ 8 | COPY . 
${SOURCES} 9 | 10 | RUN cd ${SOURCES}producer/ && CGO_ENABLED=0 go build 11 | 12 | ENV BROKER_ADDR localhost:9092 13 | 14 | WORKDIR ${SOURCES}producer/ 15 | CMD ${SOURCES}producer/producer 16 | -------------------------------------------------------------------------------- /Communication/Kafka/producer/producer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | 7 | "time" 8 | 9 | "github.com/Shopify/sarama" 10 | ) 11 | 12 | func main() { 13 | fmt.Println("Starting synchronous Kafka producer...") 14 | time.Sleep(5 * time.Second) 15 | 16 | config := sarama.NewConfig() 17 | config.Producer.Return.Successes = true 18 | config.Producer.Return.Errors = true 19 | config.Producer.RequiredAcks = sarama.WaitForAll 20 | config.Producer.Retry.Max = 5 21 | 22 | brokers := []string{brokerAddr()} 23 | producer, err := sarama.NewSyncProducer(brokers, config) 24 | if err != nil { 25 | panic(err) 26 | } 27 | 28 | defer func() { 29 | if err := producer.Close(); err != nil { 30 | panic(err) 31 | } 32 | }() 33 | 34 | topic := topic() 35 | msgCount := 0 36 | 37 | // Get signal for finish 38 | doneCh := make(chan struct{}) 39 | 40 | go func() { 41 | for { 42 | msgCount++ 43 | 44 | msg := &sarama.ProducerMessage{ 45 | Topic: topic, 46 | Value: sarama.StringEncoder(fmt.Sprintf("Hello Kafka %v", msgCount)), 47 | } 48 | 49 | partition, offset, err := producer.SendMessage(msg) 50 | if err != nil { 51 | panic(err) 52 | } 53 | 54 | fmt.Printf("Message is stored in topic(%s)/partition(%d)/offset(%d)\n", topic, partition, offset) 55 | time.Sleep(5 * time.Second) 56 | } 57 | }() 58 | 59 | <-doneCh 60 | } 61 | 62 | func brokerAddr() string { 63 | brokerAddr := os.Getenv("BROKER_ADDR") 64 | if len(brokerAddr) == 0 { 65 | brokerAddr = "localhost:9092" 66 | } 67 | return brokerAddr 68 | } 69 | 70 | func topic() string { 71 | topic := os.Getenv("TOPIC") 72 | if len(topic) == 0 { 73 | topic = "default-topic" 74 | } 75 | return topic 76 | } 77 | -------------------------------------------------------------------------------- /Communication/Kafka/subscriber/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get github.com/Shopify/sarama 6 | 7 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Communication/Kafka/ 8 | COPY . 
${SOURCES} 9 | 10 | RUN cd ${SOURCES}subscriber/ && CGO_ENABLED=0 go build 11 | 12 | ENV BROKER_ADDR localhost:9092 13 | 14 | WORKDIR ${SOURCES}subscriber/ 15 | CMD ${SOURCES}subscriber/subscriber 16 | -------------------------------------------------------------------------------- /Communication/Kafka/subscriber/subscriber.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "os" 6 | "os/signal" 7 | "time" 8 | 9 | "github.com/Shopify/sarama" 10 | ) 11 | 12 | func main() { 13 | fmt.Println("Starting synchronous Kafka subscriber...") 14 | time.Sleep(5 * time.Second) 15 | 16 | config := sarama.NewConfig() 17 | config.Consumer.Offsets.Initial = sarama.OffsetOldest 18 | config.Consumer.Offsets.CommitInterval = 5 * time.Second 19 | config.Consumer.Return.Errors = true 20 | 21 | // Create new consumer 22 | brokers := []string{brokerAddr()} 23 | master, err := sarama.NewConsumer(brokers, config) 24 | if err != nil { 25 | panic(err) 26 | } 27 | 28 | defer func() { 29 | if err := master.Close(); err != nil { 30 | panic(err) 31 | } 32 | }() 33 | 34 | topic := topic() 35 | 36 | // decide about the offset here: literal value, sarama.OffsetOldest, sarama.OffsetNewest 37 | // this is important in case of reconnection 38 | consumer, err := master.ConsumePartition(topic, 0, sarama.OffsetOldest) 39 | if err != nil { 40 | panic(err) 41 | } 42 | 43 | signals := make(chan os.Signal, 1) 44 | signal.Notify(signals, os.Interrupt) 45 | 46 | // Count how many message processed 47 | msgCount := 0 48 | 49 | // Get signal for finish 50 | doneCh := make(chan struct{}) 51 | go func() { 52 | for { 53 | select { 54 | case err := <-consumer.Errors(): 55 | fmt.Println(err) 56 | case msg := <-consumer.Messages(): 57 | msgCount++ 58 | fmt.Println("Received messages", string(msg.Key), string(msg.Value)) 59 | case <-signals: 60 | fmt.Println("Interrupt is detected") 61 | doneCh <- struct{}{} 62 | } 63 | } 64 | }() 65 | 66 | <-doneCh 67 | fmt.Println("Processed", msgCount, "messages") 68 | } 69 | 70 | func brokerAddr() string { 71 | brokerAddr := os.Getenv("BROKER_ADDR") 72 | if len(brokerAddr) == 0 { 73 | brokerAddr = "localhost:9092" 74 | } 75 | return brokerAddr 76 | } 77 | 78 | func topic() string { 79 | topic := os.Getenv("TOPIC") 80 | if len(topic) == 0 { 81 | topic = "default-topic" 82 | } 83 | return topic 84 | } 85 | -------------------------------------------------------------------------------- /Communication/RabbitMQ/README.md: -------------------------------------------------------------------------------- 1 | # Implement Message Queuing with RabbitMQ 2 | 3 | We will use the [Go RabbitMQ Client Library](https://github.com/streadway/amqp). 
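Both the producer and the consumer snippets below use a small `failOnError` helper (defined in `producer.go` and `consumer.go` in this folder) to abort with a log message whenever an AMQP call fails:

```go
func failOnError(err error, msg string) {
	if err != nil {
		log.Fatalf("%s: %s", msg, err)
	}
}
```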
4 | 5 | ## Producer 6 | First dial a connection to the broker, getting the broker address from the environment variable `BROKER_ADDR`: 7 | ```go 8 | conn, err := amqp.Dial(brokerAddr()) 9 | failOnError(err, "Failed to connect to RabbitMQ") 10 | defer conn.Close() 11 | ``` 12 | 13 | Then we open a channel for communication: 14 | ```go 15 | ch, err := conn.Channel() 16 | failOnError(err, "Failed to open a channel") 17 | defer ch.Close() 18 | ``` 19 | 20 | Next step we declare the queue: 21 | ```go 22 | q, err := ch.QueueDeclare( 23 | queue(), // name 24 | true, // durable 25 | false, // delete when unused 26 | false, // exclusive 27 | false, // no-wait 28 | nil, // arguments 29 | ) 30 | failOnError(err, "Failed to declare a queue") 31 | ``` 32 | where the queue name is configurable by the environment variable `QUEUE`. 33 | 34 | Once we have the queue, we can start publishing messages to it. 35 | ```go 36 | body := fmt.Sprintf("Hello RabbitMQ message %v", msgCount) 37 | 38 | err = ch.Publish( 39 | "", // exchange 40 | q.Name, // routing key 41 | false, // mandatory 42 | false, // immediate 43 | amqp.Publishing{ 44 | ContentType: "text/plain", 45 | Body: []byte(body), 46 | }) 47 | 48 | log.Printf(" [x] Sent %s", body) 49 | failOnError(err, "Failed to publish a message") 50 | ``` 51 | 52 | ## Consumer 53 | 54 | The consumer will use the same amqp library as the producer. 55 | 56 | The following 3 steps are the same as in the producer: 57 | - Dial the connection to the broker 58 | - Open communication channel 59 | - Declare the queue 60 | 61 | The next step is different, we need to register and get a message consumer 62 | ```go 63 | msgs, err := ch.Consume( 64 | q.Name, // queue 65 | "", // consumer 66 | true, // auto-ack 67 | false, // exclusive 68 | false, // no-local 69 | false, // no-wait 70 | nil, // args 71 | ) 72 | failOnError(err, "Failed to register a consumer") 73 | ``` 74 | 75 | Consumption processing logic, simply read the message body from the `msgs` channel: 76 | ```go 77 | go func() { 78 | for d := range msgs { 79 | log.Printf("Received a message: %s", d.Body) 80 | } 81 | }() 82 | ``` 83 | 84 | ## Build docker images 85 | ```bash 86 | zeus:RabbitMQ danielgil$ docker-compose build 87 | rabbitmq uses an image, skipping 88 | Building rabbitmq-producer 89 | Step 1/9 : FROM golang:1.8.1-alpine 90 | ---> 32efc118745e 91 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 92 | ---> Using cache 93 | ---> 513a31355d6c 94 | Step 3/9 : RUN go get github.com/streadway/amqp 95 | ---> Using cache 96 | ---> 5325b3afc763 97 | Step 4/9 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Communication/RabbitMQ/ 98 | ---> Using cache 99 | ---> 60759c3cf34d 100 | Step 5/9 : COPY . 
${SOURCES} 101 | ---> 5b0b479e5978 102 | Step 6/9 : RUN cd ${SOURCES}producer/ && CGO_ENABLED=0 go build 103 | ---> Running in 928f403eb3ce 104 | Removing intermediate container 928f403eb3ce 105 | ---> 9fbcbf610650 106 | Step 7/9 : ENV BROKER_ADDR amqp://guest:guest@localhost:5672/ 107 | ---> Running in c39e9d95a1f2 108 | Removing intermediate container c39e9d95a1f2 109 | ---> 46a51ec93a1a 110 | Step 8/9 : WORKDIR ${SOURCES}producer/ 111 | ---> Running in 2110f140d3b6 112 | Removing intermediate container 2110f140d3b6 113 | ---> e3e5da40c2fc 114 | Step 9/9 : CMD ${SOURCES}producer/producer 115 | ---> Running in db7724e9a831 116 | Removing intermediate container db7724e9a831 117 | ---> 8eda880b9764 118 | Successfully built 8eda880b9764 119 | Successfully tagged rabbitmq-producer:1.0.1 120 | Building rabbitmq-consumer 121 | Step 1/9 : FROM golang:1.8.1-alpine 122 | ---> 32efc118745e 123 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 124 | ---> Using cache 125 | ---> 513a31355d6c 126 | Step 3/9 : RUN go get github.com/streadway/amqp 127 | ---> Using cache 128 | ---> 5325b3afc763 129 | Step 4/9 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Communication/RabbitMQ/ 130 | ---> Using cache 131 | ---> 60759c3cf34d 132 | Step 5/9 : COPY . ${SOURCES} 133 | ---> Using cache 134 | ---> 5b0b479e5978 135 | Step 6/9 : RUN cd ${SOURCES}consumer/ && CGO_ENABLED=0 go build 136 | ---> Running in 6c4279ffdde5 137 | Removing intermediate container 6c4279ffdde5 138 | ---> b6478c3e731e 139 | Step 7/9 : ENV BROKER_ADDR amqp://guest:guest@localhost:5672/ 140 | ---> Running in 30f33f35b976 141 | Removing intermediate container 30f33f35b976 142 | ---> 86e97bd787c6 143 | Step 8/9 : WORKDIR ${SOURCES}consumer/ 144 | ---> Running in 37fcaf04af51 145 | Removing intermediate container 37fcaf04af51 146 | ---> 351e869440f2 147 | Step 9/9 : CMD ${SOURCES}consumer/consumer 148 | ---> Running in 9198b50cf1f3 149 | Removing intermediate container 9198b50cf1f3 150 | ---> c6f2e0cbddd7 151 | Successfully built c6f2e0cbddd7 152 | Successfully tagged rabbitmq-consumer:1.0.1 153 | zeus:RabbitMQ danielgil$ 154 | ``` 155 | 156 | ## Run 157 | 158 | ``` bash 159 | $ docker-compose up 160 | Starting rabbitmq_rabbitmq_1 ... done 161 | Recreating rabbitmq_rabbitmq-producer_1 ... done 162 | Recreating rabbitmq_rabbitmq-consumer_1 ... done 163 | Attaching to rabbitmq_rabbitmq_1, rabbitmq_rabbitmq-producer_1, rabbitmq_rabbitmq-consumer_1 164 | rabbitmq-producer_1 | Starting RabbitMQ producer... 165 | rabbitmq-consumer_1 | Starting RabbitMQ consumer... 166 | rabbitmq_1 | 167 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:55 === 168 | rabbitmq_1 | Starting RabbitMQ 3.6.9 on Erlang 19.1 169 | rabbitmq_1 | Copyright (C) 2007-2016 Pivotal Software, Inc. 170 | rabbitmq_1 | Licensed under the MPL. See http://www.rabbitmq.com/ 171 | rabbitmq_1 | 172 | rabbitmq_1 | RabbitMQ 3.6.9. Copyright (C) 2007-2016 Pivotal Software, Inc. 173 | rabbitmq_1 | ## ## Licensed under the MPL. See http://www.rabbitmq.com/ 174 | rabbitmq_1 | ## ## 175 | rabbitmq_1 | ########## Logs: tty 176 | rabbitmq_1 | ###### ## tty 177 | rabbitmq_1 | ########## 178 | rabbitmq_1 | Starting broker... 
179 | rabbitmq_1 | 180 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:55 === 181 | rabbitmq_1 | node : rabbit@38ad328b1a5e 182 | rabbitmq_1 | home dir : /var/lib/rabbitmq 183 | rabbitmq_1 | config file(s) : /etc/rabbitmq/rabbitmq.config 184 | rabbitmq_1 | cookie hash : MY4Ae1MS5LYaz68Hjmx/Hg== 185 | rabbitmq_1 | log : tty 186 | rabbitmq_1 | sasl log : tty 187 | rabbitmq_1 | database dir : /var/lib/rabbitmq/mnesia/rabbit@38ad328b1a5e 188 | rabbitmq_1 | 189 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 190 | rabbitmq_1 | Memory limit set to 799MB of 1998MB total. 191 | rabbitmq_1 | 192 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 193 | rabbitmq_1 | Disk free limit set to 50MB 194 | rabbitmq_1 | 195 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 196 | rabbitmq_1 | Limiting to approx 1048476 file handles (943626 sockets) 197 | rabbitmq_1 | 198 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 199 | rabbitmq_1 | FHC read buffering: OFF 200 | rabbitmq_1 | FHC write buffering: ON 201 | rabbitmq_1 | 202 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 203 | rabbitmq_1 | Waiting for Mnesia tables for 30000 ms, 9 retries left 204 | rabbitmq_1 | 205 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 206 | rabbitmq_1 | Waiting for Mnesia tables for 30000 ms, 9 retries left 207 | rabbitmq_1 | 208 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 209 | rabbitmq_1 | Priority queues enabled, real BQ is rabbit_variable_queue 210 | rabbitmq_1 | 211 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 212 | rabbitmq_1 | Starting rabbit_node_monitor 213 | rabbitmq_1 | 214 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 215 | rabbitmq_1 | Management plugin: using rates mode 'basic' 216 | rabbitmq_1 | 217 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 218 | rabbitmq_1 | msg_store_transient: using rabbit_msg_store_ets_index to provide index 219 | rabbitmq_1 | 220 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 221 | rabbitmq_1 | msg_store_persistent: using rabbit_msg_store_ets_index to provide index 222 | rabbitmq_1 | 223 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 224 | rabbitmq_1 | started TCP Listener on [::]:5672 225 | rabbitmq_1 | 226 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 227 | rabbitmq_1 | Management plugin started. Port: 15672 228 | rabbitmq_1 | 229 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 230 | rabbitmq_1 | Statistics database started. 231 | rabbitmq_1 | completed with 6 plugins. 232 | rabbitmq_1 | 233 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:58 === 234 | rabbitmq_1 | Server startup complete; 6 plugins started. 
235 | rabbitmq_1 | * rabbitmq_management 236 | rabbitmq_1 | * rabbitmq_web_dispatch 237 | rabbitmq_1 | * rabbitmq_management_agent 238 | rabbitmq_1 | * amqp_client 239 | rabbitmq_1 | * cowboy 240 | rabbitmq_1 | * cowlib 241 | rabbitmq_1 | 242 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:59 === 243 | rabbitmq_1 | accepting AMQP connection <0.503.0> (172.22.0.4:41476 -> 172.22.0.2:5672) 244 | rabbitmq_1 | 245 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:59 === 246 | rabbitmq_1 | connection <0.503.0> (172.22.0.4:41476 -> 172.22.0.2:5672): user 'guest' authenticated and granted access to vhost '/' 247 | rabbitmq-producer_1 | 2019/03/22 16:08:59 [x] Sent Hello RabbitMQ message 1 248 | rabbitmq_1 | 249 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:59 === 250 | rabbitmq_1 | accepting AMQP connection <0.516.0> (172.22.0.3:57582 -> 172.22.0.2:5672) 251 | rabbitmq_1 | 252 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:08:59 === 253 | rabbitmq_1 | connection <0.516.0> (172.22.0.3:57582 -> 172.22.0.2:5672): user 'guest' authenticated and granted access to vhost '/' 254 | rabbitmq-consumer_1 | 2019/03/22 16:08:59 [*] Waiting for messages. To exit press CTRL+C 255 | rabbitmq-consumer_1 | 2019/03/22 16:08:59 Received a message: Hello RabbitMQ message 1 256 | rabbitmq-producer_1 | 2019/03/22 16:09:04 [x] Sent Hello RabbitMQ message 2 257 | rabbitmq-consumer_1 | 2019/03/22 16:09:04 Received a message: Hello RabbitMQ message 2 258 | rabbitmq-producer_1 | 2019/03/22 16:09:09 [x] Sent Hello RabbitMQ message 3 259 | rabbitmq-consumer_1 | 2019/03/22 16:09:09 Received a message: Hello RabbitMQ message 3 260 | rabbitmq-producer_1 | 2019/03/22 16:09:14 [x] Sent Hello RabbitMQ message 4 261 | rabbitmq-consumer_1 | 2019/03/22 16:09:14 Received a message: Hello RabbitMQ message 4 262 | ``` 263 | 264 | Check that we have the producer, consumer and RabbitMQ running: 265 | ```bash 266 | $ docker ps 267 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 268 | 62a6d0e10b69 rabbitmq-producer:1.0.1 "/bin/sh -c ${SOURCE…" 2 minutes ago Up About a minute rabbitmq_rabbitmq-producer_1 269 | db5f01a14895 rabbitmq-consumer:1.0.1 "/bin/sh -c ${SOURCE…" 2 minutes ago Up About a minute rabbitmq_rabbitmq-consumer_1 270 | 38ad328b1a5e rabbitmq:3.6.9-management-alpine "docker-entrypoint.s…" 4 minutes ago Up About a minute 0.0.0.0:4369->4369/tcp, 0.0.0.0:5671-5672->5671-5672/tcp, 0.0.0.0:15671-15672->15671-15672/tcp, 0.0.0.0:25672->25672/tcp rabbitmq_rabbitmq_1 271 | ``` 272 | 273 | 274 | ## RabbitMQ Management Console 275 | We can access the management console under this URL: 276 | ``` 277 | http://localhost:15672/ 278 | ``` 279 | where 280 | ``` 281 | Username: guest 282 | Password: guest 283 | ``` 284 | 285 | Now we could stop the producer with 286 | ``` 287 | $ docker stop rabbitmq_rabbitmq-producer_1 288 | rabbitmq_rabbitmq-producer_1 289 | ``` 290 | 291 | and RabbitMQ will detect it: 292 | ``` 293 | rabbitmq_1 | 294 | rabbitmq_1 | =WARNING REPORT==== 22-Mar-2019::16:17:20 === 295 | rabbitmq_1 | closing AMQP connection <0.470.0> (172.22.0.4:41482 -> 172.22.0.2:5672): 296 | rabbitmq_1 | client unexpectedly closed TCP connection 297 | rabbitmq_rabbitmq-producer_1 exited with code 137 298 | ``` 299 | 300 | If instead of stopping the producer, we stop the consumer: 301 | ``` bash 302 | $ docker stop rabbitmq_rabbitmq-consumer_1 303 | rabbitmq_rabbitmq-consumer_1 304 | ```` 305 | 306 | the messages are queued but not consumed 307 | ``` 308 | rabbitmq_1 | 309 | rabbitmq_1 | =WARNING REPORT==== 22-Mar-2019::16:19:41 
=== 310 | rabbitmq_1 | closing AMQP connection <0.476.0> (172.22.0.3:57640 -> 172.22.0.2:5672): 311 | rabbitmq_1 | client unexpectedly closed TCP connection 312 | rabbitmq_rabbitmq-consumer_1 exited with code 137 313 | rabbitmq-producer_1 | 2019/03/22 16:19:46 [x] Sent Hello RabbitMQ message 5 314 | rabbitmq-producer_1 | 2019/03/22 16:19:51 [x] Sent Hello RabbitMQ message 6 315 | rabbitmq-producer_1 | 2019/03/22 16:19:56 [x] Sent Hello RabbitMQ message 7 316 | rabbitmq-producer_1 | 2019/03/22 16:20:01 [x] Sent Hello RabbitMQ message 8 317 | rabbitmq-producer_1 | 2019/03/22 16:20:06 [x] Sent Hello RabbitMQ message 9 318 | rabbitmq-producer_1 | 2019/03/22 16:20:11 [x] Sent Hello RabbitMQ message 10 319 | rabbitmq-producer_1 | 2019/03/22 16:20:16 [x] Sent Hello RabbitMQ message 11 320 | rabbitmq-producer_1 | 2019/03/22 16:20:20 [x] Sent Hello RabbitMQ message 12 321 | ``` 322 | 323 | If after awhile we start again the consumer we will see something like this: 324 | ``` 325 | rabbitmq-producer_1 | 2019/03/22 16:25:35 [x] Sent Hello RabbitMQ message 75 326 | rabbitmq-producer_1 | 2019/03/22 16:25:40 [x] Sent Hello RabbitMQ message 76 327 | rabbitmq-producer_1 | 2019/03/22 16:25:45 [x] Sent Hello RabbitMQ message 77 328 | rabbitmq-producer_1 | 2019/03/22 16:25:50 [x] Sent Hello RabbitMQ message 78 329 | rabbitmq-producer_1 | 2019/03/22 16:25:55 [x] Sent Hello RabbitMQ message 79 330 | rabbitmq_1 | 331 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:25:56 === 332 | rabbitmq_1 | accepting AMQP connection <0.515.0> (172.22.0.3:57658 -> 172.22.0.2:5672) 333 | rabbitmq_1 | 334 | rabbitmq_1 | =INFO REPORT==== 22-Mar-2019::16:25:56 === 335 | rabbitmq_1 | connection <0.515.0> (172.22.0.3:57658 -> 172.22.0.2:5672): user 'guest' authenticated and granted access to vhost '/' 336 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 [*] Waiting for messages. To exit press CTRL+C 337 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 5 338 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 6 339 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 7 340 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 8 341 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 9 342 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 10 343 | rabbitmq-consumer_1 | 2019/03/22 16:25:56 Received a message: Hello RabbitMQ message 11 344 | ``` -------------------------------------------------------------------------------- /Communication/RabbitMQ/consumer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get github.com/streadway/amqp 6 | 7 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Communication/RabbitMQ/ 8 | COPY . 
${SOURCES} 9 | 10 | RUN cd ${SOURCES}consumer/ && CGO_ENABLED=0 go build 11 | 12 | ENV BROKER_ADDR amqp://guest:guest@localhost:5672/ 13 | 14 | WORKDIR ${SOURCES}consumer/ 15 | CMD ${SOURCES}consumer/consumer 16 | -------------------------------------------------------------------------------- /Communication/RabbitMQ/consumer/consumer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | "github.com/streadway/amqp" 10 | ) 11 | 12 | func main() { 13 | fmt.Println("Starting RabbitMQ consumer...") 14 | time.Sleep(7 * time.Second) 15 | 16 | conn, err := amqp.Dial(brokerAddr()) 17 | failOnError(err, "Failed to connect to RabbitMQ") 18 | defer conn.Close() 19 | 20 | ch, err := conn.Channel() 21 | failOnError(err, "Failed to open a channel") 22 | defer ch.Close() 23 | 24 | q, err := ch.QueueDeclare( 25 | queue(), // name 26 | true, // durable 27 | false, // delete when unused 28 | false, // exclusive 29 | false, // no-wait 30 | nil, // arguments 31 | ) 32 | failOnError(err, "Failed to declare a queue") 33 | 34 | msgs, err := ch.Consume( 35 | q.Name, // queue 36 | "", // consumer 37 | true, // auto-ack 38 | false, // exclusive 39 | false, // no-local 40 | false, // no-wait 41 | nil, // args 42 | ) 43 | failOnError(err, "Failed to register a consumer") 44 | 45 | forever := make(chan bool) 46 | 47 | go func() { 48 | for d := range msgs { 49 | log.Printf("Received a message: %s", d.Body) 50 | } 51 | }() 52 | 53 | log.Printf(" [*] Waiting for messages. To exit press CTRL+C") 54 | <-forever 55 | } 56 | 57 | func brokerAddr() string { 58 | brokerAddr := os.Getenv("BROKER_ADDR") 59 | if len(brokerAddr) == 0 { 60 | brokerAddr = "amqp://guest:guest@localhost:5672/" 61 | } 62 | return brokerAddr 63 | } 64 | 65 | func queue() string { 66 | queue := os.Getenv("QUEUE") 67 | if len(queue) == 0 { 68 | queue = "default-queue" 69 | } 70 | return queue 71 | } 72 | 73 | func failOnError(err error, msg string) { 74 | if err != nil { 75 | log.Fatalf("%s: %s", msg, err) 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /Communication/RabbitMQ/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | rabbitmq: 5 | image: rabbitmq:3.6.9-management-alpine 6 | ports: 7 | - "4369:4369" 8 | - "5671:5671" 9 | - "5672:5672" 10 | - "15671:15671" 11 | - "15672:15672" 12 | - "25672:25672" 13 | environment: 14 | - RABBITMQ_DEFAULT_USER=guest 15 | - RABBITMQ_DEFAULT_PASS=guest 16 | networks: 17 | - sky-net 18 | 19 | rabbitmq-producer: 20 | build: 21 | context: . 22 | dockerfile: producer/Dockerfile 23 | image: rabbitmq-producer:1.0.1 24 | environment: 25 | - BROKER_ADDR=amqp://guest:guest@rabbitmq:5672/ 26 | - QUEUE=test-queue 27 | depends_on: 28 | - rabbitmq 29 | links: 30 | - rabbitmq 31 | networks: 32 | - sky-net 33 | 34 | rabbitmq-consumer: 35 | build: 36 | context: . 
37 | dockerfile: consumer/Dockerfile 38 | image: rabbitmq-consumer:1.0.1 39 | environment: 40 | - BROKER_ADDR=amqp://guest:guest@rabbitmq:5672/ 41 | - QUEUE=test-queue 42 | depends_on: 43 | - rabbitmq 44 | links: 45 | - rabbitmq 46 | networks: 47 | - sky-net 48 | 49 | networks: 50 | sky-net: 51 | driver: bridge 52 | -------------------------------------------------------------------------------- /Communication/RabbitMQ/producer/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get github.com/streadway/amqp 6 | 7 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Communication/RabbitMQ/ 8 | COPY . ${SOURCES} 9 | 10 | RUN cd ${SOURCES}producer/ && CGO_ENABLED=0 go build 11 | 12 | ENV BROKER_ADDR amqp://guest:guest@localhost:5672/ 13 | 14 | WORKDIR ${SOURCES}producer/ 15 | CMD ${SOURCES}producer/producer 16 | -------------------------------------------------------------------------------- /Communication/RabbitMQ/producer/producer.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "os" 7 | "time" 8 | 9 | "github.com/streadway/amqp" 10 | ) 11 | 12 | func main() { 13 | fmt.Println("Starting RabbitMQ producer...") 14 | time.Sleep(7 * time.Second) 15 | 16 | conn, err := amqp.Dial(brokerAddr()) 17 | failOnError(err, "Failed to connect to RabbitMQ") 18 | defer conn.Close() 19 | 20 | ch, err := conn.Channel() 21 | failOnError(err, "Failed to open a channel") 22 | defer ch.Close() 23 | 24 | q, err := ch.QueueDeclare( 25 | queue(), // name 26 | true, // durable 27 | false, // delete when unused 28 | false, // exclusive 29 | false, // no-wait 30 | nil, // arguments 31 | ) 32 | failOnError(err, "Failed to declare a queue") 33 | 34 | msgCount := 0 35 | 36 | // Get signal for finish 37 | doneCh := make(chan struct{}) 38 | 39 | go func() { 40 | for { 41 | msgCount++ 42 | body := fmt.Sprintf("Hello RabbitMQ message %v", msgCount) 43 | 44 | err = ch.Publish( 45 | "", // exchange 46 | q.Name, // routing key 47 | false, // mandatory 48 | false, // immediate 49 | amqp.Publishing{ 50 | ContentType: "text/plain", 51 | Body: []byte(body), 52 | }) 53 | log.Printf(" [x] Sent %s", body) 54 | failOnError(err, "Failed to publish a message") 55 | 56 | time.Sleep(5 * time.Second) 57 | } 58 | }() 59 | 60 | <-doneCh 61 | } 62 | 63 | func brokerAddr() string { 64 | brokerAddr := os.Getenv("BROKER_ADDR") 65 | if len(brokerAddr) == 0 { 66 | brokerAddr = "amqp://guest:guest@localhost:5672/" 67 | } 68 | return brokerAddr 69 | } 70 | 71 | func queue() string { 72 | queue := os.Getenv("QUEUE") 73 | if len(queue) == 0 { 74 | queue = "default-queue" 75 | } 76 | return queue 77 | } 78 | 79 | func failOnError(err error, msg string) { 80 | if err != nil { 81 | log.Fatalf("%s: %s", msg, err) 82 | } 83 | } 84 | -------------------------------------------------------------------------------- /Configuration/Consul/README.md: -------------------------------------------------------------------------------- 1 | # Service Configuration 2 | Using Consul for Central Microservice Configuration 3 | 4 | ## Add key/value 5 | 6 | ### From dashboard 7 | We can add configuration values from the Consul web UI (dashboard) under the `key/value` tab. 8 | 9 | ### From Consul CLI 10 | We can add configuration using the Consul CLI: 11 | 12 | ``` bash 13 | $ consul kv put foo bar 14 | Success! 
Data written to: foo 15 | ``` 16 | 17 | and we can see in the Consul console logs the following message: 18 | 19 | ```bash 20 | consul_1 | 2019/03/20 11:28:43 [DEBUG] http: Request PUT /v1/kv/foo (457.4µs) from=172.19.0.1:60150 21 | ``` 22 | 23 | ### From REST API 24 | 25 | Request: 26 | ``` 27 | PUT http://localhost:8500/v1/kv/gin-web/message 28 | Body: Hello Gin Framework. 29 | ``` 30 | 31 | Response: 32 | ``` 33 | Status: 200 OK 34 | Body: true 35 | ``` 36 | 37 | ## Retrieve key/value 38 | 39 | ### From Consul CLI 40 | ``` bash 41 | $ consul kv get foo 42 | bar 43 | ``` 44 | 45 | ### From REST API 46 | 47 | Request: 48 | ``` 49 | GET http://localhost:8500/v1/kv/?keys= 50 | ``` 51 | 52 | Response: 53 | ``` 54 | [ 55 | "foo", 56 | "gin-web/message" 57 | ] 58 | ``` 59 | 60 | #### Display all configuration for gin-web path 61 | Includes the value base64 encoded. 62 | 63 | Request: 64 | ``` 65 | GET http://localhost:8500/v1/kv/gin-web?recurse= 66 | ``` 67 | 68 | Response: 69 | ``` 70 | [ 71 | { 72 | "LockIndex": 0, 73 | "Key": "gin-web/message", 74 | "Flags": 0, 75 | "Value": "SGVsbG8gR2luIEZyYW1ld29yay4=", 76 | "CreateIndex": 81, 77 | "ModifyIndex": 81 78 | } 79 | ] 80 | ``` 81 | 82 | #### Display raw value 83 | 84 | Request: 85 | ``` 86 | GET http://localhost:8500/v1/kv/gin-web/message?raw=true 87 | ``` 88 | 89 | Response: 90 | ``` 91 | Hello Gin Framework. 92 | ``` 93 | 94 | ## Update key/value 95 | 96 | ### From REST API 97 | 98 | Request: 99 | ``` 100 | PUT http://localhost:8500/v1/kv/gin-web/message 101 | Hello Gin Framework (Updated). 102 | ``` 103 | 104 | Response: 105 | ``` 106 | Status: 200 OK 107 | Body: true 108 | ``` 109 | 110 | 111 | 112 | ## Delete key/value 113 | 114 | ### From REST API 115 | 116 | Request: 117 | ``` 118 | DELETE http://localhost:8500/v1/kv/foo 119 | ``` 120 | 121 | Response: 122 | ``` 123 | Status: 200 OK 124 | Body: true 125 | ``` 126 | 127 | -------------------------------------------------------------------------------- /Configuration/Consul/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | consul: 5 | image: consul:0.8.3 6 | ports: 7 | - "8300:8300" 8 | - "8400:8400" 9 | - "8500:8500" 10 | networks: 11 | - sky-net 12 | 13 | networks: 14 | sky-net: 15 | driver: bridge -------------------------------------------------------------------------------- /Discovery/Consul/README.md: -------------------------------------------------------------------------------- 1 | # Service discovery 2 | 3 | # Consul 4 | 5 | Consul is a service mesh solution providing a full featured control plane with service discovery, configuration, and segmentation functionality. Each of these features can be used individually as needed, or they can be used together to build a full service mesh. Consul requires a data plane and supports both a proxy and native integration model. Consul ships with a simple built-in proxy so that everything works out of the box, but also supports 3rd party proxy integrations such as Envoy. 6 | 7 | ### docker-compose.yml 8 | Contains the required definition to start up Consul by using docker compose. 9 | 10 | Create a service definition specifiying a consul docker image, defining the 3 ports, linking consul to demo microservices and linking it to a network. 11 | 12 | Fire up 2 instances of the microservices from /Frameworks/Gin-Web on ports 8080 and 9090. 
13 | 14 | ``` 15 | version: '2' 16 | 17 | services: 18 | consul: 19 | image: consul:0.8.3 20 | ports: 21 | - "8300:8300" 22 | - "8400:8400" 23 | - "8500:8500" 24 | links: 25 | - gin-web-01 26 | - gin-web-02 27 | networks: 28 | - sky-net 29 | 30 | gin-web-01: 31 | image: gin-web:1.0.1 32 | environment: 33 | - PORT=8080 34 | ports: 35 | - "8080:8080" 36 | networks: 37 | - sky-net 38 | 39 | gin-web-02: 40 | image: gin-web:1.0.1 41 | environment: 42 | - PORT=9090 43 | ports: 44 | - "9090:9090" 45 | networks: 46 | - sky-net 47 | 48 | networks: 49 | sky-net: 50 | driver: bridge 51 | ``` 52 | 53 | ### Start Consul 54 | Start Consul and the 2 microservices configured: 55 | 56 | ``` bash 57 | $ cd ./Discovery/Consul 58 | $ docker-compose up 59 | Creating network "consul_sky-net" with driver "bridge" 60 | Pulling consul (consul:0.8.3)... 61 | 0.8.3: Pulling from library/consul 62 | 6f821164d5b7: Pull complete 63 | 0def154e6a6c: Pull complete 64 | 3d8c5010f2a6: Pull complete 65 | 16a3ec6049b7: Pull complete 66 | f5c11926a0d8: Pull complete 67 | Creating consul_gin-web-01_1 ... done 68 | Creating consul_gin-web-02_1 ... done 69 | Creating consul_consul_1 ... done 70 | Attaching to consul_gin-web-01_1, consul_gin-web-02_1, consul_consul_1 71 | gin-web-02_1 | [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. 72 | gin-web-02_1 | 73 | gin-web-02_1 | [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production. 74 | gin-web-02_1 | - using env: export GIN_MODE=release 75 | gin-web-02_1 | - using code: gin.SetMode(gin.ReleaseMode) 76 | gin-web-02_1 | 77 | gin-web-02_1 | [GIN-debug] GET /ping --> main.main.func1 (3 handlers) 78 | gin-web-01_1 | [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. 79 | gin-web-01_1 | 80 | gin-web-01_1 | [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production. 
81 | gin-web-01_1 | - using env: export GIN_MODE=release 82 | gin-web-01_1 | - using code: gin.SetMode(gin.ReleaseMode) 83 | gin-web-01_1 | 84 | gin-web-01_1 | [GIN-debug] GET /ping --> main.main.func1 (3 handlers) 85 | gin-web-01_1 | [GIN-debug] GET /hello --> main.main.func2 (3 handlers) 86 | gin-web-01_1 | [GIN-debug] GET /api/books --> main.main.func3 (3 handlers) 87 | gin-web-01_1 | [GIN-debug] POST /api/books --> main.main.func4 (3 handlers) 88 | gin-web-01_1 | [GIN-debug] GET /api/books/:isbn --> main.main.func5 (3 handlers) 89 | gin-web-01_1 | [GIN-debug] PUT /api/books/:isbn --> main.main.func6 (3 handlers) 90 | gin-web-01_1 | [GIN-debug] DELETE /api/books/:isbn --> main.main.func7 (3 handlers) 91 | gin-web-01_1 | [GIN-debug] Loaded HTML Templates (2): 92 | gin-web-01_1 | - 93 | gin-web-01_1 | - index.html 94 | gin-web-01_1 | 95 | gin-web-01_1 | [GIN-debug] GET /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 96 | gin-web-01_1 | [GIN-debug] HEAD /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 97 | gin-web-01_1 | [GIN-debug] GET / --> main.main.func8 (3 handlers) 98 | gin-web-01_1 | [GIN-debug] Listening and serving HTTP on :8080 99 | gin-web-02_1 | [GIN-debug] GET /hello --> main.main.func2 (3 handlers) 100 | gin-web-02_1 | [GIN-debug] GET /api/books --> main.main.func3 (3 handlers) 101 | gin-web-02_1 | [GIN-debug] POST /api/books --> main.main.func4 (3 handlers) 102 | gin-web-02_1 | [GIN-debug] GET /api/books/:isbn --> main.main.func5 (3 handlers) 103 | gin-web-02_1 | [GIN-debug] PUT /api/books/:isbn --> main.main.func6 (3 handlers) 104 | gin-web-02_1 | [GIN-debug] DELETE /api/books/:isbn --> main.main.func7 (3 handlers) 105 | gin-web-02_1 | [GIN-debug] Loaded HTML Templates (2): 106 | gin-web-02_1 | - 107 | gin-web-02_1 | - index.html 108 | gin-web-02_1 | 109 | gin-web-02_1 | [GIN-debug] GET /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 110 | gin-web-02_1 | [GIN-debug] HEAD /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 111 | gin-web-02_1 | [GIN-debug] GET / --> main.main.func8 (3 handlers) 112 | gin-web-02_1 | [GIN-debug] Listening and serving HTTP on :9090 113 | consul_1 | ==> Starting Consul agent... 114 | consul_1 | ==> Consul agent running! 
115 | consul_1 | Version: 'v0.8.3' 116 | consul_1 | Node ID: '3c5eeeff-e5d6-afae-a297-5058f80a1b93' 117 | consul_1 | Node name: '9014a96ed9bb' 118 | consul_1 | Datacenter: 'dc1' 119 | consul_1 | Server: true (bootstrap: false) 120 | consul_1 | Client Addr: 0.0.0.0 (HTTP: 8500, HTTPS: -1, DNS: 8600) 121 | consul_1 | Cluster Addr: 127.0.0.1 (LAN: 8301, WAN: 8302) 122 | consul_1 | Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false 123 | consul_1 | Atlas: 124 | consul_1 | 125 | consul_1 | ==> Log data will now stream in as it occurs: 126 | consul_1 | 127 | consul_1 | 2019/03/19 09:50:29 [DEBUG] Using unique ID "3c5eeeff-e5d6-afae-a297-5058f80a1b93" from host as node ID 128 | consul_1 | 2019/03/19 09:50:29 [INFO] raft: Initial configuration (index=1): [{Suffrage:Voter ID:127.0.0.1:8300 Address:127.0.0.1:8300}] 129 | consul_1 | 2019/03/19 09:50:29 [INFO] raft: Node at 127.0.0.1:8300 [Follower] entering Follower state (Leader: "") 130 | consul_1 | 2019/03/19 09:50:29 [INFO] serf: EventMemberJoin: 9014a96ed9bb 127.0.0.1 131 | consul_1 | 2019/03/19 09:50:29 [INFO] consul: Adding LAN server 9014a96ed9bb (Addr: tcp/127.0.0.1:8300) (DC: dc1) 132 | consul_1 | 2019/03/19 09:50:29 [INFO] serf: EventMemberJoin: 9014a96ed9bb.dc1 127.0.0.1 133 | consul_1 | 2019/03/19 09:50:29 [INFO] consul: Handled member-join event for server "9014a96ed9bb.dc1" in area "wan" 134 | consul_1 | 2019/03/19 09:50:29 [WARN] raft: Heartbeat timeout from "" reached, starting election 135 | consul_1 | 2019/03/19 09:50:29 [INFO] raft: Node at 127.0.0.1:8300 [Candidate] entering Candidate state in term 2 136 | consul_1 | 2019/03/19 09:50:29 [DEBUG] raft: Votes needed: 1 137 | consul_1 | 2019/03/19 09:50:29 [DEBUG] raft: Vote granted from 127.0.0.1:8300 in term 2. Tally: 1 138 | consul_1 | 2019/03/19 09:50:29 [INFO] raft: Election won. Tally: 1 139 | consul_1 | 2019/03/19 09:50:29 [INFO] raft: Node at 127.0.0.1:8300 [Leader] entering Leader state 140 | consul_1 | 2019/03/19 09:50:29 [INFO] consul: cluster leadership acquired 141 | consul_1 | 2019/03/19 09:50:29 [INFO] consul: New leader elected: 9014a96ed9bb 142 | consul_1 | 2019/03/19 09:50:29 [DEBUG] consul: reset tombstone GC to index 3 143 | consul_1 | 2019/03/19 09:50:29 [INFO] consul: member '9014a96ed9bb' joined, marking health alive 144 | consul_1 | 2019/03/19 09:50:29 [INFO] agent: Synced service 'consul' 145 | consul_1 | 2019/03/19 09:50:29 [DEBUG] agent: Node info in sync 146 | ``` 147 | 148 | Now we can open a web browser and navigate to the UI of Consul: 149 | ``` 150 | http://localhost:8500 151 | ``` 152 | 153 | At this point, we can see in the Consul dashboard (Service tab) that the only service registered is Consul itself. 154 | 155 | ## Registering new service 156 | Using the REST API of consul itself to register few services. To achieve this, we will use the REST client Postman. 
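As an alternative to the Postman requests below, the registration call can also be scripted. The following is only a sketch, not part of the repository: it assumes Consul is reachable on `localhost:8500` and reuses the `gin-web-01` payload shown in the "Register first microservice" section below.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Service definition, identical to the payload used in the Postman request below.
	payload := []byte(`{
	  "ID": "gin-web-01",
	  "Name": "gin-web",
	  "Tags": ["cloud-native-go", "v1"],
	  "Address": "gin-web-01",
	  "Port": 8080,
	  "EnableTagOverride": false,
	  "check": {
	    "id": "ping",
	    "name": "HTTP API on port 8080",
	    "http": "http://gin-web-01:8080/ping",
	    "interval": "5s",
	    "timeout": "1s"
	  }
	}`)

	// PUT the definition to the local Consul agent.
	req, err := http.NewRequest(http.MethodPut,
		"http://localhost:8500/v1/agent/service/register", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	fmt.Println("registration status:", resp.Status) // expect "200 OK"
}
```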
157 | 158 | ### Service catalog 159 | URL to access the service catalog: 160 | ``` 161 | GET http://localhost:8500/v1/catalog/services 162 | ``` 163 | 164 | ### Service agents 165 | URL to access the service agents: 166 | ``` 167 | GET http://localhost:8500/v1/agent/services 168 | ``` 169 | 170 | ### Register first microservice 171 | Register a service for the first microservice: 172 | 173 | 174 | ``` 175 | PUT http://localhost:8500/v1/agent/service/register 176 | Body: 177 | { 178 | "ID": "gin-web-01", 179 | "Name": "gin-web", 180 | "Tags": [ 181 | "cloud-native-go", 182 | "v1" 183 | ], 184 | "Address": "gin-web-01", 185 | "Port": 8080, 186 | "EnableTagOverride": false, 187 | "check": { 188 | "id": "ping", 189 | "name": "HTTP API on port 8080", 190 | "http": "http://gin-web-01:8080/ping", 191 | "interval": "5s", 192 | "timeout": "1s" 193 | } 194 | } 195 | ``` 196 | 197 | After registering the 2 instances (`gin-web-01` and `gin-web-02`) under the same name `gin-web`, we can verify the new registration by calling the agent services: 198 | 199 | Request: 200 | ``` 201 | GET http://localhost:8500/v1/agent/services 202 | ``` 203 | 204 | Response: 205 | ``` 206 | { 207 | "consul": { 208 | "ID": "consul", 209 | "Service": "consul", 210 | "Tags": [], 211 | "Address": "", 212 | "Port": 8300, 213 | "EnableTagOverride": false, 214 | "CreateIndex": 0, 215 | "ModifyIndex": 0 216 | }, 217 | "gin-web-01": { 218 | "ID": "gin-web-01", 219 | "Service": "gin-web", 220 | "Tags": [ 221 | "cloud-native-go", 222 | "v1" 223 | ], 224 | "Address": "gin-web-01", 225 | "Port": 8080, 226 | "EnableTagOverride": false, 227 | "CreateIndex": 0, 228 | "ModifyIndex": 0 229 | }, 230 | "gin-web-02": { 231 | "ID": "gin-web-02", 232 | "Service": "gin-web", 233 | "Tags": [ 234 | "cloud-native-go", 235 | "v1" 236 | ], 237 | "Address": "gin-web-02", 238 | "Port": 9090, 239 | "EnableTagOverride": false, 240 | "CreateIndex": 0, 241 | "ModifyIndex": 0 242 | } 243 | } 244 | ``` 245 | 246 | Consul is responsible for checking the healthy of the microservices, it calls constantly the ping URL of both services. 
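A client could use exactly this health information for service lookup. The sketch below is not part of the repository; it queries the `/v1/health/service/gin-web?passing=` endpoint (documented in the "Get information of healthy services" section further down) and prints the address and port of every healthy instance:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// healthEntry maps only the fields we need from Consul's health response.
type healthEntry struct {
	Service struct {
		ID      string
		Address string
		Port    int
	}
}

func main() {
	resp, err := http.Get("http://localhost:8500/v1/health/service/gin-web?passing=")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var entries []healthEntry
	if err := json.NewDecoder(resp.Body).Decode(&entries); err != nil {
		panic(err)
	}

	// Each entry corresponds to one healthy service instance.
	for _, e := range entries {
		fmt.Printf("healthy instance %s at %s:%d\n", e.Service.ID, e.Service.Address, e.Service.Port)
	}
}
```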
247 | 248 | If we list the docker images, we will see the consul itself and the 2 services running: 249 | 250 | ``` bash 251 | $ docker ps 252 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 253 | 9014a96ed9bb consul:0.8.3 "docker-entrypoint.s…" About an hour ago Up About an hour 0.0.0.0:8300->8300/tcp, 0.0.0.0:8400->8400/tcp, 8301-8302/tcp, 8301-8302/udp, 0.0.0.0:8500->8500/tcp, 8600/tcp, 8600/udp consul_consul_1 254 | 1a0467e07d44 gin-web:1.0.1 "/bin/sh -c ${SOURCE…" About an hour ago Up About an hour 0.0.0.0:8080->8080/tcp consul_gin-web-01_1 255 | 3ee6a67e8123 gin-web:1.0.1 "/bin/sh -c ${SOURCE…" About an hour ago Up About an hour 8080/tcp, 0.0.0.0:9090->9090/tcp 256 | ``` 257 | 258 | ### Forcing a service failure 259 | 260 | If we stop one of the containers: 261 | ``` bash 262 | $ docker stop consul_gin-web-01_1 263 | consul_gin-web-01_1 264 | ``` 265 | 266 | we will see in the consul logs that the ping is failing, the service became unhealthy: 267 | ``` bash 268 | [WARN] agent: http request failed 'http://gin-web-01:8080/ping': Get http://gin-web-01:8080/ping: dial tcp: lookup gin-web-01 on 127.0.0.11:53: no such host 269 | ``` 270 | 271 | ### Check service health 272 | 273 | #### Get health of all services 274 | 275 | Request: 276 | ``` 277 | GET http://localhost:8500/v1/health/service/gin-web 278 | ``` 279 | 280 | Response: 281 | ``` 282 | [ 283 | { 284 | "Node": { 285 | "ID": "3c5eeeff-e5d6-afae-a297-5058f80a1b93", 286 | "Node": "9014a96ed9bb", 287 | "Address": "127.0.0.1", 288 | "Datacenter": "dc1", 289 | "TaggedAddresses": { 290 | "lan": "127.0.0.1", 291 | "wan": "127.0.0.1" 292 | }, 293 | "Meta": {}, 294 | "CreateIndex": 5, 295 | "ModifyIndex": 6 296 | }, 297 | "Service": { 298 | "ID": "gin-web-01", 299 | "Service": "gin-web", 300 | "Tags": [ 301 | "cloud-native-go", 302 | "v1" 303 | ], 304 | "Address": "gin-web-01", 305 | "Port": 8080, 306 | "EnableTagOverride": false, 307 | "CreateIndex": 171, 308 | "ModifyIndex": 171 309 | }, 310 | "Checks": [ 311 | { 312 | "Node": "9014a96ed9bb", 313 | "CheckID": "serfHealth", 314 | "Name": "Serf Health Status", 315 | "Status": "passing", 316 | "Notes": "", 317 | "Output": "Agent alive and reachable", 318 | "ServiceID": "", 319 | "ServiceName": "", 320 | "ServiceTags": [], 321 | "CreateIndex": 5, 322 | "ModifyIndex": 5 323 | }, 324 | { 325 | "Node": "9014a96ed9bb", 326 | "CheckID": "service:gin-web-01", 327 | "Name": "Service 'gin-web' check", 328 | "Status": "critical", 329 | "Notes": "", 330 | "Output": "Get http://gin-web-01:8080/ping: dial tcp: lookup gin-web-01 on 127.0.0.11:53: no such host", 331 | "ServiceID": "gin-web-01", 332 | "ServiceName": "gin-web", 333 | "ServiceTags": [ 334 | "cloud-native-go", 335 | "v1" 336 | ], 337 | "CreateIndex": 171, 338 | "ModifyIndex": 233 339 | } 340 | ] 341 | }, 342 | { 343 | "Node": { 344 | "ID": "3c5eeeff-e5d6-afae-a297-5058f80a1b93", 345 | "Node": "9014a96ed9bb", 346 | "Address": "127.0.0.1", 347 | "Datacenter": "dc1", 348 | "TaggedAddresses": { 349 | "lan": "127.0.0.1", 350 | "wan": "127.0.0.1" 351 | }, 352 | "Meta": {}, 353 | "CreateIndex": 5, 354 | "ModifyIndex": 6 355 | }, 356 | "Service": { 357 | "ID": "gin-web-02", 358 | "Service": "gin-web", 359 | "Tags": [ 360 | "cloud-native-go", 361 | "v1" 362 | ], 363 | "Address": "gin-web-02", 364 | "Port": 9090, 365 | "EnableTagOverride": false, 366 | "CreateIndex": 188, 367 | "ModifyIndex": 188 368 | }, 369 | "Checks": [ 370 | { 371 | "Node": "9014a96ed9bb", 372 | "CheckID": "serfHealth", 373 | "Name": "Serf Health Status", 374 | "Status": 
"passing", 375 | "Notes": "", 376 | "Output": "Agent alive and reachable", 377 | "ServiceID": "", 378 | "ServiceName": "", 379 | "ServiceTags": [], 380 | "CreateIndex": 5, 381 | "ModifyIndex": 5 382 | }, 383 | { 384 | "Node": "9014a96ed9bb", 385 | "CheckID": "service:gin-web-02", 386 | "Name": "Service 'gin-web' check", 387 | "Status": "passing", 388 | "Notes": "", 389 | "Output": "HTTP GET http://gin-web-02:9090/ping: 200 OK Output: pong", 390 | "ServiceID": "gin-web-02", 391 | "ServiceName": "gin-web", 392 | "ServiceTags": [ 393 | "cloud-native-go", 394 | "v1" 395 | ], 396 | "CreateIndex": 188, 397 | "ModifyIndex": 234 398 | } 399 | ] 400 | } 401 | ] 402 | ``` 403 | 404 | #### Get information of healthy services 405 | Only returns the services where the health is OK. 406 | 407 | Request: 408 | ``` 409 | GET http://localhost:8500/v1/health/service/gin-web?passing= 410 | ``` 411 | 412 | Response: 413 | ``` 414 | [ 415 | { 416 | "Node": { 417 | "ID": "3c5eeeff-e5d6-afae-a297-5058f80a1b93", 418 | "Node": "9014a96ed9bb", 419 | "Address": "127.0.0.1", 420 | "Datacenter": "dc1", 421 | "TaggedAddresses": { 422 | "lan": "127.0.0.1", 423 | "wan": "127.0.0.1" 424 | }, 425 | "Meta": {}, 426 | "CreateIndex": 5, 427 | "ModifyIndex": 6 428 | }, 429 | "Service": { 430 | "ID": "gin-web-02", 431 | "Service": "gin-web", 432 | "Tags": [ 433 | "cloud-native-go", 434 | "v1" 435 | ], 436 | "Address": "gin-web-02", 437 | "Port": 9090, 438 | "EnableTagOverride": false, 439 | "CreateIndex": 188, 440 | "ModifyIndex": 188 441 | }, 442 | "Checks": [ 443 | { 444 | "Node": "9014a96ed9bb", 445 | "CheckID": "serfHealth", 446 | "Name": "Serf Health Status", 447 | "Status": "passing", 448 | "Notes": "", 449 | "Output": "Agent alive and reachable", 450 | "ServiceID": "", 451 | "ServiceName": "", 452 | "ServiceTags": [], 453 | "CreateIndex": 5, 454 | "ModifyIndex": 5 455 | }, 456 | { 457 | "Node": "9014a96ed9bb", 458 | "CheckID": "service:gin-web-02", 459 | "Name": "Service 'gin-web' check", 460 | "Status": "passing", 461 | "Notes": "", 462 | "Output": "HTTP GET http://gin-web-02:9090/ping: 200 OK Output: pong", 463 | "ServiceID": "gin-web-02", 464 | "ServiceName": "gin-web", 465 | "ServiceTags": [ 466 | "cloud-native-go", 467 | "v1" 468 | ], 469 | "CreateIndex": 188, 470 | "ModifyIndex": 345 471 | } 472 | ] 473 | } 474 | ] 475 | ``` 476 | 477 | #### Get the unhealthy services 478 | Check for the critical states: 479 | 480 | Request: 481 | ``` 482 | GET http://localhost:8500/v1/health/state/critical 483 | ``` 484 | 485 | Response: 486 | ``` 487 | [ 488 | { 489 | "Node": "9014a96ed9bb", 490 | "CheckID": "service:gin-web-01", 491 | "Name": "Service 'gin-web' check", 492 | "Status": "critical", 493 | "Notes": "", 494 | "Output": "Get http://gin-web-01:8080/ping: dial tcp: lookup gin-web-01 on 127.0.0.11:53: no such host", 495 | "ServiceID": "gin-web-01", 496 | "ServiceName": "gin-web", 497 | "ServiceTags": [ 498 | "cloud-native-go", 499 | "v1" 500 | ], 501 | "CreateIndex": 171, 502 | "ModifyIndex": 359 503 | } 504 | ] 505 | ``` 506 | -------------------------------------------------------------------------------- /Discovery/Consul/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | consul: 5 | image: consul:0.8.3 6 | ports: 7 | - "8300:8300" 8 | - "8400:8400" 9 | - "8500:8500" 10 | links: 11 | - gin-web-01 12 | - gin-web-02 13 | networks: 14 | - sky-net 15 | 16 | gin-web-01: 17 | image: gin-web:1.0.1 18 | environment: 19 | - PORT=8080 20 | 
ports: 21 | - "8080:8080" 22 | networks: 23 | - sky-net 24 | 25 | gin-web-02: 26 | image: gin-web:1.0.1 27 | environment: 28 | - PORT=9090 29 | ports: 30 | - "9090:9090" 31 | networks: 32 | - sky-net 33 | 34 | networks: 35 | sky-net: 36 | driver: bridge -------------------------------------------------------------------------------- /Discovery/Kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # Implement Go Microservice Discovery and Configuration with Kubernetes 2 | 3 | ## Kubernetes Concepts 4 | 5 | ![Service and ConfigMap](./../../Images/service_and_configmap.jpg) 6 | 7 | ### Service 8 | Abstraction for a logical collection of pods; provides a DNS name. 9 | 10 | ### ConfigMap 11 | Contains a set of named strings; used as ENV variables or volumes. 12 | 13 | ## Building docker images 14 | 15 | ```bash 16 | $ docker-compose build 17 | WARNING: Some services (simple-k8s-server) use the 'deploy' key, which will be ignored. Compose does not support 'deploy' configuration - use `docker stack deploy` to deploy to a swarm. 18 | Building simple-k8s-server 19 | Step 1/6 : FROM golang:1.8.1-alpine 20 | ---> 32efc118745e 21 | Step 2/6 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Kubernetes/ 22 | ---> Running in 8d30fe2e7fa7 23 | Removing intermediate container 8d30fe2e7fa7 24 | ---> 8d8408eb8bac 25 | Step 3/6 : COPY . ${SOURCES} 26 | ---> 0627f1cbf0d5 27 | Step 4/6 : RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 28 | ---> Running in 21abe82d890f 29 | Removing intermediate container 21abe82d890f 30 | ---> da3c0f2a6d2e 31 | Step 5/6 : WORKDIR ${SOURCES}server/ 32 | ---> Running in 501942e26ad8 33 | Removing intermediate container 501942e26ad8 34 | ---> 5adf3ea4dcd5 35 | Step 6/6 : CMD ${SOURCES}server/server 36 | ---> Running in 35cb22492c9f 37 | Removing intermediate container 35cb22492c9f 38 | ---> bfcc6231cc24 39 | Successfully built bfcc6231cc24 40 | Successfully tagged simple-k8s-server:1.0.1 41 | Building simple-k8s-client 42 | Step 1/6 : FROM golang:1.8.1-alpine 43 | ---> 32efc118745e 44 | Step 2/6 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Kubernetes/ 45 | ---> Using cache 46 | ---> 8d8408eb8bac 47 | Step 3/6 : COPY . 
${SOURCES} 48 | ---> Using cache 49 | ---> 0627f1cbf0d5 50 | Step 4/6 : RUN cd ${SOURCES}client/ && CGO_ENABLED=0 go build 51 | ---> Running in e5f1649776db 52 | Removing intermediate container e5f1649776db 53 | ---> ef88512cf83b 54 | Step 5/6 : WORKDIR ${SOURCES}client/ 55 | ---> Running in e1090252352d 56 | Removing intermediate container e1090252352d 57 | ---> 67ae7e6e61f2 58 | Step 6/6 : CMD ${SOURCES}client/client 59 | ---> Running in d18b516d1775 60 | Removing intermediate container d18b516d1775 61 | ---> 3a3a19a321af 62 | Successfully built 3a3a19a321af 63 | Successfully tagged simple-k8s-client:1.0.1 64 | ``` 65 | 66 | now we have available the new docker images: 67 | ```bash 68 | $ docker images 69 | REPOSITORY TAG IMAGE ID CREATED SIZE 70 | simple-k8s-client 1.0.1 3a3a19a321af 44 seconds ago 262MB 71 | simple-k8s-server 1.0.1 bfcc6231cc24 51 seconds ago 263MB 72 | ``` 73 | 74 | 75 | ## Tagging and pushing to remote registry 76 | 77 | ### Tag 78 | We can tag a docker image with (using the docker hub username): 79 | ``` 80 | docker tag simple-k8s-client:1.0.1 danigilmayol/simple-k8s-client:1.0.1 81 | docker tag simple-k8s-server:1.0.1 danigilmayol/simple-k8s-server:1.0.1 82 | ``` 83 | 84 | Once tagged we can list again the docker images to verify that the new tagged is listed: 85 | ``` 86 | $ docker images 87 | REPOSITORY TAG IMAGE ID CREATED SIZE 88 | danigilmayol/simple-k8s-client 1.0.1 3a3a19a321af 3 minutes ago 262MB 89 | simple-k8s-client 1.0.1 3a3a19a321af 3 minutes ago 262MB 90 | danigilmayol/simple-k8s-server 1.0.1 bfcc6231cc24 3 minutes ago 263MB 91 | simple-k8s-server 1.0.1 bfcc6231cc24 3 minutes ago 263MB 92 | ``` 93 | 94 | 95 | ### Push 96 | Now we can use the following command for pushing the tagged image to the docker hub repository (using the docker.io username): 97 | ``` bash 98 | $ docker push danigilmayol/simple-k8s-client:1.0.1 99 | The push refers to repository [docker.io/danigilmayol/simple-k8s-client] 100 | 4d7e2671d1a0: Pushed 101 | 0ccda82e2d32: Pushed 102 | bf24f77dead4: Mounted from library/golang 103 | fe6a76f83540: Mounted from library/golang 104 | b0252301604b: Mounted from library/golang 105 | 23ffabff0295: Mounted from library/golang 106 | ba5126b459c9: Mounted from library/golang 107 | e154057080f4: Mounted from library/golang 108 | 1.0.1: digest: sha256:015f09924337bb7d77d78da53ffcdc4add142c297c1954b8f0ffeeda3489cbb9 size: 1991 109 | ``` 110 | 111 | ``` bash 112 | $ docker push danigilmayol/simple-k8s-server:1.0.1 113 | The push refers to repository [docker.io/danigilmayol/simple-k8s-server] 114 | 2d1ab5ed2e0b: Pushed 115 | 0ccda82e2d32: Mounted from danigilmayol/simple-k8s-client 116 | bf24f77dead4: Mounted from danigilmayol/simple-k8s-client 117 | fe6a76f83540: Mounted from danigilmayol/simple-k8s-client 118 | b0252301604b: Mounted from danigilmayol/simple-k8s-client 119 | 23ffabff0295: Mounted from danigilmayol/simple-k8s-client 120 | ba5126b459c9: Mounted from danigilmayol/simple-k8s-client 121 | e154057080f4: Mounted from danigilmayol/simple-k8s-client 122 | 1.0.1: digest: sha256:a103507b7bd4f43d9406ecf72778b485649ce0961edfb884144951ab0f274d71 size: 1991 123 | ``` 124 | 125 | Now the docker images are available from docker hub. 126 | 127 | 128 | 129 | ## Server 130 | The server service is defined in `/server/simple-k8s-server.go`. It is a simple http server that have a single endpoint `/info`. 
131 | 132 | We have defined the deployment YAML file `simple-k8s-server-deployment.yaml` to deploy the `simple-k8s-server` container to Kubernetes on port 9090, defining a readiness probe. 133 | 134 | Moreover, we have defined a service for the previous deployment in the YAML file `simple-k8s-server-service.yaml`, so that we can access the server pods using a DNS name. 135 | 136 | 137 | We first apply the deployment to Kubernetes: 138 | ```bash 139 | $ cd ./Discovery/Kubernetes 140 | $ kubectl apply -f simple-k8s-server-deployment.yaml 141 | deployment.extensions "simple-k8s-server" created 142 | ``` 143 | 144 | and then deploy the service: 145 | ```bash 146 | $ kubectl apply -f simple-k8s-server-service.yaml 147 | service "simple-k8s-server" created 148 | ``` 149 | 150 | Check the running pods: 151 | ```bash 152 | $ kubectl get pods 153 | NAME READY STATUS RESTARTS AGE 154 | simple-k8s-server-54f6786449-9gzww 1/1 Running 0 2m 155 | simple-k8s-server-54f6786449-zzpv4 1/1 Running 0 2m 156 | ``` 157 | 158 | Now we can also list the services: 159 | ```bash 160 | $ kubectl get services 161 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 162 | kubernetes ClusterIP 10.96.0.1 443/TCP 5d 163 | simple-k8s-server NodePort 10.107.233.232 9090:30212/TCP 1m 164 | ``` 165 | 166 | ## ConfigMap 167 | The ConfigMap is defined in the file `simple-k8s-configmap.yaml`. We define a service URL for our simple server that the client uses later on to look up the service. 168 | 169 | Deploy the ConfigMap to Kubernetes: 170 | ``` 171 | $ kubectl apply -f simple-k8s-configmap.yaml 172 | configmap "simple-k8s-config" created 173 | ``` 174 | 175 | ## Client 176 | The client service is defined in `/client/simple-k8s-client.go`. The function `initServiceURL` initializes the service URL from the environment variable `SERVICE_URL`. 177 | 178 | This environment variable is retrieved from the ConfigMap as defined in the file `simple-k8s-client-deployment.yaml`: 179 | ``` 180 | containers: 181 | - env: 182 | - name: SERVICE_URL 183 | valueFrom: 184 | configMapKeyRef: 185 | name: simple-k8s-config 186 | key: service.url 187 | ``` 188 | 189 | Deploy the client configuration to Kubernetes: 190 | ```bash 191 | $ kubectl apply -f simple-k8s-client-deployment.yaml 192 | deployment.extensions "simple-k8s-client" created 193 | ``` 194 | 195 | Now, if we list the pods, we can see the new client pod: 196 | ```bash 197 | $ kubectl get pods 198 | NAME READY STATUS RESTARTS AGE 199 | simple-k8s-client-8445474b45-5k7kx 1/1 Running 0 23s 200 | simple-k8s-server-54f6786449-9gzww 1/1 Running 0 15m 201 | simple-k8s-server-54f6786449-zzpv4 1/1 Running 0 15m 202 | ``` 203 | 204 | We can also list the available ConfigMaps: 205 | ```bash 206 | $ kubectl get configmap 207 | NAME DATA AGE 208 | simple-k8s-config 1 2m 209 | ``` 210 | 211 | We can check the client logs: 212 | ```bash 213 | 2019-03-21T10:35:22.183337202Z (ENV) SERVICE_URL: http://simple-k8s-server:9090/infoHello Kubernetes Discovery & Configuration. Time is 2019-03-21 10:35:22.149696705 +0000 UTC 214 | 2019-03-21T10:35:27.151003024Z Hello Kubernetes Discovery & Configuration. Time is 2019-03-21 10:35:27.150025395 +0000 UTC 215 | 2019-03-21T10:35:32.151523524Z Hello Kubernetes Discovery & Configuration. Time is 2019-03-21 10:35:32.150024818 +0000 UTC 216 | 2019-03-21T10:35:37.151047272Z Hello Kubernetes Discovery & Configuration. Time is 2019-03-21 10:35:37.149896798 +0000 UTC 217 | 2019-03-21T10:35:42.150756995Z Hello Kubernetes Discovery & Configuration. 
Time is 2019-03-21 10:35:42.149736266 +0000 UTC 218 | 2019-03-21T10:35:47.151071223Z Hello Kubernetes Discovery & Configuration. Time is 2019-03-21 10:35:47.149833414 +0000 UTC 219 | ``` 220 | and we will see that the client prints the message received from the server each 5 seconds. 221 | 222 | If we check the server logs we will see the corresponding messages from the client requests: 223 | ``` bash 224 | 2019-03-21T10:35:29.510184577Z The /info endpoint is being called... 225 | 2019-03-21T10:35:35.298218171Z The /info endpoint is being called... 226 | 2019-03-21T10:35:39.509426673Z The /info endpoint is being called... 227 | 2019-03-21T10:35:45.298700271Z The /info endpoint is being called... 228 | ``` -------------------------------------------------------------------------------- /Discovery/Kubernetes/client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Kubernetes/ 4 | COPY . ${SOURCES} 5 | 6 | RUN cd ${SOURCES}client/ && CGO_ENABLED=0 go build 7 | 8 | WORKDIR ${SOURCES}client/ 9 | CMD ${SOURCES}client/client 10 | -------------------------------------------------------------------------------- /Discovery/Kubernetes/client/simple-k8s-client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "os" 8 | "time" 9 | ) 10 | 11 | var url string 12 | 13 | func main() { 14 | initServiceURL() 15 | 16 | var client = &http.Client{ 17 | Timeout: time.Second * 10, 18 | } 19 | 20 | callHelloEvery(5*time.Second, client) 21 | } 22 | 23 | func initServiceURL() { 24 | url = os.Getenv("SERVICE_URL") 25 | fmt.Printf("(ENV) SERVICE_URL: %s", url) 26 | if len(url) == 0 { 27 | url = "http://simple-k8s-server:8080/info" 28 | } 29 | } 30 | 31 | func hello(t time.Time, client *http.Client) { 32 | // Call the greeter 33 | response, err := client.Get(url) 34 | 35 | if err != nil { 36 | fmt.Println(err) 37 | return 38 | } 39 | 40 | // print response 41 | body, _ := ioutil.ReadAll(response.Body) 42 | fmt.Printf("%s. Time is %v\n", body, t) 43 | } 44 | 45 | func callHelloEvery(d time.Duration, client *http.Client) { 46 | for x := range time.Tick(d) { 47 | hello(x, client) 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /Discovery/Kubernetes/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | simple-k8s-server: 5 | build: 6 | context: . 7 | dockerfile: server/Dockerfile 8 | image: simple-k8s-server:1.0.1 9 | environment: 10 | - PORT=9090 11 | ports: 12 | - "9090:9090" 13 | labels: 14 | kompose.service.type: NodePort 15 | deploy: 16 | replicas: 2 17 | 18 | simple-k8s-client: 19 | build: 20 | context: . 21 | dockerfile: client/Dockerfile 22 | image: simple-k8s-client:1.0.1 23 | depends_on: 24 | - simple-k8s-server 25 | environment: 26 | - SERVICE_URL=http://simple-k8s-server:9090/info -------------------------------------------------------------------------------- /Discovery/Kubernetes/server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Kubernetes/ 4 | COPY . 
${SOURCES} 5 | 6 | RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 7 | 8 | WORKDIR ${SOURCES}server/ 9 | CMD ${SOURCES}server/server -------------------------------------------------------------------------------- /Discovery/Kubernetes/server/simple-k8s-server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | ) 8 | 9 | func main() { 10 | http.HandleFunc("/info", info) 11 | http.ListenAndServe(port(), nil) 12 | } 13 | 14 | func info(w http.ResponseWriter, r *http.Request) { 15 | fmt.Println("The /info endpoint is being called...") 16 | w.WriteHeader(http.StatusOK) 17 | fmt.Fprintf(w, "Hello Kubernetes Discovery & Configuration") 18 | } 19 | 20 | func port() string { 21 | port := os.Getenv("PORT") 22 | if len(port) == 0 { 23 | port = "8080" 24 | } 25 | return ":" + port 26 | } 27 | -------------------------------------------------------------------------------- /Discovery/Kubernetes/simple-k8s-client-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.7.0 () 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: simple-k8s-client 10 | name: simple-k8s-client 11 | spec: 12 | replicas: 1 13 | strategy: {} 14 | template: 15 | metadata: 16 | creationTimestamp: null 17 | labels: 18 | io.kompose.service: simple-k8s-client 19 | spec: 20 | containers: 21 | - env: 22 | - name: SERVICE_URL 23 | valueFrom: 24 | configMapKeyRef: 25 | name: simple-k8s-config 26 | key: service.url 27 | image: danigilmayol/simple-k8s-client:1.0.1 28 | name: simple-k8s-client 29 | resources: {} 30 | restartPolicy: Always 31 | status: {} 32 | -------------------------------------------------------------------------------- /Discovery/Kubernetes/simple-k8s-client-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.version: 1.7.0 () 7 | creationTimestamp: null 8 | labels: 9 | io.kompose.service: simple-k8s-client 10 | name: simple-k8s-client 11 | spec: 12 | clusterIP: None 13 | ports: 14 | - name: headless 15 | port: 55555 16 | targetPort: 0 17 | selector: 18 | io.kompose.service: simple-k8s-client 19 | status: 20 | loadBalancer: {} 21 | -------------------------------------------------------------------------------- /Discovery/Kubernetes/simple-k8s-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: simple-k8s-config 5 | namespace: default 6 | data: 7 | service.url: "http://simple-k8s-server:9090/info" -------------------------------------------------------------------------------- /Discovery/Kubernetes/simple-k8s-server-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.service.type: NodePort 7 | kompose.version: 1.7.0 () 8 | creationTimestamp: null 9 | labels: 10 | io.kompose.service: simple-k8s-server 11 | name: simple-k8s-server 12 | spec: 13 | replicas: 2 14 | strategy: {} 15 | template: 16 | metadata: 17 | creationTimestamp: null 18 | labels: 19 | io.kompose.service: simple-k8s-server 
20 | spec: 21 | containers: 22 | - env: 23 | - name: PORT 24 | value: "9090" 25 | image: danigilmayol/simple-k8s-server:1.0.1 26 | name: simple-k8s-server 27 | ports: 28 | - containerPort: 9090 29 | resources: {} 30 | 31 | readinessProbe: 32 | httpGet: 33 | path: /info 34 | port: 9090 35 | initialDelaySeconds: 5 36 | timeoutSeconds: 5 37 | livenessProbe: 38 | httpGet: 39 | path: /info 40 | port: 9090 41 | initialDelaySeconds: 10 42 | timeoutSeconds: 5 43 | 44 | restartPolicy: Always 45 | status: {} 46 | -------------------------------------------------------------------------------- /Discovery/Kubernetes/simple-k8s-server-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | annotations: 5 | kompose.cmd: kompose convert 6 | kompose.service.type: NodePort 7 | kompose.version: 1.7.0 () 8 | creationTimestamp: null 9 | labels: 10 | io.kompose.service: simple-k8s-server 11 | name: simple-k8s-server 12 | spec: 13 | ports: 14 | - name: "9090" 15 | port: 9090 16 | targetPort: 9090 17 | selector: 18 | io.kompose.service: simple-k8s-server 19 | type: NodePort 20 | status: 21 | loadBalancer: {} 22 | -------------------------------------------------------------------------------- /Discovery/Simple/client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get github.com/hashicorp/consul/api 6 | 7 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Simple/ 8 | COPY . ${SOURCES} 9 | 10 | RUN cd ${SOURCES}client/ && CGO_ENABLED=0 go build 11 | 12 | ENV CONSUL_HTTP_ADDR localhost:8500 13 | 14 | WORKDIR ${SOURCES}client/ 15 | CMD ${SOURCES}client/client 16 | -------------------------------------------------------------------------------- /Discovery/Simple/client/README.md: -------------------------------------------------------------------------------- 1 | # Implement Go Microservice Lookup with Consul 2 | 3 | ## client 4 | The function `lookupServiceWithConsul` from the `simple-client.go` file, will connect to Consul, perform a lookup for the simple-server and construct the endpoint we need to call from the server registration. 5 | 6 | The function build a consul client and access the agent API to query all the registered services within Consul. After this, we access to the service that we are interested in, with name `simple-server`, and then we can read the address and port of the service. 7 | 8 | 9 | ### Build docker image 10 | Now we are building the docker image containing Consul, simple-server and simple-client. 11 | 12 | ```bash 13 | $ docker-compose build 14 | consul uses an image, skipping 15 | Building simple-server 16 | Step 1/9 : FROM golang:1.8.1-alpine 17 | ---> 32efc118745e 18 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 19 | ---> Using cache 20 | ---> 513a31355d6c 21 | Step 3/9 : RUN go get github.com/hashicorp/consul/api 22 | ---> Using cache 23 | ---> 083df7e35455 24 | Step 4/9 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Simple/ 25 | ---> Using cache 26 | ---> 5b5dd790b972 27 | Step 5/9 : COPY . 
${SOURCES} 28 | ---> 3c4752bf64e0 29 | Step 6/9 : RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 30 | ---> Running in 003847b13bda 31 | Removing intermediate container 003847b13bda 32 | ---> ab712d865d54 33 | Step 7/9 : ENV CONSUL_HTTP_ADDR localhost:8500 34 | ---> Running in cec44e47f853 35 | Removing intermediate container cec44e47f853 36 | ---> f0dca955538d 37 | Step 8/9 : WORKDIR ${SOURCES}server/ 38 | ---> Running in b79a668bc366 39 | Removing intermediate container b79a668bc366 40 | ---> 0a47ba18e19e 41 | Step 9/9 : CMD ${SOURCES}server/server 42 | ---> Running in 47029cb8c509 43 | Removing intermediate container 47029cb8c509 44 | ---> 0521488f8cf0 45 | Successfully built 0521488f8cf0 46 | Successfully tagged simple-server:1.0.1 47 | Building simple-client 48 | Step 1/9 : FROM golang:1.8.1-alpine 49 | ---> 32efc118745e 50 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 51 | ---> Using cache 52 | ---> 513a31355d6c 53 | Step 3/9 : RUN go get github.com/hashicorp/consul/api 54 | ---> Using cache 55 | ---> 083df7e35455 56 | Step 4/9 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Simple/ 57 | ---> Using cache 58 | ---> 5b5dd790b972 59 | Step 5/9 : COPY . ${SOURCES} 60 | ---> Using cache 61 | ---> 3c4752bf64e0 62 | Step 6/9 : RUN cd ${SOURCES}client/ && CGO_ENABLED=0 go build 63 | ---> Running in 5641566b484e 64 | Removing intermediate container 5641566b484e 65 | ---> b74f345f9ce8 66 | Step 7/9 : ENV CONSUL_HTTP_ADDR localhost:8500 67 | ---> Running in c9ff837919ce 68 | Removing intermediate container c9ff837919ce 69 | ---> f1de545b8a8a 70 | Step 8/9 : WORKDIR ${SOURCES}client/ 71 | ---> Running in 3fc764e5dd7e 72 | Removing intermediate container 3fc764e5dd7e 73 | ---> 394308aad9ba 74 | Step 9/9 : CMD ${SOURCES}client/client 75 | ---> Running in 5583a62c50f1 76 | Removing intermediate container 5583a62c50f1 77 | ---> 815722d13cde 78 | Successfully built 815722d13cde 79 | Successfully tagged simple-client:1.0.1 80 | ``` 81 | 82 | 83 | ### Start docker image 84 | Fire a Consul as well as the simple microservice: 85 | 86 | ``` bash 87 | zeus:Simple danielgil$ docker-compose up 88 | Starting simple_consul_1 ... done 89 | Recreating simple_simple-server_1 ... done 90 | Recreating simple_simple-client_1 ... done 91 | Attaching to simple_consul_1, simple_simple-server_1, simple_simple-client_1 92 | consul_1 | ==> Starting Consul agent... 93 | simple-server_1 | Starting Simple Server. 94 | consul_1 | ==> Consul agent running! 
95 | consul_1 | Version: 'v0.8.3' 96 | simple-client_1 | Starting Simple Client 97 | consul_1 | Node ID: '3c5eeeff-e5d6-afae-a297-5058f80a1b93' 98 | consul_1 | Node name: '412cdc44c745' 99 | consul_1 | Datacenter: 'dc1' 100 | consul_1 | Server: true (bootstrap: false) 101 | consul_1 | Client Addr: 0.0.0.0 (HTTP: 8500, HTTPS: -1, DNS: 8600) 102 | consul_1 | Cluster Addr: 127.0.0.1 (LAN: 8301, WAN: 8302) 103 | consul_1 | Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false 104 | consul_1 | Atlas: 105 | consul_1 | 106 | consul_1 | ==> Log data will now stream in as it occurs: 107 | consul_1 | 108 | consul_1 | 2019/03/21 09:41:57 [DEBUG] Using unique ID "3c5eeeff-e5d6-afae-a297-5058f80a1b93" from host as node ID 109 | consul_1 | 2019/03/21 09:41:57 [INFO] raft: Initial configuration (index=1): [{Suffrage:Voter ID:127.0.0.1:8300 Address:127.0.0.1:8300}] 110 | consul_1 | 2019/03/21 09:41:57 [INFO] serf: EventMemberJoin: 412cdc44c745 127.0.0.1 111 | consul_1 | 2019/03/21 09:41:57 [INFO] serf: EventMemberJoin: 412cdc44c745.dc1 127.0.0.1 112 | consul_1 | 2019/03/21 09:41:57 [INFO] raft: Node at 127.0.0.1:8300 [Follower] entering Follower state (Leader: "") 113 | consul_1 | 2019/03/21 09:41:57 [INFO] consul: Adding LAN server 412cdc44c745 (Addr: tcp/127.0.0.1:8300) (DC: dc1) 114 | consul_1 | 2019/03/21 09:41:57 [INFO] consul: Handled member-join event for server "412cdc44c745.dc1" in area "wan" 115 | consul_1 | 2019/03/21 09:41:57 [WARN] raft: Heartbeat timeout from "" reached, starting election 116 | consul_1 | 2019/03/21 09:41:57 [INFO] raft: Node at 127.0.0.1:8300 [Candidate] entering Candidate state in term 2 117 | consul_1 | 2019/03/21 09:41:57 [DEBUG] raft: Votes needed: 1 118 | consul_1 | 2019/03/21 09:41:57 [DEBUG] raft: Vote granted from 127.0.0.1:8300 in term 2. Tally: 1 119 | consul_1 | 2019/03/21 09:41:57 [INFO] raft: Election won. 
Tally: 1 120 | consul_1 | 2019/03/21 09:41:57 [INFO] raft: Node at 127.0.0.1:8300 [Leader] entering Leader state 121 | consul_1 | 2019/03/21 09:41:57 [INFO] consul: cluster leadership acquired 122 | consul_1 | 2019/03/21 09:41:57 [DEBUG] consul: reset tombstone GC to index 3 123 | consul_1 | 2019/03/21 09:41:57 [INFO] consul: member '412cdc44c745' joined, marking health alive 124 | consul_1 | 2019/03/21 09:41:57 [INFO] consul: New leader elected: 412cdc44c745 125 | consul_1 | 2019/03/21 09:41:57 [INFO] agent: Synced service 'consul' 126 | consul_1 | 2019/03/21 09:41:57 [DEBUG] agent: Node info in sync 127 | consul_1 | 2019/03/21 09:41:58 [INFO] agent: Synced service 'simple-server' 128 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Service 'consul' in sync 129 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Check 'service:simple-server' in sync 130 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Node info in sync 131 | consul_1 | 2019/03/21 09:41:58 [DEBUG] http: Request PUT /v1/agent/service/register (954.4µs) from=172.20.0.3:45014 132 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Service 'consul' in sync 133 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Service 'simple-server' in sync 134 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Check 'service:simple-server' in sync 135 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Node info in sync 136 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Service 'consul' in sync 137 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Service 'simple-server' in sync 138 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Check 'service:simple-server' in sync 139 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: Node info in sync 140 | consul_1 | 2019/03/21 09:41:58 [DEBUG] agent: pausing 4.512637805s before first HTTP request of http://eea8d2111821:8080/info 141 | consul_1 | 2019/03/21 09:41:59 [DEBUG] http: Request GET /v1/agent/services (314.8µs) from=172.20.0.4:48352 142 | simple-server_1 | The /info endpoint is being called... 143 | consul_1 | 2019/03/21 09:42:02 [DEBUG] agent: Check 'service:simple-server' is passing 144 | consul_1 | 2019/03/21 09:42:02 [DEBUG] agent: Service 'consul' in sync 145 | consul_1 | 2019/03/21 09:42:02 [DEBUG] agent: Service 'simple-server' in sync 146 | consul_1 | 2019/03/21 09:42:02 [INFO] agent: Synced check 'service:simple-server' 147 | consul_1 | 2019/03/21 09:42:02 [DEBUG] agent: Node info in sync 148 | simple-server_1 | The /info endpoint is being called... 149 | simple-client_1 | Hello Consul Discovery. Time is 2019-03-21 09:42:04.2643383 +0000 UTC 150 | consul_1 | ==> Newer Consul version available: 1.4.3 (currently running: 0.8.3) 151 | consul_1 | 2019/03/21 09:42:07 [DEBUG] agent: Check 'service:simple-server' is passing 152 | simple-server_1 | The /info endpoint is being called... 153 | simple-client_1 | Hello Consul Discovery. Time is 2019-03-21 09:42:09.2643637 +0000 UTC 154 | simple-server_1 | The /info endpoint is being called... 155 | simple-server_1 | The /info endpoint is being called... 156 | consul_1 | 2019/03/21 09:42:12 [DEBUG] agent: Check 'service:simple-server' is passing 157 | simple-client_1 | Hello Consul Discovery. Time is 2019-03-21 09:42:14.2646375 +0000 UTC 158 | simple-server_1 | The /info endpoint is being called... 159 | ``` 160 | 161 | we can see in the logs that the client is calling each 5 seconds the server. 
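
### Looking up only healthy instances

The lookup above takes whatever is registered under `simple-server` in the agent's service catalog, regardless of its health status. As a possible variation — not what `simple-client.go` ships — Consul's health API can be used to resolve only instances whose check is passing. A minimal standalone sketch, assuming the same `github.com/hashicorp/consul/api` dependency and `CONSUL_HTTP_ADDR` environment variable used in this example (the program structure is illustrative only):

```go
package main

import (
	"fmt"
	"log"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	// DefaultConfig honours CONSUL_HTTP_ADDR, exactly as in simple-client.go.
	consul, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// passingOnly=true returns only instances whose health check is passing.
	entries, _, err := consul.Health().Service("simple-server", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	if len(entries) == 0 {
		log.Fatal("no healthy simple-server instance registered")
	}

	// Build the same /info endpoint URL that simple-client.go constructs.
	service := entries[0].Service
	fmt.Printf("http://%s:%v/info\n", service.Address, service.Port)
}
```
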
-------------------------------------------------------------------------------- /Discovery/Simple/client/simple-client.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "io/ioutil" 6 | "net/http" 7 | "time" 8 | 9 | consulapi "github.com/hashicorp/consul/api" 10 | ) 11 | 12 | var url string 13 | 14 | func main() { 15 | lookupServiceWithConsul() 16 | 17 | fmt.Println("Starting Simple Client") 18 | var client = &http.Client{ 19 | Timeout: time.Second * 10, 20 | } 21 | 22 | callHelloEvery(5*time.Second, client) 23 | } 24 | 25 | func lookupServiceWithConsul() { 26 | config := consulapi.DefaultConfig() 27 | consul, error := consulapi.NewClient(config) 28 | if error != nil { 29 | fmt.Println(error) 30 | return 31 | } 32 | 33 | services, error := consul.Agent().Services() 34 | if error != nil { 35 | fmt.Println(error) 36 | return 37 | } 38 | 39 | service := services["simple-server"] 40 | address := service.Address 41 | port := service.Port 42 | 43 | url = fmt.Sprintf("http://%s:%v/info", address, port) 44 | } 45 | 46 | func hello(t time.Time, client *http.Client) { 47 | response, err := client.Get(url) 48 | 49 | if err != nil { 50 | fmt.Println(err) 51 | return 52 | } 53 | 54 | body, _ := ioutil.ReadAll(response.Body) 55 | fmt.Printf("%s. Time is %v\n", body, t) 56 | } 57 | 58 | func callHelloEvery(d time.Duration, client *http.Client) { 59 | for x := range time.Tick(d) { 60 | hello(x, client) 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /Discovery/Simple/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | consul: 5 | image: consul:0.8.3 6 | ports: 7 | - "8300:8300" 8 | - "8400:8400" 9 | - "8500:8500" 10 | networks: 11 | - sky-net 12 | 13 | simple-server: 14 | build: 15 | context: . 16 | dockerfile: server/Dockerfile 17 | image: simple-server:1.0.1 18 | environment: 19 | - CONSUL_HTTP_ADDR=consul:8500 20 | depends_on: 21 | - consul 22 | networks: 23 | - sky-net 24 | 25 | simple-client: 26 | build: 27 | context: . 28 | dockerfile: client/Dockerfile 29 | image: simple-client:1.0.1 30 | environment: 31 | - CONSUL_HTTP_ADDR=consul:8500 32 | depends_on: 33 | - consul 34 | - simple-server 35 | networks: 36 | - sky-net 37 | 38 | networks: 39 | sky-net: 40 | driver: bridge 41 | -------------------------------------------------------------------------------- /Discovery/Simple/server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM golang:1.8.1-alpine 2 | 3 | RUN apk update && apk upgrade && apk add --no-cache bash git 4 | 5 | RUN go get github.com/hashicorp/consul/api 6 | 7 | ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Simple/ 8 | COPY . ${SOURCES} 9 | 10 | RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 11 | 12 | ENV CONSUL_HTTP_ADDR localhost:8500 13 | 14 | WORKDIR ${SOURCES}server/ 15 | CMD ${SOURCES}server/server 16 | -------------------------------------------------------------------------------- /Discovery/Simple/server/README.md: -------------------------------------------------------------------------------- 1 | # Implement Go Microservice Registration with Consul 2 | 3 | ## server 4 | The function `registerServiceWithConsul` from the `simple-server.go` file, registers to an agent a service called `simple-server` and checks that the service is alive and healthy. 
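
Condensed, the registration amounts to the following calls against the Consul agent API. This is only a sketch (the program below is illustrative, not part of the repository); the real `registerServiceWithConsul` derives the address from the container hostname, reads the port from the `PORT` environment variable, and additionally attaches an HTTP health check on `/info` before registering:

```go
package main

import (
	"log"
	"os"

	consulapi "github.com/hashicorp/consul/api"
)

func main() {
	// DefaultConfig honours the CONSUL_HTTP_ADDR environment variable,
	// which docker-compose sets to consul:8500.
	consul, err := consulapi.NewClient(consulapi.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	hostname, _ := os.Hostname()

	registration := &consulapi.AgentServiceRegistration{
		ID:      "simple-server",
		Name:    "simple-server",
		Address: hostname, // where Consul should reach the service
		Port:    8080,     // simple-server.go derives this from PORT
	}

	if err := consul.Agent().ServiceRegister(registration); err != nil {
		log.Fatal(err)
	}
}
```
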
5 | 6 | ### Compile server 7 | Compile the server with 8 | ``` bash 9 | cd ./Discovery/Simple/server 10 | go build 11 | ``` 12 | 13 | ### Build docker image 14 | To check the service we need to start the server with consul using docker-compose. 15 | 16 | ```bash 17 | $ cd ./Discovery/Simple 18 | $ docker-compose build 19 | consul uses an image, skipping 20 | Building simple-server 21 | Step 1/9 : FROM golang:1.8.1-alpine 22 | ---> 32efc118745e 23 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 24 | ---> Using cache 25 | ---> 513a31355d6c 26 | Step 3/9 : RUN go get github.com/hashicorp/consul/api 27 | ---> Using cache 28 | ---> 083df7e35455 29 | Step 4/9 : ENV SOURCES /go/src/github.com/PacktPublishing/Advanced-Cloud-Native-Go/Discovery/Simple/ 30 | ---> Using cache 31 | ---> 5b5dd790b972 32 | Step 5/9 : COPY . ${SOURCES} 33 | ---> d77acfdfb8ec 34 | Step 6/9 : RUN cd ${SOURCES}server/ && CGO_ENABLED=0 go build 35 | ---> Running in cda45f254eb8 36 | Removing intermediate container cda45f254eb8 37 | ---> 9e4c31243065 38 | Step 7/9 : ENV CONSUL_HTTP_ADDR localhost:8500 39 | ---> Running in bbd19620d98d 40 | Removing intermediate container bbd19620d98d 41 | ---> 4869691ed2f7 42 | Step 8/9 : WORKDIR ${SOURCES}server/ 43 | ---> Running in f41e64c7c1a0 44 | Removing intermediate container f41e64c7c1a0 45 | ---> 8f66c7bed13a 46 | Step 9/9 : CMD ${SOURCES}server/server 47 | ---> Running in d40d219f0838 48 | Removing intermediate container d40d219f0838 49 | ---> 573776b437ea 50 | Successfully built 573776b437ea 51 | Successfully tagged simple-server:1.0.1 52 | ``` 53 | 54 | ### Start docker image 55 | Fire a Consul as well as the simple microservice: 56 | 57 | ``` bash 58 | $ docker-compose up 59 | Starting simple_consul_1 ... done 60 | Recreating simple_simple-server_1 ... done 61 | Attaching to simple_consul_1, simple_simple-server_1 62 | simple-server_1 | Starting Simple Server. 63 | consul_1 | ==> Starting Consul agent... 64 | consul_1 | ==> Consul agent running! 
65 | consul_1 | Version: 'v0.8.3' 66 | consul_1 | Node ID: '3c5eeeff-e5d6-afae-a297-5058f80a1b93' 67 | consul_1 | Node name: '412cdc44c745' 68 | consul_1 | Datacenter: 'dc1' 69 | consul_1 | Server: true (bootstrap: false) 70 | consul_1 | Client Addr: 0.0.0.0 (HTTP: 8500, HTTPS: -1, DNS: 8600) 71 | consul_1 | Cluster Addr: 127.0.0.1 (LAN: 8301, WAN: 8302) 72 | consul_1 | Gossip encrypt: false, RPC-TLS: false, TLS-Incoming: false 73 | consul_1 | Atlas: 74 | consul_1 | 75 | consul_1 | ==> Log data will now stream in as it occurs: 76 | consul_1 | 77 | consul_1 | 2019/03/21 09:37:15 [DEBUG] Using unique ID "3c5eeeff-e5d6-afae-a297-5058f80a1b93" from host as node ID 78 | consul_1 | 2019/03/21 09:37:15 [INFO] raft: Initial configuration (index=1): [{Suffrage:Voter ID:127.0.0.1:8300 Address:127.0.0.1:8300}] 79 | consul_1 | 2019/03/21 09:37:15 [INFO] raft: Node at 127.0.0.1:8300 [Follower] entering Follower state (Leader: "") 80 | consul_1 | 2019/03/21 09:37:15 [INFO] serf: EventMemberJoin: 412cdc44c745 127.0.0.1 81 | consul_1 | 2019/03/21 09:37:15 [INFO] serf: EventMemberJoin: 412cdc44c745.dc1 127.0.0.1 82 | consul_1 | 2019/03/21 09:37:15 [INFO] consul: Handled member-join event for server "412cdc44c745.dc1" in area "wan" 83 | consul_1 | 2019/03/21 09:37:15 [INFO] consul: Adding LAN server 412cdc44c745 (Addr: tcp/127.0.0.1:8300) (DC: dc1) 84 | consul_1 | 2019/03/21 09:37:15 [WARN] raft: Heartbeat timeout from "" reached, starting election 85 | consul_1 | 2019/03/21 09:37:15 [INFO] raft: Node at 127.0.0.1:8300 [Candidate] entering Candidate state in term 2 86 | consul_1 | 2019/03/21 09:37:15 [DEBUG] raft: Votes needed: 1 87 | consul_1 | 2019/03/21 09:37:15 [DEBUG] raft: Vote granted from 127.0.0.1:8300 in term 2. Tally: 1 88 | consul_1 | 2019/03/21 09:37:15 [INFO] raft: Election won. 
Tally: 1 89 | consul_1 | 2019/03/21 09:37:15 [INFO] raft: Node at 127.0.0.1:8300 [Leader] entering Leader state 90 | consul_1 | 2019/03/21 09:37:15 [INFO] consul: cluster leadership acquired 91 | consul_1 | 2019/03/21 09:37:15 [INFO] consul: New leader elected: 412cdc44c745 92 | consul_1 | 2019/03/21 09:37:15 [DEBUG] consul: reset tombstone GC to index 3 93 | consul_1 | 2019/03/21 09:37:15 [INFO] consul: member '412cdc44c745' joined, marking health alive 94 | consul_1 | 2019/03/21 09:37:15 [INFO] agent: Synced service 'consul' 95 | consul_1 | 2019/03/21 09:37:15 [DEBUG] agent: Node info in sync 96 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'consul' in sync 97 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: pausing 456.697941ms before first HTTP request of http://ff1cc77b53e9:8080/info 98 | consul_1 | 2019/03/21 09:37:16 [INFO] agent: Synced service 'simple-server' 99 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Check 'service:simple-server' in sync 100 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Node info in sync 101 | consul_1 | 2019/03/21 09:37:16 [DEBUG] http: Request PUT /v1/agent/service/register (4.3729ms) from=172.20.0.3:44968 102 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'consul' in sync 103 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'simple-server' in sync 104 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Check 'service:simple-server' in sync 105 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Node info in sync 106 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'consul' in sync 107 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'simple-server' in sync 108 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Check 'service:simple-server' in sync 109 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Node info in sync 110 | simple-server_1 | The /info endpoint is being called... 111 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Check 'service:simple-server' is passing 112 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'consul' in sync 113 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Service 'simple-server' in sync 114 | consul_1 | 2019/03/21 09:37:16 [INFO] agent: Synced check 'service:simple-server' 115 | consul_1 | 2019/03/21 09:37:16 [DEBUG] agent: Node info in sync 116 | simple-server_1 | The /info endpoint is being called... 117 | consul_1 | 2019/03/21 09:37:21 [DEBUG] agent: Check 'service:simple-server' is passing 118 | ``` 119 | 120 | We can see in the logs that the simple-server is echoing periodically the following output: 121 | 122 | ``` 123 | simple-server_1 | The /info endpoint is being called... 124 | ``` 125 | 126 | because Consul is checking the info endpoint regularly to verify that the simple-service is alive. 
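
These periodic requests come from the HTTP check that `registerServiceWithConsul` attaches to the registration. The relevant lines from `simple-server.go`, with comments added, are:

```go
// Consul calls http://<address>:<port>/info every 5 seconds and reports the
// check as failing if a request takes longer than 3 seconds.
registration.Check = new(consulapi.AgentServiceCheck)
registration.Check.HTTP = fmt.Sprintf("http://%s:%v/info", address, port)
registration.Check.Interval = "5s"
registration.Check.Timeout = "3s"
```
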
127 | 128 | We can test Consul stopping the server: 129 | ``` bash 130 | $ docker ps 131 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 132 | 9ee38bb6b8e8 simple-server:1.0.1 "/bin/sh -c ${SOURCE…" 3 minutes ago Up 3 minutes simple_simple-server_1 133 | 412cdc44c745 consul:0.8.3 "docker-entrypoint.s…" 4 minutes ago Up 3 minutes 0.0.0.0:8300->8300/tcp, 0.0.0.0:8400->8400/tcp, 8301-8302/tcp, 8301-8302/udp, 0.0.0.0:8500->8500/tcp, 8600/tcp, 8600/udp simple_consul_1 134 | ``` 135 | 136 | ```bash 137 | $ docker stop simple_simple-server_1 138 | simple_simple-server_1 139 | ``` 140 | 141 | and Consul realize that the simple-server is not running anymore and that the service is unhealthy: 142 | ```bash 143 | consul_1 | 2019/03/21 09:39:55 [WARN] agent: http request failed 'http://9ee38bb6b8e8:8080/info': Get http://9ee38bb6b8e8:8080/info: dial tcp: lookup 9ee38bb6b8e8 on 127.0.0.11:53: no such host 144 | ``` -------------------------------------------------------------------------------- /Discovery/Simple/server/simple-server.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "net/http" 6 | "os" 7 | "strconv" 8 | 9 | consulapi "github.com/hashicorp/consul/api" 10 | ) 11 | 12 | func main() { 13 | registerServiceWithConsul() 14 | 15 | fmt.Println("Starting Simple Server.") 16 | http.HandleFunc("/info", info) 17 | http.ListenAndServe(port(), nil) 18 | } 19 | 20 | func registerServiceWithConsul() { 21 | config := consulapi.DefaultConfig() 22 | consul, err := consulapi.NewClient(config) 23 | if err != nil { 24 | fmt.Println(err) 25 | } 26 | 27 | var registration = new(consulapi.AgentServiceRegistration) 28 | //var registration = consulapi.AgentServiceRegistration{} --> ®istration 29 | 30 | registration.ID = "simple-server" 31 | registration.Name = "simple-server" 32 | 33 | address := hostname() 34 | registration.Address = address 35 | port, _ := strconv.Atoi(port()[1:len(port())]) 36 | registration.Port = port 37 | 38 | registration.Check = new(consulapi.AgentServiceCheck) 39 | registration.Check.HTTP = fmt.Sprintf("http://%s:%v/info", address, port) 40 | registration.Check.Interval = "5s" 41 | registration.Check.Timeout = "3s" 42 | 43 | consul.Agent().ServiceRegister(registration) 44 | } 45 | 46 | func info(w http.ResponseWriter, r *http.Request) { 47 | fmt.Println("The /info endpoint is being called...") 48 | w.WriteHeader(http.StatusOK) 49 | fmt.Fprintf(w, "Hello Consul Discovery") 50 | } 51 | 52 | func port() string { 53 | port := os.Getenv("PORT") 54 | 55 | if len(port) == 0 { 56 | port = "8080" 57 | } 58 | return ":" + port 59 | } 60 | 61 | func hostname() string { 62 | hostname, _ := os.Hostname() 63 | return hostname 64 | } 65 | -------------------------------------------------------------------------------- /Frameworks/Gin-Web/Dockerfile: -------------------------------------------------------------------------------- 1 | # golang docker base image 2 | FROM golang:1.12.0-alpine 3 | 4 | # install the gin web framework (and install required packages for executing `go get`) 5 | RUN apk update && apk upgrade && apk add --no-cache bash git 6 | RUN go get github.com/gin-gonic/gin 7 | 8 | # copy local sources into this docker image (defining an environment variable SOURCES) 9 | ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Frameworks/Gin-Web/ 10 | COPY . 
${SOURCES} 11 | 12 | # change into the SOURCES directory and call `go build` 13 | RUN cd ${SOURCES} && CGO_ENABLED=0 go build 14 | 15 | # define the work directory 16 | WORKDIR ${SOURCES} 17 | 18 | # define the entry-poing command (it is, the Gin-Web executable after building the binary) 19 | CMD ${SOURCES}Gin-Web 20 | 21 | # expose port 8080 22 | EXPOSE 8080 23 | -------------------------------------------------------------------------------- /Frameworks/Gin-Web/book.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | // Book type with Name, Author and ISBN 4 | type Book struct { 5 | Title string `json:"title"` 6 | Author string `json:"author"` 7 | ISBN string `json:"isbn"` 8 | Description string `json:"description,omitempty"` 9 | } 10 | 11 | var books = map[string]Book{ 12 | "4432432432": Book{Title: "The Hitchhiker's Guide to the Galaxy", Author: "Douglas Adams", ISBN: "88854858345", Description: "a funny book"}, 13 | "000000000": Book{Title: "Advanced Cloud Native Go", Author: "M. Leander Reimer", ISBN: "0000000000"}, 14 | } 15 | 16 | // AllBooks returns a slice of all books 17 | func AllBooks() []Book { 18 | values := make([]Book, len(books)) 19 | idx := 0 20 | for _, book := range books { 21 | values[idx] = book 22 | idx++ 23 | } 24 | return values 25 | } 26 | 27 | // GetBook returns the book for a given ISBN 28 | func GetBook(isbn string) (Book, bool) { 29 | book, found := books[isbn] 30 | return book, found 31 | } 32 | 33 | // CreateBook creates a new Book if it does not exist 34 | func CreateBook(book Book) (string, bool) { 35 | _, exists := books[book.ISBN] 36 | if exists { 37 | return "", false 38 | } 39 | books[book.ISBN] = book 40 | return book.ISBN, true 41 | } 42 | 43 | // UpdateBook updates an existing book 44 | func UpdateBook(isbn string, book Book) bool { 45 | _, exists := books[book.ISBN] 46 | if exists { 47 | books[isbn] = book 48 | } 49 | return exists 50 | } 51 | 52 | // DeleteBook removes a book from the map by ISBN key 53 | func DeleteBook(isbn string) { 54 | delete(books, isbn) 55 | } 56 | -------------------------------------------------------------------------------- /Frameworks/Gin-Web/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | microservice: 5 | build: . 
6 | image: gin-web:1.0.1 7 | environment: 8 | - PORT=8080 9 | ports: 10 | - "8080:8080" -------------------------------------------------------------------------------- /Frameworks/Gin-Web/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel-gil/advanced-go-microservices/5ebf9ff61d2acea6673e6ad0a5bd4e2b047f9449/Frameworks/Gin-Web/favicon.ico -------------------------------------------------------------------------------- /Frameworks/Gin-Web/kubernetes/k8s-deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: gin-web 5 | labels: 6 | app: gin-web 7 | spec: 8 | replicas: 3 9 | template: 10 | metadata: 11 | labels: 12 | app: gin-web 13 | tier: service 14 | spec: 15 | containers: 16 | - name: gin-web 17 | image: "danigilmayol/gin-web:1.0.1" 18 | ports: 19 | - containerPort: 9090 20 | env: 21 | - name: PORT 22 | value: "9090" 23 | 24 | # define resource requests and limits 25 | resources: 26 | requests: 27 | memory: "64Mi" 28 | cpu: "125m" 29 | limits: 30 | memory: "128Mi" 31 | cpu: "250m" 32 | # check of gin-web is alive and healthy 33 | readinessProbe: 34 | httpGet: 35 | path: /ping 36 | port: 9090 37 | initialDelaySeconds: 5 38 | timeoutSeconds: 5 39 | livenessProbe: 40 | httpGet: 41 | path: /ping 42 | port: 9090 43 | initialDelaySeconds: 5 44 | timeoutSeconds: 5 45 | 46 | -------------------------------------------------------------------------------- /Frameworks/Gin-Web/kubernetes/k8s-ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: gin-web 5 | labels: 6 | app: gin-web 7 | tier: frontend 8 | spec: 9 | backend: 10 | serviceName: gin-web 11 | servicePort: 9090 -------------------------------------------------------------------------------- /Frameworks/Gin-Web/kubernetes/k8s-service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: gin-web 5 | labels: 6 | app: gin-web 7 | tier: service 8 | spec: 9 | # use NodePort here to be able to access a port on each node 10 | type: NodePort 11 | ports: 12 | - port: 9090 13 | selector: 14 | app: gin-web -------------------------------------------------------------------------------- /Frameworks/Gin-Web/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "net/http" 5 | "os" 6 | 7 | "github.com/gin-gonic/gin" 8 | ) 9 | 10 | func main() { 11 | engine := gin.Default() 12 | 13 | engine.GET("/ping", func(c *gin.Context) { 14 | c.String(http.StatusOK, "pong") 15 | }) 16 | 17 | engine.GET("/hello", func(c *gin.Context) { 18 | c.JSON(http.StatusOK, gin.H{"message": "Hello Gin Framework."}) 19 | }) 20 | 21 | // get all books 22 | engine.GET("/api/books", func(c *gin.Context) { 23 | c.JSON(http.StatusOK, AllBooks()) 24 | }) 25 | 26 | // create a new book 27 | engine.POST("/api/books", func(c *gin.Context) { 28 | var book Book 29 | if c.BindJSON(&book) == nil { 30 | isbn, created := CreateBook(book) 31 | if created { 32 | c.Header("Location", "/api/books/"+isbn) 33 | c.Status(http.StatusCreated) 34 | } else { 35 | c.Status(http.StatusConflict) 36 | } 37 | } 38 | }) 39 | 40 | // get book by ISBN 41 | engine.GET("/api/books/:isbn", func(c *gin.Context) { 42 | isbn := 
c.Params.ByName("isbn") 43 | book, found := GetBook(isbn) 44 | if found { 45 | c.JSON(http.StatusOK, book) 46 | } else { 47 | c.AbortWithStatus(http.StatusNotFound) 48 | } 49 | }) 50 | 51 | // update existing book 52 | engine.PUT("/api/books/:isbn", func(c *gin.Context) { 53 | isbn := c.Params.ByName("isbn") 54 | 55 | var book Book 56 | if c.BindJSON(&book) == nil { 57 | exists := UpdateBook(isbn, book) 58 | if exists { 59 | c.Status(http.StatusOK) 60 | } else { 61 | c.Status(http.StatusNotFound) 62 | } 63 | } 64 | }) 65 | 66 | // delete book 67 | engine.DELETE("/api/books/:isbn", func(c *gin.Context) { 68 | isbn := c.Params.ByName("isbn") 69 | DeleteBook(isbn) 70 | c.Status(http.StatusOK) 71 | }) 72 | 73 | // configuration for static files and templates 74 | engine.LoadHTMLGlob("./templates/*.html") 75 | engine.StaticFile("/favicon.ico", "./favicon.ico") 76 | 77 | engine.GET("/", func(c *gin.Context) { 78 | c.HTML(http.StatusOK, "index.html", gin.H{ 79 | "title": "Advanced Cloud Native Go with Gin Framework", 80 | }) 81 | }) 82 | 83 | // run server on PORT 84 | engine.Run(port()) 85 | } 86 | 87 | func port() string { 88 | port := os.Getenv("PORT") 89 | if len(port) == 0 { 90 | port = "8080" 91 | } 92 | return ":" + port 93 | } 94 | -------------------------------------------------------------------------------- /Frameworks/Gin-Web/templates/index.html: -------------------------------------------------------------------------------- 1 | {{ define "index.html" }} 2 | 3 | 4 | 5 | {{ .title }} 6 | 7 | 14 | 15 | 16 | 17 |
18 | 19 | 20 | 21 | {{ end }} -------------------------------------------------------------------------------- /Frameworks/README.md: -------------------------------------------------------------------------------- 1 | # Go Microservices Frameworks 2 | 3 | ## Running Gin Web Framework 4 | 5 | For this example, we will need the Gin web framework: 6 | 7 | ``` bash 8 | go get github.com/gin-gonic/gin 9 | ``` 10 | 11 | We can run the gin webserver with 12 | 13 | ``` bash 14 | $ cd ./Frameworks/Gin-Web/ 15 | $ go run *.go 16 | [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. 17 | 18 | [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production. 19 | - using env: export GIN_MODE=release 20 | - using code: gin.SetMode(gin.ReleaseMode) 21 | 22 | [GIN-debug] GET /ping --> main.main.func1 (3 handlers) 23 | [GIN-debug] GET /hello --> main.main.func2 (3 handlers) 24 | [GIN-debug] GET /api/books --> main.main.func3 (3 handlers) 25 | [GIN-debug] POST /api/books --> main.main.func4 (3 handlers) 26 | [GIN-debug] GET /api/books/:isbn --> main.main.func5 (3 handlers) 27 | [GIN-debug] PUT /api/books/:isbn --> main.main.func6 (3 handlers) 28 | [GIN-debug] DELETE /api/books/:isbn --> main.main.func7 (3 handlers) 29 | [GIN-debug] Loaded HTML Templates (2): 30 | - 31 | - index.html 32 | 33 | [GIN-debug] GET /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 34 | [GIN-debug] HEAD /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 35 | [GIN-debug] GET / --> main.main.func8 (3 handlers) 36 | [GIN-debug] Listening and serving HTTP on :8080 37 | 38 | ``` 39 | 40 | The API calls processed are displayed in the console: 41 | ``` 42 | [GIN] 2019/03/18 - 15:14:02 | 200 | 52.607µs | ::1 | GET /ping 43 | [GIN] 2019/03/18 - 15:14:53 | 200 | 228.558µs | ::1 | GET /hello 44 | [GIN] 2019/03/18 - 15:15:50 | 200 | 186.498µs | ::1 | GET /api/books 45 | [GIN] 2019/03/18 - 15:16:07 | 200 | 11.964µs | ::1 | GET /api/books/4432432432 46 | [GIN] 2019/03/18 - 15:16:40 | 201 | 383.826µs | ::1 | POST /api/books 47 | [GIN] 2019/03/18 - 15:19:39 | 200 | 44.836µs | ::1 | PUT /api/books/444444444 48 | [GIN] 2019/03/18 - 15:20:15 | 200 | 2.623µs | ::1 | DELETE /api/books/444444444 49 | ```` 50 | 51 | 52 | ## Service API 53 | 54 | ### Ping 55 | Request: 56 | ``` 57 | GET http://localhost:8080/ping 58 | ``` 59 | 60 | Response: 61 | ``` 62 | Status: 200 OK 63 | Body: pong 64 | ``` 65 | 66 | ### Hello 67 | 68 | Request: 69 | 70 | ``` 71 | GET http://localhost:8080/hello 72 | ``` 73 | 74 | Response: 75 | ``` 76 | Status: 200 OK 77 | Body: 78 | { 79 | "message": "Hello Gin Framework." 80 | } 81 | ``` 82 | 83 | ### Get all books 84 | Request: 85 | 86 | ```GET http://localhost:8080/api/books``` 87 | 88 | Response: 89 | 90 | ``` 91 | Status: 200 OK 92 | Body: 93 | [ 94 | { 95 | "title": "The Hitchhiker's Guide to the Galaxy", 96 | "author": "Douglas Adams", 97 | "isbn": "88854858345", 98 | "description": "a funny book" 99 | }, 100 | { 101 | "title": "Advanced Cloud Native Go", 102 | "author": "M. 
Leander Reimer", 103 | "isbn": "0000000000" 104 | } 105 | ] 106 | ``` 107 | 108 | ### Get book by ISBN 109 | 110 | Request: 111 | ``` 112 | GET http://localhost:8080/api/books/{ISBN} 113 | ``` 114 | 115 | Response: 116 | ``` 117 | Status: 200 OK 118 | Body: 119 | { 120 | "title": "The Hitchhiker's Guide to the Galaxy", 121 | "author": "Douglas Adams", 122 | "isbn": "88854858345", 123 | "description": "a funny book" 124 | } 125 | ``` 126 | 127 | ### Create a book 128 | 129 | Request: 130 | ``` 131 | POST http://localhost:8080/api/books 132 | Body: 133 | { 134 | "title": "Game of thrones", 135 | "author": "George R.R. Martin", 136 | "isbn": "444444444", 137 | "description": "an incredible story" 138 | } 139 | 140 | ``` 141 | 142 | Response: 143 | ``` 144 | Status: 201 Created 145 | ``` 146 | 147 | 148 | ### Update a book 149 | 150 | Request: 151 | ``` 152 | PUT http://localhost:8080/api/books/{ISBN} 153 | Body: 154 | { 155 | "title": "Game of thrones", 156 | "author": "George R.R. Martin", 157 | "isbn": "444444444", 158 | "description": "an incredible story with a lot of action" 159 | } 160 | ``` 161 | 162 | Response: 163 | ``` 164 | Status: 200 OK 165 | ``` 166 | 167 | ### Delete a book 168 | 169 | Request: 170 | ``` 171 | DELETE http://localhost:8080/api/books/{ISBN} 172 | ``` 173 | 174 | Response: 175 | ``` 176 | Status: 200 OK 177 | ``` 178 | 179 | ## Docker 180 | Docker must be installed, you can check that docker is running with: 181 | 182 | ```bash 183 | docker version 184 | ``` 185 | 186 | ### Dockerfile 187 | The Dockerfile is essentially the build instructions to build the image. 188 | 189 | ``` 190 | # golang docker base image 191 | FROM golang:1.12.0-alpine 192 | 193 | # install the gin web framework (and install required packages for executing `go get`) 194 | RUN apk update && apk upgrade && apk add --no-cache bash git 195 | RUN go get github.com/gin-gonic/gin 196 | 197 | # copy local sources into this docker image (defining an environment variable SOURCES) 198 | ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Frameworks/Gin-Web/ 199 | COPY . ${SOURCES} 200 | 201 | # change into the SOURCES directory and call `go build` 202 | RUN cd ${SOURCES} && CGO_ENABLED=0 go build 203 | 204 | # define the work directory 205 | WORKDIR ${SOURCES} 206 | 207 | # define the entry-poing command (it is, the Gin-Web executable after building the binary) 208 | CMD ${SOURCES}Gin-Web 209 | 210 | # expose port 8080 211 | EXPOSE 8080 212 | ``` 213 | 214 | ### docker-componse.yml 215 | docker-componse.yml is a config file for docker-compose. It allows to deploy, combine and configure multiple docker-container at the same time. 216 | 217 | ``` 218 | version: '3' 219 | 220 | services: 221 | microservice: 222 | build: . 
223 | image: gin-web:1.0.1 224 | environment: 225 | - PORT=8080 226 | ports: 227 | - "8080:8080" 228 | ``` 229 | 230 | ### Building docker image using docker-compose 231 | To list the docker images available: 232 | ``` 233 | docker images 234 | ``` 235 | 236 | To build a docker image: 237 | ``` 238 | $ docker-compose build 239 | Building microservice 240 | Step 1/9 : FROM golang:1.12.0-alpine 241 | ---> 2205a315f9c7 242 | Step 2/9 : RUN apk update && apk upgrade && apk add --no-cache bash git 243 | ---> Running in 571eec34615b 244 | fetch http://dl-cdn.alpinelinux.org/alpine/v3.9/main/x86_64/APKINDEX.tar.gz 245 | fetch http://dl-cdn.alpinelinux.org/alpine/v3.9/community/x86_64/APKINDEX.tar.gz 246 | v3.9.2-21-g3dda2a36ce [http://dl-cdn.alpinelinux.org/alpine/v3.9/main] 247 | v3.9.2-23-gb6bc53c8f8 [http://dl-cdn.alpinelinux.org/alpine/v3.9/community] 248 | OK: 9756 distinct packages available 249 | (1/2) Upgrading libcrypto1.1 (1.1.1a-r1 -> 1.1.1b-r1) 250 | (2/2) Upgrading libssl1.1 (1.1.1a-r1 -> 1.1.1b-r1) 251 | Executing ca-certificates-20190108-r0.trigger 252 | OK: 6 MiB in 15 packages 253 | fetch http://dl-cdn.alpinelinux.org/alpine/v3.9/main/x86_64/APKINDEX.tar.gz 254 | fetch http://dl-cdn.alpinelinux.org/alpine/v3.9/community/x86_64/APKINDEX.tar.gz 255 | (1/11) Installing ncurses-terminfo-base (6.1_p20190105-r0) 256 | (2/11) Installing ncurses-terminfo (6.1_p20190105-r0) 257 | (3/11) Installing ncurses-libs (6.1_p20190105-r0) 258 | (4/11) Installing readline (7.0.003-r1) 259 | (5/11) Installing bash (4.4.19-r1) 260 | Executing bash-4.4.19-r1.post-install 261 | (6/11) Installing nghttp2-libs (1.35.1-r0) 262 | (7/11) Installing libssh2 (1.8.0-r4) 263 | (8/11) Installing libcurl (7.64.0-r1) 264 | (9/11) Installing expat (2.2.6-r0) 265 | (10/11) Installing pcre2 (10.32-r1) 266 | (11/11) Installing git (2.20.1-r0) 267 | Executing busybox-1.29.3-r10.trigger 268 | OK: 29 MiB in 26 packages 269 | Removing intermediate container 571eec34615b 270 | ---> a1d03237249c 271 | Step 3/9 : RUN go get github.com/gin-gonic/gin 272 | ---> Running in f45a24075d94 273 | Removing intermediate container f45a24075d94 274 | ---> ab97154e9078 275 | Step 4/9 : ENV SOURCES /go/src/github.com/daniel-gil/advanced-go-microservices/Frameworks/Gin-Web/ 276 | ---> Running in 271f693279e9 277 | Removing intermediate container 271f693279e9 278 | ---> 76e0deae300b 279 | Step 5/9 : COPY . ${SOURCES} 280 | ---> 85527054db89 281 | Step 6/9 : RUN cd ${SOURCES} && CGO_ENABLED=0 go build 282 | ---> Running in 0766b6826bfc 283 | Removing intermediate container 0766b6826bfc 284 | ---> 02391963ea65 285 | Step 7/9 : WORKDIR ${SOURCES} 286 | ---> Running in 1e1d2249ec4b 287 | Removing intermediate container 1e1d2249ec4b 288 | ---> f3eea799c147 289 | Step 8/9 : CMD ${SOURCES}Gin-Web 290 | ---> Running in a91583983d32 291 | Removing intermediate container a91583983d32 292 | ---> 75b70c3fcb7a 293 | Step 9/9 : EXPOSE 8080 294 | ---> Running in c1a4034f9919 295 | Removing intermediate container c1a4034f9919 296 | ---> c7e5eedbebcc 297 | Successfully built c7e5eedbebcc 298 | Successfully tagged gin-web:1.0.1 299 | ``` 300 | 301 | Now we can check that the new docker image has been created: 302 | ```bash 303 | $ docker images 304 | REPOSITORY TAG IMAGE ID CREATED SIZE 305 | gin-web 1.0.1 c7e5eedbebcc About a minute ago 462MB 306 | ``` 307 | 308 | To run this docker image in background: 309 | ``` 310 | $ docker-compose up -d 311 | Recreating gin-web_microservice_1 ... 
done 312 | ``` 313 | 314 | List all docker containers running in the background: 315 | ``` 316 | $ docker ps 317 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 318 | c74b35148445 gin-web:1.0.1 "/bin/sh -c ${SOURCE…" 16 seconds ago Up 15 seconds 0.0.0.0:8080->8080/tcp gin-web_microservice_1 319 | ``` 320 | 321 | To stop everything we can: 322 | ``` 323 | $ docker-compose stop 324 | Stopping gin-web_microservice_1 ... done 325 | ``` 326 | 327 | ### Tagging and pushing to remote registry 328 | 329 | #### Tag 330 | We can tag a docker image (using the Docker Hub username) with: 331 | ``` 332 | docker tag gin-web:1.0.1 danigilmayol/gin-web:1.0.1 333 | ``` 334 | 335 | Once tagged, we can list the docker images again to verify that the newly tagged image is listed: 336 | ``` 337 | $ docker images 338 | REPOSITORY TAG IMAGE ID CREATED SIZE 339 | danigilmayol/gin-web 1.0.1 c7e5eedbebcc 14 minutes ago 462MB 340 | gin-web 1.0.1 c7e5eedbebcc 14 minutes ago 462MB 341 | ``` 342 | 343 | #### Push 344 | Now we can use the following command to push the tagged image to the Docker Hub repository (using the docker.io username): 345 | ``` 346 | $ docker push danigilmayol/gin-web:1.0.1 347 | The push refers to repository [docker.io/danigilmayol/gin-web] 348 | 2aa85349ad3e: Pushed 349 | 9b0c5f6a6b31: Pushed 350 | 4d4949d3b5fe: Pushed 351 | 0a8c8ab90b25: Pushed 352 | a124fdb5e3b1: Layer already exists 353 | dbf140e0475c: Layer already exists 354 | 41a6c6e6c287: Layer already exists 355 | 4afd4ab746df: Layer already exists 356 | bcf2f368fe23: Layer already exists 357 | 1.0.1: digest: sha256:bbb37c779a6eb997284db1af6d8c9993cb32709bda581b403c7a79065c4240a3 size: 2209 358 | ``` 359 | 360 | Now the docker image is available from Docker Hub. 361 | 362 | 363 | ## Kubernetes 364 | Kubernetes is a portable, extensible open-source platform for managing containerized workloads and services that facilitates both declarative configuration and automation. 365 | 366 | ### Concepts 367 | 368 | #### Node 369 | The `node` is a worker machine in Kubernetes (previously known as a `minion`). Each node has the services necessary to run pods and is managed by the master components. The services on a node include Docker, kubelet and kube-proxy. 370 | 371 | #### Kubelet 372 | The most important and most prominent controller in Kubernetes is the `Kubelet`, which is the primary implementer of the Pod and Node APIs that drive the container execution layer. It is the lowest level component in Kubernetes, and it communicates with the master node. 373 | 374 | A `Kubelet` is responsible for what’s running on an individual machine; it's a process watcher like supervisord, but focused on running containers. It has one job: given a set of containers to run, make sure they are all running. 375 | 376 | #### Pod 377 | The `pod` is the smallest deployable unit of computation within Kubernetes. The `pod` contains our containers (1 or more); it can be described using labels and exists on a node. 378 | 379 | #### Service 380 | The `service` is an abstraction for a logical collection of pods which handles requests. 381 | Those requests come either from inside the Kubernetes cluster (from one node to another) 382 | or from outside the cluster (public requests to our master node that want to hit a 383 | specific microservice). The `service` typically acts as a load balancer. 384 | 385 | It is accessible within the Kubernetes cluster using a DNS name. 
386 | 387 | ``` 388 | apiVersion: v1 389 | kind: Service 390 | metadata: 391 | name: gin-web 392 | labels: 393 | app: gin-web 394 | tier: service 395 | spec: 396 | # use NodePort here to be able to access a port on each node 397 | type: NodePort 398 | ports: 399 | - port: 9090 400 | selector: 401 | app: gin-web 402 | ``` 403 | 404 | #### Deployment 405 | The `deployment` defines a desired state, and Kubernetes interprets this deployment 406 | definition and does the rest for you. It also allows for declarative updates of the pods. 407 | 408 | ``` 409 | apiVersion: extensions/v1beta1 410 | kind: Deployment 411 | metadata: 412 | name: gin-web 413 | labels: 414 | app: gin-web 415 | spec: 416 | replicas: 3 417 | template: 418 | metadata: 419 | labels: 420 | app: gin-web 421 | tier: service 422 | spec: 423 | containers: 424 | - name: gin-web 425 | image: "gin-web:1.0.1" 426 | ports: 427 | - containerPort: 9090 428 | env: 429 | - name: PORT 430 | value: "9090" 431 | 432 | # define resource requests and limits 433 | resources: 434 | requests: 435 | memory: "64Mi" 436 | cpu: "125m" 437 | limits: 438 | memory: "128Mi" 439 | cpu: "250m" 440 | # check if gin-web is alive and healthy 441 | readinessProbe: 442 | httpGet: 443 | path: /ping 444 | port: 9090 445 | initialDelaySeconds: 5 446 | timeoutSeconds: 5 447 | livenessProbe: 448 | httpGet: 449 | path: /ping 450 | port: 9090 451 | initialDelaySeconds: 5 452 | timeoutSeconds: 5 453 | ``` 454 | 455 | 456 | #### Ingress 457 | The `ingress` allows external access from the outside world to defined services inside a Kubernetes cluster. 458 | 459 | ``` 460 | apiVersion: extensions/v1beta1 461 | kind: Ingress 462 | metadata: 463 | name: gin-web 464 | labels: 465 | app: gin-web 466 | tier: frontend 467 | spec: 468 | backend: 469 | serviceName: gin-web 470 | servicePort: 9090 471 | ``` 472 | 473 | #### Kubernetes Cluster 474 | A Kubernetes Cluster is composed of a `Master` and one or several `Node/Kubelet` instances. 475 | 476 | The Kubernetes `master` has replication controllers that make sure that the deployment 477 | definitions are satisfied. It also contains: 478 | - *Deployment history*: to facilitate 479 | rollbacks (scaling or reverting to the last replication controller). 480 | - *Service definition* for routing: it knows the cluster configuration (nodes and master) 481 | and which pods are running in each node. 482 | 483 | ### Minikube 484 | Minikube is a tool that makes it easy to run Kubernetes locally. Minikube runs a single-node Kubernetes cluster inside a VM on your laptop for users looking to try out Kubernetes or develop with it day-to-day. 485 | 486 | #### Installation 487 | To install minikube, we first need to install a hypervisor (e.g. VirtualBox) and then kubectl. For more information, check this link: https://kubernetes.io/docs/tasks/tools/install-minikube/. 488 | 489 | ##### VirtualBox 490 | Install VirtualBox from this link: https://www.virtualbox.org/wiki/Downloads. 491 | 492 | 493 | ##### kubectl 494 | 495 | To install kubectl run the following command: 496 | ``` 497 | brew install kubernetes-cli 498 | ``` 499 | 500 | Check if it has been installed correctly using: 501 | ``` 502 | kubectl version 503 | ``` 504 | 505 | ##### Minikube 506 | 507 | To install minikube, run the following command: 508 | 509 | ``` 510 | $ brew cask install minikube 511 | Updating Homebrew... 512 | ==> Auto-updated Homebrew! 513 | Updated 1 tap (homebrew/cask). 514 | No changes to formulae. 
515 | 516 | ==> Satisfying dependencies 517 | All Formula dependencies satisfied. 518 | ==> Downloading https://storage.googleapis.com/minikube/releases/v0.35.0/minikube-darwin-amd64 519 | ######################################################################## 100.0% 520 | ==> Verifying SHA-256 checksum for Cask 'minikube'. 521 | ==> Installing Cask minikube 522 | ==> Linking Binary 'minikube-darwin-amd64' to '/usr/local/bin/minikube'. 523 | 🍺 minikube was successfully installed! 524 | 525 | ``` 526 | 527 | 528 | #### Starting the cluster 529 | ``` 530 | $ minikube start 531 | 😄 minikube v0.35.0 on darwin (amd64) 532 | 🔥 Creating virtualbox VM (CPUs=2, Memory=2048MB, Disk=20000MB) ... 533 | 💿 Downloading Minikube ISO ... 534 | 184.42 MB / 184.42 MB [============================================] 100.00% 0s 535 | 💣 Unable to start VM: create: precreate: VBoxManage not found. Make sure VirtualBox is installed and VBoxManage is in the path 536 | 537 | 💡 Make sure to install all necessary requirements, according to the documentation: 538 | 👉 https://kubernetes.io/docs/tasks/tools/install-minikube/ 539 | zeus:~ danielgil$ minikube start 540 | 😄 minikube v0.35.0 on darwin (amd64) 541 | 🔥 Creating virtualbox VM (CPUs=2, Memory=2048MB, Disk=20000MB) ... 542 | 📶 "minikube" IP address is 192.168.99.100 543 | 🐳 Configuring Docker as the container runtime ... 544 | ✨ Preparing Kubernetes environment ... 545 | 💾 Downloading kubeadm v1.13.4 546 | 💾 Downloading kubelet v1.13.4 547 | 🚜 Pulling images required by Kubernetes v1.13.4 ... 548 | 🚀 Launching Kubernetes v1.13.4 using kubeadm ... 549 | ⌛ Waiting for pods: apiserver proxy etcd scheduler controller addon-manager dns 550 | 🔑 Configuring cluster permissions ... 551 | 🤔 Verifying component health ..... 552 | 💗 kubectl is now configured to use "minikube" 553 | 🏄 Done! Thank you for using minikube! 554 | ``` 555 | 556 | #### Check status 557 | To check if the minikube is running: 558 | 559 | ``` bash 560 | $ minikube status 561 | host: Running 562 | kubelet: Running 563 | apiserver: Running 564 | kubectl: Correctly Configured: pointing to minikube-vm at 192.168.99.100 565 | ``` 566 | 567 | #### Stop minikube 568 | ``` bash 569 | $ minikube stop 570 | ✋ Stopping "minikube" in virtualbox ... 571 | 🛑 "minikube" stopped. 572 | ``` 573 | 574 | #### Check IP Kubernetes is running 575 | 576 | Display the local IP Kubernetes is running on my machine: 577 | ``` 578 | $ minikube ip 579 | 192.168.99.100 580 | ``` 581 | 582 | To check that the Kubernetes master is running at this IP: 583 | ``` 584 | kubectl cluster-info 585 | Kubernetes master is running at https://192.168.99.100:8443 586 | KubeDNS is running at https://192.168.99.100:8443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy 587 | 588 | To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'. 589 | ``` 590 | 591 | #### Kubernetes dashboard 592 | 593 | To open a Kubernetes dashboard on a web browser run the following command: 594 | ``` 595 | $ minikube dashboard 596 | 🔌 Enabling dashboard ... 597 | 🤔 Verifying dashboard health ... 598 | 🚀 Launching proxy ... 599 | 🤔 Verifying proxy health ... 600 | 🎉 Opening http://127.0.0.1:51388/api/v1/namespaces/kube-system/services/http:kubernetes-dashboard:/proxy/ in your default browser... 601 | ``` 602 | 603 | #### kubectl (Kubernetes control) 604 | 605 | ##### Apply 606 | Apply a configuration to a resource by filename (pass the directory where our project has the Kubernetes YML files). 
607 | ``` 608 | $ kubectl apply -f kubernetes/ 609 | deployment.extensions "gin-web" created 610 | ingress.extensions "gin-web" created 611 | service "gin-web" created 612 | ``` 613 | 614 | ##### List deployments 615 | List the deployments in Kubernetes (we have 3 available because we specify 3 replicas): 616 | ``` 617 | $ kubectl get deployments 618 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE 619 | gin-web 3 3 3 3 1m 620 | ``` 621 | 622 | ##### List pods 623 | Display the list of pods: 624 | ``` 625 | $ kubectl get pods 626 | NAME READY STATUS RESTARTS AGE 627 | gin-web-7bfff6b569-fb6v2 1/1 Running 0 1m 628 | gin-web-7bfff6b569-l26bp 1/1 Running 0 1m 629 | gin-web-7bfff6b569-qnl8f 1/1 Running 0 1m 630 | ``` 631 | 632 | ##### Logs 633 | Display the logs of a pod: 634 | ``` 635 | $ kubectl logs gin-web-7bfff6b569-fb6v2 636 | [GIN-debug] [WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. 637 | 638 | [GIN-debug] [WARNING] Running in "debug" mode. Switch to "release" mode in production. 639 | - using env: export GIN_MODE=release 640 | - using code: gin.SetMode(gin.ReleaseMode) 641 | 642 | [GIN-debug] GET /ping --> main.main.func1 (3 handlers) 643 | [GIN-debug] GET /hello --> main.main.func2 (3 handlers) 644 | [GIN-debug] GET /api/books --> main.main.func3 (3 handlers) 645 | [GIN-debug] POST /api/books --> main.main.func4 (3 handlers) 646 | [GIN-debug] GET /api/books/:isbn --> main.main.func5 (3 handlers) 647 | [GIN-debug] PUT /api/books/:isbn --> main.main.func6 (3 handlers) 648 | [GIN-debug] DELETE /api/books/:isbn --> main.main.func7 (3 handlers) 649 | [GIN-debug] Loaded HTML Templates (2): 650 | - 651 | - index.html 652 | 653 | [GIN-debug] GET /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 654 | [GIN-debug] HEAD /favicon.ico --> github.com/gin-gonic/gin.(*RouterGroup).StaticFile.func1 (3 handlers) 655 | [GIN-debug] GET / --> main.main.func8 (3 handlers) 656 | [GIN-debug] Listening and serving HTTP on :9090 657 | [GIN] 2019/03/21 - 09:10:09 | 200 | 14.391µs | 172.17.0.1 | GET /ping 658 | [GIN] 2019/03/21 - 09:10:11 | 200 | 16.956µs | 172.17.0.1 | GET /ping 659 | [GIN] 2019/03/21 - 09:10:19 | 200 | 22.106µs | 172.17.0.1 | GET /ping 660 | [GIN] 2019/03/21 - 09:10:21 | 200 | 7.108µs | 172.17.0.1 | GET /ping 661 | ``` 662 | 663 | here we can see that Kubernetes pings our microservice regularly to check if it is alive and healthy. 664 | 665 | ##### List services 666 | Display the list of services: 667 | ``` 668 | $ kubectl get services 669 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 670 | gin-web NodePort 10.110.28.47 9090:31497/TCP 2d 671 | kubernetes ClusterIP 10.96.0.1 443/TCP 5d 672 | ``` 673 | 674 | ##### Access a service 675 | So we have the `gin-web` service running on port 9090 (and the node port 31497). To access to the service: 676 | ``` 677 | $ minikube service gin-web 678 | ``` 679 | and it will accesss this Kubernetes service from within a web browser. 
680 | 681 | ##### Scaling 682 | Rescale the number of replicas: 683 | ``` 684 | $ kubectl scale deployment gin-web --replicas=8 685 | ``` 686 | -------------------------------------------------------------------------------- /Images/building_blocks.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel-gil/advanced-go-microservices/5ebf9ff61d2acea6673e6ad0a5bd4e2b047f9449/Images/building_blocks.jpg -------------------------------------------------------------------------------- /Images/service_and_configmap.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/daniel-gil/advanced-go-microservices/5ebf9ff61d2acea6673e6ad0a5bd4e2b047f9449/Images/service_and_configmap.jpg -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Advanced Go Microservices 2 | Examples from linkedIn course [Advanced Cloud Native Go](https://www.linkedin.com/learning/advanced-cloud-native-go/). 3 | 4 | ## Introduction 5 | Building blocks and functions of a cloud native application platform 6 | 7 | ![Building blocks and functions of a cloud native application platform](./Images/building_blocks.jpg) 8 | 9 | ## Cloud Native Interactive Landscape 10 | 11 | The [Cloud Native Interactive Landscape](https://github.com/cncf/landscape) filters and sorts hundreds of projects and products, and shows details including GitHub stars, funding or market cap, first and last commits, contributor counts, headquarters location, and recent tweets. Check the current version of [landscape map](https://landscape.cncf.io/images/landscape.png). 12 | 13 | ## Sections 14 | 1. [Go Microservices Frameworks](./Frameworks/README.md): Gin-Gonic, Docker, Kubernetes, minikube. 15 | 2. Service discovery and configuration 16 | - [Service Discovery](./Discovery/Consul/README.md) 17 | - [Service Configuration](./Configuration/Consul/README.md) 18 | - [Implement Go Microservice Registration with Consul](./Discovery/Simple/server/README.md) 19 | - [Implement Go Microservice Lookup with Consul](./Discovery/Simple/client/README.md) 20 | - [Implement Go Microservice Discovery and Configuration with Kubernetes](./Discovery/Kubernetes/README.md) 21 | 3. Microservice Communication 22 | - [Implement Sync RPC calls with Binary Protocols](./Communication/Go-Micro/README.md) 23 | - [Using Circuit Breakers for Resilient Communication](./Communication/Go-Micro/client/README.md) 24 | - [Implement Message Queuing with RabbitMQ](./Communication/RabbitMQ/README.md) 25 | - [Implement Publish/Subscribe with Apache Kafka](./Communication/Kafka/README.md) --------------------------------------------------------------------------------