├── LICENSE ├── README.md ├── examples ├── README.md ├── jenkins │ ├── README.md │ ├── jenkins-service.json │ └── jenkins.json └── wordpress │ ├── README.md │ ├── mysql-service.json │ ├── mysql.json │ ├── wordpress-rc.json │ ├── wordpress-service.json │ └── wordpress.json ├── resources ├── hello │ ├── hello-pod.json │ ├── hello-rc.json │ └── hello-service.json └── todo │ ├── mysql-service.json │ ├── mysql.json │ ├── todo-rc.json │ └── todo-service.json └── src ├── hello └── hello.go └── todo └── todo.go /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) 2016 Harry Lawrence 3 | 4 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 5 | 6 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 7 | 8 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # An overview of Kubernetes 2 | 3 | Notice: This hasn't been updated in some time, and while most of it still 4 | remains relevant, there are certainly things in here I'd like to update to keep 5 | on track with the Kubernetes project. This is due to a) me being a bit lazy, and 6 | b) most importantly, the amazing contributors who hack away on Kubernetes. There 7 | is so much love for that project and we're seeing new versions and features 8 | being shipped frequently. I hope this keeps up. And I also hope to keep up with 9 | it at some point in regards to this project. Contributions are still and always 10 | welcome. If you're a Kubernetes contributor and you're reading this, thank you 11 | for everything. 12 | 13 | Recently I have been playing around with Kubernetes on Google Container Engine. 14 | At first it seemed pretty daunting to me and from what I've heard from a few 15 | other people it has been the same for them. But once you start getting used to 16 | how Kubernetes works and how you're supposed to use it, you realise how powerful 17 | it can be and how it can make your deployments seem almost effortless. The goal 18 | of this document is to go through Kubernetes step by step in such a way whereby 19 | the only prerequisite for you is that you understand what containers are and 20 | ideally, how Docker works. This is just Kubernetes in my own words. I hope you 21 | find it helpful. (WIP) 22 | 23 | ### So what is it? 24 | In short, Kubernetes is a set of tools and programs that give you higher level 25 | control of your cluster and everything running on it. Once Kubernetes is all set 26 | up on your cluster you can start pods, services, and have your containers all 27 | running in harmony. You can find out more about it, along with Google Container 28 | Engine [here][1]. 
29 | 30 | 31 | ### Up and running quickly 32 | A great way to get up and running quickly with Kubernetes is to get set up with 33 | Google Container Engine, which allows you to start up a cluster with everything 34 | working from the get go. You can manage aspects of this using the [gcloud][2] 35 | cli tool, which I'll be using in this brief introduction. You can also get up 36 | and running locally using Vagrant, or elsewhere as listed on the 37 | [getting started page][3]. 38 | 39 | For this tutorial I will be using the following: 40 | 41 | - Go 42 | - Docker / Docker Hub 43 | - A Linux environment 44 | - gcloud cli application /w kubectl 45 | - Google Container Engine / Kubernetes 46 | 47 | ### The application 48 | 49 | First, we need an application that we'd like to run on our cluster. We could 50 | use one that already exists, but for this introduction I'm going to create one 51 | using the [Go programming language][4]. 52 | 53 | [`hello.go`][8] 54 | ```go 55 | package main 56 | 57 | import ( 58 | "fmt" 59 | "log" 60 | "net/http" 61 | ) 62 | 63 | func main() { 64 | http.HandleFunc("/", handler) 65 | log.Fatal(http.ListenAndServe(":3000", nil)) 66 | } 67 | 68 | func handler(w http.ResponseWriter, r *http.Request) { 69 | fmt.Fprint(w, "Hello from Go!") 70 | } 71 | ``` 72 | 73 | Next, we'll compile this program making sure it is statically linked: 74 | 75 | Go 1.4: 76 | 77 | ``` 78 | $ CGO_ENABLED=0 GOOS=linux go build -o hello -a -installsuffix cgo . 79 | ``` 80 | 81 | Go 1.5: 82 | 83 | ``` 84 | $ CGO_ENABLED=0 GOOS=linux go build -o hello -a -tags netgo -ldflags '-w' . 85 | ``` 86 | 87 | We're now left with our `hello` binary. If you're already on Linux, you'll be 88 | able to test this. If not, you can boot up a VM and try it out there. We'll be 89 | using Docker next to create our container image. 
90 | 91 | As we already have our compiled application, our Dockerfile is going to be super 92 | simple: 93 | 94 | `Dockerfile` 95 | ``` 96 | FROM scratch 97 | ADD hello / 98 | CMD ["/hello"] 99 | ``` 100 | 101 | We can then build this and push it to the Docker registry: 102 | 103 | ``` 104 | $ docker build -t DOCKERHUB_USERNAME/hello:latest . 105 | $ docker push DOCKERHUB_USERNAME/hello 106 | ``` 107 | 108 | So now we have our image built and pushed, we should be able to test running it: 109 | 110 | ``` 111 | $ docker run -p 3000:3000 DOCKERHUB_USERNAME/hello 112 | ``` 113 | 114 | And to check that our application is working correctly: 115 | 116 | ``` 117 | $ curl localhost:3000 118 | Hello from Go! 119 | ``` 120 | 121 | ### Starting the cluster 122 | 123 | Now we have our container image pushed to the Docker hub, we can get a start on 124 | creating the cluster. 125 | 126 | Once you have gcloud installed, you'll be able to install kubectl: 127 | 128 | ``` 129 | $ gcloud components update kubectl 130 | ``` 131 | 132 | If you've set up Google Container Engine and have enabled billing etc... you 133 | should have a default project listed. You can access that [here][5]. 134 | 135 | NOTE: If you've not done this yet and feel uneasy about paying for something you 136 | might never use, Google (at least as of writing this) offer 60 days and $300 137 | worth of resources to use with Google Cloud Platform when you initially sign up, 138 | no strings attached. This is a total steal, just [go here][7] then click on 139 | Free Trial. 140 | 141 | Get the ID of the project and configure like so: 142 | 143 | ``` 144 | $ gcloud config set project PROJECT_ID 145 | ``` 146 | 147 | Next, set your default zone (you can learn about that [here][6]): 148 | 149 | ``` 150 | $ gcloud config set compute/zone ZONE 151 | ``` 152 | 153 | You can now start the cluster. I'll just be using the default settings here, but 154 | you can use flags to customise various aspects of it. 
155 | 156 | ``` 157 | $ gcloud container clusters create helloapp 158 | ``` 159 | 160 | After a few moments, your cluster will be created! You should see something like 161 | this: 162 | 163 | ``` 164 | NAME ZONE MASTER_VERSION MASTER_IP MACHINE_TYPE NODE_VERSION NUM_NODES STATUS 165 | helloapp europe-west1-b 1.1.7 10.10.10.10 n1-standard-1 1.1.7 3 RUNNING 166 | ``` 167 | 168 | Now we're ready to start deploying our application. 169 | 170 | ### Pods 171 | 172 | This is where things might start to seem a little daunting if you're new to 173 | Kubernetes. But once you get used to it, it's really not that bad. Kubernetes 174 | has this concept of pods. Pods are basically a collection of 1 or more 175 | containers, it's that simple. Within a pod you can allocate resources and limits 176 | to what the containers have access to. 177 | 178 | Kubernetes makes it easy for us to create multiple pods of the same application 179 | and sit them behind a load balancer. We can also tell Kubernetes that at any time 180 | we'd like 3 instances of our app to be running. Or 4, or 5. It doesn't really 181 | matter too much at this point. What does matter though, is that Kubernetes will 182 | make sure that however many you specify to be running at one time, it will 183 | stick to that, by means of creating new pods if one were to go down, or to bring 184 | a pod down if at any time there are too many running. I'll get onto that shortly. 185 | 186 | First though, let's create a single pod. We'll do this by use of a JSON 187 | document. You may also use YAML if you prefer that. 
188 | 189 | [`hello-pod.json`][9] 190 | ```json 191 | { 192 | "apiVersion": "v1", 193 | "kind": "Pod", 194 | "metadata": { 195 | "name": "hello", 196 | "labels": { 197 | "name": "hello" 198 | } 199 | }, 200 | "spec": { 201 | "containers": [ 202 | { 203 | "name": "hello", 204 | "image": "DOCKERHUB_USERNAME/hello:latest", 205 | "ports": [ 206 | { 207 | "name": "http", 208 | "containerPort": 3000, 209 | "protocol": "TCP" 210 | } 211 | ] 212 | } 213 | ] 214 | } 215 | } 216 | ``` 217 | 218 | Let's go over this a little. So we're specifying the API version we'll be using 219 | at the top here. In this example this is simply just `v1`. We need to tell 220 | Kubernetes what it is we'd like to create, in this case it's a `Pod`, which is 221 | the value to `kind`. 222 | 223 | As you can see there is also a metadata section, where we can name this pod, 224 | and provide it some key / value pairs under labels. Labels, as you'll come to 225 | see are pretty important, as we can select resources to be ran by using them, 226 | as opposed to just a name. 227 | 228 | And finally the `spec`, which is where we list our containers to be ran in this 229 | pod. We're only going to be using the one. This will be pulled from the Docker 230 | registry that we pushed to earlier on. 231 | 232 | We then specify the ports, and if you remember from earlier our Go application 233 | runs on port 3000, so we'll use that here also. 234 | 235 | Let's now create this pod: 236 | 237 | ``` 238 | $ kubectl create -f hello-pod.json 239 | pod "hello" created 240 | ``` 241 | 242 | If we list our pods, the newly created one should be there: 243 | 244 | ``` 245 | $ kubectl get pods 246 | NAME READY STATUS RESTARTS AGE 247 | hello 1/1 Running 0 1m 248 | ``` 249 | 250 | So now that is up and running, we can move on to services. 251 | 252 | ### Services 253 | 254 | So what is a service and why do we need them? As far as Kubernetes is concerned, 255 | a service is basically a named load balancer. 
In essence this means that we 256 | could have multiple pods running on our cluster, and have our service use the 257 | metadata to select the pods that are relevant. You will then be able to hit the 258 | application that is running inside one of your pods via the public IP of the 259 | service. 260 | 261 | So let's create the service: 262 | 263 | [`hello-service.json`][10] 264 | ```json 265 | { 266 | "apiVersion": "v1", 267 | "kind": "Service", 268 | "metadata": { 269 | "name": "hello-service", 270 | "labels": { 271 | "name": "hello-service" 272 | } 273 | }, 274 | "spec": { 275 | "type": "LoadBalancer", 276 | "ports": [ 277 | { 278 | "name": "http", 279 | "port": 80, 280 | "targetPort": 3000 281 | } 282 | ], 283 | "selector": { 284 | "name": "hello" 285 | } 286 | } 287 | } 288 | ``` 289 | 290 | You can see here that we're using the `selector` key to find anything using the 291 | specified metadata key / value pairs. In this case, something with the `name`, 292 | `hello`. The target port of this pod is 3000, as specified in the pod file, but 293 | we would like to run that on port 80, so it's more accessible. With this kind of 294 | service, the `type` needs to be specified as `LoadBalancer`. This allows it to 295 | be publicly accessible to the outside world via IP address. 296 | 297 | So with both our `hello-pod.json` and our `hello-service.json`, we're now able 298 | to create them on the cluster: 299 | 300 | ``` 301 | $ kubectl create -f hello-service.json 302 | service "hello-service" created 303 | ``` 304 | 305 | We can also list our services, and should see this new one appear: 306 | 307 | ``` 308 | $ kubectl get services 309 | NAME CLUSTER_IP EXTERNAL_IP PORT(S) SELECTOR AGE 310 | hello-service 10.143.241.227 80/TCP name=hello 44s 311 | kubernetes 10.143.240.1 443/TCP 8m 312 | ``` 313 | 314 | It may take a few moments, but once the service is up and running, you should be 315 | able to visit the external IP address and see the application up and running. 
316 | You can find out what that is by running the following: 317 | 318 | ``` 319 | $ kubectl describe services hello-service 320 | ``` 321 | 322 | It should look a little like this: 323 | 324 | ``` 325 | Name: hello-service 326 | Namespace: default 327 | Labels: name=hello-service 328 | Selector: name=hello 329 | Type: LoadBalancer 330 | IP: 10.143.241.227 331 | LoadBalancer Ingress: 104.155.115.84 332 | Port: http 80/TCP 333 | NodePort: http 30962/TCP 334 | Endpoints: 10.140.1.3:3000 335 | Session Affinity: None 336 | ``` 337 | 338 | If you try going to the IP address listed as `LoadBalancer Ingress:` in your 339 | web browser, you should see the application running live. 340 | 341 | ``` 342 | $ curl 104.155.115.84 343 | Hello from Go! 344 | ``` 345 | 346 | ### Replication Controllers 347 | 348 | A replication controller is responsible for keeping a defined amount of pods 349 | running at any given time. You may have your replication controller create 4 350 | pods for example. If one goes down, the controller will be sure to start another 351 | one. If for some reason a 5th one appears, it will kill one to bring it back 352 | down to 4. It's a pretty straight-forward concept, and better yet we can make 353 | use of our pod files we made earlier to create our first replication 354 | controller. 
355 | 356 | [`hello-rc.json`][11] 357 | ```json 358 | { 359 | "apiVersion": "v1", 360 | "kind": "ReplicationController", 361 | "metadata": { 362 | "name": "hello-rc", 363 | "labels": { 364 | "name": "hello-rc" 365 | } 366 | }, 367 | "spec": { 368 | "replicas": 3, 369 | "selector": { 370 | "name": "hello" 371 | }, 372 | "template": { 373 | "metadata": { 374 | "name": "hello", 375 | "labels": { 376 | "name": "hello" 377 | } 378 | }, 379 | "spec": { 380 | "containers": [ 381 | { 382 | "name": "hello", 383 | "image": "DOCKERHUB_USERNAME/hello:latest", 384 | "ports": [ 385 | { 386 | "name": "http", 387 | "containerPort": 3000, 388 | "protocol": "TCP" 389 | } 390 | ] 391 | } 392 | ] 393 | } 394 | } 395 | } 396 | } 397 | ``` 398 | 399 | So if you look beyond `template`, you'll see the same pod that we created 400 | earlier on. If the replication controller is going to start and maintain a 401 | number of pods, it needs a template to know what our pods will look like. 402 | 403 | Above `template`, you'll see things specific to the controller itself. Notably, 404 | the `replicas` key which defines how many of the pods specified below should 405 | be running. It also uses a selector, which is again linked with the metadata 406 | provided inside the pod. So this replication controller is going to look for 407 | anything with `name` as `hello`. Above that you'll also notice keys which so 408 | far we've specified in each of our other files too. 409 | 410 | So at this point we can kill the pod we created earlier on: 411 | 412 | ``` 413 | $ kubectl delete pods hello 414 | ``` 415 | 416 | and then create our replication controller: 417 | 418 | ``` 419 | $ kubectl create -f hello-rc.json 420 | ``` 421 | 422 | After that has started, you should be able to see 3 of our `hello` pods running: 423 | 424 | ``` 425 | $ kubectl get pods 426 | ``` 427 | 428 | The `hello-service` should also still be running, serving incoming requests to 429 | each of your newly created pods. 
We can see the replication controller in action 430 | if we manually try to delete a pod: 431 | 432 | ``` 433 | $ kubectl delete pods POD_ID 434 | ``` 435 | 436 | and then quickly run: 437 | 438 | ``` 439 | $ kubectl get pods 440 | ``` 441 | 442 | To see what is going on under the hood. 443 | 444 | ### Scaling 445 | 446 | So with our replication controller handling our pods and our service balancing 447 | the load, we can now think about scaling. As of now, we haven't specified any 448 | limits for the pods, I will come to that shortly. So let's create a scenario: 449 | 450 | You deploy your application. You write a blog post about it and send out a few 451 | tweets. The traffic is on the rise. Your app is beginning to struggle with the 452 | load. We need to create more pods to even the load out on the cluster. We're 453 | going to do this with just one simple command and change from having 3 pods to 454 | 5 pods: 455 | 456 | ``` 457 | $ kubectl scale --replicas=5 -f hello-rc.json 458 | ``` 459 | 460 | You should now be able to run: 461 | 462 | ``` 463 | $ kubectl get pods 464 | ``` 465 | 466 | to see the two newly created ones: 467 | 468 | ``` 469 | $ kubectl get pods 470 | NAME READY STATUS RESTARTS AGE 471 | hello-zndh4s 1/1 Running 0 21m 472 | hello-j8sdgd 1/1 Running 0 21m 473 | hello-8sjn4h 1/1 Running 0 21m 474 | hello-ka98ah 1/1 Running 0 1m 475 | hello-qjwh37 1/1 Running 0 1m 476 | ``` 477 | 478 | This works both ways. Eventually the traffic might subside, and you can then 479 | bring it back down to 3 pods. 480 | 481 | ``` 482 | $ kubectl scale --replicas=3 -f hello-rc.json 483 | ``` 484 | 485 | ### Persistent Storage 486 | 487 | NOTE: To go through this next part, you may want to delete all the pods and 488 | services currently running, first: 489 | 490 | ``` 491 | $ kubectl delete services hello-service 492 | $ kubectl delete rc hello-rc 493 | ``` 494 | 495 | Something your application may need is storage. 
You may be dealing with file 496 | uploads, or a database. Although containers should be seen as being fairly 497 | disposable, your storage / data should remain. GCE makes this really easy for us 498 | as all we need to do is to create a disk, and then tell our containers where 499 | that disk is. 500 | 501 | So let's make a disk first of all: 502 | 503 | ``` 504 | $ gcloud compute disks create mysql-disk 505 | ``` 506 | 507 | By default, this will create a 500GB disk. You can change this among various 508 | other settings by passing flags to that command. 509 | 510 | Within the scope of `containers` in either your pod or ReplicationController 511 | file, you can add another section called `volumeMounts`. Here, you are able to 512 | specify where inside your container you'd like to mount. 513 | 514 | I've created a new application called todo.go. It is based off our original 515 | hello.go application, only it actually does something this time: 516 | 517 | [`todo.go`][12] 518 | ```go 519 | package main 520 | 521 | import ( 522 | "database/sql" 523 | "fmt" 524 | "html/template" 525 | "log" 526 | "net/http" 527 | 528 | _ "github.com/go-sql-driver/mysql" 529 | ) 530 | 531 | const dbHost = "mysql-service:3306" 532 | const dbUser = "root" 533 | const dbPassword = "password" 534 | const dbName = "todo" 535 | 536 | func main() { 537 | http.HandleFunc("/", todoList) 538 | http.HandleFunc("/save", saveItem) 539 | log.Fatal(http.ListenAndServe(":3000", nil)) 540 | } 541 | 542 | // Todo represents a single 'todo', or item. 543 | type Todo struct { 544 | ID int 545 | Item string 546 | } 547 | 548 | // todoList shows the todo list along with the form to add a new item to the 549 | // list. 550 | func todoList(w http.ResponseWriter, r *http.Request) { 551 | tmpl := ` 552 | 553 | 554 | List of items 555 | 556 | 557 |

Todo list:

558 |
559 | Add item:
560 |

561 |
562 | 567 | 568 | 569 | ` 570 | t, err := template.New("todolist").Parse(tmpl) 571 | if err != nil { 572 | log.Fatal(err) 573 | } 574 | 575 | dbc := db() 576 | rows, err := dbc.Query("SELECT * FROM items") 577 | if err != nil { 578 | log.Fatal(err) 579 | } 580 | 581 | todos := []Todo{} 582 | for rows.Next() { 583 | todo := Todo{} 584 | err = rows.Scan(&todo.ID, &todo.Item) 585 | todos = append(todos, todo) 586 | } 587 | 588 | data := struct { 589 | TodoItems []Todo 590 | }{ 591 | TodoItems: todos, 592 | } 593 | 594 | t.Execute(w, data) 595 | } 596 | 597 | // saveItem saves a new todo item and then redirects the user back to the list 598 | func saveItem(w http.ResponseWriter, r *http.Request) { 599 | dbc := db() 600 | stmt, err := dbc.Prepare("INSERT items SET item=?") 601 | if err != nil { 602 | log.Fatal(err) 603 | } 604 | _, err = stmt.Exec(r.FormValue("item")) 605 | if err != nil { 606 | log.Fatal(err) 607 | } 608 | http.Redirect(w, r, "/", 301) 609 | } 610 | 611 | // db creates a connection to the database and creates the items table if it 612 | // does not already exist. 613 | func db() *sql.DB { 614 | connStr := fmt.Sprintf( 615 | "%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local", 616 | dbUser, 617 | dbPassword, 618 | dbHost, 619 | dbName, 620 | ) 621 | db, err := sql.Open("mysql", connStr) 622 | if err != nil { 623 | log.Fatal(err) 624 | } 625 | _, err = db.Exec(`CREATE TABLE IF NOT EXISTS items( 626 | id integer NOT NULL AUTO_INCREMENT, 627 | item varchar(255), 628 | PRIMARY KEY (id) 629 | )`) 630 | return db 631 | } 632 | 633 | ``` 634 | 635 | Now we can go through similar steps from earlier to create our binary and Docker 636 | container image. This one will be called `todo`. 637 | 638 | ``` 639 | $ go get -u github.com/go-sql-driver/mysql 640 | $ CGO_ENABLED=0 GOOS=linux go build -o todo -a -tags netgo -ldflags '-w' . 
641 | ``` 642 | 643 | `Dockerfile` 644 | ``` 645 | FROM scratch 646 | ADD todo / 647 | CMD ["/todo"] 648 | ``` 649 | 650 | ``` 651 | $ docker build -t DOCKERHUB_USERNAME/todo:latest . 652 | $ docker push DOCKERHUB_USERNAME/todo 653 | ``` 654 | 655 | That is our container image pushed to the Docker Hub. Now onto defining our 656 | replication controller and services. Seeing as we'll be using MySQL this time, 657 | we'll be creating a separate disk using the gcloud cli tool. This is what we'll 658 | use when configuring the `volumes` for our containers. 659 | 660 | ``` 661 | $ gcloud compute disks create mysql-disk 662 | ``` 663 | 664 | `mysql-disk` is going to be the name of it. This is important as it is used 665 | to reference the disk in our JSON files. Once that is done, we can go ahead and 666 | create the MySQL pod. 667 | 668 | [`mysql.json`][13] 669 | ```json 670 | { 671 | "apiVersion": "v1", 672 | "kind": "Pod", 673 | "metadata": { 674 | "name": "mysql", 675 | "labels": { 676 | "name": "mysql" 677 | } 678 | }, 679 | "spec": { 680 | "containers": [ 681 | { 682 | "name": "mysql", 683 | "image": "mysql:5.6", 684 | "env": [ 685 | { 686 | "name": "MYSQL_ROOT_PASSWORD", 687 | "value": "password" 688 | }, 689 | { 690 | "name": "MYSQL_DATABASE", 691 | "value": "todo" 692 | } 693 | ], 694 | "ports": [ 695 | { 696 | "name": "mysql", 697 | "protocol": "TCP", 698 | "containerPort": 3306 699 | } 700 | ], 701 | "volumeMounts": [ 702 | { 703 | "name": "mysql-storage", 704 | "mountPath": "/var/lib/mysql" 705 | } 706 | ] 707 | } 708 | ], 709 | "volumes": [ 710 | { 711 | "name": "mysql-storage", 712 | "gcePersistentDisk": { 713 | "pdName": "mysql-disk", 714 | "fsType": "ext4" 715 | } 716 | } 717 | ] 718 | } 719 | } 720 | 721 | ``` 722 | 723 | As you can see in this file, we specify a few more things than in our original 724 | hello pod. We'll be using the official `mysql:5.6` container image from Docker 725 | Hub. 
Environment variables are set to configure various aspects of how this pod 726 | will run. You can find documentation for these at the Docker Hub on the MySQL 727 | page. 728 | 729 | We'll just be setting the basics, `MYSQL_ROOT_PASSWORD` and `MYSQL_DATABASE`. 730 | This will give us access to our `todo` database as the `root` user. The next new 731 | thing here are the `volumeMounts` and `volumes` keys. We give each volume mount 732 | a name and path. The name is what is then used by the `volumes` as a reference 733 | to it. The mount path is just where on the container you'll mount to. 734 | 735 | Outside of the scope of our containers, we can specify where our volume mounts 736 | will be. In this case, we'll be using the newly created `mysql-disk` from 737 | earlier, and defining the file system type to `ext4`. Now we can start the pod: 738 | 739 | ``` 740 | $ kubectl create -f mysql.json 741 | ``` 742 | 743 | Next, we'll create a service for this pod. Like earlier, our service is going to 744 | be acting as a load balancer. We'll say what port we'd like to listen on, along 745 | with the target port of the running container: 746 | 747 | [`mysql-service.json`][14] 748 | ```json 749 | { 750 | "apiVersion": "v1", 751 | "kind": "Service", 752 | "metadata": { 753 | "name": "mysql-service", 754 | "labels": { 755 | "name": "mysql" 756 | } 757 | }, 758 | "spec": { 759 | "selector": { 760 | "name": "mysql" 761 | }, 762 | "ports": [ 763 | { 764 | "protocol": "TCP", 765 | "port": 3306, 766 | "targetPort": 3306 767 | } 768 | ] 769 | } 770 | } 771 | ``` 772 | 773 | We can now start this: 774 | 775 | ``` 776 | $ kubectl create -f mysql-service.json 777 | ``` 778 | 779 | Take note of the `name` key in this file. Google Container Engine comes with 780 | a DNS service already running, which means pods are able to access each other 781 | using the value to `name` as the host. If you noticed in our Go program, we 782 | specify the host as `mysql-service:3306`. 
This can also be done with environment 783 | variables. I'll go into detail with that another time. 784 | 785 | With MySQL running, we should be able to start our todo application now. For 786 | this example I'll be using a replication controller rather than just a single 787 | pod definition: 788 | 789 | [`todo-rc.json`][15] 790 | ```json 791 | { 792 | "apiVersion": "v1", 793 | "kind": "ReplicationController", 794 | "metadata": { 795 | "name": "todo-rc", 796 | "labels": { 797 | "name": "todo-rc" 798 | } 799 | }, 800 | "spec": { 801 | "replicas": 3, 802 | "selector": { 803 | "name": "todo" 804 | }, 805 | "template": { 806 | "metadata": { 807 | "name": "todo", 808 | "labels": { 809 | "name": "todo" 810 | } 811 | }, 812 | "spec": { 813 | "containers": [ 814 | { 815 | "name": "todo", 816 | "image": "DOCKERHUB_USERNAME/todo:latest", 817 | "ports": [ 818 | { 819 | "name": "http", 820 | "containerPort": 3000, 821 | "protocol": "TCP" 822 | } 823 | ] 824 | } 825 | ] 826 | } 827 | } 828 | } 829 | } 830 | ``` 831 | 832 | You'll notice this file is very similar to our hello app replication controller. 833 | We can start this now: 834 | 835 | ``` 836 | $ kubectl create -f todo-rc.json 837 | ``` 838 | 839 | And the service, with `"type": "LoadBalancer"` to expose a public IP: 840 | 841 | [`todo-service.json`][16] 842 | ```json 843 | { 844 | "apiVersion": "v1", 845 | "kind": "Service", 846 | "metadata": { 847 | "name": "todo-service", 848 | "labels": { 849 | "name": "todo-service" 850 | } 851 | }, 852 | "spec": { 853 | "type": "LoadBalancer", 854 | "ports": [ 855 | { 856 | "name": "http", 857 | "port": 80, 858 | "targetPort": 3000 859 | } 860 | ], 861 | "selector": { 862 | "name": "todo" 863 | } 864 | } 865 | } 866 | ``` 867 | 868 | The same as always: 869 | 870 | ``` 871 | $ kubectl create -f todo-service.json 872 | ``` 873 | 874 | Once the service has finished creating the load balancer, you can head over to 875 | the public IP to see your application running. 
876 | 877 | More coming soon... 878 | 879 | ### Questions and contributions 880 | 881 | If you have any questions, feel free to open an issue. Contributions are more 882 | than welcome, especially if you think there is something that can be explained 883 | more clearly or see any mistakes. 884 | 885 | ### License 886 | 887 | This project uses the MIT license. 888 | 889 | [1]: https://cloud.google.com/container-engine/docs/ 890 | [2]: https://cloud.google.com/sdk/gcloud/ 891 | [3]: http://kubernetes.io/gettingstarted/ 892 | [4]: http://golang.org 893 | [5]: https://console.cloud.google.com/project 894 | [6]: https://cloud.google.com/compute/docs/zones#available 895 | [7]: https://cloud.google.com/ 896 | [8]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/src/hello/hello.go 897 | [9]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/hello/hello-pod.json 898 | [10]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/hello/hello-service.json 899 | [11]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/hello/hello-rc.json 900 | [12]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/src/todo/todo.go 901 | [13]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/todo/mysql.json 902 | [14]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/todo/mysql-service.json 903 | [15]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/todo/todo-rc.json 904 | [16]: https://raw.githubusercontent.com/hazbo/kubernetes-overview/master/resources/todo/todo-service.json 905 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | ### Examples 2 | 3 | I will be adding examples here from time to time, that are not spoken about on 4 | the main README. 
You should use these examples as guides only. I will document 5 | each example on how it can be used. 6 | -------------------------------------------------------------------------------- /examples/jenkins/README.md: -------------------------------------------------------------------------------- 1 | ### Jenkins on GCE 2 | 3 | This is a very basic guide that I'll keep coming back to with improvements, but 4 | it should give you a start with getting Jenkins running on GCE. 5 | 6 | Create your cluster if you have not done so already: 7 | 8 | ``` 9 | $ gcloud container clusters create jenkins 10 | ``` 11 | 12 | Deploy your containers: 13 | 14 | ``` 15 | $ kubectl create -f jenkins.json 16 | $ kubectl create -f jenkins-service.json 17 | ``` 18 | 19 | Check your pods and services: 20 | 21 | ``` 22 | $ kubectl get pods 23 | $ kubectl get services 24 | ``` 25 | 26 | Wait for the load balancer to be created. You can keep checking for the public 27 | IP by doing the following: 28 | 29 | ``` 30 | $ kubectl describe services jenkins-service | grep "LoadBalancer Ingress" 31 | ``` 32 | 33 | Head over to that IP address in your web browser. 34 | 35 | Done! 
36 | -------------------------------------------------------------------------------- /examples/jenkins/jenkins-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "jenkins-service", 6 | "labels": { 7 | "name": "jenkins-service" 8 | } 9 | }, 10 | "spec": { 11 | "type": "LoadBalancer", 12 | "selector": { 13 | "name": "jenkins" 14 | }, 15 | "ports": [ 16 | { 17 | "protocol": "TCP", 18 | "port": 80, 19 | "targetPort": 8080 20 | } 21 | ] 22 | } 23 | } -------------------------------------------------------------------------------- /examples/jenkins/jenkins.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "jenkins", 6 | "labels": { 7 | "name": "jenkins" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "name": "jenkins", 14 | "image": "jenkins:latest", 15 | "ports": [ 16 | { 17 | "name": "http", 18 | "protocol": "TCP", 19 | "containerPort": 8080 20 | }, 21 | { 22 | "name": "build-agents", 23 | "protocol": "TCP", 24 | "containerPort": 50000 25 | } 26 | ] 27 | } 28 | ] 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /examples/wordpress/README.md: -------------------------------------------------------------------------------- 1 | ### Wordpress on GCE 2 | 3 | Create your cluster if you have not done so already: 4 | 5 | ``` 6 | $ gcloud container clusters create wordpress 7 | ``` 8 | 9 | Create a persistent storage disk for MySQL, called `mysql-disk`: 10 | 11 | ``` 12 | $ gcloud compute disks create mysql-disk 13 | ``` 14 | 15 | Deploy your containers: 16 | 17 | ``` 18 | $ kubectl create -f mysql.json 19 | $ kubectl create -f mysql-service.json 20 | $ kubectl create -f wordpress-rc.json 21 | $ kubectl create -f wordpress-service.json 22 | ``` 23 | 24 | Check your pods and 
services: 25 | 26 | ``` 27 | $ kubectl get pods 28 | $ kubectl get services 29 | ``` 30 | 31 | After a few moments your load balancer should have been created. You can check 32 | up on that like so: 33 | 34 | ``` 35 | $ kubectl describe services wordpress-service 36 | ``` 37 | 38 | Look for the "LoadBalancer Ingress" key. You can do this from the output of the 39 | above command or by using grep to just get the IP: 40 | 41 | ``` 42 | $ kubectl describe services wordpress-service | grep "LoadBalancer Ingress" 43 | ``` 44 | 45 | Head over to that IP address in your web browser to go through the Wordpress 46 | installation. 47 | 48 | And you're done! 49 | 50 | ##### Next 51 | 52 | Okay so the above will have set your Wordpress site up. There are things you 53 | will want to do to tweak this though. The disk that we initially set up will be 54 | 500GB by default. You should change this to suit your needs. 55 | 56 | The same goes for creating your cluster. This uses all the default settings. 57 | While this will work just fine, you should refine it to work for you. 58 | 59 | As for the JSON documents, these will also work, but use `root` and `password` 60 | for your database, so this would need to be changed for your own site. Along 61 | with that are a bunch of other environment variables that can be found on 62 | the Wordpress Docker Hub page, the same goes for MySQL. 63 | 64 | Although `wordpress.json` is not used in this example, you may use it instead of 65 | the replication controller if you wish. The choice is yours. 
66 | -------------------------------------------------------------------------------- /examples/wordpress/mysql-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "mysql-service", 6 | "labels": { 7 | "name": "mysql" 8 | } 9 | }, 10 | "spec": { 11 | "selector": { 12 | "name": "mysql" 13 | }, 14 | "ports": [ 15 | { 16 | "protocol": "TCP", 17 | "port": 3306, 18 | "targetPort": 3306 19 | } 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /examples/wordpress/mysql.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "mysql", 6 | "labels": { 7 | "name": "mysql" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "name": "mysql", 14 | "image": "mysql:5.6", 15 | "env": [ 16 | { 17 | "name": "MYSQL_ROOT_PASSWORD", 18 | "value": "password" 19 | }, 20 | { 21 | "name": "MYSQL_DATABASE", 22 | "value": "wordpress" 23 | } 24 | ], 25 | "ports": [ 26 | { 27 | "name": "mysql", 28 | "protocol": "TCP", 29 | "containerPort": 3306 30 | } 31 | ], 32 | "volumeMounts": [ 33 | { 34 | "name": "mysql-storage", 35 | "mountPath": "/var/lib/mysql" 36 | } 37 | ] 38 | } 39 | ], 40 | "volumes": [ 41 | { 42 | "name": "mysql-storage", 43 | "gcePersistentDisk": { 44 | "pdName": "mysql-disk", 45 | "fsType": "ext4" 46 | } 47 | } 48 | ] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /examples/wordpress/wordpress-rc.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ReplicationController", 4 | "metadata": { 5 | "name": "wordpress-rc", 6 | "labels": { 7 | "name": "wordpress-rc" 8 | } 9 | }, 10 | "spec": { 11 | "replicas": 3, 12 | "selector": { 13 | "name": "wordpress" 14 | }, 
15 | "template": { 16 | "metadata": { 17 | "name": "wordpress", 18 | "labels": { 19 | "name": "wordpress" 20 | } 21 | }, 22 | "spec": { 23 | "containers": [ 24 | { 25 | "name": "wordpress", 26 | "image": "wordpress:latest", 27 | "ports": [ 28 | { 29 | "name": "http", 30 | "protocol": "TCP", 31 | "containerPort": 80 32 | } 33 | ], 34 | "env": [ 35 | { 36 | "name": "WORDPRESS_DB_HOST", 37 | "value": "mysql-service:3306" 38 | }, 39 | { 40 | "name": "WORDPRESS_DB_USER", 41 | "value": "root" 42 | }, 43 | { 44 | "name": "WORDPRESS_DB_PASSWORD", 45 | "value": "password" 46 | } 47 | ] 48 | } 49 | ] 50 | } 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /examples/wordpress/wordpress-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "wordpress-service", 6 | "labels": { 7 | "name": "wordpress-service" 8 | } 9 | }, 10 | "spec": { 11 | "type": "LoadBalancer", 12 | "selector": { 13 | "name": "wordpress" 14 | }, 15 | "ports": [ 16 | { 17 | "protocol": "TCP", 18 | "port": 80, 19 | "targetPort": 80 20 | } 21 | ] 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /examples/wordpress/wordpress.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "wordpress", 6 | "labels": { 7 | "name": "wordpress" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "name": "wordpress", 14 | "image": "wordpress:latest", 15 | "ports": [ 16 | { 17 | "name": "http", 18 | "protocol": "TCP", 19 | "containerPort": 80 20 | } 21 | ], 22 | "env": [ 23 | { 24 | "name": "WORDPRESS_DB_HOST", 25 | "value": "mysql-service:3306" 26 | }, 27 | { 28 | "name": "WORDPRESS_DB_USER", 29 | "value": "root" 30 | }, 31 | { 32 | "name": "WORDPRESS_DB_PASSWORD", 33 | "value": 
"password" 34 | } 35 | ] 36 | } 37 | ] 38 | } 39 | } -------------------------------------------------------------------------------- /resources/hello/hello-pod.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "hello", 6 | "labels": { 7 | "name": "hello" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "name": "hello", 14 | "image": "DOCKERHUB_USERNAME/hello:latest", 15 | "ports": [ 16 | { 17 | "name": "http", 18 | "containerPort": 3000, 19 | "protocol": "TCP" 20 | } 21 | ] 22 | } 23 | ] 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /resources/hello/hello-rc.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ReplicationController", 4 | "metadata": { 5 | "name": "hello-rc", 6 | "labels": { 7 | "name": "hello-rc" 8 | } 9 | }, 10 | "spec": { 11 | "replicas": 3, 12 | "selector": { 13 | "name": "hello" 14 | }, 15 | "template": { 16 | "metadata": { 17 | "name": "hello", 18 | "labels": { 19 | "name": "hello" 20 | } 21 | }, 22 | "spec": { 23 | "containers": [ 24 | { 25 | "name": "hello", 26 | "image": "DOCKERHUB_USERNAME/hello:latest", 27 | "ports": [ 28 | { 29 | "name": "http", 30 | "containerPort": 3000, 31 | "protocol": "TCP" 32 | } 33 | ] 34 | } 35 | ] 36 | } 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /resources/hello/hello-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "hello-service", 6 | "labels": { 7 | "name": "hello-service" 8 | } 9 | }, 10 | "spec": { 11 | "type": "LoadBalancer", 12 | "ports": [ 13 | { 14 | "name": "http", 15 | "port": 80, 16 | "targetPort": 3000 17 | } 18 | ], 19 | "selector": { 20 | "name": 
"hello" 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /resources/todo/mysql-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "mysql-service", 6 | "labels": { 7 | "name": "mysql" 8 | } 9 | }, 10 | "spec": { 11 | "selector": { 12 | "name": "mysql" 13 | }, 14 | "ports": [ 15 | { 16 | "protocol": "TCP", 17 | "port": 3306, 18 | "targetPort": 3306 19 | } 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /resources/todo/mysql.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Pod", 4 | "metadata": { 5 | "name": "mysql", 6 | "labels": { 7 | "name": "mysql" 8 | } 9 | }, 10 | "spec": { 11 | "containers": [ 12 | { 13 | "name": "mysql", 14 | "image": "mysql:5.6", 15 | "env": [ 16 | { 17 | "name": "MYSQL_ROOT_PASSWORD", 18 | "value": "password" 19 | }, 20 | { 21 | "name": "MYSQL_DATABASE", 22 | "value": "todo" 23 | } 24 | ], 25 | "ports": [ 26 | { 27 | "name": "mysql", 28 | "protocol": "TCP", 29 | "containerPort": 3306 30 | } 31 | ], 32 | "volumeMounts": [ 33 | { 34 | "name": "mysql-storage", 35 | "mountPath": "/var/lib/mysql" 36 | } 37 | ] 38 | } 39 | ], 40 | "volumes": [ 41 | { 42 | "name": "mysql-storage", 43 | "gcePersistentDisk": { 44 | "pdName": "mysql-disk", 45 | "fsType": "ext4" 46 | } 47 | } 48 | ] 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /resources/todo/todo-rc.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "ReplicationController", 4 | "metadata": { 5 | "name": "todo-rc", 6 | "labels": { 7 | "name": "todo-rc" 8 | } 9 | }, 10 | "spec": { 11 | "replicas": 3, 12 | "selector": { 13 | "name": "todo" 14 | }, 15 | 
"template": { 16 | "metadata": { 17 | "name": "todo", 18 | "labels": { 19 | "name": "todo" 20 | } 21 | }, 22 | "spec": { 23 | "containers": [ 24 | { 25 | "name": "todo", 26 | "image": "DOCKERHUB_USERNAME/todo:latest", 27 | "ports": [ 28 | { 29 | "name": "http", 30 | "containerPort": 3000, 31 | "protocol": "TCP" 32 | } 33 | ] 34 | } 35 | ] 36 | } 37 | } 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /resources/todo/todo-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "apiVersion": "v1", 3 | "kind": "Service", 4 | "metadata": { 5 | "name": "todo-service", 6 | "labels": { 7 | "name": "todo-service" 8 | } 9 | }, 10 | "spec": { 11 | "type": "LoadBalancer", 12 | "ports": [ 13 | { 14 | "name": "http", 15 | "port": 80, 16 | "targetPort": 3000 17 | } 18 | ], 19 | "selector": { 20 | "name": "todo" 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/hello/hello.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "fmt" 5 | "log" 6 | "net/http" 7 | ) 8 | 9 | func main() { 10 | http.HandleFunc("/", handler) 11 | log.Fatal(http.ListenAndServe(":3000", nil)) 12 | } 13 | 14 | func handler(w http.ResponseWriter, r *http.Request) { 15 | fmt.Fprint(w, "Hello from Go!") 16 | } 17 | -------------------------------------------------------------------------------- /src/todo/todo.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "database/sql" 5 | "fmt" 6 | "html/template" 7 | "log" 8 | "net/http" 9 | 10 | _ "github.com/go-sql-driver/mysql" 11 | ) 12 | 13 | const dbHost = "mysql-service:3306" 14 | const dbUser = "root" 15 | const dbPassword = "password" 16 | const dbName = "todo" 17 | 18 | func main() { 19 | http.HandleFunc("/", todoList) 20 | http.HandleFunc("/save", saveItem) 21 
| log.Fatal(http.ListenAndServe(":3000", nil)) 22 | } 23 | 24 | // Todo represents a single 'todo', or item. 25 | type Todo struct { 26 | ID int 27 | Item string 28 | } 29 | 30 | // todoList shows the todo list along with the form to add a new item to the 31 | // list. 32 | func todoList(w http.ResponseWriter, r *http.Request) { 33 | tmpl := ` 34 | 35 | 36 | List of items 37 | 38 | 39 |

Todo list:

40 |
41 | Add item:
42 |

43 |
44 | 49 | 50 | 51 | ` 52 | t, err := template.New("todolist").Parse(tmpl) 53 | if err != nil { 54 | log.Fatal(err) 55 | } 56 | 57 | dbc := db() 58 | rows, err := dbc.Query("SELECT * FROM items") 59 | if err != nil { 60 | log.Fatal(err) 61 | } 62 | 63 | todos := []Todo{} 64 | for rows.Next() { 65 | todo := Todo{} 66 | err = rows.Scan(&todo.ID, &todo.Item) 67 | todos = append(todos, todo) 68 | } 69 | 70 | data := struct { 71 | TodoItems []Todo 72 | }{ 73 | TodoItems: todos, 74 | } 75 | 76 | t.Execute(w, data) 77 | } 78 | 79 | // saveItem saves a new todo item and then redirects the user back to the list 80 | func saveItem(w http.ResponseWriter, r *http.Request) { 81 | dbc := db() 82 | stmt, err := dbc.Prepare("INSERT items SET item=?") 83 | if err != nil { 84 | log.Fatal(err) 85 | } 86 | _, err = stmt.Exec(r.FormValue("item")) 87 | if err != nil { 88 | log.Fatal(err) 89 | } 90 | http.Redirect(w, r, "/", 301) 91 | } 92 | 93 | // db creates a connection to the database and creates the items table if it 94 | // does not already exist. 95 | func db() *sql.DB { 96 | connStr := fmt.Sprintf( 97 | "%s:%s@tcp(%s)/%s?charset=utf8&parseTime=True&loc=Local", 98 | dbUser, 99 | dbPassword, 100 | dbHost, 101 | dbName, 102 | ) 103 | db, err := sql.Open("mysql", connStr) 104 | if err != nil { 105 | log.Fatal(err) 106 | } 107 | _, err = db.Exec(`CREATE TABLE IF NOT EXISTS items( 108 | id integer NOT NULL AUTO_INCREMENT, 109 | item varchar(255), 110 | PRIMARY KEY (id) 111 | )`) 112 | return db 113 | } 114 | --------------------------------------------------------------------------------