├── Chapter1 └── user-data ├── Chapter10 └── Kubernetes_Cluster │ ├── 1-bootstrap_cluster.sh │ ├── 2-get_k8s_fleet_etcd.sh │ ├── 3-install_k8s_fleet_units.sh │ ├── cloud-config │ ├── master.yaml │ └── node.yaml │ ├── set_k8s_access.sh │ ├── settings │ └── units │ ├── kube-apiserver.service │ ├── kube-controller-manager.service │ ├── kube-kubelet.service │ ├── kube-proxy.service │ └── kube-scheduler.service ├── Chapter3 ├── hello.service └── hello1.service ├── Chapter4 ├── config.rb ├── hello-cluster.service └── user-data ├── Chapter5 ├── Local_Development_VM │ ├── coreos-dev-install.sh │ ├── files │ │ ├── vm │ │ │ ├── Vagrantfile │ │ │ ├── config.rb │ │ │ └── user-data │ │ ├── vm_halt.sh │ │ ├── vm_ssh.sh │ │ └── vm_up.sh │ ├── fleet │ │ └── nginx.service │ └── share │ │ └── nginx │ │ └── html │ │ └── index.html └── Test_Staging_Cluster │ ├── cloud-config │ ├── control1.yaml │ ├── staging1.yaml │ └── test1.yaml │ ├── create_cluster_control.sh │ ├── create_cluster_workers.sh │ ├── files │ ├── control1.sh │ ├── etcdctl │ ├── set_cluster_access.sh │ ├── staging1.sh │ └── test1.sh │ ├── fleet │ ├── staging1_webserver.service │ └── test1_webserver.service │ ├── install_fleetctl_and_scripts.sh │ └── settings ├── Chapter6 └── Test_Staging_Cluster │ ├── cloud-config │ └── registry-cbuilder1.yaml │ ├── create_registry-cbuilder1.sh │ ├── dockerfiles │ └── dbuilder │ │ ├── 00_host_keys.sh │ │ ├── Dockerfile │ │ ├── README.md │ │ ├── dbuilder.service │ │ ├── rsync.sh │ │ ├── rsyncd.conf │ │ └── ssh │ │ ├── config │ │ ├── id_rsa │ │ └── id_rsa.pub │ ├── files │ └── reg-dbuider1.sh │ ├── fleet │ ├── dbuilder.service │ └── registry.service │ └── webserver │ ├── deploy_2_staging1.sh │ ├── deploy_2_test1.sh │ ├── staging1 │ └── index.html │ └── test1 │ └── index.html ├── Chapter7 ├── Production_Cluster │ ├── cloud-config │ │ ├── control1.yaml │ │ ├── web1.yaml │ │ └── web2.yaml │ ├── create_cluster_control.sh │ ├── create_cluster_workers.sh │ ├── files │ │ ├── control1.sh │ │ ├── 
etcdctl │ │ ├── set_cluster_access.sh │ │ ├── web1.sh │ │ └── web2.sh │ ├── fleet │ │ └── website1.service │ ├── install_fleetctl_and_scripts.sh │ ├── scripts │ │ └── deploy_2_production_website1.sh │ └── settings └── Test_Staging_Cluster │ ├── files │ ├── Dockerfile │ ├── build.sh │ └── push.sh │ └── install_website1_2_dbuilder.sh ├── LICENSE └── README.md /Chapter1/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | name: core-01 6 | initial-advertise-peer-urls: http://$private_ipv4:2380 7 | listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 8 | initial-cluster-token: core-01_etcd 9 | initial-cluster: core-01=http://$private_ipv4:2380 10 | initial-cluster-state: new 11 | advertise-client-urls: http://$public_ipv4:2379,http://$public_ipv4:4001 12 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 13 | fleet: 14 | public-ip: $public_ipv4 15 | flannel: 16 | interface: $public_ipv4 17 | units: 18 | - name: etcd2.service 19 | command: start 20 | - name: fleet.service 21 | command: start 22 | - name: docker-tcp.socket 23 | command: start 24 | enable: true 25 | content: | 26 | [Unit] 27 | Description=Docker Socket for the API 28 | 29 | [Socket] 30 | ListenStream=2375 31 | Service=docker.service 32 | BindIPv6Only=both 33 | 34 | [Install] 35 | WantedBy=sockets.target 36 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/1-bootstrap_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Create Kubernetes cluster 4 | 5 | # Update required settings in "settings" file before running this script 6 | 7 | function pause(){ 8 | read -p "$*" 9 | } 10 | 11 | ## Fetch GC settings 12 | # project and zone 13 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 14 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 15 | 
# CoreOS release channel 16 | channel=$(cat settings | grep channel= | head -1 | cut -f2 -d"=") 17 | # master instance type 18 | master_machine_type=$(cat settings | grep master_machine_type= | head -1 | cut -f2 -d"=") 19 | # node instance type 20 | node_machine_type=$(cat settings | grep node_machine_type= | head -1 | cut -f2 -d"=") 21 | # get the latest full image name 22 | image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'}) 23 | # 24 | # master name 25 | master_name=$(cat settings | grep master_name= | head -1 | cut -f2 -d"=") 26 | # node name and count 27 | node_name=$(cat settings | grep node_name= | head -1 | cut -f2 -d"=") 28 | node_count=$(cat settings | grep node_count= | head -1 | cut -f2 -d"=") 29 | ## 30 | 31 | # create master node 32 | gcloud compute instances create $master_name \ 33 | --project=$project --image=$image --image-project=coreos-cloud \ 34 | --boot-disk-type=pd-standard --boot-disk-size=200 --zone=$zone \ 35 | --machine-type=$master_machine_type --metadata-from-file user-data=./cloud-config/master.yaml \ 36 | --can-ip-forward --scopes compute-rw --tags=k8s-cluster,k8s-master 37 | # create internal static IP for the master 38 | gcloud compute routes create ip-10-222-1-1-$master_name --project=$project \ 39 | --next-hop-instance $master_name \ 40 | --next-hop-instance-zone $zone \ 41 | --destination-range 10.222.1.1/32 42 | # 43 | 44 | # create nodes 45 | # by defaul 2 nodes get created, update node_count in settings file if you want a different number of nodes 46 | for (( i=1; i<=$node_count; i++ )) 47 | do 48 | gcloud compute instances create $node_name-$i \ 49 | --project=$project --image=$image --image-project=coreos-cloud \ 50 | --boot-disk-type=pd-standard --boot-disk-size=200 --zone=$zone \ 51 | --machine-type=$node_machine_type --metadata-from-file user-data=./cloud-config/node.yaml \ 52 | --can-ip-forward --tags=k8s-cluster,k8s-nodes,prod 53 | done 54 | # 55 | 56 | # 
create a folder to store our binary files and settings file 57 | mkdir -p ~/k8s-cluster/bin 58 | # copy files there 59 | cp -f settings ~/k8s-cluster 60 | cp -f set_k8s_access.sh ~/k8s-cluster/bin 61 | cp -rf units ~/k8s-cluster 62 | echo " " 63 | echo "Cluster machines setup has finished !!!" 64 | pause 'Press [Enter] key to continue ...' 65 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/2-get_k8s_fleet_etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 4 | 5 | # Install/update etcdctl, fleetctl and kubectl 6 | 7 | # fetch from settings file 8 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 9 | master_name=$(cat settings | grep master_name= | head -1 | cut -f2 -d"=") 10 | 11 | # get master external IP 12 | master_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep $master_name | awk {'print $5'}); 13 | # path to the folder where we store our binary files 14 | export PATH=${HOME}/k8s-cluster/bin:$PATH 15 | 16 | function pause(){ 17 | read -p "$*" 18 | } 19 | 20 | # get latest k8s version 21 | function get_latest_version_number { 22 | local -r latest_url="https://storage.googleapis.com/kubernetes-release/release/latest.txt" 23 | if [[ $(which wget) ]]; then 24 | wget -qO- ${latest_url} 25 | elif [[ $(which curl) ]]; then 26 | curl -Ss ${latest_url} 27 | fi 28 | } 29 | 30 | k8s_version=$(get_latest_version_number) 31 | 32 | # create tmp folder 33 | mkdir tmp 34 | 35 | echo "Downloading and instaling fleetctl, etcdctl and kubectl ..." 
36 | # First let's check which OS we use: OS X or Linux 37 | uname=$(uname) 38 | 39 | if [[ "${uname}" == "Darwin" ]] 40 | then 41 | # OS X 42 | # 43 | cd ./tmp 44 | # download etcd and fleet clients for OS X 45 | ETCD_RELEASE=$(ssh core@$master_ip etcdctl --version | cut -d " " -f 3- | tr -d '\r') 46 | echo "Downloading etcdctl v$ETCD_RELEASE for OS X" 47 | curl -L -o etcd.zip "https://github.com/coreos/etcd/releases/download/v$ETCD_RELEASE/etcd-v$ETCD_RELEASE-darwin-amd64.zip" 48 | unzip -j -o "etcd.zip" "etcd-v$ETCD_RELEASE-darwin-amd64/etcdctl" 49 | mv -f etcdctl ~/k8s-cluster/bin 50 | # clean up 51 | rm -f etcd.zip 52 | echo "etcdctl was copied to ~/k8s-cluster/bin" 53 | echo " " 54 | 55 | # 56 | FLEET_RELEASE=$(ssh core@$master_ip fleetctl version | cut -d " " -f 3- | tr -d '\r') 57 | echo "Downloading fleetctl v$FLEET_RELEASE for OS X" 58 | curl -L -o fleet.zip "https://github.com/coreos/fleet/releases/download/v$FLEET_RELEASE/fleet-v$FLEET_RELEASE-darwin-amd64.zip" 59 | unzip -j -o "fleet.zip" "fleet-v$FLEET_RELEASE-darwin-amd64/fleetctl" 60 | mv -f fleetctl ~/k8s-cluster/bin 61 | # clean up 62 | rm -f fleet.zip 63 | echo "fleetctl was copied to ~/k8s-cluster/bin " 64 | echo " " 65 | 66 | # download kubernetes kubectl for OS X 67 | echo "Downloading kubernetes $k8s_version kubectl for OS X" 68 | curl -L -o kubectl https://storage.googleapis.com/kubernetes-release/release/$k8s_version/bin/darwin/amd64/kubectl 69 | mv -f kubectl ~/k8s-cluster/bin 70 | # 71 | echo " " 72 | echo "kubectl was copied to ~/k8s-cluster/bin" 73 | echo " " 74 | # Make them executable 75 | # 76 | chmod +x ~/k8s-cluster/bin/* 77 | cd .. 
78 | else 79 | # Linux 80 | # 81 | cd ./tmp 82 | # download etcd and fleet clients for Linux 83 | ETCD_RELEASE=$(ssh core@$master_ip etcdctl --version | cut -d " " -f 3- | tr -d '\r') 84 | echo "Downloading etcdctl $ETCD_RELEASE for Linux" 85 | curl -L -o etcd.tar.gz "https://github.com/coreos/etcd/releases/download/v$ETCD_RELEASE/etcd-v$ETCD_RELEASE-linux-amd64.tar.gz" 86 | tar -zxvf etcd.tar.gz etcd-v$ETCD_RELEASE-linux-amd64/etcdctl 87 | mv -f etcd-v$ETCD_RELEASE-linux-amd64/etcdctl ~/k8s-cluster/bin 88 | # clean up 89 | rm -f etcd.tar.gz 90 | rm -rf etcd-v$ETCD_RELEASE-linux-amd64 91 | echo "etcdctl was copied to ~/k8s-cluster/bin" 92 | echo " " 93 | 94 | FLEET_RELEASE=$(ssh core@$master_ip fleetctl version | cut -d " " -f 3- | tr -d '\r') 95 | cd ./tmp 96 | echo "Downloading fleetctl v$FLEET_RELEASE for Linux" 97 | curl -L -o fleet.tar.gz "https://github.com/coreos/fleet/releases/download/v$FLEET_RELEASE/fleet-v$FLEET_RELEASE-linux-amd64.tar.gz" 98 | tar -zxvf fleet.tar.gz fleet-v$FLEET_RELEASE-linux-amd64/fleetctl 99 | mv -f fleet-v$FLEET_RELEASE-linux-amd64/fleetctl ~/k8s-cluster/bin 100 | # clean up 101 | rm -f fleet.tar.gz 102 | rm -rf fleet-v$FLEET_RELEASE-linux-amd64 103 | echo "fleetctl was copied to ~/k8s-cluster/bin" 104 | 105 | # download kubernetes kubectl for Linux 106 | echo "Downloading kubernetes $k8s_version kubectl for Linux" 107 | curl -L -o kubectl https://storage.googleapis.com/kubernetes-release/release/$k8s_version/bin/linux/amd64/kubectl 108 | mv -f kubectl ~/k8s-cluster/bin 109 | echo "kubectl was copied to ~/k8s-cluster/bin" 110 | echo " " 111 | 112 | # 113 | # Make them executable 114 | chmod +x ~/k8s-cluster/bin/* 115 | # 116 | cd .. 117 | fi 118 | echo " " 119 | echo "Instaling of fleetctl, etcdctl and kubectl has finished !!!" 120 | pause 'Press [Enter] key to continue ...' 
121 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/3-install_k8s_fleet_units.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install Kubernetes fleet units 4 | 5 | function pause(){ 6 | read -p "$*" 7 | } 8 | 9 | # get latest k8s version 10 | function get_latest_version_number { 11 | local -r latest_url="https://storage.googleapis.com/kubernetes-release/release/latest.txt" 12 | if [[ $(which wget) ]]; then 13 | wget -qO- ${latest_url} 14 | elif [[ $(which curl) ]]; then 15 | curl -Ss ${latest_url} 16 | fi 17 | } 18 | 19 | k8s_version=$(get_latest_version_number) 20 | 21 | # update fleet units with k8s version 22 | sed -i "" -e 's/_K8S_VERSION_/'$k8s_version'/g' ./units/*.service 23 | # 24 | 25 | # fetch from settings file 26 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 27 | master_name=$(cat settings | grep master_name= | head -1 | cut -f2 -d"=") 28 | 29 | # set binaries folder, fleet tunnel to master's external IP 30 | # get master external IP 31 | master_external_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep $master_name | awk {'print $5'}); 32 | # path to the folder where we store our binary files 33 | export PATH=${HOME}/k8s-cluster/bin:$PATH 34 | # fleet tunnel 35 | export FLEETCTL_TUNNEL="$master_external_ip" 36 | export FLEETCTL_STRICT_HOST_KEY_CHECKING=false 37 | 38 | # deploy fleet units 39 | echo "Kubernetes $k8s_version will be installed ... " 40 | fleetctl start ./units/kube-apiserver.service 41 | fleetctl start ./units/kube-controller-manager.service 42 | fleetctl start ./units/kube-scheduler.service 43 | fleetctl start ./units/kube-kubelet.service 44 | fleetctl start ./units/kube-proxy.service 45 | echo " " 46 | fleetctl list-units 47 | 48 | echo " " 49 | echo "Kubernetes Cluster setup has finished !!!" 50 | pause 'Press [Enter] key to continue...' 
51 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/cloud-config/master.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | name: master1 6 | initial-advertise-peer-urls: http://10.222.1.1:2380 7 | initial-cluster-token: k8s_etcd 8 | initial-cluster: master1=http://10.222.1.1:2380 9 | initial-cluster-state: new 10 | listen-peer-urls: http://10.222.1.1:2380,http://10.222.1.1:7001 11 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 12 | advertise-client-urls: http://10.222.1.1:2379,http://10.222.1.1:4001 13 | fleet: 14 | metadata: role=master 15 | units: 16 | - name: 00-ens4v1.network 17 | runtime: true 18 | content: | 19 | [Match] 20 | Name=ens4v1 21 | 22 | [Network] 23 | Address=10.222.1.1/24 24 | - name: etcd2.service 25 | command: start 26 | - name: fleet.service 27 | command: start 28 | - name: flanneld.service 29 | command: start 30 | drop-ins: 31 | - name: 50-network-config.conf 32 | content: | 33 | [Unit] 34 | Requires=etcd2.service 35 | [Service] 36 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network": "10.244.0.0/16", "Backend": {"Type": "vxlan"}}' 37 | - name: docker.service 38 | command: start 39 | drop-ins: 40 | - name: 50-insecure-registry.conf 41 | content: | 42 | [Unit] 43 | Requires=flanneld.service 44 | After=flanneld.service 45 | [Service] 46 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 47 | 48 | write_files: 49 | - path: /etc/resolv.conf 50 | permissions: 0644 51 | owner: root 52 | content: | 53 | nameserver 169.254.169.254 54 | nameserver 10.240.0.1 55 | - path: /opt/bin/wupiao 56 | permissions: '0755' 57 | content: | 58 | #!/bin/bash 59 | # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen 60 | [ -n "$1" ] && \ 61 | until curl -o /dev/null -sIf http://${1}; do \ 62 | sleep 1 && echo .; 63 | done; 64 | exit $? 
65 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/cloud-config/node.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | initial-cluster: master=http://10.222.1.1:2380 8 | proxy: on 9 | fleet: 10 | metadata: role=node 11 | units: 12 | - name: fleet.service 13 | command: start 14 | - name: flanneld.service 15 | command: start 16 | drop-ins: 17 | - name: 50-network-config.conf 18 | content: | 19 | [Unit] 20 | Requires=etcd2.service 21 | [Service] 22 | ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{"Network":"10.244.0.0/16", "Backend": {"Type": "vxlan"}}' 23 | - name: docker.service 24 | command: start 25 | drop-ins: 26 | - name: 50-insecure-registry.conf 27 | content: | 28 | [Unit] 29 | Requires=flanneld.service 30 | After=flanneld.service 31 | [Service] 32 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 33 | write_files: 34 | - path: /opt/bin/wupiao 35 | permissions: '0755' 36 | content: | 37 | #!/bin/bash 38 | # [w]ait [u]ntil [p]ort [i]s [a]ctually [o]pen 39 | [ -n "$1" ] && \ 40 | until curl -o /dev/null -sIf http://${1}; do \ 41 | sleep 1 && echo .; 42 | done; 43 | exit $? 
44 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/set_k8s_access.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 3 | # Setup Client SSH Tunnels 4 | 5 | # fetch from settings file 6 | project=$(cat ~/k8s-cluster/settings | grep project= | head -1 | cut -f2 -d"=") 7 | master_name=$(cat ~/k8s-cluster/settings | grep master_name= | head -1 | cut -f2 -d"=") 8 | # get master internal IP 9 | master_external_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep $master_name | awk {'print $5'}); 10 | 11 | # SET 12 | # Google Cloud project 13 | export CLOUDSDK_CORE_PROJECT=$project 14 | # path to the bin folder where we store our binary files 15 | export PATH=${HOME}/k8s-cluster/bin:$PATH 16 | # fleet tunnel 17 | export FLEETCTL_TUNNEL="$master_external_ip" 18 | export FLEETCTL_STRICT_HOST_KEY_CHECKING=false 19 | # etcd 20 | ssh -f -nNT -L 2379:127.0.0.1:2379 core@$master_external_ip 21 | # k8s master 22 | ssh -f -nNT -L 8080:127.0.0.1:8080 core@$master_external_ip 23 | 24 | echo " " 25 | etcdctl --no-sync ls / 26 | 27 | echo " " 28 | fleetctl list-units 29 | 30 | echo " " 31 | kubectl get nodes 32 | 33 | echo " " 34 | echo "Type exit when you are finished ..." 35 | /bin/bash 36 | 37 | echo "stoping ssh forwarding !!!" 38 | # kill ssh forwarding 39 | kill $(ps aux | grep -v grep | grep "ssh -f -nNT -L 8080:127.0.0.1:8080" | awk {'print $2'}) 40 | kill $(ps aux | grep -v grep | grep "ssh -f -nNT -L 2379:127.0.0.1:2379" | awk {'print $2'}) 41 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/settings: -------------------------------------------------------------------------------- 1 | ## change Google Cloud settings as per your requirements 2 | # GC settings 3 | 4 | # SET YOUR PROJECT AND ZONE !!! 
5 | project=my-cloud-project 6 | zone=europe-west1-c 7 | 8 | # CoreOS RELEASE CHANNEL 9 | channel=stable 10 | 11 | # MASTER AND NODE MACHINE TYPES 12 | master_machine_type=g1-small 13 | node_machine_type=n1-standard-1 14 | ## 15 | 16 | ### 17 | # master name 18 | master_name=k8s-master 19 | 20 | # node name and count 21 | node_name=k8s-node 22 | node_count=2 23 | ### 24 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/units/kube-apiserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStartPre=-/usr/bin/mkdir -p /opt/bin 7 | ExecStartPre=/bin/bash -c '/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/_K8S_VERSION_/bin/linux/amd64/kube-apiserver' 8 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-apiserver 9 | ExecStart=/opt/bin/kube-apiserver \ 10 | --allow_privileged=true \ 11 | --insecure_bind_address=0.0.0.0 \ 12 | --insecure_port=8080 \ 13 | --kubelet_https=true \ 14 | --secure_port=6443 \ 15 | --service-cluster-ip-range=10.100.0.0/16 \ 16 | --etcd_servers=http://127.0.0.1:2379 \ 17 | --public_address_override=10.222.1.1 \ 18 | --cloud_provider=gce \ 19 | --cors_allowed_origins=.* \ 20 | --logtostderr=true \ 21 | --runtime_config=api/v1 22 | 23 | Restart=always 24 | RestartSec=10 25 | 26 | [X-Fleet] 27 | MachineMetadata=role=master 28 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/units/kube-controller-manager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | Requires=kube-apiserver.service 5 | After=kube-apiserver.service 6 | 7 | [Service] 8 | 
ExecStartPre=/bin/bash -c '/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/_K8S_VERSION_/bin/linux/amd64/kube-controller-manager' 9 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-controller-manager 10 | ExecStart=/opt/bin/kube-controller-manager \ 11 | --master=127.0.0.1:8080 \ 12 | --cloud_provider=gce \ 13 | --logtostderr=true 14 | Restart=always 15 | RestartSec=10 16 | 17 | [X-Fleet] 18 | MachineOf=kube-apiserver.service 19 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/units/kube-kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | EnvironmentFile=/etc/environment 7 | ExecStartPre=/bin/bash -c '/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/_K8S_VERSION_/bin/linux/amd64/kubelet' 8 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kubelet 9 | ExecStart=/opt/bin/kubelet \ 10 | --address=0.0.0.0 \ 11 | --port=10250 \ 12 | --register-node=true \ 13 | --api_servers=http://10.222.1.1:8080 \ 14 | --allow_privileged=true \ 15 | --logtostderr=true \ 16 | --cadvisor_port=4194 \ 17 | --healthz_bind_address=0.0.0.0 \ 18 | --healthz_port=10248 19 | 20 | Restart=always 21 | RestartSec=10 22 | 23 | [X-Fleet] 24 | Global=true 25 | MachineMetadata=role=node 26 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/units/kube-proxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Proxy 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStartPre=/bin/bash -c '/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/_K8S_VERSION_/bin/linux/amd64/kube-proxy' 7 | 
ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-proxy 8 | ExecStart=/opt/bin/kube-proxy \ 9 | --master=http://10.222.1.1:8080 \ 10 | --logtostderr=true 11 | 12 | [X-Fleet] 13 | Global=true 14 | MachineMetadata=role=node 15 | -------------------------------------------------------------------------------- /Chapter10/Kubernetes_Cluster/units/kube-scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | Requires=kube-apiserver.service 5 | After=kube-apiserver.service 6 | 7 | [Service] 8 | ExecStartPre=/bin/bash -c '/usr/bin/wget -N -P /opt/bin https://storage.googleapis.com/kubernetes-release/release/_K8S_VERSION_/bin/linux/amd64/kube-scheduler' 9 | ExecStartPre=/usr/bin/chmod +x /opt/bin/kube-scheduler 10 | ExecStart=/opt/bin/kube-scheduler --master=127.0.0.1:8080 11 | 12 | Restart=always 13 | RestartSec=10 14 | 15 | [X-Fleet] 16 | MachineOf=kube-apiserver.service 17 | -------------------------------------------------------------------------------- /Chapter3/hello.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=HelloWorld 3 | # this unit will only start after docker.service 4 | After=docker.service 5 | Requires=docker.service 6 | 7 | [Service] 8 | TimeoutStartSec=0 9 | # busybox image will be pulled from docker public registry 10 | ExecStartPre=/usr/bin/docker pull busybox 11 | # we use rm just in case the container with the name “busybox1” is left 12 | ExecStartPre=-/usr/bin/docker rm busybox1 13 | # we start docker container 14 | ExecStart=/usr/bin/docker run --rm --name busybox1 busybox /bin/sh -c "while true; do echo Hello World; sleep 1; done" 15 | # we stop docker container when systemctl stop is used 16 | ExecStop=/usr/bin/docker stop busybox1 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | 
-------------------------------------------------------------------------------- /Chapter3/hello1.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=HelloWorld 3 | # this unit will only start after docker.service 4 | After=docker.service 5 | Requires=docker.service 6 | 7 | [Service] 8 | TimeoutStartSec=0 9 | # busybox image will be pulled from docker public registry 10 | ExecStartPre=/usr/bin/docker pull busybox 11 | # we use rm just in case the container with the name busybox1 is left 12 | ExecStartPre=-/usr/bin/docker rm busybox1 13 | # we start docker container 14 | ExecStart=/usr/bin/docker run --rm --name busybox1 busybox /bin/sh -c "while true; do echo Hello World; sleep 1; done" 15 | # we stop docker container when systemctl stop is used 16 | ExecStop=/usr/bin/docker stop busybox1 17 | 18 | [X-Fleet] 19 | -------------------------------------------------------------------------------- /Chapter4/config.rb: -------------------------------------------------------------------------------- 1 | # Size of the CoreOS cluster created by Vagrant 2 | $num_instances=3 3 | 4 | # Used to fetch a new discovery token for a cluster of size $num_instances 5 | $new_discovery_url="https://discovery.etcd.io/new?size=#{$num_instances}" 6 | 7 | # To automatically replace the discovery token on 'vagrant up', uncomment 8 | # the lines below: 9 | # 10 | if File.exists?('user-data') && ARGV[0].eql?('up') 11 | require 'open-uri' 12 | require 'yaml' 13 | 14 | token = open($new_discovery_url).read 15 | 16 | data = YAML.load(IO.readlines('user-data')[1..-1].join) 17 | if data['coreos'].key? 'etcd' 18 | data['coreos']['etcd']['discovery'] = token 19 | end 20 | if data['coreos'].key? 
'etcd2' 21 | data['coreos']['etcd2']['discovery'] = token 22 | end 23 | 24 | yaml = YAML.dump(data) 25 | File.open('user-data', 'w') { |file| file.write("#cloud-config\n\n#{yaml}") } 26 | end 27 | # 28 | 29 | # 30 | # coreos-vagrant is configured through a series of configuration 31 | # options (global ruby variables) which are detailed below. To modify 32 | # these options, first copy this file to "config.rb". Then simply 33 | # uncomment the necessary lines, leaving the $, and replace everything 34 | # after the equals sign.. 35 | 36 | # Change basename of the VM 37 | # The default value is "core", which results in VMs named starting with 38 | # "core-01" through to "core-${num_instances}". 39 | #$instance_name_prefix="core" 40 | 41 | # Official CoreOS channel from which updates should be downloaded 42 | #$update_channel='alpha' 43 | 44 | # Log the serial consoles of CoreOS VMs to log/ 45 | # Enable by setting value to true, disable with false 46 | # WARNING: Serial logging is known to result in extremely high CPU usage with 47 | # VirtualBox, so should only be used in debugging situations 48 | #$enable_serial_logging=false 49 | 50 | # Enable port forwarding of Docker TCP socket 51 | # Set to the TCP port you want exposed on the *host* machine, default is 2375 52 | # If 2375 is used, Vagrant will auto-increment (e.g. in the case of $num_instances > 1) 53 | # You can then use the docker tool locally by setting the following env var: 54 | # export DOCKER_HOST='tcp://127.0.0.1:2375' 55 | #$expose_docker_tcp=2375 56 | 57 | # Enable NFS sharing of your home directory ($HOME) to CoreOS 58 | # It will be mounted at the same path in the VM as on the host. 
59 | # Example: /Users/foobar -> /Users/foobar 60 | #$share_home=false 61 | 62 | # Customize VMs 63 | #$vm_gui = false 64 | #$vm_memory = 1024 65 | #$vm_cpus = 1 66 | 67 | # Share additional folders to the CoreOS VMs 68 | # For example, 69 | # $shared_folders = {'/path/on/host' => '/path/on/guest', '/home/foo/app' => '/app'} 70 | # or, to map host folders to guest folders of the same name, 71 | # $shared_folders = Hash[*['/home/foo/app1', '/home/foo/app2'].map{|d| [d, d]}.flatten] 72 | #$shared_folders = {} 73 | 74 | # Enable port forwarding from guest(s) to host machine, syntax is: { 80 => 8080 }, auto correction is enabled by default. 75 | #$forwarded_ports = {} 76 | -------------------------------------------------------------------------------- /Chapter4/hello-cluster.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | 3 | [Service] 4 | ExecStart=/usr/bin/bash -c "while true; do echo 'Hello Cluster'; sleep 1; done" 5 | 6 | -------------------------------------------------------------------------------- /Chapter4/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | discovery: https://discovery.etcd.io/614a2cd12096d7de6b577f0a95748027 6 | advertise-client-urls: http://$public_ipv4:2379 7 | initial-advertise-peer-urls: http://$private_ipv4:2380 8 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 9 | listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 10 | fleet: 11 | public-ip: $public_ipv4 12 | metadata: cluster=vagrant 13 | flannel: 14 | interface: $public_ipv4 15 | units: 16 | - name: etcd2.service 17 | command: start 18 | - name: fleet.service 19 | command: start 20 | - name: docker-tcp.socket 21 | command: start 22 | enable: true 23 | content: | 24 | [Unit] 25 | Description=Docker Socket for the API 26 | 27 | [Socket] 28 | ListenStream=2375 29 | Service=docker.service 30 | BindIPv6Only=both 
31 | 32 | [Install] 33 | WantedBy=sockets.target 34 | write_files: 35 | - path: /home/core/test.txt 36 | permissions: 0644 37 | owner: core 38 | content: | 39 | Hello Cluster 40 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/coreos-dev-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function pause(){ 4 | read -p "$*" 5 | } 6 | 7 | # coreos-dev-install.sh 8 | 9 | # create "coreos-dev-env" and other required folders 10 | mkdir ~/coreos-dev-env 11 | mkdir ~/coreos-dev-env/vm 12 | mkdir ~/coreos-dev-env/bin 13 | mkdir ~/coreos-dev-env/share 14 | mkdir ~/coreos-dev-env/fleet 15 | 16 | # copy scripts 17 | cp -f files/*.sh ~/coreos-dev-env/ 18 | # make files executable 19 | chmod 755 ~/coreos-dev-env/* 20 | 21 | # copy vm folder 22 | cp -rf files/vm ~/coreos-dev-env/ 23 | # 24 | 25 | # copy fleet folder 26 | cp -rf fleet ~/coreos-dev-env/ 27 | # 28 | 29 | # copy share subfolders/files 30 | cp -rf share ~/coreos-dev-env/ 31 | chmod -R 777 ~/coreos-dev-env/share 32 | # 33 | 34 | # first up to initialise VM 35 | echo "Setting up CoreOS VM" 36 | cd ~/coreos-dev-env/vm 37 | vagrant box update 38 | vagrant up --provider virtualbox 39 | 40 | # Add vagrant ssh key to ssh-agent 41 | vagrant ssh-config core-dev-01 | sed -n "s/IdentityFile//gp" | xargs ssh-add 42 | 43 | # download etcd, fleetctl and docker clients 44 | # First let's check which OS we use: OS X or Linux 45 | uname=$(uname) 46 | 47 | if [[ "${uname}" == "Darwin" ]] 48 | then 49 | # OS X 50 | # 51 | cd ~/coreos-dev-env/vm 52 | LATEST_RELEASE=$(vagrant ssh -c "etcdctl --version" | cut -d " " -f 3- | tr -d '\r' ) 53 | cd ~/coreos-dev-env/bin 54 | echo "Downloading etcdctl $LATEST_RELEASE for OS X" 55 | curl -L -o etcd.zip "https://github.com/coreos/etcd/releases/download/v$LATEST_RELEASE/etcd-v$LATEST_RELEASE-darwin-amd64.zip" 56 | unzip -j -o "etcd.zip" 
"etcd-v$LATEST_RELEASE-darwin-amd64/etcdctl" 57 | rm -f etcd.zip 58 | # 59 | cd ~/coreos-dev-env/vm 60 | LATEST_RELEASE=$(vagrant ssh -c 'fleetctl version' | cut -d " " -f 3- | tr -d '\r') 61 | cd ~/coreos-dev-env/bin 62 | echo "Downloading fleetctl v$LATEST_RELEASE for OS X" 63 | curl -L -o fleet.zip "https://github.com/coreos/fleet/releases/download/v$LATEST_RELEASE/fleet-v$LATEST_RELEASE-darwin-amd64.zip" 64 | unzip -j -o "fleet.zip" "fleet-v$LATEST_RELEASE-darwin-amd64/fleetctl" 65 | rm -f fleet.zip 66 | # download docker client 67 | cd ~/coreos-dev-env/vm 68 | LATEST_RELEASE=$(vagrant ssh -c 'docker version' | grep 'Server version:' | cut -d " " -f 3- | tr -d '\r') 69 | echo "Downloading docker v$LATEST_RELEASE client for OS X" 70 | curl -o ~/coreos-dev-env/bin/docker https://get.docker.com/builds/Darwin/x86_64/docker-$LATEST_RELEASE 71 | # Make them executable 72 | chmod +x ~/coreos-dev-env/bin/* 73 | # 74 | else 75 | # Linux 76 | # 77 | cd ~/coreos-dev-env/vm 78 | LATEST_RELEASE=$(vagrant ssh -c "etcdctl --version" | cut -d " " -f 3- | tr -d '\r' ) 79 | cd ~/coreos-dev-env/bin 80 | echo "Downloading etcdctl $LATEST_RELEASE for Linux" 81 | wget "https://github.com/coreos/etcd/releases/download/v$LATEST_RELEASE/etcd-v$LATEST_RELEASE-linux-amd64.tar.gz" 82 | tar -zxvf etcd-v$LATEST_RELEASE-linux-amd64.tar.gz etcd-v$LATEST_RELEASE-linux-amd64/etcdctl --strip 1 83 | rm -f etcd-v$LATEST_RELEASE-linux-amd64.tar.gz 84 | # 85 | cd ~/coreos-dev-env/vm 86 | LATEST_RELEASE=$(vagrant ssh -c 'fleetctl version' | cut -d " " -f 3- | tr -d '\r') 87 | cd ~/coreos-dev-env/bin 88 | echo "Downloading fleetctl v$LATEST_RELEASE for Linux" 89 | wget "https://github.com/coreos/fleet/releases/download/v$LATEST_RELEASE/fleet-v$LATEST_RELEASE-linux-amd64.tar.gz" 90 | tar -zxvf fleet-v$LATEST_RELEASE-linux-amd64.tar.gz fleet-v$LATEST_RELEASE-linux-amd64/fleetctl --strip 1 91 | rm -f fleet-v$LATEST_RELEASE-linux-amd64.tar.gz 92 | # 93 | echo "" 94 | echo "You need to install docker for 
Linux if you have not done that yet !!!" 95 | fi 96 | # 97 | cd ~/coreos-dev-env 98 | # 99 | echo "Installation has finished !!!" 100 | echo "" 101 | pause 'Press [Enter] key to continue...' 102 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/files/vm/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # # vi: set ft=ruby : 3 | 4 | require 'fileutils' 5 | 6 | Vagrant.require_version ">= 1.6.0" 7 | 8 | CLOUD_CONFIG_PATH = File.join(File.dirname(__FILE__), "user-data") 9 | CONFIG = File.join(File.dirname(__FILE__), "config.rb") 10 | 11 | # Defaults for config options defined in CONFIG 12 | $num_instances = 1 13 | $instance_name_prefix = "core" 14 | $update_channel = "alpha" 15 | $enable_serial_logging = false 16 | $share_home = false 17 | $vm_gui = false 18 | $vm_memory = 1024 19 | $vm_cpus = 1 20 | $shared_folders = {} 21 | $forwarded_ports = {} 22 | 23 | # Attempt to apply the deprecated environment variable NUM_INSTANCES to 24 | # $num_instances while allowing config.rb to override it 25 | if ENV["NUM_INSTANCES"].to_i > 0 && ENV["NUM_INSTANCES"] 26 | $num_instances = ENV["NUM_INSTANCES"].to_i 27 | end 28 | 29 | if File.exist?(CONFIG) 30 | require CONFIG 31 | end 32 | 33 | # Use old vb_xxx config variables when set 34 | def vm_gui 35 | $vb_gui.nil? ? $vm_gui : $vb_gui 36 | end 37 | 38 | def vm_memory 39 | $vb_memory.nil? ? $vm_memory : $vb_memory 40 | end 41 | 42 | def vm_cpus 43 | $vb_cpus.nil? ? 
$vm_cpus : $vb_cpus 44 | end 45 | 46 | Vagrant.configure("2") do |config| 47 | # always use Vagrants insecure key 48 | config.ssh.insert_key = false 49 | 50 | config.vm.box = "coreos-%s" % $update_channel 51 | config.vm.box_version = ">= 308.0.1" 52 | config.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant.json" % $update_channel 53 | 54 | ["vmware_fusion", "vmware_workstation"].each do |vmware| 55 | config.vm.provider vmware do |v, override| 56 | override.vm.box_url = "http://%s.release.core-os.net/amd64-usr/current/coreos_production_vagrant_vmware_fusion.json" % $update_channel 57 | end 58 | end 59 | 60 | config.vm.provider :virtualbox do |v| 61 | # On VirtualBox, we don't have guest additions or a functional vboxsf 62 | # in CoreOS, so tell Vagrant that so it can be smarter. 63 | v.check_guest_additions = false 64 | v.functional_vboxsf = false 65 | end 66 | 67 | # plugin conflict 68 | if Vagrant.has_plugin?("vagrant-vbguest") then 69 | config.vbguest.auto_update = false 70 | end 71 | 72 | (1..$num_instances).each do |i| 73 | config.vm.define vm_name = "%s-%02d" % [$instance_name_prefix, i] do |config| 74 | config.vm.hostname = vm_name 75 | 76 | if $enable_serial_logging 77 | logdir = File.join(File.dirname(__FILE__), "log") 78 | FileUtils.mkdir_p(logdir) 79 | 80 | serialFile = File.join(logdir, "%s-serial.txt" % vm_name) 81 | FileUtils.touch(serialFile) 82 | 83 | ["vmware_fusion", "vmware_workstation"].each do |vmware| 84 | config.vm.provider vmware do |v, override| 85 | v.vmx["serial0.present"] = "TRUE" 86 | v.vmx["serial0.fileType"] = "file" 87 | v.vmx["serial0.fileName"] = serialFile 88 | v.vmx["serial0.tryNoRxLoss"] = "FALSE" 89 | end 90 | end 91 | 92 | config.vm.provider :virtualbox do |vb, override| 93 | vb.customize ["modifyvm", :id, "--uart1", "0x3F8", "4"] 94 | vb.customize ["modifyvm", :id, "--uartmode1", serialFile] 95 | end 96 | end 97 | 98 | if $expose_docker_tcp 99 | config.vm.network "forwarded_port", guest: 
2375, host: ($expose_docker_tcp + i - 1), auto_correct: true 100 | end 101 | 102 | $forwarded_ports.each do |guest, host| 103 | config.vm.network "forwarded_port", guest: guest, host: host, auto_correct: true 104 | end 105 | 106 | ["vmware_fusion", "vmware_workstation"].each do |vmware| 107 | config.vm.provider vmware do |v| 108 | v.gui = vm_gui 109 | v.vmx['memsize'] = vm_memory 110 | v.vmx['numvcpus'] = vm_cpus 111 | end 112 | end 113 | 114 | config.vm.provider :virtualbox do |vb| 115 | vb.gui = vm_gui 116 | vb.memory = vm_memory 117 | vb.cpus = vm_cpus 118 | end 119 | 120 | ip = "172.19.20.99" 121 | config.vm.network :private_network, ip: ip 122 | 123 | # Uncomment below to enable NFS for sharing the host machine into the coreos-vagrant VM. 124 | #config.vm.synced_folder ".", "/home/core/share", id: "core", :nfs => true, :mount_options => ['nolock,vers=3,udp'] 125 | $shared_folders.each_with_index do |(host_folder, guest_folder), index| 126 | config.vm.synced_folder host_folder.to_s, guest_folder.to_s, id: "core-share%02d" % index, nfs: true, mount_options: ['nolock,vers=3,udp'] 127 | end 128 | 129 | if $share_home 130 | config.vm.synced_folder ENV['HOME'], ENV['HOME'], id: "home", :nfs => true, :mount_options => ['nolock,vers=3,udp'] 131 | end 132 | 133 | if File.exist?(CLOUD_CONFIG_PATH) 134 | config.vm.provision :file, :source => "#{CLOUD_CONFIG_PATH}", :destination => "/tmp/vagrantfile-user-data" 135 | config.vm.provision :shell, :inline => "mv /tmp/vagrantfile-user-data /var/lib/coreos-vagrant/", :privileged => true 136 | end 137 | 138 | end 139 | end 140 | end 141 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/files/vm/config.rb: -------------------------------------------------------------------------------- 1 | # Size of the CoreOS cluster created by Vagrant 2 | $num_instances=1 3 | 4 | # Used to fetch a new discovery token for a cluster of size $num_instances 5 | 
$new_discovery_url="https://discovery.etcd.io/new?size=#{$num_instances}" 6 | 7 | # To automatically replace the discovery token on 'vagrant up', uncomment 8 | # the lines below: 9 | # 10 | #if File.exists?('user-data') && ARGV[0].eql?('up') 11 | # require 'open-uri' 12 | # require 'yaml' 13 | # 14 | # token = open($new_discovery_url).read 15 | # 16 | # data = YAML.load(IO.readlines('user-data')[1..-1].join) 17 | # if data['coreos'].key? 'etcd' 18 | # data['coreos']['etcd']['discovery'] = token 19 | # end 20 | # if data['coreos'].key? 'etcd2' 21 | # data['coreos']['etcd2']['discovery'] = token 22 | # end 23 | # 24 | # yaml = YAML.dump(data) 25 | # File.open('user-data', 'w') { |file| file.write("#cloud-config\n\n#{yaml}") } 26 | #end 27 | # 28 | 29 | # 30 | # coreos-vagrant is configured through a series of configuration 31 | # options (global ruby variables) which are detailed below. To modify 32 | # these options, first copy this file to "config.rb". Then simply 33 | # uncomment the necessary lines, leaving the $, and replace everything 34 | # after the equals sign.. 35 | 36 | # Change basename of the VM 37 | # The default value is "core", which results in VMs named starting with 38 | # "core-01" through to "core-${num_instances}". 39 | $instance_name_prefix="core-dev" 40 | 41 | # Official CoreOS channel from which updates should be downloaded 42 | $update_channel='beta' 43 | 44 | # Log the serial consoles of CoreOS VMs to log/ 45 | # Enable by setting value to true, disable with false 46 | # WARNING: Serial logging is known to result in extremely high CPU usage with 47 | # VirtualBox, so should only be used in debugging situations 48 | #$enable_serial_logging=false 49 | 50 | # Enable port forwarding of Docker TCP socket 51 | # Set to the TCP port you want exposed on the *host* machine, default is 2375 52 | # If 2375 is used, Vagrant will auto-increment (e.g. 
in the case of $num_instances > 1) 53 | # You can then use the docker tool locally by setting the following env var: 54 | # export DOCKER_HOST='tcp://127.0.0.1:2375' 55 | $expose_docker_tcp=2375 56 | 57 | # Enable NFS sharing of your home directory ($HOME) to CoreOS 58 | # It will be mounted at the same path in the VM as on the host. 59 | # Example: /Users/foobar -> /Users/foobar 60 | #$share_home=false 61 | 62 | # Customize VMs 63 | #$vm_gui = false 64 | #$vm_memory = 1024 65 | #$vm_cpus = 1 66 | 67 | # Share additional folders to the CoreOS VMs 68 | # For example, 69 | # $shared_folders = {'/path/on/host' => '/path/on/guest', '/home/foo/app' => '/app'} 70 | # or, to map host folders to guest folders of the same name, 71 | # $shared_folders = Hash[*['/home/foo/app1', '/home/foo/app2'].map{|d| [d, d]}.flatten] 72 | $shared_folders = {'~/coreos-dev-env/share' => '/home/core/share'} 73 | 74 | # Enable port forwarding from guest(s) to host machine, syntax is: { 80 => 8080 }, auto correction is enabled by default. 
75 | #$forwarded_ports = {} 76 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/files/vm/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | name: core-dev-01 6 | initial-advertise-peer-urls: http://$private_ipv4:2380 7 | listen-peer-urls: http://$private_ipv4:2380,http://$private_ipv4:7001 8 | initial-cluster-token: etcd01 9 | initial-cluster: core-dev-01=http://$private_ipv4:2380 10 | initial-cluster-state: new 11 | advertise-client-urls: http://$public_ipv4:2379,http://$public_ipv4:4001 12 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 13 | fleet: 14 | public-ip: $public_ipv4 15 | flannel: 16 | interface: $public_ipv4 17 | units: 18 | - name: etcd2.service 19 | command: start 20 | - name: fleet.service 21 | command: start 22 | - name: docker-tcp.socket 23 | command: start 24 | enable: true 25 | content: | 26 | [Unit] 27 | Description=Docker Socket for the API 28 | 29 | [Socket] 30 | ListenStream=2375 31 | Service=docker.service 32 | BindIPv6Only=both 33 | 34 | [Install] 35 | WantedBy=sockets.target 36 | update: 37 | reboot-strategy: off 38 | 39 | write_files: 40 | - path: /etc/systemd/system/docker.service.d/50-insecure-registry.conf 41 | content: | 42 | [Service] 43 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 44 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/files/vm_halt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # vm_up.sh 4 | 5 | cd ~/coreos-dev-env/vm 6 | 7 | function pause(){ 8 | read -p "$*" 9 | } 10 | 11 | vagrant halt 12 | 13 | pause 'Press [Enter] key to continue...' 
14 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/files/vm_ssh.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # vm_ssh.sh 4 | 5 | cd ~/coreos-dev-env/vm 6 | vagrant ssh -- -A 7 | 8 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/files/vm_up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # vm_up.sh 4 | 5 | cd ~/coreos-dev-env/vm 6 | 7 | vagrant up 8 | 9 | # Add vagrant ssh key to ssh-agent 10 | vagrant ssh-config core-dev-01 | sed -n "s/IdentityFile//gp" | xargs ssh-add &>/dev/null 11 | 12 | # Set the environment variable for the docker daemon 13 | export DOCKER_HOST=tcp://127.0.0.1:2375 14 | 15 | # path to the bin folder where we store our binary files 16 | export PATH=${HOME}/coreos-dev-env/bin:$PATH 17 | 18 | # set etcd endpoint 19 | export ETCDCTL_PEERS=http://172.19.20.99:2379 20 | echo " " 21 | echo "etcdctl ls /:" 22 | etcdctl --no-sync ls / 23 | echo " " 24 | 25 | # set fleetctl endpoint 26 | export FLEETCTL_ENDPOINT=http://172.19.20.99:2379 27 | export FLEETCTL_DRIVER=etcd 28 | export FLEETCTL_STRICT_HOST_KEY_CHECKING=false 29 | echo "fleetctl list-machines:" 30 | fleetctl list-machines 31 | echo " " 32 | 33 | # list fleet units 34 | echo "fleetctl list-units:" 35 | fleetctl list-units 36 | echo " " 37 | 38 | # running docker containers 39 | echo "docker containers:" 40 | docker ps 41 | echo " " 42 | # 43 | 44 | cd ~/coreos-dev-env 45 | 46 | # open bash shell 47 | /bin/bash 48 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/fleet/nginx.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=nginx 3 | 4 | [Service] 5 | User=core 6 | TimeoutStartSec=0 7 | 
EnvironmentFile=/etc/environment 8 | ExecStartPre=-/usr/bin/docker rm nginx 9 | ExecStart=/usr/bin/docker run --rm --name nginx -p 80:80 \ 10 | -v /home/core/share/nginx/html:/usr/share/nginx/html \ 11 | nginx:latest 12 | # 13 | ExecStop=/usr/bin/docker stop nginx 14 | ExecStopPost=-/usr/bin/docker rm nginx 15 | 16 | Restart=always 17 | RestartSec=10s 18 | 19 | [X-Fleet] 20 | -------------------------------------------------------------------------------- /Chapter5/Local_Development_VM/share/nginx/html/index.html: -------------------------------------------------------------------------------- 1 | 2 |
3 |
10 | "Hello, CoreOS" Development Environment11 | |
12 |
This is an example page for CoreOS Essentials Book Chapter 5.
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/cloud-config/control1.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | name: control1 6 | initial-advertise-peer-urls: http://10.200.1.1:2380 7 | initial-cluster-token: control_etcd 8 | initial-cluster: control1=http://10.200.1.1:2380 9 | initial-cluster-state: new 10 | listen-peer-urls: http://10.200.1.1:2380,http://10.200.1.1:7001 11 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 12 | advertise-client-urls: http://10.200.1.1:2379,http://10.200.1.1:4001 13 | fleet: 14 | metadata: "role=services,cpeer=tsc-control1" 15 | units: 16 | - name: 00-ens4v1.network 17 | runtime: true 18 | content: | 19 | [Match] 20 | Name=ens4v1 21 | 22 | [Network] 23 | Address=10.200.1.1/24 24 | - name: etcd2.service 25 | command: start 26 | - name: fleet.service 27 | command: start 28 | - name: docker.service 29 | command: start 30 | drop-ins: 31 | - name: 50-insecure-registry.conf 32 | content: | 33 | [Unit] 34 | [Service] 35 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 36 | write_files: 37 | - path: /etc/resolv.conf 38 | permissions: 0644 39 | owner: root 40 | content: | 41 | nameserver 169.254.169.254 42 | nameserver 10.240.0.1 43 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/cloud-config/staging1.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | initial-cluster: control1=http://10.200.1.1:2380 8 | proxy: on 9 | fleet: 10 | public-ip: $public_ipv4 11 | metadata: "role=worker,cpeer=tsc-staging1" 12 | units: 13 | - name: 00-ens4v1.network 14 | runtime: true 
15 | content: | 16 | [Match] 17 | Name=ens4v1 18 | 19 | [Network] 20 | Address=10.200.3.1/24 21 | - name: etcd2.service 22 | command: start 23 | - name: fleet.service 24 | command: start 25 | - name: docker.service 26 | command: start 27 | drop-ins: 28 | - name: 50-insecure-registry.conf 29 | content: | 30 | [Unit] 31 | [Service] 32 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 33 | write_files: 34 | - path: /etc/resolv.conf 35 | permissions: 0644 36 | owner: root 37 | content: | 38 | nameserver 169.254.169.254 39 | nameserver 10.240.0.1 40 | ssh_authorized_keys: 41 | - ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrJybGYAiSG9Z2ETblpLimDsMoZgkGRyHamecl9X4XVwtgzV6Kl37BgEO2Mhp4D3K48wqn5rRBNETV6UNZPF42epgkEKBFFffZIwLZ9ppJMr0KT21+82jPX059j5OMsz5qLv7UzCocAb/rULk5Rudkh4NXTcXly9ybHWITSJ3hLebZblBPtg5Fi/RG7WnOP+DvLNGJXt89xIvSRHJBrQ4z2zaEKICABLU5Ky6aX4MqJf+9NU15cC7NgFhL+Juhhrm2V66XxN2apikYXEyjMHjaGkJvYPVSjYplydc0WdZb++jjAqGGb0AZQrwT8kcZEk5peHC5LPyaRmTuXqQkFl9J root@tsc-registry-cbuilder1-docker-builder 42 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/cloud-config/test1.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | initial-cluster: control1=http://10.200.1.1:2380 8 | proxy: on 9 | fleet: 10 | public-ip: $public_ipv4 11 | metadata: "role=worker,cpeer=tsc-test1" 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | - name: fleet.service 16 | command: start 17 | - name: docker.service 18 | command: start 19 | drop-ins: 20 | - name: 50-insecure-registry.conf 21 | content: | 22 | [Unit] 23 | [Service] 24 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 25 | -------------------------------------------------------------------------------- 
/Chapter5/Test_Staging_Cluster/create_cluster_control.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create TS cluster control 3 | 4 | # Update required settings in "settings" file before running this script 5 | 6 | function pause(){ 7 | read -p "$*" 8 | } 9 | 10 | ## Fetch GC settings 11 | # project and zone 12 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 13 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 14 | # CoreOS release channel 15 | channel=$(cat settings | grep channel= | head -1 | cut -f2 -d"=") 16 | # control instance type 17 | control_machine_type=$(cat settings | grep control_machine_type= | head -1 | cut -f2 -d"=") 18 | # get the latest full image name 19 | image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'}) 20 | ## 21 | 22 | # create an instance 23 | gcloud compute instances create tsc-control1 --project=$project --image=$image --image-project=coreos-cloud \ 24 | --boot-disk-size=50 --zone=$zone --machine-type=$control_machine_type \ 25 | --metadata-from-file user-data=cloud-config/control1.yaml --can-ip-forward --tags=tsc-control1,tsc 26 | 27 | # create a static IP for the new instance 28 | gcloud compute routes create ip-10-200-1-1-tsc-control1 --project=$project \ 29 | --next-hop-instance tsc-control1 \ 30 | --next-hop-instance-zone $zone \ 31 | --destination-range 10.200.1.1/32 32 | 33 | echo " " 34 | echo "Setup has finished !!!" 35 | pause 'Press [Enter] key to continue...' 
36 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/create_cluster_workers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create TS cluster workers 3 | 4 | # Update required settings in "settings" file before running this script 5 | 6 | function pause(){ 7 | read -p "$*" 8 | } 9 | 10 | ## Fetch GC settings 11 | # project and zone 12 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 13 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 14 | # CoreOS release channel 15 | channel=$(cat settings | grep channel= | head -1 | cut -f2 -d"=") 16 | # worker instance type 17 | worker_machine_type=$(cat settings | grep worker_machine_type= | head -1 | cut -f2 -d"=") 18 | # get the latest full image name 19 | image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'}) 20 | ## 21 | 22 | # create test1 instance 23 | gcloud compute instances create tsc-test1 --project=$project --image=$image --image-project=coreos-cloud \ 24 | --boot-disk-size=200 --zone=$zone --machine-type=$worker_machine_type \ 25 | --metadata-from-file user-data=cloud-config/test1.yaml --can-ip-forward --tags=tsc-test1,tsc 26 | 27 | # create staging1 instance 28 | gcloud compute instances create tsc-staging1 --project=$project --image=$image --image-project=coreos-cloud \ 29 | --boot-disk-size=200 --zone=$zone --machine-type=$worker_machine_type \ 30 | --metadata-from-file user-data=cloud-config/staging1.yaml --can-ip-forward --tags=tsc-staging1,tsc 31 | # create a static IP for the staging1 instance 32 | gcloud compute routes create ip-10-200-3-1-tsc-staging1 --project=$project \ 33 | --next-hop-instance tsc-staging1 \ 34 | --next-hop-instance-zone $zone \ 35 | --destination-range 10.200.3.1/32 36 | 37 | # Open port 80 HTTP access to web servers 38 | gcloud compute firewall-rules create http-80 
--project=$project \ 39 | --allow tcp:80 --target-tags=tsc,prod 40 | 41 | echo " " 42 | echo "Setup has finished !!!" 43 | pause 'Press [Enter] key to continue...' 44 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/files/control1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh --zone="_ZONE_" "core@tsc-control1" --ssh-flag="-A" 4 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/files/etcdctl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 3 | 4 | ssh core@control_ip etcdctl $1 $2 $3 $4 $5 $6 $7 $8 $9 5 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/files/set_cluster_access.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Setup Client SSH Tunnels 4 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 5 | 6 | # SET 7 | # path to the cluster folder where we store our binary files 8 | export PATH=${HOME}/coreos-tsc-gce/bin:$PATH 9 | # fleet tunnel 10 | export FLEETCTL_TUNNEL=control_ip 11 | export FLEETCTL_STRICT_HOST_KEY_CHECKING=false 12 | 13 | echo "list fleet machines:" 14 | fleetctl list-machines 15 | 16 | echo "list fleet units:" 17 | fleetctl list-units 18 | 19 | /bin/bash 20 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/files/staging1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh --zone="_ZONE_" "core@tsc-staging1" --ssh-flag="-A" 4 | 5 | -------------------------------------------------------------------------------- 
/Chapter5/Test_Staging_Cluster/files/test1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh --zone="_ZONE_" "core@tsc-test1" --ssh-flag="-A" 4 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/fleet/staging1_webserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=nginx 3 | 4 | [Service] 5 | User=core 6 | TimeoutStartSec=0 7 | EnvironmentFile=/etc/environment 8 | ExecStartPre=-/usr/bin/docker rm staging1-webserver 9 | ExecStart=/usr/bin/docker run --rm --name staging1-webserver -p 80:80 \ 10 | -v /home/core/share/nginx/html:/usr/share/nginx/html \ 11 | nginx:latest 12 | # 13 | ExecStop=/usr/bin/docker stop staging1-webserver 14 | ExecStopPost=-/usr/bin/docker rm staging1-webserver 15 | 16 | Restart=always 17 | RestartSec=10s 18 | 19 | [X-Fleet] 20 | MachineMetadata=cpeer=tsc-staging1 21 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/fleet/test1_webserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=nginx 3 | 4 | [Service] 5 | User=core 6 | TimeoutStartSec=0 7 | EnvironmentFile=/etc/environment 8 | ExecStartPre=-/usr/bin/docker rm test1-webserver 9 | ExecStart=/usr/bin/docker run --rm --name test1-webserver -p 80:80 \ 10 | -v /home/core/share/nginx/html:/usr/share/nginx/html \ 11 | nginx:latest 12 | # 13 | ExecStop=/usr/bin/docker stop test1-webserver 14 | ExecStopPost=-/usr/bin/docker rm test1-webserver 15 | 16 | Restart=always 17 | RestartSec=10s 18 | 19 | [X-Fleet] 20 | MachineMetadata=cpeer=tsc-test1 21 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/install_fleetctl_and_scripts.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install_fleet_etcd_clients.sh 4 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 5 | 6 | function pause(){ 7 | read -p "$*" 8 | } 9 | 10 | echo "Fetching Google Cloud settings ..." 11 | ## Fetch GC settings 12 | # project and zone 13 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 14 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 15 | # get tsc-control1 server's external IP 16 | control_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep -m 1 tsc-control1 | awk {'print $5'}) 17 | # get tsc-test1 server's external IP 18 | test_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep -m 1 tsc-test1 | awk {'print $5'}) 19 | # get tsc-staging1 server's external IP 20 | staging_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep -m 1 tsc-staging1 | awk {'print $5'}) 21 | ## 22 | echo " " 23 | 24 | # create main folder and a few subfolders 25 | echo "Creating 'coreos-tsc-gce' folder and its subfolders ..." 26 | mkdir -p ~/coreos-tsc-gce/bin 27 | mkdir -p ~/coreos-tsc-gce/fleet 28 | echo " " 29 | 30 | echo "Installing Development cluster local files ..." 
31 | # copy settings file 32 | cp -f settings ~/coreos-tsc-gce/ 33 | 34 | cp -f files/* ~/coreos-tsc-gce/bin/ 35 | cp -f fleet/* ~/coreos-tsc-gce/fleet/ 36 | 37 | # set control IP 38 | sed -i "" "s/control_ip/$control_ip/" ~/coreos-tsc-gce/bin/etcdctl 39 | sed -i "" "s/control_ip/$control_ip/" ~/coreos-tsc-gce/bin/set_cluster_access.sh 40 | # set zone 41 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-tsc-gce/bin/control1.sh 42 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-tsc-gce/bin/test1.sh 43 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-tsc-gce/bin/staging1.sh 44 | # set project 45 | sed -i "" "s/_PROJECT_/$project/" ~/coreos-tsc-gce/bin/control1.sh 46 | sed -i "" "s/_PROJECT_/$project/" ~/coreos-tsc-gce/bin/test1.sh 47 | sed -i "" "s/_PROJECT_/$project/" ~/coreos-tsc-gce/bin/staging1.sh 48 | # make files executables 49 | chmod 755 ~/coreos-tsc-gce/bin/* 50 | echo " " 51 | 52 | # download fleetctl client 53 | echo "Downloading and instaling fleetctl ..." 54 | # First let's check which OS we use: OS X or Linux 55 | uname=$(uname) 56 | 57 | # check remote fleet version 58 | FLEET_RELEASE=$(gcloud compute --project=$project ssh --zone=$zone "core@tsc-control1" --command "fleetctl version | cut -d ' ' -f 3- | tr -d '\r' ") 59 | cd ~/coreos-tsc-gce/bin 60 | 61 | if [[ "${uname}" == "Darwin" ]] 62 | then 63 | # OS X 64 | echo "Downloading fleetctl v$FLEET_RELEASE for OS X" 65 | curl -L -o fleet.zip "https://github.com/coreos/fleet/releases/download/v$FLEET_RELEASE/fleet-v$FLEET_RELEASE-darwin-amd64.zip" 66 | unzip -j -o "fleet.zip" "fleet-v$FLEET_RELEASE-darwin-amd64/fleetctl" 67 | rm -f fleet.zip 68 | # Make them executable 69 | chmod +x ~/coreos-tsc-gce/bin/* 70 | # 71 | else 72 | # Linux 73 | echo "Downloading fleetctl v$FLEET_RELEASE for Linux" 74 | wget "https://github.com/coreos/fleet/releases/download/v$FLEET_RELEASE/fleet-v$FLEET_RELEASE-linux-amd64.tar.gz" 75 | tar -zxvf fleet-v$FLEET_RELEASE-linux-amd64.tar.gz fleet-v$FLEET_RELEASE-linux-amd64/fleetctl --strip 1 76 | rm -f 
fleet-v$FLEET_RELEASE-linux-amd64.tar.gz 77 | # Make them executable 78 | chmod +x ~/coreos-tsc-gce/bin/* 79 | # 80 | fi 81 | # 82 | cd ~/coreos-tsc-gce 83 | 84 | echo " " 85 | echo "Installation has finished !!!" 86 | pause 'Press [Enter] key to continue...' 87 | -------------------------------------------------------------------------------- /Chapter5/Test_Staging_Cluster/settings: -------------------------------------------------------------------------------- 1 | ### CoreOS Test/Staging Cluster on GCE settings 2 | 3 | ## change Google Cloud settings as per your requirements 4 | # GC settings 5 | 6 | # CoreOS RELEASE CHANNEL 7 | channel=beta 8 | 9 | # SET YOUR PROJECT AND ZONE !!! 10 | project=my-cloud-project 11 | zone=europe-west1-d 12 | 13 | # ETCD CONTROL AND NODES MACHINES TYPE 14 | # 15 | control_machine_type=g1-small 16 | # 17 | worker_machine_type=n1-standard-1 18 | ## 19 | 20 | ### 21 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/cloud-config/registry-cbuilder1.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | initial-cluster: control1=http://10.200.1.1:2380 8 | proxy: on 9 | fleet: 10 | public-ip: $public_ipv4 11 | metadata: "role=worker,cpeer=tsc-reg-cbuilder1" 12 | units: 13 | - name: 00-ens4v1.network 14 | runtime: true 15 | content: | 16 | [Match] 17 | Name=ens4v1 18 | 19 | [Network] 20 | Address=10.200.4.1/24 21 | - name: etcd2.service 22 | command: start 23 | - name: fleet.service 24 | command: start 25 | - name: docker.service 26 | command: start 27 | drop-ins: 28 | - name: 50-insecure-registry.conf 29 | content: | 30 | [Unit] 31 | [Service] 32 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 33 | write_files: 34 | - path: /etc/resolv.conf 35 | 
permissions: 0644 36 | owner: root 37 | content: | 38 | nameserver 169.254.169.254 39 | nameserver 10.240.0.1 40 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/create_registry-cbuilder1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create TS cluster workers 3 | 4 | function pause(){ 5 | read -p "$*" 6 | } 7 | 8 | ## Fetch GC settings 9 | # project and zone 10 | project=$(cat ~/coreos-tsc-gce/settings | grep project= | head -1 | cut -f2 -d"=") 11 | zone=$(cat ~/coreos-tsc-gce/settings | grep zone= | head -1 | cut -f2 -d"=") 12 | # CoreOS release channel 13 | channel=$(cat ~/coreos-tsc-gce/settings | grep channel= | head -1 | cut -f2 -d"=") 14 | # worker instance type 15 | worker_machine_type=$(cat ~/coreos-tsc-gce/settings | grep worker_machine_type= | head -1 | cut -f2 -d"=") 16 | # get the latest full image name 17 | image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'}) 18 | ## 19 | 20 | # create registry-cbuilder1 instance 21 | gcloud compute instances create tsc-registry-cbuilder1 --project=$project --image=$image --image-project=coreos-cloud \ 22 | --boot-disk-size=40 --zone=$zone --machine-type=$worker_machine_type --metadata-from-file user-data=cloud-config/registry-cbuilder1.yaml \ 23 | --can-ip-forward --tags=tsc-registry-cbuilder1,tsc 24 | # create a static IP for the new instance 25 | gcloud compute routes create ip-10-200-4-1-tsc-registry-cbuilder1 --project=$project \ 26 | --next-hop-instance tsc-registry-cbuilder1 \ 27 | --next-hop-instance-zone $zone \ 28 | --destination-range 10.200.4.1/32 29 | 30 | # copy reg-dbuilder1.sh file 31 | cp files/* ~/coreos-tsc-gce/bin 32 | chmod 755 ~/coreos-tsc-gce/bin/* 33 | 34 | # set zone 35 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-tsc-gce/bin/reg-dbuider1.sh 36 | # set project 37 | sed -i "" "s/_PROJECT_/$project/" 
~/coreos-tsc-gce/bin/reg-dbuider1.sh 38 | 39 | # copy fleet units 40 | cp fleet/* ~/coreos-tsc-gce/fleet 41 | 42 | # add docker builder container public ssh key 43 | gcloud compute --project=$project ssh --zone=$zone "core@tsc-staging1" --command 'echo "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCrJybGYAiSG9Z2ETblpLimDsMoZgkGRyHamecl9X4XVwtgzV6Kl37BgEO2Mhp4D3K48wqn5rRBNETV6UNZPF42epgkEKBFFffZIwLZ9ppJMr0KT21+82jPX059j5OMsz5qLv7UzCocAb/rULk5Rudkh4NXTcXly9ybHWITSJ3hLebZblBPtg5Fi/RG7WnOP+DvLNGJXt89xIvSRHJBrQ4z2zaEKICABLU5Ky6aX4MqJf+9NU15cC7NgFhL+Juhhrm2V66XxN2apikYXEyjMHjaGkJvYPVSjYplydc0WdZb++jjAqGGb0AZQrwT8kcZEk5peHC5LPyaRmTuXqQkFl9J root@tsc-registry-cbuilder1-docker-builder" >> /home/core/.ssh/authorized_keys' 44 | 45 | echo " " 46 | echo "Setup has finished !!!" 47 | pause 'Press [Enter] key to continue...' 48 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/00_host_keys.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cp /tmp/authorized_keys /root/.ssh/authorized_keys 4 | 5 | chown root:root /root/.ssh/authorized_keys 6 | chmod 600 /root/.ssh/authorized_keys 7 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM phusion/baseimage:latest 2 | 3 | RUN apt-get update 4 | RUN apt-get install -y mc git rsync 5 | 6 | # Set correct environment variables. 7 | ENV HOME /root 8 | 9 | # enable ssh server 10 | RUN rm -f /etc/service/sshd/down 11 | 12 | # Use baseimage-docker's init system. 
13 | CMD ["/sbin/my_init"] 14 | 15 | ## ssh settings 16 | ADD ssh/config /root/.ssh/ 17 | RUN chmod -R 600 /root/.ssh/ 18 | ADD 00_host_keys.sh /etc/my_init.d/00_host_keys.sh 19 | RUN chmod 755 /etc/my_init.d/00_host_keys.sh 20 | 21 | # add rsync daemon 22 | RUN mkdir /etc/service/rsync 23 | ADD rsync.sh /etc/service/rsync/run 24 | ADD rsyncd.conf /etc/rsyncd.conf 25 | 26 | # Clean up APT when done. 27 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 28 | 29 | EXPOSE 22 30 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/README.md: -------------------------------------------------------------------------------- 1 | #### Docker builder 2 | * [](https://quay.io/repository/rimusz/dbuilder) 3 | 4 | * Docker builder image based on phusion/baseimage:latest with a built-in SSH server to access via ssh and rsync to sync files. 5 | 6 | * It uses host's docker. 7 | 8 | * Check CoreOS fleet unit dbuilder.service to see how it works. 9 | 10 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/dbuilder.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=docker-builder 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | User=core 8 | TimeoutStartSec=0 9 | EnvironmentFile=/etc/environment 10 | ExecStartPre=-/bin/sh -c 'docker pull quay.io/rimusz/dbuilder:latest' 11 | ExecStartPre=-/bin/sh -c '/usr/bin/docker rm docker-builder' 12 | ExecStart=/bin/sh -c '/usr/bin/docker run --rm --name docker-builder --hostname="$(hostname | cut -d.
-f1)-docker-builder" -p 2222:22 \ 13 | -v /home/core/.ssh/authorized_keys:/tmp/authorized_keys \ 14 | -v /home/core/data:/data -v /var/run/docker.sock:/var/run/docker.sock \ 15 | -v /usr/bin/docker:/usr/bin/docker -v /usr/lib/libdevmapper.so.1.02:/usr/lib/libdevmapper.so.1.02 \ 16 | quay.io/rimusz/dbuilder:latest' 17 | ExecStop=/bin/sh -c '/usr/bin/docker stop docker-builder' 18 | ExecStopPost=-/bin/sh -c '/usr/bin/docker rm docker-builder' 19 | 20 | Restart=always 21 | RestartSec=10s 22 | 23 | [X-Fleet] 24 | MachineMetadata=cpeer=tsc-reg-cbuilder1 25 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/rsync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | exec /usr/bin/rsync --daemon 4 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/rsyncd.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rimusz/coreos-essentials-book/7d95976fd4feb2a99957eecd68fb5d28298ba88b/Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/rsyncd.conf -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/ssh/config: -------------------------------------------------------------------------------- 1 | StrictHostKeyChecking no 2 | UserKnownHostsFile=/dev/null 3 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/ssh/id_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAqycmxmAIkhvWdhE25aS4pg7DKGYJBkch2pnnJfV+F1cLYM1e 3 | ipd+wYBDtjIaeA9yuPMKp+a0QTRE1elDWTxeNnqYJBCgRRX32SMC2faaSTK9Ck9t 4 | fvNoz19OfY+TjLM+ai7+1MwqHAG/61C5OUbnZIeDV03F5cvcmx1iE0id4S3m2W5Q 5 | 
T7YORYv0Ru1pzj/g7yzRiV7fPcSL0kRyQa0OM9s2hCiAgAS1OSsuml+DKiX/vTVN 6 | eXAuzYBYS/iboYa5tleul8TdmqYpGFxMozB42hpCb2D1Uo2KZcnXNFnWW/vo4wKh 7 | hm9AGUK8E/JHGRJOaXhwuSz8mkZk7l6kJBZfSQIDAQABAoIBAH2KJzflIwRA5QsP 8 | T2wcadBsDZwpU79GF7/nmZPPQSDPmSn1Gb6gpro6YfBXSfD8q9Kbexpy747QTxl0 9 | kxD2ZciiHcx+YYCrP3QG6UWFS+AbFmxknFU9J4jMSJ9y2/CdXU+3jvX5l32ZjvOE 10 | 74fiyUGE4ML96gXHhdZFNDYe0qdit601piOzybtR/xzGbYATnaM3QCUZaudihmUv 11 | +J5XSvhBbNJQ2SIK4I2muLCiWg0fniqld9RUBKE/j9ZjVrVey0vW1Oc10uiycbQ7 12 | gsKHaVNtxcR+wY0v8MuBBYr/c5GF6sO7gbaxt6h7acgPNRGF8ZHpw4J8QOmF7Qg1 13 | J6ue54ECgYEA2HDYJTIGPTm/TFfXRlJ+DopxmSOeNxaPx8O9+4P8nTfNxsLSyrxi 14 | AVeCpd2unvh/9a1aOhMGx/lRaKtTUfeBEyvxv6USeuABJDThwftK3vvzyZq0f7gm 15 | D7YCBzoq8pLrLqbd3rdwMN1Rd60MjdYEQVzrO5L0XJribK7iuvPkXDECgYEAym9P 16 | l/dnCAEUq3oA4hxbNQhQX652nfS5TTiA/YQVkJU5+m503I4vFBlYXOa+8mprcnSY 17 | Etz+lXp5mpMulTO0KCiqK3oZFiFZdveHIukRo1T7Y1SZVzt8aei6AjI7zmt5Iwf3 18 | +hHcRu6ZuHp37ZYCa3AsUcBBhY7Tw5mNbqqsJpkCgYBtzqpKDJwPudlswO/ttHJR 19 | rDk/yUs+hYrCfXNIREkH9kTGvxa0VmieZ6hXwCUUb9xzZtRsvDRnhfygTrNB1YC9 20 | mkDWyseaYUUS+mVCxdvRTx/6uysx1NoyhFN5GTCqypDYpT3Q/V3E3LO1NK021Epq 21 | YtrQ6Zhh1w5it0M+N/g2MQKBgBY7BRWeMK1bWQa/visWjBmV0SXAxfvwrQzC4L92 22 | atvW12geCy1RW/AsHF0PZfa/Iw+NqEmiBT8rFQGZmir+Y5Fpbc6aePjKLsX2xttZ 23 | lvtF0NmGp9RBjxceXJbzJ0gy2zmynVPT9EnB2BW/NT5dE+Tyi5tqrodZOP4o9bQq 24 | f/85AoGBAIPDPb5z8OHKI/PcLyL+h5tO737lsHpOzx/2KnIkRYgHOfHrrm0nBt4C 25 | rpXoWb16o7j4gHrdi5Xi3kB6QCtZhA3ISyIJbpIQ1PuWYfL2Cdr08x6mZcIz9+25 26 | +ftpy6w4H30pWzBa0Mq4xwqknf095VCjf96IIcR6f5zaI3xKQVMl 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/dockerfiles/dbuilder/ssh/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCrJybGYAiSG9Z2ETblpLimDsMoZgkGRyHamecl9X4XVwtgzV6Kl37BgEO2Mhp4D3K48wqn5rRBNETV6UNZPF42epgkEKBFFffZIwLZ9ppJMr0KT21+82jPX059j5OMsz5qLv7UzCocAb/rULk5Rudkh4NXTcXly9ybHWITSJ3hLebZblBPtg5Fi/RG7WnOP+DvLNGJXt89xIvSRHJBrQ4z2zaEKICABLU5Ky6aX4MqJf+9NU15cC7NgFhL+Juhhrm2V66XxN2apikYXEyjMHjaGkJvYPVSjYplydc0WdZb++jjAqGGb0AZQrwT8kcZEk5peHC5LPyaRmTuXqQkFl9J root@tsc-registry-cbuilder1-docker-builder 2 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/files/reg-dbuider1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh --zone="_ZONE_" "core@tsc-registry-cbuilder1" --ssh-flag="-A" 4 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/fleet/dbuilder.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=docker-builder 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | User=core 8 | TimeoutStartSec=0 9 | EnvironmentFile=/etc/environment 10 | ExecStartPre=-/bin/sh -c 'docker pull quay.io/rimusz/dbuilder:latest' 11 | ExecStartPre=-/bin/sh -c '/usr/bin/docker rm docker-builder' 12 | ExecStart=/bin/sh -c '/usr/bin/docker run --rm --name docker-builder --hostname="$(hostname | cut -d. 
-f1)-docker-builder" -p 2222:22 \ 13 | -v /home/core/.ssh/authorized_keys:/tmp/authorized_keys \ 14 | -v /home/core/data:/data -v /var/run/docker.sock:/var/run/docker.sock \ 15 | -v /usr/bin/docker:/usr/bin/docker -v /usr/lib/libdevmapper.so.1.02:/usr/lib/libdevmapper.so.1.02 \ 16 | quay.io/rimusz/dbuilder:latest' 17 | # 18 | ExecStop=/bin/sh -c '/usr/bin/docker stop docker-builder' 19 | ExecStopPost=-/bin/sh -c '/usr/bin/docker rm docker-builder' 20 | 21 | Restart=always 22 | RestartSec=10s 23 | 24 | [X-Fleet] 25 | MachineMetadata=cpeer=tsc-reg-cbuilder1 26 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/fleet/registry.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=docker-registry 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | User=core 8 | TimeoutStartSec=0 9 | EnvironmentFile=/etc/environment 10 | ExecStartPre=/usr/bin/docker pull registry:latest 11 | ExecStartPre=-/bin/bash -c '/usr/bin/docker rm registry' 12 | ExecStart=/usr/bin/docker run --rm --name registry -e GUNICORN_OPTS=[--preload] -e SETTINGS_FLAVOR=local \ 13 | -e SEARCH_BACKEND=sqlalchemy -p 5000:5000 \ 14 | -v /home/core/registry/registry:/tmp/registry registry 15 | # 16 | ExecStop=/bin/bash -c '/usr/bin/docker stop registry' 17 | ExecStopPost=-/bin/bash -c '/usr/bin/docker rm registry' 18 | # 19 | 20 | Restart=always 21 | RestartSec=10s 22 | 23 | [X-Fleet] 24 | MachineMetadata=cpeer=tsc-reg-cbuilder1 25 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/webserver/deploy_2_staging1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function pause(){ 4 | read -p "$*" 5 | } 6 | 7 | ## Fetch GC settings 8 | # project and zone 9 | project=$(cat ~/coreos-tsc-gce/settings | grep project= | head -1 | cut -f2 -d"=") 10 | 
zone=$(cat ~/coreos-tsc-gce/settings | grep zone= | head -1 | cut -f2 -d"=") 11 | 12 | # change folder permissions 13 | gcloud compute --project=$project ssh --zone=$zone "core@tsc-staging1" --command "sudo chmod -R 755 /home/core/share/" 14 | gcloud compute --project=$project ssh --zone=$zone "core@tsc-staging1" --command "sudo chown -R core /home/core/share/" 15 | 16 | echo "Deploying code to tsc-staging1 server !!!" 17 | gcloud compute copy-files staging1/index.html core@tsc-staging1:/home/core/share/nginx/html --zone $zone --project $project 18 | 19 | echo " " 20 | echo "Finished !!!" 21 | pause 'Press [Enter] key to continue...' 22 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/webserver/deploy_2_test1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function pause(){ 4 | read -p "$*" 5 | } 6 | 7 | ## Fetch GC settings 8 | # project and zone 9 | project=$(cat ~/coreos-tsc-gce/settings | grep project= | head -1 | cut -f2 -d"=") 10 | zone=$(cat ~/coreos-tsc-gce/settings | grep zone= | head -1 | cut -f2 -d"=") 11 | 12 | # change folder permissions 13 | gcloud compute --project=$project ssh --zone=$zone "core@tsc-test1" --command "sudo chmod -R 755 /home/core/share/" 14 | gcloud compute --project=$project ssh --zone=$zone "core@tsc-test1" --command "sudo chown -R core /home/core/share/" 15 | 16 | echo "Deploying code to tsc-test1 server !!!" 17 | gcloud compute copy-files test1/index.html core@tsc-test1:/home/core/share/nginx/html --zone $zone --project $project 18 | 19 | echo " " 20 | echo "Finished !!!" 21 | pause 'Press [Enter] key to continue...' 22 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/webserver/staging1/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
10 | "Hello, CoreOS" from Staging1 Development Environment11 | |
12 |
This is an example page for CoreOS Essentials Book Chapter 6.
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /Chapter6/Test_Staging_Cluster/webserver/test1/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 |
10 | "Hello, CoreOS" from Test1 Development Environment11 | |
12 |
This is an example page for CoreOS Essentials Book Chapter 6.
16 | 17 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/cloud-config/control1.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | name: prod-control1 6 | initial-advertise-peer-urls: http://10.220.1.1:2380 7 | initial-cluster-token: prod-control_etcd 8 | initial-cluster: prod-control1=http://10.220.1.1:2380 9 | initial-cluster-state: new 10 | listen-peer-urls: http://10.220.1.1:2380,http://10.220.1.1:7001 11 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 12 | advertise-client-urls: http://10.220.1.1:2379,http://10.220.1.1:4001 13 | fleet: 14 | metadata: "role=services,cpeer=prod-control1" 15 | units: 16 | - name: 00-ens4v1.network 17 | runtime: true 18 | content: | 19 | [Match] 20 | Name=ens4v1 21 | 22 | [Network] 23 | Address=10.220.1.1/24 24 | - name: etcd2.service 25 | command: start 26 | - name: fleet.service 27 | command: start 28 | - name: docker.service 29 | command: start 30 | drop-ins: 31 | - name: 50-insecure-registry.conf 32 | content: | 33 | [Unit] 34 | [Service] 35 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 36 | write_files: 37 | - path: /etc/resolv.conf 38 | permissions: 0644 39 | owner: root 40 | content: | 41 | nameserver 169.254.169.254 42 | nameserver 10.240.0.1 43 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/cloud-config/web1.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | initial-cluster: prod-control1=http://10.220.1.1:2380 8 | proxy: on 9 | fleet: 10 | public-ip: $public_ipv4 11 | metadata: "role=worker,cpeer=prod-web1,service=website1" 12 | units: 13 | - name: 
etcd2.service 14 | command: start 15 | - name: fleet.service 16 | command: start 17 | - name: docker.service 18 | command: start 19 | drop-ins: 20 | - name: 50-insecure-registry.conf 21 | content: | 22 | [Unit] 23 | [Service] 24 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 25 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/cloud-config/web2.yaml: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | coreos: 4 | etcd2: 5 | listen-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 6 | advertise-client-urls: http://0.0.0.0:2379,http://0.0.0.0:4001 7 | initial-cluster: prod-control1=http://10.220.1.1:2380 8 | proxy: on 9 | fleet: 10 | public-ip: $public_ipv4 11 | metadata: "role=worker,cpeer=prod-web2,service=website1" 12 | units: 13 | - name: etcd2.service 14 | command: start 15 | - name: fleet.service 16 | command: start 17 | - name: docker.service 18 | command: start 19 | drop-ins: 20 | - name: 50-insecure-registry.conf 21 | content: | 22 | [Unit] 23 | [Service] 24 | Environment=DOCKER_OPTS='--insecure-registry="0.0.0.0/0"' 25 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/create_cluster_control.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create Production cluster control 3 | 4 | # Update required settings in "settings" file before running this script 5 | 6 | function pause(){ 7 | read -p "$*" 8 | } 9 | 10 | ## Fetch GC settings 11 | # project and zone 12 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 13 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 14 | # CoreOS release channel 15 | channel=$(cat settings | grep channel= | head -1 | cut -f2 -d"=") 16 | # control instance type 17 | control_machine_type=$(cat settings | grep control_machine_type= | head -1 | cut -f2 -d"=") 
18 | # get the latest full image name 19 | image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'}) 20 | ## 21 | 22 | # create an instance 23 | gcloud compute instances create prod-control1 --project=$project --image=$image \ 24 | --image-project=coreos-cloud --boot-disk-size=10 --zone=$zone \ 25 | --machine-type=$control_machine_type --metadata-from-file \ 26 | user-data=cloud-config/control1.yaml \ 27 | --can-ip-forward --tags=prod-control1,prod 28 | 29 | # create a static IP for the new instance 30 | gcloud compute routes create ip-10-220-1-1-prod-control1 --project=$project \ 31 | --next-hop-instance prod-control1 \ 32 | --next-hop-instance-zone $zone \ 33 | --destination-range 10.220.1.1/32 34 | 35 | echo " " 36 | echo "Setup has finished !!!" 37 | pause 'Press [Enter] key to continue...' 38 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/create_cluster_workers.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create Production cluster workers 3 | 4 | # Update required settings in "settings" file before running this script 5 | 6 | function pause(){ 7 | read -p "$*" 8 | } 9 | 10 | ## Fetch GC settings 11 | # project and zone 12 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 13 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 14 | # CoreOS release channel 15 | channel=$(cat settings | grep channel= | head -1 | cut -f2 -d"=") 16 | # worker instance type 17 | worker_machine_type=$(cat settings | grep worker_machine_type= | head -1 | cut -f2 -d"=") 18 | # get the latest full image name 19 | image=$(gcloud compute images list --project=$project | grep -v grep | grep coreos-$channel | awk {'print $1'}) 20 | ## 21 | 22 | # create web1 instance 23 | gcloud compute instances create prod-web1 --project=$project --image=$image \ 24 | --image-project=coreos-cloud 
--boot-disk-size=20 --zone=$zone \ 25 | --machine-type=$worker_machine_type --metadata-from-file \ 26 | user-data=cloud-config/web1.yaml --can-ip-forward --tags prod-web1,prod 27 | 28 | # create a static IP for the new web1 instance 29 | gcloud compute routes create ip-10-220-2-1-prod-web1 --project=$project \ 30 | --next-hop-instance prod-web1 \ 31 | --next-hop-instance-zone $zone \ 32 | --destination-range 10.220.2.1/32 33 | 34 | # create web2 instance 35 | gcloud compute instances create prod-web2 --project=$project --image=$image \ 36 | --image-project=coreos-cloud --boot-disk-size=20 --zone=$zone \ 37 | --machine-type=$worker_machine_type --metadata-from-file \ 38 | user-data=cloud-config/web2.yaml --can-ip-forward --tags=prod-web2,prod 39 | 40 | # create a static IP for the new web2 instance 41 | gcloud compute routes create ip-10-220-3-1-prod-web2 --project=$project \ 42 | --next-hop-instance prod-web2 \ 43 | --next-hop-instance-zone $zone \ 44 | --destination-range 10.220.3.1/32 45 | 46 | echo " " 47 | echo "Setup has finished !!!" 48 | pause 'Press [Enter] key to continue...' 
49 | 50 | 51 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/files/control1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh --zone="_ZONE_" "core@prod-control1" --ssh-flag="-A" 4 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/files/etcdctl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 3 | 4 | ssh core@control_ip etcdctl $1 $2 $3 $4 $5 $6 $7 $8 5 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/files/set_cluster_access.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Setup Client SSH Tunnels 4 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 5 | 6 | # SET 7 | # path to the cluster folder where we store our binary files 8 | export PATH=${HOME}/coreos-prod-gce/bin:$PATH 9 | # fleet tunnel 10 | export FLEETCTL_TUNNEL=control_ip 11 | export FLEETCTL_STRICT_HOST_KEY_CHECKING=false 12 | 13 | echo "list fleet machines:" 14 | fleetctl list-machines 15 | 16 | echo "list fleet units:" 17 | fleetctl list-units 18 | 19 | /bin/bash 20 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/files/web1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh --zone="_ZONE_" "core@prod-web1" --ssh-flag="-A" 4 | 5 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/files/web2.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | gcloud compute --project="_PROJECT_" ssh 
--zone="_ZONE_" "core@prod-web2" --ssh-flag="-A" 4 | 5 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/fleet/website1.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=prod-website1 3 | 4 | [Service] 5 | User=core 6 | TimeoutStartSec=0 7 | EnvironmentFile=/etc/environment 8 | ExecStartPre=-/usr/bin/docker rm prod-website1 9 | ExecStart=/usr/bin/docker run --rm --name prod-website1 -p 80:80 10.200.4.1:5000/website1:latest 10 | # 11 | ExecStop=/usr/bin/docker stop prod-website1 12 | ExecStopPost=-/usr/bin/docker rm prod-website1 13 | 14 | Restart=always 15 | RestartSec=10s 16 | 17 | [X-Fleet] 18 | Global=true 19 | MachineMetadata=service=website1 20 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/install_fleetctl_and_scripts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # install_fleet_etcd_clients.sh 4 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 5 | 6 | function pause(){ 7 | read -p "$*" 8 | } 9 | 10 | ## Fetch GC settings 11 | # project and zone 12 | project=$(cat settings | grep project= | head -1 | cut -f2 -d"=") 13 | zone=$(cat settings | grep zone= | head -1 | cut -f2 -d"=") 14 | # get prod-control1 server's external IP 15 | control_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep -m 1 prod-control1 | awk {'print $5'}) 16 | # get prod-web1 server's external IP 17 | web1_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep -m 1 prod-web1 | awk {'print $5'}) 18 | # get prod-web2 server's external IP 19 | web2_ip=$(gcloud compute instances list --project=$project | grep -v grep | grep -m 1 prod-web2 | awk {'print $5'}) 20 | ## 21 | 22 | # create main folder and a few subfolders 23 | mkdir -p ~/coreos-prod-gce/bin 24 | mkdir -p ~/coreos-prod-gce/fleet 25 | 
26 | # copy settings file 27 | cp -f settings ~/coreos-prod-gce/ 28 | 29 | echo "Install etcdctl, ssh shell and cluster access scripts" 30 | cp -f files/* ~/coreos-prod-gce/bin/ 31 | cp -f fleet/* ~/coreos-prod-gce/fleet/ 32 | cp -f scripts/* ~/coreos-prod-gce/ 33 | 34 | # set control IP 35 | sed -i "" "s/control_ip/$control_ip/" ~/coreos-prod-gce/bin/etcdctl 36 | sed -i "" "s/control_ip/$control_ip/" ~/coreos-prod-gce/bin/set_cluster_access.sh 37 | # set zone 38 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-prod-gce/bin/control1.sh 39 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-prod-gce/bin/web1.sh 40 | sed -i "" "s/_ZONE_/$zone/" ~/coreos-prod-gce/bin/web2.sh 41 | # set project 42 | sed -i "" "s/_PROJECT_/$project/" ~/coreos-prod-gce/bin/control1.sh 43 | sed -i "" "s/_PROJECT_/$project/" ~/coreos-prod-gce/bin/web1.sh 44 | sed -i "" "s/_PROJECT_/$project/" ~/coreos-prod-gce/bin/web2.sh 45 | # make files executables 46 | chmod 755 ~/coreos-prod-gce/bin/* 47 | 48 | # download fleetctl client 49 | # First let's check which OS we use: OS X or Linux 50 | uname=$(uname) 51 | 52 | # check remote fleet version 53 | FLEET_RELEASE=$(gcloud compute --project=$project ssh --zone=$zone "core@prod-control1" --command "fleetctl version | cut -d ' ' -f 3- | tr -d '\r' ") 54 | cd ~/coreos-prod-gce/bin 55 | 56 | if [[ "${uname}" == "Darwin" ]] 57 | then 58 | # OS X 59 | echo "Downloading fleetctl v$FLEET_RELEASE for OS X" 60 | curl -L -o fleet.zip "https://github.com/coreos/fleet/releases/download/v$FLEET_RELEASE/fleet-v$FLEET_RELEASE-darwin-amd64.zip" 61 | unzip -j -o "fleet.zip" "fleet-v$FLEET_RELEASE-darwin-amd64/fleetctl" 62 | rm -f fleet.zip 63 | # Make them executable 64 | chmod +x ~/coreos-prod-gce/bin/* 65 | # 66 | else 67 | # Linux 68 | echo "Downloading fleetctl v$FLEET_RELEASE for Linux" 69 | wget "https://github.com/coreos/fleet/releases/download/v$FLEET_RELEASE/fleet-v$FLEET_RELEASE-linux-amd64.tar.gz" 70 | tar -zxvf fleet-v$FLEET_RELEASE-linux-amd64.tar.gz 
fleet-v$FLEET_RELEASE-linux-amd64/fleetctl --strip 1 71 | rm -f fleet-v$FLEET_RELEASE-linux-amd64.tar.gz 72 | # Make them executable 73 | chmod +x ~/coreos-prod-gce/bin/* 74 | # 75 | fi 76 | # 77 | cd ~/coreos-prod-gce 78 | 79 | echo " " 80 | echo "Install has finished !!!" 81 | pause 'Press [Enter] key to continue...' 82 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/scripts/deploy_2_production_website1.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Build docker container for website1 3 | # and release it 4 | 5 | ssh-add ~/.ssh/google_compute_engine &>/dev/null 6 | 7 | function pause(){ 8 | read -p "$*" 9 | } 10 | 11 | # Test/Staging cluster 12 | ## Fetch GC settings 13 | # project and zone 14 | project=$(cat ~/coreos-tsc-gce/settings | grep project= | head -1 | cut -f2 -d"=") 15 | zone=$(cat ~/coreos-tsc-gce/settings | grep zone= | head -1 | cut -f2 -d"=") 16 | cbuilder1=$(gcloud compute instances list --project=$project | grep -v grep | grep tsc-registry-cbuilder1 | awk {'print $5'}) 17 | 18 | # create a folder on docker builder 19 | echo "Entering dbuilder docker container" 20 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$cbuilder1 "/usr/bin/docker exec docker-builder /bin/bash -c 'sudo mkdir -p /data/website1 && sudo chmod -R 777 /data/website1'" 21 | 22 | # sync files from staging to docker builder 23 | echo "Deploying code to docker builder server !!!" 
24 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$cbuilder1 '/usr/bin/docker exec docker-builder rsync -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" -avzW --delete core@10.200.3.1:/home/core/share/nginx/html/ /data/website1' 25 | # change folder permisions to 755 26 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$cbuilder1 "/usr/bin/docker exec docker-builder /bin/bash -c 'sudo chmod -R 755 /data/website1'" 27 | 28 | echo "Build new docker image and push to registry!!!" 29 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$cbuilder1 "/usr/bin/docker exec docker-builder /bin/bash -c 'cd /data && ./build.sh && ./push.sh'" 30 | ## 31 | 32 | # Production cluster 33 | ## Fetch GC settings 34 | # project and zone 35 | project2=$(cat ~/coreos-prod-gce/settings | grep project= | head -1 | cut -f2 -d"=") 36 | 37 | # Get servers IPs 38 | control1=$(gcloud compute instances list --project=$project2 | grep -v grep | grep prod-control1 | awk {'print $5'}) 39 | web1=$(gcloud compute instances list --project=$project2 | grep -v grep | grep prod-web1 | awk {'print $5'}) 40 | web2=$(gcloud compute instances list --project=$project2 | grep -v grep | grep prod-web2 | awk {'print $5'}) 41 | 42 | echo "Pull new docker image on web1" 43 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$web1 docker pull 10.200.4.1:5000/website1 44 | echo "Pull new docker image on web2" 45 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$web2 docker pull 10.200.4.1:5000/website1 46 | 47 | echo "Restart fleet unit" 48 | # restart fleet unit 49 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$control1 fleetctl stop website1.service 50 | ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$control1 fleetctl start website1.service 51 | # 52 | sleep 5 53 | echo " " 54 | echo "List Production cluster fleet units" 55 | ssh -o 
UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no core@$control1 fleetctl list-units 56 | 57 | echo " " 58 | echo "Finished !!!" 59 | pause 'Press [Enter] key to continue...' 60 | 61 | -------------------------------------------------------------------------------- /Chapter7/Production_Cluster/settings: -------------------------------------------------------------------------------- 1 | ### CoreOS Production Cluster on GCE settings 2 | 3 | ## change Google Cloud settings as per your requirements 4 | # GC settings 5 | 6 | # CoreOS RELEASE CHANNEL 7 | channel=beta 8 | 9 | # SET YOUR PROJECT AND ZONE !!! 10 | project=my-cloud-project 11 | zone=europe-west1-c 12 | 13 | # ETCD CONTROL AND NODES MACHINES TYPE 14 | # 15 | control_machine_type=g1-small 16 | # 17 | worker_machine_type=n1-standard-1 18 | ## 19 | 20 | ### 21 | -------------------------------------------------------------------------------- /Chapter7/Test_Staging_Cluster/files/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:latest 2 | 3 | ## add website code 4 | ADD website1 /usr/share/nginx/html 5 | 6 | EXPOSE 80 7 | -------------------------------------------------------------------------------- /Chapter7/Test_Staging_Cluster/files/build.sh: -------------------------------------------------------------------------------- 1 | docker build --rm -t 10.200.4.1:5000/website1 . 
2 | -------------------------------------------------------------------------------- /Chapter7/Test_Staging_Cluster/files/push.sh: -------------------------------------------------------------------------------- 1 | docker push 10.200.4.1:5000/website1 2 | -------------------------------------------------------------------------------- /Chapter7/Test_Staging_Cluster/install_website1_2_dbuilder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function pause(){ 4 | read -p "$*" 5 | } 6 | 7 | ## Fetch GC settings 8 | # project and zone 9 | project=$(cat ~/coreos-tsc-gce/settings | grep project= | head -1 | cut -f2 -d"=") 10 | zone=$(cat ~/coreos-tsc-gce/settings | grep zone= | head -1 | cut -f2 -d"=") 11 | 12 | echo "Deploy docker image building script to tsc-registry-cbuilder1 server !!!" 13 | gcloud compute ssh --project=$project --zone=$zone "core@tsc-registry-cbuilder1" --command "sudo chmod o+w /home/core/data" 14 | 15 | gcloud compute copy-files files/* core@tsc-registry-cbuilder1:/home/core/data --zone $zone --project $project 16 | gcloud compute ssh --project=$project --zone=$zone "core@tsc-registry-cbuilder1" --command "sudo chmod 755 /home/core/data/*.sh" 17 | 18 | echo " " 19 | echo "Finished !!!" 20 | pause 'Press [Enter] key to continue...' 21 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## [Code/Examples for CoreOS Essentials Book](https://www.packtpub.com/networking-and-servers/coreos-essentials) 2 | 3 | ### You can order the book: 4 | * On [Packt Publishing](https://www.packtpub.com/networking-and-servers/coreos-essentials) 5 | * On [Amazon](http://www.amazon.co.uk/CoreOS-Essentials-Rimantas-Mocevicius/dp/1785283944/ref=sr_1_1?ie=UTF8&qid=1431298369&sr=8-1&keywords=rimantas+mocevicius) 6 | --------------------------------------------------------------------------------