├── conf ├── network │ ├── flannel.json │ └── 00_flannel.sh ├── supervisord │ ├── 00_default.conf │ └── 01_minion.conf ├── cluster │ ├── 01_k8s_master_add_bridge.sh │ ├── 03_minion_init.sh │ ├── 00_etcd-cluster.sh │ ├── 02_minion.sh │ └── 01_k8s.sh ├── yaml │ ├── nginx.yaml │ ├── dashboard.yaml │ └── skydns.yaml └── docker │ └── default_docker ├── 01_kubernetes-client ├── 00_kubernetes-etcd ├── 02_kubernetes-master ├── 03_kubernetes-minion ├── README.md └── docker-kubernetes.sh /conf/network/flannel.json: -------------------------------------------------------------------------------- 1 | { 2 | "Network": "10.250.0.0/16", 3 | "Backend": { 4 | "Type": "vxlan" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /conf/supervisord/00_default.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | 4 | [program:ssh] 5 | command=service ssh start 6 | autorestart=true -------------------------------------------------------------------------------- /conf/supervisord/01_minion.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | nodaemon=true 3 | 4 | [program:ssh] 5 | command=service ssh start 6 | autorestart=true 7 | 8 | [program:wrapdocker] 9 | command=/bin/wrapdocker 10 | autorestart=false 11 | -------------------------------------------------------------------------------- /conf/cluster/01_k8s_master_add_bridge.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | BRCTL="$(which brctl)" 3 | BRIDGE_IFACE="br0" 4 | FLANNEL_CIDR="$(ip a s flannel.1 | grep -v 'inet6' | grep 'inet' | cut -d ':' -f 2 | awk '{ print $2}' | sed 's/0\/16/1\/24/g')" 5 | 6 | $BRCTL addbr $BRIDGE_IFACE && sleep 2 7 | ip addr add $FLANNEL_CIDR dev $BRIDGE_IFACE && sleep 2 8 | ip link set dev $BRIDGE_IFACE up && sleep 2 9 | 
-------------------------------------------------------------------------------- /conf/yaml/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-svc 5 | labels: 6 | app: nginx 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 80 11 | protocol: TCP 12 | name: http 13 | --- 14 | apiVersion: v1 15 | kind: ReplicationController 16 | metadata: 17 | name: nginxs 18 | spec: 19 | replicas: 20 20 | template: 21 | metadata: 22 | labels: 23 | app: nginx 24 | spec: 25 | containers: 26 | - name: nginx-multi 27 | image: ruo91/nginx:latest 28 | ports: 29 | - containerPort: 80 30 | -------------------------------------------------------------------------------- /conf/cluster/03_minion_init.sh: -------------------------------------------------------------------------------- 1 | #------------------------------------------------# 2 | # Initialization a minion script 3 | # Maintainer: Yongbok Kim (ruo91@yongbok.net) 4 | #------------------------------------------------# 5 | #!/bin/bash 6 | 7 | # Binary 8 | FLANNEL="/bin/flannel.sh" 9 | 10 | # PID 11 | DOCKER_PID="$(ps -e | grep 'docker' | awk '{ printf $1 "\n" }')" 12 | 13 | # kill docker 14 | echo "Kill Docker..." && sleep 1 15 | kill -9 $DOCKER_PID 16 | echo "done" && sleep 2 17 | 18 | # Flannel 19 | $FLANNEL flannel start && sleep 2 20 | 21 | # Delete docker bridge 22 | ip link set dev docker0 down && sleep 2 23 | brctl delbr docker0 && sleep 2 24 | 25 | # Start docker 26 | service docker start 27 | -------------------------------------------------------------------------------- /conf/docker/default_docker: -------------------------------------------------------------------------------- 1 | # Docker Upstart and SysVinit configuration file 2 | 3 | # Customize location of Docker binary (especially for development testing). 4 | #DOCKER="/usr/local/bin/docker" 5 | 6 | # Use DOCKER_OPTS to modify the daemon startup options. 
7 | DOCKER_BIP="$(ip a s flannel.1 | grep -v 'inet6' | grep 'inet' | cut -d ':' -f 2 | awk '{ print $2}' | sed 's/0\/16/1\/24/g')" 8 | DOCKER_OPTS="--bip=$DOCKER_BIP --dns 8.8.8.8 --dns 8.8.4.4" 9 | 10 | # If you need Docker to use an HTTP proxy, it can also be specified here. 11 | #export http_proxy="http://127.0.0.1:3128/" 12 | 13 | # This is also a handy place to tweak where Docker's temporary files go. 14 | #export TMPDIR="/mnt/bigdrive/docker-tmp" 15 | -------------------------------------------------------------------------------- /conf/yaml/dashboard.yaml: -------------------------------------------------------------------------------- 1 | # Service 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kubernetes-dashboard 6 | namespace: kube-system 7 | labels: 8 | k8s-app: kubernetes-dashboard 9 | kubernetes.io/cluster-service: "true" 10 | spec: 11 | selector: 12 | k8s-app: kubernetes-dashboard 13 | ports: 14 | - port: 80 15 | targetPort: 9090 16 | --- 17 | # Replication Controller 18 | apiVersion: v1 19 | kind: ReplicationController 20 | metadata: 21 | # Keep the name in sync with image version and 22 | # gce/coreos/kube-manifests/addons/dashboard counterparts 23 | name: kubernetes-dashboard-v1.0.1 24 | namespace: kube-system 25 | labels: 26 | k8s-app: kubernetes-dashboard 27 | version: v1.0.1 28 | kubernetes.io/cluster-service: "true" 29 | spec: 30 | replicas: 1 31 | selector: 32 | k8s-app: kubernetes-dashboard 33 | template: 34 | metadata: 35 | labels: 36 | k8s-app: kubernetes-dashboard 37 | version: v1.0.1 38 | kubernetes.io/cluster-service: "true" 39 | spec: 40 | containers: 41 | - name: kubernetes-dashboard 42 | image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.0.1 43 | resources: 44 | # keep request = limit to keep this container in guaranteed class 45 | limits: 46 | cpu: 100m 47 | memory: 50Mi 48 | requests: 49 | cpu: 100m 50 | memory: 50Mi 51 | ports: 52 | - containerPort: 9090 53 | args: 54 | - 
--apiserver-host=http://172.17.1.4:8080 55 | livenessProbe: 56 | httpGet: 57 | path: / 58 | port: 9090 59 | initialDelaySeconds: 30 60 | timeoutSeconds: 30 61 | -------------------------------------------------------------------------------- /01_kubernetes-client: -------------------------------------------------------------------------------- 1 | # 2 | # Dockerfile - Google Kubernetes 3 | # 4 | # - Build 5 | # docker build --rm -t kubernetes:client -f 01_kubernetes-client . 6 | # 7 | # - Run 8 | # docker run -d --name="kubernetes-client" -h "kubernetes-client" kubernetes:client 9 | # 10 | # - SSH 11 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' kubernetes-client` 12 | 13 | # Use the base images 14 | FROM ubuntu:16.04 15 | MAINTAINER Yongbok Kim 16 | 17 | # Change the repository 18 | RUN sed -i 's/archive.ubuntu.com/ftp.daumkakao.com/g' /etc/apt/sources.list 19 | 20 | # The last update and install package for docker 21 | RUN apt-get update && apt-get install -y supervisor openssh-server nano net-tools iputils-ping 22 | 23 | # Variable 24 | ENV SRC_DIR /opt 25 | WORKDIR $SRC_DIR 26 | 27 | # Google - Kubernetes 28 | ENV KUBERNETES_HOME $SRC_DIR/kubernetes 29 | ENV PATH $PATH:$KUBERNETES_HOME/client/bin 30 | ADD kubernetes-client-linux-amd64.tar.gz $SRC_DIR 31 | ADD conf/yaml/nginx.yaml $SRC_DIR/nginx.yaml 32 | ADD conf/yaml/skydns.yaml $SRC_DIR/skydns.yaml 33 | ADD conf/yaml/dashboard.yaml $SRC_DIR/dashboard.yaml 34 | RUN echo '# Kubernetes' >> /etc/profile \ 35 | && echo "export KUBERNETES_HOME=$KUBERNETES_HOME" >> /etc/profile \ 36 | && echo 'export PATH=$PATH:$KUBERNETES_HOME/client/bin' >> /etc/profile \ 37 | && echo '' >> /etc/profile 38 | 39 | # Supervisor 40 | RUN mkdir -p /var/log/supervisor 41 | ADD conf/supervisord/00_default.conf /etc/supervisor/conf.d/supervisord.conf 42 | 43 | # SSH 44 | RUN mkdir /var/run/sshd 45 | RUN sed -i '/^#UseLogin/ s:.*:UseLogin yes:' /etc/ssh/sshd_config 46 | RUN sed -i 
's/\#AuthorizedKeysFile/AuthorizedKeysFile/g' /etc/ssh/sshd_config 47 | RUN sed -i '/^PermitRootLogin/ s:.*:PermitRootLogin yes:' /etc/ssh/sshd_config 48 | 49 | # Set the root password for ssh 50 | RUN echo 'root:kubernetes' |chpasswd 51 | 52 | # Port 53 | EXPOSE 22 54 | 55 | # Daemon 56 | CMD ["/usr/bin/supervisord"] 57 | -------------------------------------------------------------------------------- /00_kubernetes-etcd: -------------------------------------------------------------------------------- 1 | # 2 | # Dockerfile - Google Kubernetes: etcd cluster 3 | # 4 | # - Build 5 | # docker build --rm -t kubernetes:etcd -f 00_kubernetes-etcd . 6 | # 7 | # - Run 8 | # docker run -d --name="etcd-cluster-0" -h "etcd-cluster-0" kubernetes:etcd 9 | # docker run -d --name="etcd-cluster-1" -h "etcd-cluster-1" kubernetes:etcd 10 | # docker run -d --name="etcd-cluster-2" -h "etcd-cluster-2" kubernetes:etcd 11 | # 12 | # - SSH 13 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' etcd-cluster-0` 14 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' etcd-cluster-1` 15 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' etcd-cluster-2` 16 | 17 | # Use the base images 18 | FROM ubuntu:16.04 19 | MAINTAINER Yongbok Kim 20 | 21 | # Change the repository 22 | RUN sed -i 's/archive.ubuntu.com/ftp.daumkakao.com/g' /etc/apt/sources.list 23 | 24 | # The last update and install package for docker 25 | RUN apt-get update && apt-get install -y supervisor openssh-server git-core curl nano build-essential 26 | 27 | # Variable 28 | ENV SRC_DIR /opt 29 | WORKDIR $SRC_DIR 30 | 31 | # GO Language 32 | ENV GO_ARCH linux-amd64 33 | ENV GOROOT $SRC_DIR/go 34 | ENV PATH $PATH:$GOROOT/bin 35 | RUN curl -XGET https://github.com/golang/go/tags | grep tag-name > /tmp/golang_tag \ 36 | && sed -e 's/<[^>]*>//g' /tmp/golang_tag > /tmp/golang_ver \ 37 | && GO_VER=`sed -e 's/ go/go/g' /tmp/golang_ver | head -n 1` && rm -f /tmp/golang_* \ 38 | && curl -LO 
"https://storage.googleapis.com/golang/$GO_VER.$GO_ARCH.tar.gz" \ 39 | && tar -C $SRC_DIR -xzf go*.tar.gz && rm -rf go*.tar.gz \ 40 | && echo '' >> /etc/profile \ 41 | && echo '# Golang' >> /etc/profile \ 42 | && echo "export GOROOT=$GOROOT" >> /etc/profile \ 43 | && echo 'export PATH=$PATH:$GOROOT/bin' >> /etc/profile \ 44 | && echo '' >> /etc/profile 45 | 46 | # etcd 47 | ENV ETCD $SRC_DIR/etcd 48 | ENV PATH $PATH:$ETCD 49 | ENV ETCD_RELEASE_VER release-2.3 50 | RUN git clone https://github.com/coreos/etcd $SRC_DIR/etcd-source \ 51 | && cd $SRC_DIR/etcd-source \ 52 | && git checkout $ETCD_RELEASE_VER \ 53 | && ./build && mv bin $ETCD \ 54 | && cd $SRC_DIR && rm -rf $SRC_DIR/etcd-source \ 55 | && echo '# etcd' >> /etc/profile \ 56 | && echo "export ETCD=$ETCD" >> /etc/profile \ 57 | && echo 'export PATH=$PATH:$ETCD' >> /etc/profile \ 58 | && echo '' >> /etc/profile 59 | 60 | # etcd cluster scripts 61 | ADD conf/cluster/00_etcd-cluster.sh /bin/etcd-cluster.sh 62 | RUN chmod a+x /bin/etcd-cluster.sh 63 | 64 | # Supervisor 65 | RUN mkdir -p /var/log/supervisor 66 | ADD conf/supervisord/00_default.conf /etc/supervisor/conf.d/supervisord.conf 67 | 68 | # SSH 69 | RUN mkdir /var/run/sshd 70 | RUN sed -i '/^#UseLogin/ s:.*:UseLogin yes:' /etc/ssh/sshd_config 71 | RUN sed -i 's/\#AuthorizedKeysFile/AuthorizedKeysFile/g' /etc/ssh/sshd_config 72 | RUN sed -i '/^PermitRootLogin/ s:.*:PermitRootLogin yes:' /etc/ssh/sshd_config 73 | 74 | # Set the root password for ssh 75 | RUN echo 'root:kubernetes' |chpasswd 76 | 77 | # Port 78 | EXPOSE 22 2379 2380 4001 79 | 80 | # Daemon 81 | CMD ["/usr/bin/supervisord"] 82 | -------------------------------------------------------------------------------- /conf/network/00_flannel.sh: -------------------------------------------------------------------------------- 1 | #------------------------------------------------# 2 | # Flannel start script 3 | # Maintainer: Yongbok Kim (ruo91@yongbok.net) 4 | 
#------------------------------------------------# 5 | #!/bin/bash 6 | ### Global ### 7 | # Flannel 8 | FLANNEL_HOME=/opt/flannel 9 | PATH=$PATH:$FLANNEL_HOME/bin 10 | 11 | # Flannel options 12 | IFACE="eth1" 13 | ETCD_PORT="4001" 14 | ETCD_PREFIX="/overlay/network" 15 | ETCD_SERVER="http://172.17.1.1:$ETCD_PORT,http://172.17.1.2:$ETCD_PORT,http://172.17.1.3:$ETCD_PORT" 16 | 17 | # Logs 18 | FLANNEL_LOGS="/tmp/flannel.log" 19 | FLANNELD_LOGS="/tmp/flanneld.log" 20 | 21 | # PID 22 | FLANNELD_PID="$(ps -e | grep 'flanneld' | awk '{ printf $1 "\n" }')" 23 | 24 | # Functions 25 | function f_flannel { 26 | echo "Start Flannel Server..." && sleep 1 27 | flanneld \ 28 | -iface=$IFACE \ 29 | -log_dir="$FLANNEL_LOGS" \ 30 | -etcd-prefix="$ETCD_PREFIX" \ 31 | -etcd-endpoints="$ETCD_SERVER" \ 32 | --v=0 > $FLANNELD_LOGS 2>&1 & 33 | echo "done" 34 | } 35 | 36 | # Function of manual 37 | function f_apiserver_manual { 38 | echo -ne "\033[33m- Interface \033[0m \n" 39 | echo -ne "\033[33m- ex) $IFACE \033[0m \n" 40 | echo -ne "\033[33m- Input: \033[0m" 41 | read IFACE 42 | echo 43 | 44 | echo -ne "\033[33m- ETCD Prefix \033[0m \n" 45 | echo -ne "\033[33m- ex) $ETCD_PREFIX \033[0m \n" 46 | echo -ne "\033[33m- Input: \033[0m" 47 | read ETCD_PREFIX 48 | echo 49 | 50 | echo -ne "\033[33m- ETCD Server \033[0m \n" 51 | echo -ne "\033[33m- ex)$ETCD_SERVER \033[0m \n" 52 | echo -ne "\033[33m- Input: \033[0m" 53 | read ETCD_SERVER 54 | echo 55 | 56 | echo "Start Flannel Server..." && sleep 1 57 | flanneld \ 58 | -iface=$IFACE \ 59 | -log_dir="$FLANNEL_LOGS" \ 60 | -etcd-prefix="$ETCD_PREFIX" \ 61 | -etcd-endpoints="$ETCD_SERVER" \ 62 | --v=0 > $FLANNELD_LOGS 2>&1 & 63 | echo "done" 64 | } 65 | 66 | function f_kill_of_process { 67 | if [[ "$ARG_2" == "f" || "$ARG_2" == "flannel" ]]; then 68 | echo "Kill of Flanneld..." 
&& sleep 1 69 | kill -9 $FLANNELD_PID 70 | echo "done" 71 | 72 | else 73 | echo "Not found PIDs" 74 | fi 75 | } 76 | 77 | function f_help { 78 | echo "Usage: $ARG_0 [Options] [Arguments]" 79 | echo 80 | echo "- Options" 81 | echo "f, flannel : Flannel" 82 | echo "k, kill : kill of process" 83 | echo 84 | echo "- Arguments" 85 | echo "s, start : Start commands" 86 | echo "m, manual : Manual commands" 87 | echo 88 | echo "f, flannel : kill of flannel (k or kill option only.)" 89 | echo " ex) $ARG_0 k f or $ARG_0 kill flannel" 90 | echo 91 | } 92 | 93 | # Main 94 | ARG_0="$0" 95 | ARG_1="$1" 96 | ARG_2="$2" 97 | 98 | case ${ARG_1} in 99 | f|flannel) 100 | if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then 101 | f_flannel 102 | 103 | elif [[ "$ARG_2" == "m" || "$ARG_2" == "manual" ]]; then 104 | f_apiserver_manual 105 | 106 | else 107 | f_help 108 | fi 109 | ;; 110 | 111 | k|kill) 112 | f_kill_of_process 113 | ;; 114 | 115 | *) 116 | f_help 117 | ;; 118 | 119 | esac 120 | -------------------------------------------------------------------------------- /02_kubernetes-master: -------------------------------------------------------------------------------- 1 | # 2 | # Dockerfile - Google Kubernetes 3 | # 4 | # - Build 5 | # docker build --rm -t kubernetes:master -f 02_kubernetes-master . 
6 | # 7 | # - Run 8 | # docker run -d --name="kubernetes-master" -h "kubernetes-master" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules -p 8080:8080 kubernetes:master 9 | # 10 | # - SSH 11 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' kubernetes-master` 12 | 13 | # Use the base images 14 | FROM ubuntu:16.04 15 | MAINTAINER Yongbok Kim 16 | 17 | # Change the repository 18 | RUN sed -i 's/archive.ubuntu.com/ftp.daumkakao.com/g' /etc/apt/sources.list 19 | 20 | # The last update and install package for docker 21 | RUN apt-get update && apt-get install -y supervisor openssh-server nano curl git-core build-essential net-tools iputils-ping bridge-utils 22 | 23 | # Variable 24 | ENV SRC_DIR /opt 25 | WORKDIR $SRC_DIR 26 | 27 | # GO Language 28 | ENV GO_ARCH linux-amd64 29 | ENV GOROOT $SRC_DIR/go 30 | ENV PATH $PATH:$GOROOT/bin 31 | RUN curl -XGET https://github.com/golang/go/tags | grep tag-name > /tmp/golang_tag \ 32 | && sed -e 's/<[^>]*>//g' /tmp/golang_tag > /tmp/golang_ver \ 33 | && GO_VER=`sed -e 's/ go/go/g' /tmp/golang_ver | head -n 1` && rm -f /tmp/golang_* \ 34 | && curl -LO "https://storage.googleapis.com/golang/$GO_VER.$GO_ARCH.tar.gz" \ 35 | && tar -C $SRC_DIR -xzf go*.tar.gz && rm -rf go*.tar.gz \ 36 | && echo '' >> /etc/profile \ 37 | && echo '# Golang' >> /etc/profile \ 38 | && echo "export GOROOT=$GOROOT" >> /etc/profile \ 39 | && echo 'export PATH=$PATH:$GOROOT/bin' >> /etc/profile \ 40 | && echo '' >> /etc/profile 41 | 42 | # Flannel 43 | ENV FLANNEL_HOME $SRC_DIR/flannel 44 | ENV PATH $PATH:$FLANNEL_HOME/bin 45 | RUN git clone https://github.com/coreos/flannel.git \ 46 | && cd flannel && ./build \ 47 | && echo '# flannel'>>/etc/profile \ 48 | && echo "export FLANNEL_HOME=/opt/flannel">>/etc/profile \ 49 | && echo 'export PATH=$PATH:$FLANNEL_HOME/bin'>>/etc/profile \ 50 | && echo ''>>/etc/profile 51 | 52 | # Google - Kubernetes 53 | ENV KUBERNETES_HOME $SRC_DIR/kubernetes 54 | ENV PATH $PATH:$KUBERNETES_HOME/server/bin 55 | 
ADD kubernetes-server-linux-amd64.tar.gz $SRC_DIR 56 | RUN echo '# Kubernetes' >> /etc/profile \ 57 | && echo "export KUBERNETES_HOME=$KUBERNETES_HOME" >> /etc/profile \ 58 | && echo 'export PATH=$PATH:$KUBERNETES_HOME/server/bin' >> /etc/profile \ 59 | && echo '' >> /etc/profile 60 | 61 | # Add the kubernetes & flannel scripts 62 | ADD conf/cluster/01_k8s.sh /bin/k8s.sh 63 | ADD conf/network/00_flannel.sh /bin/flannel.sh 64 | ADD conf/cluster/01_k8s_master_add_bridge.sh /bin/k8s_master_add_bridge.sh 65 | RUN chmod a+x /bin/k8s.sh /bin/flannel.sh /bin/k8s_master_add_bridge.sh 66 | 67 | # Supervisor 68 | RUN mkdir -p /var/log/supervisor 69 | ADD conf/supervisord/00_default.conf /etc/supervisor/conf.d/supervisord.conf 70 | 71 | # SSH 72 | RUN mkdir /var/run/sshd 73 | RUN sed -i '/^#UseLogin/ s:.*:UseLogin yes:' /etc/ssh/sshd_config 74 | RUN sed -i 's/\#AuthorizedKeysFile/AuthorizedKeysFile/g' /etc/ssh/sshd_config 75 | RUN sed -i '/^PermitRootLogin/ s:.*:PermitRootLogin yes:' /etc/ssh/sshd_config 76 | 77 | # Set the root password for ssh 78 | RUN echo 'root:kubernetes' |chpasswd 79 | 80 | # Port 81 | EXPOSE 22 8080 82 | 83 | # Daemon 84 | CMD ["/usr/bin/supervisord"] 85 | -------------------------------------------------------------------------------- /03_kubernetes-minion: -------------------------------------------------------------------------------- 1 | # 2 | # Dockerfile - Google Kubernetes 3 | # 4 | # - Build 5 | # docker build --rm -t kubernetes:minion -f 03_kubernetes-minion . 
6 | # 7 | # - Run 8 | # docker run -d --name="kubernetes-minion-0" -h "kubernetes-minion-0" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules kubernetes:minion 9 | # docker run -d --name="kubernetes-minion-1" -h "kubernetes-minion-1" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules kubernetes:minion 10 | # 11 | # - SSH 12 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' kubernetes-minion-0` 13 | # ssh `docker inspect -f '{{ .NetworkSettings.IPAddress }}' kubernetes-minion-1` 14 | # 15 | # Use the base images 16 | FROM ubuntu:16.04 17 | MAINTAINER Yongbok Kim 18 | 19 | # Change the repository 20 | RUN sed -i 's/archive.ubuntu.com/ftp.daumkakao.com/g' /etc/apt/sources.list 21 | 22 | # The last update and install package for docker 23 | ENV BASE_IMG_CODENAME ubuntu-xenial 24 | ENV DOCKER_REPO_KEY 58118E89F3A912897C070ADBF76221572C52609D 25 | ENV DOCKER_REPO_KEY_SERVER hkp://p80.pool.sks-keyservers.net:80 26 | RUN apt-get update && apt-get install -y add-apt-key apt-transport-https ca-certificates 27 | RUN apt-key adv --keyserver $DOCKER_REPO_KEY_SERVER --recv-keys $DOCKER_REPO_KEY 28 | RUN echo "deb https://apt.dockerproject.org/repo $BASE_IMG_CODENAME main" > /etc/apt/sources.list.d/docker.list 29 | RUN apt-get clean all && apt-get update && apt-get install -y docker-engine iptables apparmor \ 30 | supervisor openssh-server nano curl git-core build-essential net-tools iputils-ping bridge-utils 31 | 32 | # Docker in Docker 33 | ADD conf/docker/default_docker /etc/default/docker 34 | ADD https://raw.githubusercontent.com/jpetazzo/dind/master/wrapdocker /bin/wrapdocker 35 | RUN chmod +x /bin/wrapdocker 36 | 37 | # Volume mount 38 | # Issue : There are no more loopback devices available. 
39 | # Solution : docker run -v /dev/:/dev 40 | VOLUME /var/run 41 | VOLUME /var/lib/docker 42 | 43 | # Variable 44 | ENV SRC_DIR /opt 45 | WORKDIR $SRC_DIR 46 | 47 | # GO Language 48 | ENV GO_ARCH linux-amd64 49 | ENV GOROOT $SRC_DIR/go 50 | ENV PATH $PATH:$GOROOT/bin 51 | RUN curl -XGET https://github.com/golang/go/tags | grep tag-name > /tmp/golang_tag \ 52 | && sed -e 's/<[^>]*>//g' /tmp/golang_tag > /tmp/golang_ver \ 53 | && GO_VER=`sed -e 's/ go/go/g' /tmp/golang_ver | head -n 1` && rm -f /tmp/golang_* \ 54 | && curl -LO "https://storage.googleapis.com/golang/$GO_VER.$GO_ARCH.tar.gz" \ 55 | && tar -C $SRC_DIR -xzf go*.tar.gz && rm -rf go*.tar.gz \ 56 | && echo '' >> /etc/profile \ 57 | && echo '# Golang' >> /etc/profile \ 58 | && echo "export GOROOT=$GOROOT" >> /etc/profile \ 59 | && echo 'export PATH=$PATH:$GOROOT/bin' >> /etc/profile \ 60 | && echo '' >> /etc/profile 61 | 62 | # Flannel 63 | ENV FLANNEL_HOME $SRC_DIR/flannel 64 | ENV PATH $PATH:$FLANNEL_HOME/bin 65 | RUN git clone https://github.com/coreos/flannel.git \ 66 | && cd flannel && ./build \ 67 | && echo '# flannel'>>/etc/profile \ 68 | && echo "export FLANNEL_HOME=/opt/flannel">>/etc/profile \ 69 | && echo 'export PATH=$PATH:$FLANNEL_HOME/bin'>>/etc/profile \ 70 | && echo ''>>/etc/profile 71 | 72 | # Google - Kubernetes 73 | ENV KUBERNETES_HOME $SRC_DIR/kubernetes 74 | ENV PATH $PATH:$KUBERNETES_HOME/server/bin 75 | ADD kubernetes-server-linux-amd64.tar.gz $SRC_DIR 76 | RUN echo '# Kubernetes' >> /etc/profile \ 77 | && echo "export KUBERNETES_HOME=$KUBERNETES_HOME" >> /etc/profile \ 78 | && echo 'export PATH=$PATH:$KUBERNETES_HOME/server/bin' >> /etc/profile \ 79 | && echo '' >> /etc/profile 80 | 81 | # kubernetes minion & flannel scripts 82 | ADD conf/network/00_flannel.sh /bin/flannel.sh 83 | ADD conf/cluster/02_minion.sh /bin/minion.sh 84 | ADD conf/cluster/03_minion_init.sh /bin/minion-init.sh 85 | RUN chmod a+x /bin/flannel.sh /bin/minion.sh /bin/minion-init.sh 86 | 87 | # Supervisor 88 | 
RUN mkdir -p /var/log/supervisor 89 | ADD conf/supervisord/01_minion.conf /etc/supervisor/conf.d/supervisord.conf 90 | 91 | # SSH 92 | RUN mkdir /var/run/sshd 93 | RUN sed -i '/^#UseLogin/ s:.*:UseLogin yes:' /etc/ssh/sshd_config 94 | RUN sed -i 's/\#AuthorizedKeysFile/AuthorizedKeysFile/g' /etc/ssh/sshd_config 95 | RUN sed -i '/^PermitRootLogin/ s:.*:PermitRootLogin yes:' /etc/ssh/sshd_config 96 | 97 | # Set the root password for ssh 98 | RUN echo 'root:kubernetes' |chpasswd 99 | 100 | # Port 101 | EXPOSE 22 8080 102 | 103 | # Daemon 104 | CMD ["/usr/bin/supervisord"] 105 | -------------------------------------------------------------------------------- /conf/yaml/skydns.yaml: -------------------------------------------------------------------------------- 1 | # Service 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: kube-dns 6 | namespace: kube-system 7 | labels: 8 | k8s-app: kube-dns 9 | kubernetes.io/cluster-service: "true" 10 | kubernetes.io/name: "KubeDNS" 11 | spec: 12 | selector: 13 | k8s-app: kube-dns 14 | clusterIP: 15 | ports: 16 | - name: dns 17 | port: 53 18 | protocol: UDP 19 | - name: dns-tcp 20 | port: 53 21 | protocol: TCP 22 | --- 23 | # Replication Controller 24 | apiVersion: v1 25 | kind: ReplicationController 26 | metadata: 27 | name: kube-dns 28 | namespace: kube-system 29 | labels: 30 | k8s-app: kube-dns 31 | version: v11 32 | kubernetes.io/cluster-service: "true" 33 | spec: 34 | replicas: 1 35 | selector: 36 | k8s-app: kube-dns 37 | version: v11 38 | template: 39 | metadata: 40 | labels: 41 | k8s-app: kube-dns 42 | version: v11 43 | kubernetes.io/cluster-service: "true" 44 | spec: 45 | containers: 46 | - name: etcd 47 | image: gcr.io/google_containers/etcd-amd64:2.2.1 48 | resources: 49 | # TODO: Set memory limits when we've profiled the container for large 50 | # clusters, then set request = limit to keep this container in 51 | # guaranteed class. 
Currently, this container falls into the 52 | # "burstable" category so the kubelet doesn't backoff from restarting it. 53 | limits: 54 | cpu: 100m 55 | memory: 500Mi 56 | requests: 57 | cpu: 100m 58 | memory: 50Mi 59 | command: 60 | - /usr/local/bin/etcd 61 | - -data-dir 62 | - /var/etcd/data 63 | - -listen-client-urls 64 | - http://127.0.0.1:2379,http://127.0.0.1:4001 65 | - -advertise-client-urls 66 | - http://127.0.0.1:2379,http://127.0.0.1:4001 67 | - -initial-cluster-token 68 | - skydns-etcd 69 | volumeMounts: 70 | - name: etcd-storage 71 | mountPath: /var/etcd/data 72 | - name: kube2sky 73 | image: gcr.io/google_containers/kube2sky-amd64:1.15 74 | resources: 75 | # TODO: Set memory limits when we've profiled the container for large 76 | # clusters, then set request = limit to keep this container in 77 | # guaranteed class. Currently, this container falls into the 78 | # "burstable" category so the kubelet doesn't backoff from restarting it. 79 | limits: 80 | cpu: 100m 81 | # Kube2sky watches all pods. 82 | memory: 200Mi 83 | requests: 84 | cpu: 100m 85 | memory: 50Mi 86 | livenessProbe: 87 | httpGet: 88 | path: /healthz 89 | port: 8080 90 | scheme: HTTP 91 | initialDelaySeconds: 60 92 | timeoutSeconds: 5 93 | successThreshold: 1 94 | failureThreshold: 5 95 | readinessProbe: 96 | httpGet: 97 | path: /readiness 98 | port: 8081 99 | scheme: HTTP 100 | # we poll on pod startup for the Kubernetes master service and 101 | # only setup the /readiness HTTP server once that's available. 102 | initialDelaySeconds: 30 103 | timeoutSeconds: 5 104 | args: 105 | # command = "/kube2sky" 106 | - --domain=kube-dns.local 107 | - -kube_master_url=http://172.17.1.4:8080 108 | - name: skydns 109 | image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c 110 | resources: 111 | # TODO: Set memory limits when we've profiled the container for large 112 | # clusters, then set request = limit to keep this container in 113 | # guaranteed class. 
Currently, this container falls into the 114 | # "burstable" category so the kubelet doesn't backoff from restarting it. 115 | limits: 116 | cpu: 100m 117 | memory: 200Mi 118 | requests: 119 | cpu: 100m 120 | memory: 50Mi 121 | args: 122 | # command = "/skydns" 123 | - -machines=http://127.0.0.1:4001 124 | - -addr=0.0.0.0:53 125 | - -ns-rotate=false 126 | - -domain=kube-dns.local 127 | ports: 128 | - containerPort: 53 129 | name: dns 130 | protocol: UDP 131 | - containerPort: 53 132 | name: dns-tcp 133 | protocol: TCP 134 | - name: healthz 135 | image: gcr.io/google_containers/exechealthz:1.0 136 | resources: 137 | # keep request = limit to keep this container in guaranteed class 138 | limits: 139 | cpu: 10m 140 | memory: 20Mi 141 | requests: 142 | cpu: 10m 143 | memory: 20Mi 144 | args: 145 | - -cmd=nslookup kubernetes.default.svc.kube-dns.local 127.0.0.1 >/dev/null 146 | - -port=8080 147 | ports: 148 | - containerPort: 8080 149 | protocol: TCP 150 | volumes: 151 | - name: etcd-storage 152 | emptyDir: {} 153 | dnsPolicy: Default # Don't use cluster DNS. 
154 | -------------------------------------------------------------------------------- /conf/cluster/00_etcd-cluster.sh: -------------------------------------------------------------------------------- 1 | #------------------------------------------------# 2 | # etcd cluster script 3 | # Maintainer: Yongbok Kim (ruo91@yongbok.net) 4 | #------------------------------------------------# 5 | #!/bin/bash 6 | ### Global ### 7 | # ETCD 8 | ETCD=/opt/etcd 9 | PATH=$PATH:$ETCD 10 | 11 | # Cluster 12 | ETCD_CLUSTER_IP="0.0.0.0" 13 | ETCD_CLUSTER_STATE="new" 14 | ETCD_CLUSTER_NAME_0="cluster-0" 15 | ETCD_CLUSTER_NAME_1="cluster-1" 16 | ETCD_CLUSTER_NAME_2="cluster-2" 17 | ETCD_CLUSTER_TOKEN="etcd-cluster" 18 | ETCD_DATA_DIR="/tmp/etcd" 19 | ETCD_ADVERTISE_PEER_IP="$(ip a s | grep 'eth1' | grep 'inet' | cut -d '/' -f 1 | awk '{ print $2 }')" 20 | ETCD_INITIAL_CLUSTER="$ETCD_CLUSTER_NAME_0=http://172.17.1.1:2380,$ETCD_CLUSTER_NAME_1=http://172.17.1.2:2380,$ETCD_CLUSTER_NAME_2=http://172.17.1.3:2380" 21 | 22 | # Logs 23 | ETCD_LOGS="/tmp/etcd-cluster.log" 24 | 25 | # PID 26 | ETCD_PID="$(ps -e | grep 'etcd' | awk '{ printf $1 "\n" }')" 27 | 28 | # Function 29 | function f_etcd { 30 | echo "Start ETCD..." 
&& sleep 1 31 | if [ "$ETCD_ADVERTISE_PEER_IP" == "172.17.1.1" ]; then 32 | etcd \ 33 | --name "$ETCD_CLUSTER_NAME_0" \ 34 | --data-dir "$ETCD_DATA_DIR" \ 35 | --listen-peer-urls "http://$ETCD_CLUSTER_IP:2380" \ 36 | --listen-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 37 | --initial-cluster-state "$ETCD_CLUSTER_STATE" \ 38 | --initial-cluster-token "$ETCD_CLUSTER_TOKEN" \ 39 | --initial-advertise-peer-urls "http://$ETCD_ADVERTISE_PEER_IP:2380" \ 40 | --advertise-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 41 | --initial-cluster "$ETCD_INITIAL_CLUSTER" \ 42 | > $ETCD_LOGS 2>&1 & 43 | echo "done" 44 | 45 | elif [ "$ETCD_ADVERTISE_PEER_IP" == "172.17.1.2" ]; then 46 | etcd \ 47 | --name "$ETCD_CLUSTER_NAME_1" \ 48 | --data-dir "$ETCD_DATA_DIR" \ 49 | --listen-peer-urls "http://$ETCD_CLUSTER_IP:2380" \ 50 | --listen-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 51 | --initial-cluster-state "$ETCD_CLUSTER_STATE" \ 52 | --initial-cluster-token "$ETCD_CLUSTER_TOKEN" \ 53 | --initial-advertise-peer-urls "http://$ETCD_ADVERTISE_PEER_IP:2380" \ 54 | --advertise-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 55 | --initial-cluster "$ETCD_INITIAL_CLUSTER" \ 56 | > $ETCD_LOGS 2>&1 & 57 | echo "done" 58 | 59 | elif [ "$ETCD_ADVERTISE_PEER_IP" == "172.17.1.3" ]; then 60 | etcd \ 61 | --name "$ETCD_CLUSTER_NAME_2" \ 62 | --data-dir "$ETCD_DATA_DIR" \ 63 | --listen-peer-urls "http://$ETCD_CLUSTER_IP:2380" \ 64 | --listen-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 65 | --initial-cluster-state "$ETCD_CLUSTER_STATE" \ 66 | --initial-cluster-token "$ETCD_CLUSTER_TOKEN" \ 67 | --initial-advertise-peer-urls "http://$ETCD_ADVERTISE_PEER_IP:2380" \ 68 | --advertise-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 69 | --initial-cluster "$ETCD_INITIAL_CLUSTER" \ 70 | > $ETCD_LOGS 2>&1 & 71 | echo "done" 72 | 73 | else 74 | echo "IP address does not matching." 
75 | fi 76 | } 77 | 78 | # Function of manual 79 | function f_etcd_manual { 80 | echo -ne "\033[33m- Cluster Name \033[0m \n" 81 | echo -ne "\033[33m- ex) cluster-0 \033[0m \n" 82 | echo -ne "\033[33m- Input: \033[0m" 83 | read ETCD_CLUSTER_NAME 84 | echo 85 | 86 | echo -ne "\033[33m- Initial Cluster URL \033[0m \n" 87 | echo -ne "\033[33m- ex) cluster-0=http://172.17.1.1:2380,cluster-1=http://172.17.1.2:2380,cluster-2=http://172.17.1.3:2380 \033[0m \n" 88 | echo -ne "\033[33m- Input: \033[0m" 89 | read ETCD_INITIAL_CLUSTER 90 | echo 91 | 92 | echo "Start ETCD..." && sleep 1 93 | etcd \ 94 | --name "$ETCD_CLUSTER_NAME" \ 95 | --data-dir "$ETCD_DATA_DIR" \ 96 | --listen-peer-urls "http://$ETCD_CLUSTER_IP:2380" \ 97 | --listen-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 98 | --initial-cluster-state "$ETCD_CLUSTER_STATE" \ 99 | --initial-cluster-token "$ETCD_CLUSTER_TOKEN" \ 100 | --initial-advertise-peer-urls "http://$ETCD_ADVERTISE_PEER_IP:2380" \ 101 | --advertise-client-urls "http://$ETCD_CLUSTER_IP:4001" \ 102 | --initial-cluster "$ETCD_INITIAL_CLUSTER" \ 103 | > $ETCD_LOGS 2>&1 & 104 | echo "done" 105 | } 106 | 107 | function f_kill_of_process { 108 | if [[ "$ARG_2" == "e" || "$ARG_2" == "etcd" ]]; then 109 | echo "Kill of ETCD..." 
&& sleep 1 110 | kill -9 $ETCD_PID 111 | echo "done" 112 | 113 | else 114 | echo "Not found PIDs" 115 | fi 116 | } 117 | 118 | function f_help { 119 | echo "Usage: $ARG_0 [Options] [Arguments]" 120 | echo 121 | echo "- Options" 122 | echo "e, etcd : etcd" 123 | echo "k, kill : kill of process" 124 | echo 125 | echo "- Arguments" 126 | echo "s, start : Start commands" 127 | echo "m, manual : Manual commands" 128 | echo "e, etcd : kill of etcd (k or kill option only.)" 129 | echo " ex) $ARG_0 k e or $ARG_0 kill etcd" 130 | echo 131 | } 132 | 133 | # Main 134 | ARG_0="$0" 135 | ARG_1="$1" 136 | ARG_2="$2" 137 | 138 | case ${ARG_1} in 139 | e|etcd) 140 | if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then 141 | f_etcd 142 | 143 | elif [[ "$ARG_2" == "m" || "$ARG_2" == "manual" ]]; then 144 | f_etcd_manual 145 | 146 | else 147 | f_help 148 | fi 149 | ;; 150 | 151 | k|kill) 152 | f_kill_of_process 153 | ;; 154 | 155 | *) 156 | f_help 157 | ;; 158 | 159 | esac 160 | -------------------------------------------------------------------------------- /conf/cluster/02_minion.sh: -------------------------------------------------------------------------------- 1 | #------------------------------------------------# 2 | # Kubernetes minion start script 3 | # Maintainer: Yongbok Kim (ruo91@yongbok.net) 4 | #------------------------------------------------# 5 | #!/bin/bash 6 | ### Global ### 7 | # Kubernetes 8 | K8S_HOME=/opt/kubernetes 9 | PATH=$PATH:$K8S_HOME/server/bin 10 | 11 | # Ports 12 | K8S_CADVISOR_PORT="4194" 13 | K8S_KUBELET_PORT="10250" 14 | K8S_API_SERVER_PORT="8080" 15 | 16 | # Address 17 | K8S_API_SERVER="172.17.1.4" 18 | K8S_COMMON_SERVER_ADDR="0.0.0.0" 19 | K8S_HOST_OVERRIDE="$(ip a s | grep 'eth1' | grep 'inet' | cut -d '/' -f 1 | awk '{ print $2 }')" 20 | 21 | # Options 22 | PROXY_MODE="iptables" 23 | KUBE_DNS_DOMAIN="kube-dns.local" 24 | KUBE_DNS_CLUSTER_IP="10.250.250.250" 25 | 26 | # Logs 27 | K8S_PROXY_LOGS="/tmp/proxy.log" 28 | 
K8S_KUBELET_LOGS="/tmp/kubelet.log" 29 | 30 | # PID 31 | K8S_PROXY_SERVER_PID="$(ps -e | grep 'kube-proxy' | awk '{ printf $1 "\n" }')" 32 | K8S_KUBELET_SERVER_PID="$(ps -e | grep 'kubelet' | awk '{ printf $1 "\n" }')" 33 | 34 | # Functions 35 | function f_proxy { 36 | # - Issue 37 | # write /sys/module/nf_conntrack/parameters/hashsize: operation not supported 38 | # https://github.com/kubernetes/kubernetes/issues/24295#issuecomment-216486725 39 | # --conntrack-max=0 40 | echo "Start Proxy..." && sleep 1 41 | kube-proxy \ 42 | --proxy-mode="$PROXY_MODE" \ 43 | --conntrack-max=0 \ 44 | --master=$K8S_API_SERVER:$K8S_API_SERVER_PORT \ 45 | --v=0 > $K8S_PROXY_LOGS 2>&1 & 46 | echo "done" 47 | } 48 | 49 | function f_kubelet { 50 | echo "Start Kubelet..." && sleep 1 51 | kubelet \ 52 | --allow-privileged=true \ 53 | # --cluster-dns="$KUBE_DNS_CLUSTER_IP" \ 54 | # --cluster-domain="$KUBE_DNS_DOMAIN" \ 55 | --address=$K8S_COMMON_SERVER_ADDR \ 56 | --port=$K8S_KUBELET_PORT \ 57 | --cadvisor-port=$K8S_CADVISOR_PORT \ 58 | --api-servers=$K8S_API_SERVER:$K8S_API_SERVER_PORT \ 59 | --hostname-override=$K8S_HOST_OVERRIDE \ 60 | --v=0 > $K8S_KUBELET_LOGS 2>&1 & 61 | echo "done" 62 | } 63 | 64 | # Function of manual 65 | function f_proxy_manual { 66 | echo -ne "\033[33m- API Server \033[0m \n" 67 | echo -ne "\033[33m- ex) 172.17.1.4:8080 \033[0m \n" 68 | echo -ne "\033[33m- Input: \033[0m" 69 | read K8S_API_SERVER 70 | echo 71 | 72 | echo "Start Proxy..." 
&& sleep 1 73 | kube-proxy \ 74 | --proxy-mode="$PROXY_MODE" \ 75 | --conntrack-max=0 \ 76 | --master=$K8S_API_SERVER \ 77 | --v=0 > $K8S_PROXY_LOGS 2>&1 & 78 | echo "done" 79 | } 80 | 81 | function f_kubelet_manual { 82 | echo -ne "\033[33m- Kubelet Port \033[0m \n" 83 | echo -ne "\033[33m- ex) 10250 \033[0m \n" 84 | echo -ne "\033[33m- Input: \033[0m" 85 | read K8S_KUBELET_PORT 86 | echo 87 | 88 | echo -ne "\033[33m- cAdvisor Port \033[0m \n" 89 | echo -ne "\033[33m- ex) 4194 \033[0m \n" 90 | echo -ne "\033[33m- Input: \033[0m" 91 | read K8S_CADVISOR_PORT 92 | echo 93 | 94 | echo -ne "\033[33m- Kubelet Service Address \033[0m \n" 95 | echo -ne "\033[33m- ex) 0.0.0.0 \033[0m \n" 96 | echo -ne "\033[33m- Input: \033[0m" 97 | read K8S_KUBELET_SERVICE_ADDR 98 | echo 99 | 100 | echo -ne "\033[33m- API Server \033[0m \n" 101 | echo -ne "\033[33m- ex) 172.17.1.4:8080 \033[0m \n" 102 | echo -ne "\033[33m- Input: \033[0m" 103 | read K8S_API_SERVER 104 | echo 105 | 106 | echo "Start Kubelet..." && sleep 1 107 | kubelet \ 108 | --allow-privileged=true \ 109 | # --cluster-dns="$KUBE_DNS_CLUSTER_IP" \ 110 | # --cluster-domain="$KUBE_DNS_DOMAIN" \ 111 | --port=$K8S_KUBELET_PORT \ 112 | --cadvisor-port=$K8S_CADVISOR_PORT \ 113 | --address=$K8S_KUBELET_SERVICE_ADDR \ 114 | --api-servers=$K8S_API_SERVER \ 115 | --hostname-override=$K8S_HOST_OVERRIDE \ 116 | --v=0 > $K8S_KUBELET_LOGS 2>&1 & 117 | echo "done" 118 | } 119 | 120 | function f_kill_of_process { 121 | if [ "$ARG_2" == "all" ]; then 122 | echo "Kill of All Server..." && sleep 1 123 | kill -9 $K8S_PROXY_SERVER_PID \ 124 | $K8S_KUBELET_SERVER_PID 125 | echo "done" 126 | 127 | elif [[ "$ARG_2" == "p" || "$ARG_2" == "proxy" ]]; then 128 | echo "Kill of Proxy..." && sleep 1 129 | kill -9 $K8S_PROXY_SERVER_PID 130 | echo "done" 131 | 132 | elif [[ "$ARG_2" == "kb" || "$ARG_2" == "kubelet" ]]; then 133 | echo "Kill of Kubelet..." 
&& sleep 1 134 | kill -9 $K8S_KUBELET_SERVER_PID 135 | echo "done" 136 | 137 | else 138 | echo "Not found PIDs" 139 | fi 140 | } 141 | 142 | function f_help { 143 | echo "Usage: $ARG_0 [Options] [Arguments]" 144 | echo 145 | echo "- Options" 146 | echo "p, proxy : proxy" 147 | echo "kb, kubelet : kubelet" 148 | echo "k, kill : kill of process" 149 | echo 150 | echo "- Arguments" 151 | echo "s, start : Start commands" 152 | echo "m, manual : Manual commands" 153 | echo 154 | echo "all : kill of all server (k or kill option only.)" 155 | echo " ex) $ARG_0 k all or $ARG_0 kill all" 156 | echo 157 | echo "p, proxy : kill of proxy (k or kill option only.)" 158 | echo " ex) $ARG_0 k p or $ARG_0 kill proxy" 159 | echo 160 | echo "kb, kubelet : kill of kubelet (k or kill option only.)" 161 | echo " ex) $ARG_0 k kb or $ARG_0 kill kubelet" 162 | echo 163 | } 164 | 165 | # Main 166 | ARG_0="$0" 167 | ARG_1="$1" 168 | ARG_2="$2" 169 | 170 | case ${ARG_1} in 171 | p|proxy) 172 | if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then 173 | f_proxy 174 | 175 | elif [[ "$ARG_2" == "m" || "ARG_2" == "manual" ]]; then 176 | f_proxy_manual 177 | 178 | else 179 | f_help 180 | fi 181 | ;; 182 | 183 | kb|kubelet) 184 | if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then 185 | f_kubelet 186 | 187 | elif [[ "$ARG_2" == "m" || "ARG_2" == "manual" ]]; then 188 | f_kubelet_manual 189 | 190 | else 191 | f_help 192 | fi 193 | ;; 194 | 195 | k|kill) 196 | f_kill_of_process 197 | ;; 198 | 199 | *) 200 | f_help 201 | ;; 202 | 203 | esac 204 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Dockerfile - Google Kubernetes (test only) 2 | ![Fig1. Kubernetes Architecture](http://cdn.yongbok.net/ruo91/architecture/k8s/kubernetes_architecture_v1.x.png) 3 | 4 | # Google Kubernetes? 5 | Container를 쉽게 관리 할 수 있도록 만든 오픈소스 도구 입니다. 
참고: http://www.yongbok.net/blog/google-kubernetes-container-cluster-manager/

# Dockerfile로 만들게 된 이유가 무엇입니까?
실제와 같은 환경을 구축 및 테스트 할때 시간 비용을 줄이고자 만들었습니다.

#### - Clone
Github 저장소에서 Dockerfile을 받아 옵니다.

    root@ruo91:~# git clone https://github.com/ruo91/docker-kubernetes /opt/docker-kubernetes

#### - Build
Kubernetes는 Docker를 사용하여 빌드하기 때문에, HostOS에서 빌드 후 tar.gz 파일을 Dockerfile이 있는 경로에 복사합니다.
(일종의 편법 입니다.)

    root@ruo91:~# git clone https://github.com/kubernetes/kubernetes /opt/kubernetes-source
    root@ruo91:~# cd /opt/kubernetes-source
    root@ruo91:~# make quick-release
    root@ruo91:~# cp _output/release-tars/kubernetes-client-linux-amd64.tar.gz /opt/docker-kubernetes
    root@ruo91:~# cp _output/release-tars/kubernetes-server-linux-amd64.tar.gz /opt/docker-kubernetes

이후 docker-kubernetes.sh 쉘스크립트를 통해 etcd, master, minion, client를 빌드 합니다.

    root@ruo91:~# cd /opt/docker-kubernetes
    root@ruo91:~# ./docker-kubernetes.sh build yes

#### - Run
etcd x3, master x1, minion x2, client x1 개의 컨테이너를 실행 합니다.

    root@ruo91:~# ./docker-kubernetes.sh run yes

# Test
docker exec 명령어를 통해 kubernetes-client 컨테이너에서 테스트 해볼 것입니다.
(-s 옵션은 API Server의 IP와 포트를 지정 해주면 됩니다.)

    root@ruo91:~# docker exec kubernetes-client kubectl get services -s 172.17.1.4:8080
    docker exec kubernetes-client kubectl get services -s 172.17.1.4:8080
    NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
    kubernetes 10.250.94.1 443/TCP 24m

Minion 서버에 20개의 Nginx를 실행 해보도록 하겠습니다.

    root@ruo91:~# docker exec kubernetes-client kubectl create -f /opt/nginx.yaml -s 172.17.1.4:8080
    replicationcontroller "nginxs" created

create 명령어가 실행 되고 나면, 해당 Minion 서버중에 Workload가 낮은 서버에서 해당 docker 이미지를 받아오고(pending),
시간이 지나면 다음과 같이 Running 상태로 바뀌게 됩니다. 이는 곧 사용할 준비가 되었다는 뜻입니다.
(시스템 및 네트워크 상황에 따라 몇분 이상 소요 될 수 있습니다.)
53 | 54 | root@ruo91:~# docker exec kubernetes-client kubectl get pods -s 172.17.1.4:8080 55 | NAME READY STATUS RESTARTS AGE 56 | nginxs-3a6jp 1/1 Running 0 6m 57 | nginxs-3x1bc 1/1 Running 0 6m 58 | nginxs-85aej 1/1 Running 0 6m 59 | nginxs-867li 1/1 Running 0 6m 60 | nginxs-8qa8e 1/1 Running 0 6m 61 | nginxs-bgt6c 1/1 Running 0 6m 62 | nginxs-bje1b 1/1 Running 0 6m 63 | nginxs-ecj4y 1/1 Running 0 6m 64 | nginxs-em4r7 1/1 Running 0 6m 65 | nginxs-eqt9v 1/1 Running 0 6m 66 | nginxs-fxmuf 1/1 Running 0 6m 67 | nginxs-mrdsm 1/1 Running 0 6m 68 | nginxs-oa0bt 1/1 Running 0 6m 69 | nginxs-onxg0 1/1 Running 0 6m 70 | nginxs-uxlhf 1/1 Running 0 6m 71 | nginxs-uy8yu 1/1 Running 0 6m 72 | nginxs-vrusv 1/1 Running 0 6m 73 | nginxs-vvjwc 1/1 Running 0 6m 74 | nginxs-xxd5f 1/1 Running 0 6m 75 | nginxs-y31ow 1/1 Running 0 6m 76 | 77 | describe 옵션으로 상태를 확인 해봅니다. 78 | 79 | root@ruo91:~# docker exec kubernetes-client kubectl describe -f nginx.yaml -s 172.17.1.4:8080 80 | Name: nginx-svc 81 | Namespace: default 82 | Labels: app=nginx 83 | Selector: 84 | Type: NodePort 85 | IP: 10.250.94.161 86 | Port: http 80/TCP 87 | NodePort: http 30195/TCP 88 | Endpoints: 89 | Session Affinity: None 90 | No events. 91 | 92 | Name: nginxs 93 | Namespace: default 94 | Image(s): ruo91/nginx:latest 95 | Selector: app=nginx 96 | Labels: app=nginx 97 | Replicas: 20 current / 20 desired 98 | Pods Status: 20 Running / 0 Waiting / 0 Succeeded / 0 Failed 99 | No volumes. 
100 | Events: 101 | FirstSeen LastSeen Count From SubobjectPath Type Reason Message 102 | --------- -------- ----- ---- ------------- -------- ------ ------- 103 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-fxmuf 104 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-uy8yu 105 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-8qa8e 106 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-867li 107 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-bje1b 108 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-xxd5f 109 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-onxg0 110 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-vvjwc 111 | 7m 7m 1 {replication-controller } Normal SuccessfulCreate Created pod: nginxs-eqt9v 112 | 7m 7m 11 {replication-controller } Normal SuccessfulCreate (events with common reason combined) 113 | 114 | # Kubernetes Web UI 115 | Kubernetes v1.x 버전 부터는 Web UI(kube-ui)가 Minion 쪽에서 Pod로 실행 되도록 변경 되었습니다. 116 | HostOS에 Nginx 같은 웹서버를 사용한다면, Reverse Proxy 설정을 통하여 넘겨주면 쉽게 접속이 가능합니다. 
117 | 118 | # Kubernetes web ui 119 | server { 120 | listen 80; 121 | server_name kubernetes.yongbok.net; 122 | 123 | location / { 124 | proxy_set_header Host $host; 125 | proxy_set_header X-Forwarded-Host $host; 126 | proxy_set_header X-Forwarded-Server $host; 127 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 128 | proxy_pass http://172.17.1.4:8080; 129 | client_max_body_size 10M; 130 | } 131 | } 132 | 133 | ## Kubernetes Web UI #0 134 | ![Kubernetes Web UI #0](http://cdn.yongbok.net/ruo91/img/kubernetes/v1.2/k8s_web_ui_0.png) 135 | 136 | ## Kubernetes Web UI #1 137 | ![Kubernetes Web UI #1](http://cdn.yongbok.net/ruo91/img/kubernetes/v1.2/k8s_web_ui_1.png) 138 | 139 | ## Kubernetes Web UI #2 140 | ![Kubernetes Web UI #2](http://cdn.yongbok.net/ruo91/img/kubernetes/v1.2/k8s_web_ui_2.png) 141 | 142 | ## Kubernetes Web UI #3 143 | ![Kubernetes Web UI #3](http://cdn.yongbok.net/ruo91/img/kubernetes/v1.2/k8s_web_ui_3.png) 144 | 145 | ## Kubernetes Web UI #4 146 | ![Kubernetes Web UI #4](http://cdn.yongbok.net/ruo91/img/kubernetes/v1.2/k8s_web_ui_4.png) 147 | 148 | ## Kubernetes Web UI #5 149 | ![Kubernetes Web UI #5](http://cdn.yongbok.net/ruo91/img/kubernetes/v1.2/k8s_web_ui_5.png) 150 | 151 | Thanks. 
:-)
--------------------------------------------------------------------------------
/conf/cluster/01_k8s.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#------------------------------------------------#
# Kubernetes start script
# Maintainer: Yongbok Kim (ruo91@yongbok.net)
#------------------------------------------------#
# BUG FIX: the shebang is only honored on the very first line of the file;
# it was previously placed after the header comment block.
### Global ###
# Kubernetes
K8S_HOME=/opt/kubernetes
PATH=$PATH:$K8S_HOME/server/bin

# Ports
K8S_ETCD_PORT="4001"
K8S_KUBELET_PORT="10250"
K8S_API_SERVER_PORT="8080"

# Address
K8S_API_SERVER="172.17.1.4"
# Derive the service CIDR from the flannel.1 interface (x.y.0.0/16 -> x.y.0.0/24).
K8S_SERVICE_CLUSTER_IP_RANGE="$(ip a s flannel.1 | grep -v 'inet6' | grep 'inet' | cut -d ':' -f 2 | awk '{ print $2}' | sed 's/0\/16/0\/24/g')"
K8S_COMMON_SERVER_ADDR="0.0.0.0"
K8S_ETCD_SERVER="http://172.17.1.1:$K8S_ETCD_PORT,http://172.17.1.2:$K8S_ETCD_PORT,http://172.17.1.3:$K8S_ETCD_PORT"

# Logs
K8S_API_SERVER_LOGS="/tmp/apiserver.log"
K8S_SCHEDULER_LOGS="/tmp/scheduler.log"
K8S_CONTROLLER_LOGS="/tmp/controller-manager.log"

# PID
# pgrep matches against the process name and never matches itself,
# unlike the previous "ps -e | grep ..." pipeline.
K8S_API_SERVER_PID="$(pgrep kube-apiserver)"
K8S_SCHEDULER_SERVER_PID="$(pgrep kube-scheduler)"
K8S_CONTROLLER_SERVER_PID="$(pgrep kube-controller)"

# Functions
# Start kube-apiserver in the background, logging to $K8S_API_SERVER_LOGS.
function f_apiserver {
  echo "Start API Server..." && sleep 1
  kube-apiserver \
  --port=$K8S_API_SERVER_PORT \
  --address=$K8S_COMMON_SERVER_ADDR \
  --kubelet-port=$K8S_KUBELET_PORT \
  --external-hostname="$K8S_API_SERVER" \
  --service-cluster-ip-range=$K8S_SERVICE_CLUSTER_IP_RANGE \
  --etcd_servers=$K8S_ETCD_SERVER \
  --v=0 > $K8S_API_SERVER_LOGS 2>&1 &
  echo "done"
}

# Start kube-scheduler in the background, logging to $K8S_SCHEDULER_LOGS.
function f_scheduler {
  echo "Start Scheduler..." && sleep 1
  kube-scheduler \
  --address=$K8S_COMMON_SERVER_ADDR \
  --master=$K8S_API_SERVER:$K8S_API_SERVER_PORT \
  --v=0 > $K8S_SCHEDULER_LOGS 2>&1 &
  echo "done"
}

# Start kube-controller-manager in the background, logging to $K8S_CONTROLLER_LOGS.
function f_controller_manager {
  echo "Start Controller Manager..." && sleep 1
  kube-controller-manager \
  --address=$K8S_COMMON_SERVER_ADDR \
  --master=$K8S_API_SERVER:$K8S_API_SERVER_PORT \
  --v=0 > $K8S_CONTROLLER_LOGS 2>&1 &
  echo "done"
}

# Function of manual
# Interactive variant of f_apiserver: prompts for every tunable, then starts
# the API server with the entered values.
function f_apiserver_manual {
  echo -ne "\033[33m- API Server Port \033[0m \n"
  echo -ne "\033[33m- ex) 8080 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_API_SERVER_PORT
  echo

  echo -ne "\033[33m- API Server Service Address \033[0m \n"
  echo -ne "\033[33m- ex) 0.0.0.0 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_API_SERVICE_ADDR
  echo

  echo -ne "\033[33m- Kubelet Port \033[0m \n"
  echo -ne "\033[33m- ex) 10250 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_KUBELET_PORT
  echo

  echo -ne "\033[33m- Service Cluster IP Range \033[0m \n"
  echo -ne "\033[33m- ex) 10.0.42.1/16 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  # BUG FIX: this used to be read into K8S_POTAL_NET_CIDR and then silently
  # ignored — the command below passed the non-interactive default instead.
  read K8S_SERVICE_CLUSTER_IP_RANGE
  echo

  echo -ne "\033[33m- ETCD Server \033[0m \n"
  echo -ne "\033[33m- ex) http://172.17.1.1:4001,http://172.17.1.1:4001\033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_ETCD_SERVER
  echo

  echo "Start API Server..." && sleep 1
  kube-apiserver \
  --port=$K8S_API_SERVER_PORT \
  --address=$K8S_API_SERVICE_ADDR \
  --kubelet-port=$K8S_KUBELET_PORT \
  --external-hostname="$K8S_API_SERVER" \
  --service-cluster-ip-range=$K8S_SERVICE_CLUSTER_IP_RANGE \
  --etcd_servers=$K8S_ETCD_SERVER \
  --v=0 > $K8S_API_SERVER_LOGS 2>&1 &
  echo "done"
}

# Interactive variant of f_scheduler.
function f_scheduler_manual {
  echo -ne "\033[33m- API Server \033[0m \n"
  echo -ne "\033[33m- ex) 172.17.1.1:8080 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_API_SERVER
  echo

  echo -ne "\033[33m- Scheduler Service Address \033[0m \n"
  echo -ne "\033[33m- ex) 0.0.0.0 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_SCHEDULER_SERVICE_ADDR
  echo

  echo "Start Scheduler..." && sleep 1
  kube-scheduler \
  --address=$K8S_SCHEDULER_SERVICE_ADDR \
  --master=$K8S_API_SERVER \
  --v=0 > $K8S_SCHEDULER_LOGS 2>&1 &
  echo "done"
}

# Interactive variant of f_controller_manager.
function f_controller_manager_manual {
  echo -ne "\033[33m- API Server \033[0m \n"
  echo -ne "\033[33m- ex) 172.17.1.1:8080 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_API_SERVER
  echo

  echo -ne "\033[33m- Controller Manager Service Address \033[0m \n"
  echo -ne "\033[33m- ex) 0.0.0.0 \033[0m \n"
  echo -ne "\033[33m- Input: \033[0m"
  read K8S_CONTROLLER_SERVICE_ADDR
  echo

  echo "Start Controller Manager..." && sleep 1
  kube-controller-manager \
  --address=$K8S_CONTROLLER_SERVICE_ADDR \
  --master=$K8S_API_SERVER \
  --v=0 > $K8S_CONTROLLER_LOGS 2>&1 &
  echo "done"
}

# Kills one or all of the master daemons depending on the second argument.
# PID variables are intentionally unquoted so multiple PIDs word-split.
function f_kill_of_process {
  if [ "$ARG_2" == "all" ]; then
    echo "Kill of All Server..." && sleep 1
    kill -9 $K8S_API_SERVER_PID \
    $K8S_SCHEDULER_SERVER_PID \
    $K8S_CONTROLLER_SERVER_PID
    echo "done"

  elif [[ "$ARG_2" == "a" || "$ARG_2" == "api" ]]; then
    echo "Kill of API Server..." && sleep 1
    kill -9 $K8S_API_SERVER_PID
    echo "done"

  elif [[ "$ARG_2" == "s" || "$ARG_2" == "sd" ]]; then
    echo "Kill of Scheduler..." && sleep 1
    kill -9 $K8S_SCHEDULER_SERVER_PID
    echo "done"

  elif [[ "$ARG_2" == "c" || "$ARG_2" == "cm" ]]; then
    echo "Kill of Controller Manager..." && sleep 1
    kill -9 $K8S_CONTROLLER_SERVER_PID
    echo "done"

  else
    echo "Not found PIDs"
  fi
}

# Prints usage for this script.
function f_help {
  echo "Usage: $ARG_0 [Options] [Arguments]"
  echo
  echo "- Options"
  echo "a, api : apiserver"
  echo "s, sd : scheduler"
  echo "c, cm : controller manager"
  echo "k, kill : kill of process"
  echo
  echo "- Arguments"
  echo "s, start : Start commands"
  echo "m, manual : Manual commands"
  echo
  echo "all : kill of all server (k or kill option only.)"
  echo " ex) $ARG_0 k all or $ARG_0 kill all"
  echo
  echo "a, api : kill of apiserver (k or kill option only.)"
  echo " ex) $ARG_0 k a or $ARG_0 kill api"
  echo
  echo "s, sd : kill of scheduler (k or kill option only.)"
  echo " ex) $ARG_0 k s or $ARG_0 kill sd"
  echo
  echo "c, cm : kill of controller manager (k or kill option only.)"
  echo " ex) $ARG_0 k c or $ARG_0 kill cm"
  echo
}

# Main
ARG_0="$0"
ARG_1="$1"
ARG_2="$2"

case ${ARG_1} in
  a|api)
    if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then
      f_apiserver

    # BUG FIX: was '"ARG_2" == "manual"' (missing $) so the long form never matched.
    elif [[ "$ARG_2" == "m" || "$ARG_2" == "manual" ]]; then
      f_apiserver_manual

    else
      f_help
    fi
  ;;

  s|sd)
    if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then
      f_scheduler

    # BUG FIX: was '"ARG_2" == "manual"' (missing $) so the long form never matched.
    elif [[ "$ARG_2" == "m" || "$ARG_2" == "manual" ]]; then
      f_scheduler_manual

    else
      f_help
    fi
  ;;

  c|cm)
    if [[ "$ARG_2" == "s" || "$ARG_2" == "start" ]]; then
      f_controller_manager

    # BUG FIX: was '"ARG_2" == "manual"' (missing $) so the long form never matched.
    elif [[ "$ARG_2" == "m" || "$ARG_2" == "manual" ]]; then
      f_controller_manager_manual

    else
      f_help
    fi
  ;;

  k|kill)
    f_kill_of_process
  ;;

  *)
    f_help
  ;;

esac
--------------------------------------------------------------------------------
/docker-kubernetes.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#------------------------------------------------#
# Docker kubernetes script
# Maintainer: Yongbok Kim (ruo91@yongbok.net)
#------------------------------------------------#
# BUG FIX: the shebang is only honored on the very first line of the file;
# it was previously placed after the header comment block.
### Global ###
DOCKER="$(which docker)"

# Image name
DOCKER_HUB_USER_ID="ruo91"
IMAGE_ETCD="kubernetes:etcd"
IMAGE_MASTER="kubernetes:master"
IMAGE_MINION="kubernetes:minion"
IMAGE_CLIENT="kubernetes:client"

# Container name
CONTAINER_ETCD="etcd-cluster"
CONTAINER_MASTER="kubernetes-master"
CONTAINER_MINION="kubernetes-minion"
CONTAINER_CLIENT="kubernetes-client"

# Dockerfiles
DOCKER_FILE_ETCD="00_kubernetes-etcd"
DOCKER_FILE_CLIENT="01_kubernetes-client"
DOCKER_FILE_MASTER="02_kubernetes-master"
DOCKER_FILE_MINION="03_kubernetes-minion"

# Functions
# Builds the etcd, client, master and minion images from the local Dockerfiles.
function f_build {
  echo "- Build coreos etcd" && sleep 1
  $DOCKER build --rm -t $IMAGE_ETCD -f $DOCKER_FILE_ETCD $(pwd)
  echo "done"
  echo

  echo "- Build kubernetes client" && sleep 1
  $DOCKER build --rm -t $IMAGE_CLIENT -f $DOCKER_FILE_CLIENT $(pwd)
  echo "done"
  echo

  echo "- Build kubernetes master" && sleep 1
  $DOCKER build --rm -t $IMAGE_MASTER -f $DOCKER_FILE_MASTER $(pwd)
  echo "done"
  echo

  echo "- Build kubernetes minion" && sleep 1
  $DOCKER build --rm -t $IMAGE_MINION -f $DOCKER_FILE_MINION $(pwd)
  echo "done"
  echo

  # Remove none images
  #f_none_rmi > /dev/null 2>&1
}

# Pulls the four pre-built images from Docker Hub instead of building them.
function f_pull_images {
  echo "- Pull images"
  echo "├-- Kubernetes ETCD"
  $DOCKER pull $DOCKER_HUB_USER_ID/$IMAGE_ETCD
  echo
  echo "├-- Kubernetes Master"
  $DOCKER pull $DOCKER_HUB_USER_ID/$IMAGE_MASTER
  echo
  echo "├-- Kubernetes Minion"
  $DOCKER pull $DOCKER_HUB_USER_ID/$IMAGE_MINION
  echo
  echo "└-- Kubernetes Client"
  $DOCKER pull $DOCKER_HUB_USER_ID/$IMAGE_CLIENT
  echo "done"
  echo
}

# Creates and wires up the whole test cluster: 3x etcd, 1x master, 2x minion,
# 1x client, with static IPs assigned via pipework. Downloads pipework and
# retries once if it is not installed yet.
function f_run {
  DOCKER_IFACE="docker0"
  DOCKER_PIPEWORK="/bin/pipework"
  DOCKER_PIPEWORK_URL="https://raw.githubusercontent.com/jpetazzo/pipework/master/pipework"

  # ARP & SSH known hosts flush
  echo "- ARP & SSH known hosts flush"
  echo "├-- ARP"
  for (( i=1; i<8; i++ )); do
    arp -d 172.17.1.$i > /dev/null 2>&1
  done
  echo "└-- SSH known hosts"
  # BUG FIX: was "cat /dev/null > file > /dev/null 2>&1" (double stdout
  # redirect); truncate the file and only silence errors (e.g. missing ~/.ssh).
  cat /dev/null > $HOME/.ssh/known_hosts 2> /dev/null
  echo "done"
  echo

  if [ -f "$DOCKER_PIPEWORK" ]; then
    ## CoreOS ETCD x3 ##
    echo "- ETCD Cluster"
    for (( i=0; i<3; i++ )); do
      echo "├-- Run $CONTAINER_ETCD-$i"
      # Prefer the Docker Hub image when present, otherwise the local build.
      if [ "$(docker images | grep -v 'REPOSITORY' | grep 'ruo91/kubernetes' | head -n 4 | awk '{ print $2}' | grep 'etcd')" == "etcd" ]; then
        $DOCKER run -d --name="$CONTAINER_ETCD-$i" -h "$CONTAINER_ETCD-$i" $DOCKER_HUB_USER_ID/$IMAGE_ETCD > /dev/null 2>&1

      else
        $DOCKER run -d --name="$CONTAINER_ETCD-$i" -h "$CONTAINER_ETCD-$i" $IMAGE_ETCD > /dev/null 2>&1
      fi
    done

    # Static IP
    echo "├-- Static IP Setting"
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_ETCD-0 172.17.1.1/16 > /dev/null 2>&1
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_ETCD-1 172.17.1.2/16 > /dev/null 2>&1
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_ETCD-2 172.17.1.3/16 > /dev/null 2>&1

    # Start etcd cluster
    for (( i=0; i<3; i++ )); do
      echo "├-- Start etcd #$i"
      $DOCKER exec $CONTAINER_ETCD-$i /bin/bash etcd-cluster.sh etcd start > /dev/null 2>&1
    done
    sleep 3

    # Flannel Setting: publish the overlay network config to etcd.
    ETCD_SERVER_1="$(docker inspect -f '{{ .NetworkSettings.IPAddress }}' etcd-cluster-0)"
    echo "└-- Flannel Setting"
    curl -L http://$ETCD_SERVER_1:4001/v2/keys/overlay/network/config -XPUT --data-urlencode value@conf/network/flannel.json > /dev/null 2>&1
    echo "done"
    echo

    ## Kubernetes Master ##
    echo "- Kubernetes Master"
    echo "├-- Run $CONTAINER_MASTER"
    if [ "$(docker images | grep -v 'REPOSITORY' | grep 'ruo91/kubernetes' | head -n 4 | awk '{ print $2}' | grep 'master')" == "master" ]; then
      $DOCKER run -d --name="$CONTAINER_MASTER" -h "$CONTAINER_MASTER" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules -p 8080:8080 $DOCKER_HUB_USER_ID/$IMAGE_MASTER > /dev/null 2>&1

    else
      $DOCKER run -d --name="$CONTAINER_MASTER" -h "$CONTAINER_MASTER" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules -p 8080:8080 $IMAGE_MASTER > /dev/null 2>&1
    fi

    # Static IP
    echo "├-- Static IP Setting"
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_MASTER 172.17.1.4/16 > /dev/null 2>&1

    # Start Flannel
    echo "├-- Start Flannel"
    $DOCKER exec $CONTAINER_MASTER /bin/bash flannel.sh flannel start > /dev/null 2>&1

    # Add bridge
    echo "├-- Add Bridge"
    $DOCKER exec $CONTAINER_MASTER /bin/bash k8s_master_add_bridge.sh > /dev/null 2>&1

    # Start API, Scheduler, Controller Manager
    echo "├-- Start API Server"
    $DOCKER exec $CONTAINER_MASTER /bin/bash k8s.sh api start > /dev/null 2>&1
    sleep 3

    echo "├-- Start Scheduler"
    $DOCKER exec $CONTAINER_MASTER /bin/bash k8s.sh sd start > /dev/null 2>&1

    echo "└-- Start Controller Manager"
    $DOCKER exec $CONTAINER_MASTER /bin/bash k8s.sh cm start > /dev/null 2>&1
    echo "done"
    echo

    ## Kubernetes Minion x2 ##
    echo "- Kubernetes Minion"
    for (( i=0; i<2; i++ )); do
      echo "├-- Run $CONTAINER_MINION-$i"
      if [ "$(docker images | grep -v 'REPOSITORY' | grep 'ruo91/kubernetes' | head -n 4 | awk '{ print $2}' | grep 'minion')" == "minion" ]; then
        $DOCKER run -d --name="$CONTAINER_MINION-$i" -h "$CONTAINER_MINION-$i" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules $DOCKER_HUB_USER_ID/$IMAGE_MINION > /dev/null 2>&1

      else
        $DOCKER run -d --name="$CONTAINER_MINION-$i" -h "$CONTAINER_MINION-$i" --privileged=true -v /dev:/dev -v /lib/modules:/lib/modules $IMAGE_MINION > /dev/null 2>&1
      fi
    done
    sleep 3

    # Static IP
    echo "├-- Static IP Setting"
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_MINION-0 172.17.1.5/16 > /dev/null 2>&1
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_MINION-1 172.17.1.6/16 > /dev/null 2>&1

    # Start Flannel
    echo "├-- Start Flannel"
    $DOCKER exec $CONTAINER_MINION-0 /bin/bash minion-init.sh > /dev/null 2>&1
    $DOCKER exec $CONTAINER_MINION-1 /bin/bash minion-init.sh > /dev/null 2>&1

    # Start Kubelet, Proxy
    echo "├-- Start Kubelet"
    $DOCKER exec $CONTAINER_MINION-0 /bin/bash minion.sh kubelet start > /dev/null 2>&1
    $DOCKER exec $CONTAINER_MINION-1 /bin/bash minion.sh kubelet start > /dev/null 2>&1

    echo "└-- Start Proxy"
    $DOCKER exec $CONTAINER_MINION-0 /bin/bash minion.sh proxy start > /dev/null 2>&1
    $DOCKER exec $CONTAINER_MINION-1 /bin/bash minion.sh proxy start > /dev/null 2>&1
    echo "done"
    echo

    # Kubernetes Client
    echo "- Kubernetes Client"
    echo "├-- Run $CONTAINER_CLIENT"
    if [ "$(docker images | grep -v 'REPOSITORY' | grep 'ruo91/kubernetes' | head -n 4 | awk '{ print $2}' | grep 'client')" == "client" ]; then
      $DOCKER run -d --name="$CONTAINER_CLIENT" -h "$CONTAINER_CLIENT" $DOCKER_HUB_USER_ID/$IMAGE_CLIENT > /dev/null 2>&1

    else
      $DOCKER run -d --name="$CONTAINER_CLIENT" -h "$CONTAINER_CLIENT" $IMAGE_CLIENT > /dev/null 2>&1
    fi

    # Static IP
    echo "└-- Static IP Setting"
    $DOCKER_PIPEWORK $DOCKER_IFACE $CONTAINER_CLIENT 172.17.1.7/16 > /dev/null 2>&1
    echo "done"
    echo

    # Dashboard
    DASHBOARD="kubectl create -f /opt/dashboard.yaml -s 172.17.1.4:8080"
    echo "- Dashboard"
    echo "├-- Create Dashboard"
    $DOCKER exec $CONTAINER_CLIENT $DASHBOARD > /dev/null 2>&1
    echo "└-- URL: http://localhost:8080/"
    echo "done"
    echo

  else
    # pipework not installed yet: fetch it once, make it executable, retry.
    curl -o $DOCKER_PIPEWORK -L "$DOCKER_PIPEWORK_URL" > /dev/null 2>&1
    chmod a+x $DOCKER_PIPEWORK

    # Recursive f_run
    f_run
  fi
}

# Removes dangling (<none>) images.
function f_none_rmi {
  echo "- Remove images..." && sleep 1

  # BUG FIX: was "grep ''" which matches EVERY line of the listing and
  # therefore removed ALL images, not just the untagged <none> ones.
  $DOCKER rmi $(docker images | grep '<none>' | awk '{ printf $3 " "}') > /dev/null 2>&1
  echo "done"
  echo
}

# Stops and removes every container created by f_run.
function f_stop_rm {
  echo "- Stop & Remove all containers..." && sleep 1

  # Stop
  $DOCKER stop $CONTAINER_ETCD-0 $CONTAINER_ETCD-1 $CONTAINER_ETCD-2 \
  $CONTAINER_MASTER $CONTAINER_MINION-0 $CONTAINER_MINION-1 $CONTAINER_CLIENT > /dev/null 2>&1

  # Remove
  $DOCKER rm $CONTAINER_ETCD-0 $CONTAINER_ETCD-1 $CONTAINER_ETCD-2 \
  $CONTAINER_MASTER $CONTAINER_MINION-0 $CONTAINER_MINION-1 $CONTAINER_CLIENT > /dev/null 2>&1

  # Remove none images
  #f_none_rmi
  echo "done"
  echo
}

# Prints usage for this script.
function f_help {
  echo "Usage: $ARG_0 [Options] [Arguments]"
  echo
  echo "- Options"
  echo "b, build : Build containers"
  echo "p, pull : Pull images"
  echo "r, run : Run containers"
  echo "sr : Stop & Remove all containers"
  echo "none : Remove images"
  echo
  echo "- Arguments"
  echo "y, yes : build, run, sr option only"
  echo
  echo "rm, rmi : Remove images (none option only.)"
  echo " ex) $ARG_0 n rm or $ARG_0 none rmi"
  echo
}

# Main
ARG_0="$0"
ARG_1="$1"
ARG_2="$2"

case ${ARG_1} in
  b|build)
    if [[ "$ARG_2" == "y" || "$ARG_2" == "yes" ]]; then
      f_build

    else
      f_help
    fi
  ;;

  p|pull)
    if [[ "$ARG_2" == "y" || "$ARG_2" == "yes" ]]; then
      f_pull_images

    else
      f_help
    fi
  ;;

  r|run)
    if [[ "$ARG_2" == "y" || "$ARG_2" == "yes" ]]; then
      f_run

    else
      f_help
    fi
  ;;

  sr)
    if [[ "$ARG_2" == "y" || "$ARG_2" == "yes" ]]; then
      f_stop_rm

    else
      f_help
    fi
  ;;

  n|none)
    if [[ "$ARG_2" == "rm" || "$ARG_2" == "rmi" ]]; then
      f_none_rmi

    else
      f_help
    fi
  ;;

  *)
    f_help
  ;;
esac
--------------------------------------------------------------------------------