├── .gitignore
├── README.md
├── apps
│   ├── gitlab
│   │   └── start.sh
│   ├── marathon-lb
│   │   └── start.sh
│   ├── mysql
│   │   └── start.sh
│   ├── redis
│   │   ├── redis.conf
│   │   └── start.sh
│   └── zookeeper
│       └── start.sh
├── config.properties
├── docs
│   ├── 1-pre-centos.md
│   ├── 1-pre-ubuntu.md
│   ├── 2-kubernetes-simple.md
│   ├── 3-kubernetes-with-ca.md
│   └── 4-microservice-deploy.md
├── gen-config.sh
├── images
│   └── k8s-concept.jpg
├── kubernetes-simple
│   ├── all-node
│   │   └── kube-calico.service
│   ├── master-node
│   │   ├── etcd.service
│   │   ├── kube-apiserver.service
│   │   ├── kube-controller-manager.service
│   │   └── kube-scheduler.service
│   ├── services
│   │   └── kube-dns.yaml
│   └── worker-node
│       ├── 10-calico.conf
│       ├── kube-proxy.kubeconfig
│       ├── kube-proxy.service
│       ├── kubelet.kubeconfig
│       └── kubelet.service
├── kubernetes-with-ca
│   ├── all-node
│   │   └── kube-calico.service
│   ├── ca
│   │   ├── admin
│   │   │   └── admin-csr.json
│   │   ├── ca-config.json
│   │   ├── ca-csr.json
│   │   ├── calico
│   │   │   └── calico-csr.json
│   │   ├── etcd
│   │   │   └── etcd-csr.json
│   │   ├── kube-proxy
│   │   │   └── kube-proxy-csr.json
│   │   └── kubernetes
│   │       └── kubernetes-csr.json
│   ├── master-node
│   │   ├── etcd.service
│   │   ├── kube-apiserver.service
│   │   ├── kube-controller-manager.service
│   │   └── kube-scheduler.service
│   ├── services
│   │   ├── kube-dashboard.yaml
│   │   └── kube-dns.yaml
│   └── worker-node
│       ├── 10-calico.conf
│       ├── kube-proxy.service
│       └── kubelet.service
└── service-config
    ├── api-gateway.yaml
    ├── course-service.yaml
    ├── message-service.yaml
    └── user-service.yaml

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# maven ignore
target/
*.jar
*.war
*.zip
*.tar
*.tar.gz

# eclipse ignore
.settings/
.project
.classpath

# idea ignore
.idea/
*.ipr
*.iml
*.iws

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## Docker Basics
### 1. Install Docker
```bash
# update the yum packages to the latest
yum -y update

# add the docker yum repo as follows
cat >/etc/yum.repos.d/docker.repo <<-EOF
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF

yum install -y docker-selinux    # install docker-selinux

yum install -y docker-engine     # install docker-engine

systemctl start docker.service   # start the docker service now

systemctl enable docker.service  # start the docker service on boot
```
### 2. Configure and Use Docker
```bash
docker info      # show docker information

docker version   # show docker server and client versions

docker search '<image name>'   # search for a docker image

docker pull '<image name>'     # download or update a docker image

docker images    # list the locally installed (downloaded) images

docker run -it java java -version   # docker run: run a container; -it: attach an interactive terminal; java: run the image named java; java -version: the command executed once the terminal is open

docker run -it --entrypoint bash openjdk:7-jre   # enter the openjdk image with a command-line shell

docker run java env   # show the environment variables of the docker image named java

# appending -d=true (or simply -d) to docker run makes the container run in background mode

# appending --rm to docker run removes the container as soon as it finishes

docker exec   # enter a running container; attach re-connects to the container's session instead (attach is not recommended: the session is shared, and detaching may stop the container)

docker create/start/stop/pause/unpause   # container lifecycle commands

docker create -it --name=myjava java java -version   # create a container named myjava; the trailing command is what the container will run

docker start myjava   # start the container just created (for testing)

docker create --name=mysql -e MYSQL_ROOT_PASSWORD=jiang -p 3306:3306 mysql   # create a mysql container: -e sets a value in the container's environment (service configuration is usually passed this way); -p 3306:3306 forwards host port 3306 to container port 3306 (--net=host can be used instead to use the host's ports directly without forwarding); the final mysql is the image name

docker start '<container name or ID>'   # start the mysql container just created

docker exec -it '<container name or ID>' bash   # enter the container (if it errors saying bash is missing, use: docker exec -it '<container name>' sh instead)

docker stop '<container name>'   # stop the mysql container

docker ps -a   # list all docker containers

docker rm '<id>'   # remove a container; '<id>' is the id shown by docker ps -a

docker ps      # list the containers docker is currently running

docker logs -f '<container ID>'   # follow a service container's log (the container ID shown by docker ps)

docker commit '<id>' <image name>   # create an image from a container; '<id>' is the container ID (from docker ps -a); all images can be listed with docker images

/usr/lib/systemd/system/docker.service   # location of the docker service configuration file

tail -f /var/log/messages | grep docker  # watch the docker daemon log (this log path may not be correct)

docker login   # log in to a registry; you are then prompted for username and password

docker tag zookeeper:3.5 test/zookeeper:3.5   # tag the zookeeper image (:3.5 is the image version)

docker push test/zookeeper:3.5   # push the zookeeper image to the registry (:3.5 is the image version)
```
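To tie the commands above together, here is a minimal end-to-end sketch; the image and container names are only examples:

```bash
docker pull redis                             # fetch an image
docker run -d --name my-redis redis           # run it as a named background container
docker ps                                     # confirm the container is running
docker logs my-redis                          # inspect its output
docker exec -it my-redis sh                   # open a shell inside it
docker stop my-redis && docker rm my-redis    # tear it down again
```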
### 3. Using a Dockerfile
```bash
touch Dockerfile   # create the Dockerfile
vi Dockerfile      # edit it
FROM centos                        # 'FROM' builds on top of a base image
MAINTAINER chiangfire@outlook.com  # 'MAINTAINER' the image author
ADD nginx-1.12.2.tar.gz /usr/local/src   # 'ADD' copies (and unpacks) nginx-1.12.2.tar.gz into /usr/local/src
EXPOSE 6379                        # 'EXPOSE' declares that the image listens on port 6379
ENTRYPOINT java -version           # 'ENTRYPOINT' the command run when the image starts; it must run in the foreground, usually your own application's startup command
ENV JAVA_HOME /usr/lib/java-8      # 'ENV' sets an environment variable
RUN '<commands run while building the image (e.g. installing software)>'   # several commands can be split across lines with \, starting the next line with &&; ideally a Dockerfile has only one RUN, because every RUN adds an image layer
docker build -t '<image name>' '<build context directory>'   # build the image; . can be used for the current directory

# Supervisor + docker: can store credentials and run several processes / background processes in one container (search the web for details)
```
### 4. Using Volume Storage
```bash
docker run --rm=true -it -v /storage:/leader javad /bin/bash   # mount the host directory /storage at /leader inside the javad container, then start /bin/bash (note: the directory survives when the container is removed)
docker run --rm=true --privileged=true -it -v /storage:/leader javad /bin/bash   # same as above plus --privileged=true; several containers can mount the same directory to share files
docker run --rm=true --privileged=true --volumes-from='<container ID>' -it javad /bin/bash   # same as above plus --volumes-from='<container ID>', which mounts another container's volumes into the javad container
docker run --rm=true --link=mysqlserver:myserver -it javad /bin/bash   # --link=<container name>:<alias> makes another container on the same host reachable from the javad container (myserver is that container's alias inside the current container; verify with ping myserver)
docker run --rm=true --net=container:mysqlserver javad ip addr   # share another container's network namespace (mysqlserver = container name); useful when several services must share one network
docker inspect '<container ID>'   # show the real host directory behind a volume (you can write data directly in that directory)
```
### 5. Connecting Docker Networks via Host Routing (fairly efficient, recommended) — assume two docker hosts, 128 and 130
```bash
# on host 128:
vi /usr/lib/systemd/system/docker.service
# change the daemon line so the bridge on 128 uses the 172.18.x.x range
ExecStart=/usr/bin/docker daemon --bip=172.18.42.1/16 -H fd:// -H=unix:///var/run/docker.sock
systemctl daemon-reload
# restart docker on host 128

# on host 128 run:
route add -net 172.17.0.0/16 gw 192.168.18.130
# on host 130 run:
route add -net 172.18.0.0/16 gw 192.168.18.128

# ping across to see whether the containers are reachable; if not, try clearing the firewall rules

# note: when there is time, look into docker + Open vSwitch for connecting networks
```
### 6. Docker Compose (orchestration-lite — better to use k8s) — containers can reach each other's services by service name instead of IP; usage as follows
```bash
# 1. define docker-compose.yml with content like:
version: '3'                        # docker compose file format version
services:                           # service definitions
  message-service:                  # service name
    image: message-service:latest   # service image name
    ports:
      - 8080:8080                   # port the service exposes

  user-service:                     # service name
    image: user-service:latest      # service image
    command:                        # command-line arguments
      - "--mysql.address=192.168.0.1"   # argument appended when the user-service container starts; readable in the spring configuration as ${mysql.address}

  user-edge-service:                # service name
    image: user-edge-service:latest # image name
    links:                          # services it depends on
      - user-service                # dependency service name
      - message-service             # dependency service name
    command:                        # command-line arguments
      - "--redis.address=127.0.0.1" # argument appended when the user-edge-service container starts; readable in the spring configuration as ${redis.address}

# 2. run docker-compose in the background
docker-compose up -d
```
### 7. Appendix
```bash
netstat -nlpt               # show all port mappings/listeners
netstat -nlpt | grep 3306   # check usage of port 3306
service mysqld stop         # stop the service named mysqld
mysql -uroot -p             # use mysql on centos7
env                         # show environment variables on centos7
```
### 8. Removing Docker
```bash
yum list installed | grep docker   # list the packages docker installed
yum -y remove '<installed package name>'   # uninstall docker
rm -rf /var/lib/docker             # delete docker images, containers, volumes and custom configuration files
```
### 9. Notes
```bash
# containers can reach each other by default; inter-container communication can be disabled with -icc=false (/usr/bin/docker daemon --icc=false)
# a private registry can be set up with: https://github.com/goharbor/harbor/releases
```

![image](https://github.com/chiangfire/kubernetes-starter/blob/master/images/k8s-concept.jpg)
**What follows documents building a kubernetes cluster in an unrestricted network environment, along with cluster usage, common commands and application deployment. The authentication/authorization and service-discovery modules are stripped out first and the cluster is built up from the most essential modules; authentication/authorization and service discovery are then added step by step, so kubernetes becomes familiar gradually during the build.**

## [I. Environment preparation][1-1] [centos][1-1] [ubuntu][1-2]
## [II. Basic cluster deployment - kubernetes-simple][2]
## [III. Full cluster deployment - kubernetes-with-ca][3]
## [IV. Deploying our microservices on kubernetes][4]

[1-1]: https://github.com/chiangfire/kubernetes-starter/blob/master/docs/1-pre-centos.md
[1-2]: https://github.com/chiangfire/kubernetes-starter/blob/master/docs/1-pre-ubuntu.md
[2]: https://github.com/chiangfire/kubernetes-starter/blob/master/docs/2-kubernetes-simple.md
[3]: https://github.com/chiangfire/kubernetes-starter/blob/master/docs/3-kubernetes-with-ca.md
[4]: https://github.com/chiangfire/kubernetes-starter/blob/master/docs/4-microservice-deploy.md

--------------------------------------------------------------------------------
/apps/gitlab/start.sh:
--------------------------------------------------------------------------------
#!/bin/bash
HOST_NAME=gitlab.mooc.com
GITLAB_DIR=/Users/Michael/work/i/apps/gitlab
docker stop gitlab
docker rm gitlab
docker run -d \
    --hostname ${HOST_NAME} \
    -p 9443:443 -p 9080:80 -p 2222:22 \
    --name gitlab \
    -v ${GITLAB_DIR}/config:/etc/gitlab \
    -v ${GITLAB_DIR}/logs:/var/log/gitlab \
    -v ${GITLAB_DIR}/data:/var/opt/gitlab \
    registry.cn-hangzhou.aliyuncs.com/imooc/gitlab-ce:latest

--------------------------------------------------------------------------------
/apps/marathon-lb/start.sh:
--------------------------------------------------------------------------------
docker run -p 9090:9090 \
    -e PORTS=9090 mesosphere/marathon-lb:v1.11.1 sse --group external --marathon http://192.168.1.12:8080

--------------------------------------------------------------------------------
/apps/mysql/start.sh:
--------------------------------------------------------------------------------
#!/bin/bash
cur_dir=`pwd`
docker stop imooc-mysql
docker rm imooc-mysql
docker run --net imooc-network --name imooc-mysql -v ${cur_dir}/conf:/etc/mysql/conf.d -v ${cur_dir}/data:/var/lib/mysql -p 3306:3306 -e MYSQL_ROOT_PASSWORD=aA111111 -d mysql:8
-------------------------------------------------------------------------------- /apps/redis/redis.conf: -------------------------------------------------------------------------------- 1 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 2 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 3 | daemonize no 4 | 5 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 6 | # default. You can specify a custom pid file location here. 7 | pidfile /var/run/redis/redis-server.pid 8 | 9 | # Accept connections on the specified port, default is 6379. 10 | # If port 0 is specified Redis will not listen on a TCP socket. 11 | port 6379 12 | 13 | # By default Redis listens for connections from all the network interfaces 14 | # available on the server. It is possible to listen to just one or multiple 15 | # interfaces using the "bind" configuration directive, followed by one or 16 | # more IP addresses. 17 | # 18 | # Examples: 19 | # 20 | # bind 192.168.1.100 10.0.0.1 21 | bind 0.0.0.0 22 | 23 | # Specify the path for the unix socket that will be used to listen for 24 | # incoming connections. There is no default, so Redis will not listen 25 | # on a unix socket when not specified. 26 | # 27 | # unixsocket /var/run/redis/redis.sock 28 | # unixsocketperm 755 29 | 30 | # Close the connection after a client is idle for N seconds (0 to disable) 31 | timeout 0 32 | 33 | # TCP keepalive. 34 | # 35 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 36 | # of communication. This is useful for two reasons: 37 | # 38 | # 1) Detect dead peers. 39 | # 2) Take the connection alive from the point of view of network 40 | # equipment in the middle. 41 | # 42 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 43 | # Note that to close the connection the double of the time is needed. 44 | # On other kernels the period depends on the kernel configuration. 45 | # 46 | # A reasonable value for this option is 60 seconds. 47 | tcp-keepalive 0 48 | 49 | # Specify the server verbosity level. 50 | # This can be one of: 51 | # debug (a lot of information, useful for development/testing) 52 | # verbose (many rarely useful info, but not a mess like the debug level) 53 | # notice (moderately verbose, what you want in production probably) 54 | # warning (only very important / critical messages are logged) 55 | loglevel notice 56 | 57 | # Specify the log file name. Also the empty string can be used to force 58 | # Redis to log on the standard output. Note that if you use standard 59 | # output for logging but daemonize, logs will be sent to /dev/null 60 | 61 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 62 | # and optionally update the other syslog parameters to suit your needs. 63 | # syslog-enabled no 64 | 65 | # Specify the syslog identity. 66 | # syslog-ident redis 67 | 68 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 69 | # syslog-facility local0 70 | 71 | # Set the number of databases. The default database is DB 0, you can select 72 | # a different one on a per-connection basis using SELECT where 73 | # dbid is a number between 0 and 'databases'-1 74 | databases 16 75 | 76 | ################################ SNAPSHOTTING ################################ 77 | # 78 | # Save the DB on disk: 79 | # 80 | # save 81 | # 82 | # Will save the DB if both the given number of seconds and the given 83 | # number of write operations against the DB occurred. 
84 | # 85 | # In the example below the behaviour will be to save: 86 | # after 900 sec (15 min) if at least 1 key changed 87 | # after 300 sec (5 min) if at least 10 keys changed 88 | # after 60 sec if at least 10000 keys changed 89 | # 90 | # Note: you can disable saving at all commenting all the "save" lines. 91 | # 92 | # It is also possible to remove all the previously configured save 93 | # points by adding a save directive with a single empty string argument 94 | # like in the following example: 95 | # 96 | # save "" 97 | 98 | save 900 1 99 | save 300 10 100 | save 60 10000 101 | 102 | # By default Redis will stop accepting writes if RDB snapshots are enabled 103 | # (at least one save point) and the latest background save failed. 104 | # This will make the user aware (in a hard way) that data is not persisting 105 | # on disk properly, otherwise chances are that no one will notice and some 106 | # disaster will happen. 107 | # 108 | # If the background saving process will start working again Redis will 109 | # automatically allow writes again. 110 | # 111 | # However if you have setup your proper monitoring of the Redis server 112 | # and persistence, you may want to disable this feature so that Redis will 113 | # continue to work as usual even if there are problems with disk, 114 | # permissions, and so forth. 115 | stop-writes-on-bgsave-error yes 116 | 117 | # Compress string objects using LZF when dump .rdb databases? 118 | # For default that's set to 'yes' as it's almost always a win. 119 | # If you want to save some CPU in the saving child set it to 'no' but 120 | # the dataset will likely be bigger if you have compressible values or keys. 121 | rdbcompression yes 122 | 123 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 124 | # This makes the format more resistant to corruption but there is a performance 125 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 126 | # for maximum performances. 127 | # 128 | # RDB files created with checksum disabled have a checksum of zero that will 129 | # tell the loading code to skip the check. 130 | rdbchecksum yes 131 | 132 | # The filename where to dump the DB 133 | dbfilename dump.rdb 134 | 135 | # The working directory. 136 | # 137 | # The DB will be written inside this directory, with the filename specified 138 | # above using the 'dbfilename' configuration directive. 139 | # 140 | # The Append Only File will also be created inside this directory. 141 | # 142 | # Note that you must specify a directory here, not a file name. 143 | dir /data 144 | 145 | ################################# REPLICATION ################################# 146 | 147 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 148 | # another Redis server. Note that the configuration is local to the slave 149 | # so for example it is possible to configure the slave to save the DB with a 150 | # different interval, or to listen to another port, and so on. 151 | # 152 | # slaveof 153 | 154 | # If the master is password protected (using the "requirepass" configuration 155 | # directive below) it is possible to tell the slave to authenticate before 156 | # starting the replication synchronization process, otherwise the master will 157 | # refuse the slave request. 
158 | # 159 | # masterauth 160 | 161 | # When a slave loses its connection with the master, or when the replication 162 | # is still in progress, the slave can act in two different ways: 163 | # 164 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 165 | # still reply to client requests, possibly with out of date data, or the 166 | # data set may just be empty if this is the first synchronization. 167 | # 168 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 169 | # an error "SYNC with master in progress" to all the kind of commands 170 | # but to INFO and SLAVEOF. 171 | # 172 | slave-serve-stale-data yes 173 | 174 | # You can configure a slave instance to accept writes or not. Writing against 175 | # a slave instance may be useful to store some ephemeral data (because data 176 | # written on a slave will be easily deleted after resync with the master) but 177 | # may also cause problems if clients are writing to it because of a 178 | # misconfiguration. 179 | # 180 | # Since Redis 2.6 by default slaves are read-only. 181 | # 182 | # Note: read only slaves are not designed to be exposed to untrusted clients 183 | # on the internet. It's just a protection layer against misuse of the instance. 184 | # Still a read only slave exports by default all the administrative commands 185 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 186 | # security of read only slaves using 'rename-command' to shadow all the 187 | # administrative / dangerous commands. 188 | slave-read-only yes 189 | 190 | # Slaves send PINGs to server in a predefined interval. It's possible to change 191 | # this interval with the repl_ping_slave_period option. The default value is 10 192 | # seconds. 193 | # 194 | # repl-ping-slave-period 10 195 | 196 | # The following option sets the replication timeout for: 197 | # 198 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 199 | # 2) Master timeout from the point of view of slaves (data, pings). 200 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 201 | # 202 | # It is important to make sure that this value is greater than the value 203 | # specified for repl-ping-slave-period otherwise a timeout will be detected 204 | # every time there is low traffic between the master and the slave. 205 | # 206 | # repl-timeout 60 207 | 208 | # Disable TCP_NODELAY on the slave socket after SYNC? 209 | # 210 | # If you select "yes" Redis will use a smaller number of TCP packets and 211 | # less bandwidth to send data to slaves. But this can add a delay for 212 | # the data to appear on the slave side, up to 40 milliseconds with 213 | # Linux kernels using a default configuration. 214 | # 215 | # If you select "no" the delay for data to appear on the slave side will 216 | # be reduced but more bandwidth will be used for replication. 217 | # 218 | # By default we optimize for low latency, but in very high traffic conditions 219 | # or when the master and slaves are many hops away, turning this to "yes" may 220 | # be a good idea. 221 | repl-disable-tcp-nodelay no 222 | 223 | # Set the replication backlog size. The backlog is a buffer that accumulates 224 | # slave data when slaves are disconnected for some time, so that when a slave 225 | # wants to reconnect again, often a full resync is not needed, but a partial 226 | # resync is enough, just passing the portion of data the slave missed while 227 | # disconnected. 
228 | # 229 | # The biggest the replication backlog, the longer the time the slave can be 230 | # disconnected and later be able to perform a partial resynchronization. 231 | # 232 | # The backlog is only allocated once there is at least a slave connected. 233 | # 234 | # repl-backlog-size 1mb 235 | 236 | # After a master has no longer connected slaves for some time, the backlog 237 | # will be freed. The following option configures the amount of seconds that 238 | # need to elapse, starting from the time the last slave disconnected, for 239 | # the backlog buffer to be freed. 240 | # 241 | # A value of 0 means to never release the backlog. 242 | # 243 | # repl-backlog-ttl 3600 244 | 245 | # The slave priority is an integer number published by Redis in the INFO output. 246 | # It is used by Redis Sentinel in order to select a slave to promote into a 247 | # master if the master is no longer working correctly. 248 | # 249 | # A slave with a low priority number is considered better for promotion, so 250 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 251 | # pick the one with priority 10, that is the lowest. 252 | # 253 | # However a special priority of 0 marks the slave as not able to perform the 254 | # role of master, so a slave with priority of 0 will never be selected by 255 | # Redis Sentinel for promotion. 256 | # 257 | # By default the priority is 100. 258 | slave-priority 100 259 | 260 | # It is possible for a master to stop accepting writes if there are less than 261 | # N slaves connected, having a lag less or equal than M seconds. 262 | # 263 | # The N slaves need to be in "online" state. 264 | # 265 | # The lag in seconds, that must be <= the specified value, is calculated from 266 | # the last ping received from the slave, that is usually sent every second. 267 | # 268 | # This option does not GUARANTEES that N replicas will accept the write, but 269 | # will limit the window of exposure for lost writes in case not enough slaves 270 | # are available, to the specified number of seconds. 271 | # 272 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 273 | # 274 | # min-slaves-to-write 3 275 | # min-slaves-max-lag 10 276 | # 277 | # Setting one or the other to 0 disables the feature. 278 | # 279 | # By default min-slaves-to-write is set to 0 (feature disabled) and 280 | # min-slaves-max-lag is set to 10. 281 | 282 | ################################## SECURITY ################################### 283 | 284 | # Require clients to issue AUTH before processing any other 285 | # commands. This might be useful in environments in which you do not trust 286 | # others with access to the host running redis-server. 287 | # 288 | # This should stay commented out for backward compatibility and because most 289 | # people do not need auth (e.g. they run their own servers). 290 | # 291 | # Warning: since Redis is pretty fast an outside user can try up to 292 | # 150k passwords per second against a good box. This means that you should 293 | # use a very strong password otherwise it will be very easy to break. 294 | # 295 | # requirepass foobared 296 | 297 | # Command renaming. 298 | # 299 | # It is possible to change the name of dangerous commands in a shared 300 | # environment. For instance the CONFIG command may be renamed into something 301 | # hard to guess so that it will still be available for internal-use tools 302 | # but not available for general clients. 
303 | # 304 | # Example: 305 | # 306 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 307 | # 308 | # It is also possible to completely kill a command by renaming it into 309 | # an empty string: 310 | # 311 | # rename-command CONFIG "" 312 | # 313 | # Please note that changing the name of commands that are logged into the 314 | # AOF file or transmitted to slaves may cause problems. 315 | 316 | ################################### LIMITS #################################### 317 | 318 | # Set the max number of connected clients at the same time. By default 319 | # this limit is set to 10000 clients, however if the Redis server is not 320 | # able to configure the process file limit to allow for the specified limit 321 | # the max number of allowed clients is set to the current file limit 322 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 323 | # 324 | # Once the limit is reached Redis will close all the new connections sending 325 | # an error 'max number of clients reached'. 326 | # 327 | # maxclients 10000 328 | 329 | # Don't use more memory than the specified amount of bytes. 330 | # When the memory limit is reached Redis will try to remove keys 331 | # according to the eviction policy selected (see maxmemory-policy). 332 | # 333 | # If Redis can't remove keys according to the policy, or if the policy is 334 | # set to 'noeviction', Redis will start to reply with errors to commands 335 | # that would use more memory, like SET, LPUSH, and so on, and will continue 336 | # to reply to read-only commands like GET. 337 | # 338 | # This option is usually useful when using Redis as an LRU cache, or to set 339 | # a hard memory limit for an instance (using the 'noeviction' policy). 340 | # 341 | # WARNING: If you have slaves attached to an instance with maxmemory on, 342 | # the size of the output buffers needed to feed the slaves are subtracted 343 | # from the used memory count, so that network problems / resyncs will 344 | # not trigger a loop where keys are evicted, and in turn the output 345 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 346 | # of more keys, and so forth until the database is completely emptied. 347 | # 348 | # In short... if you have slaves attached it is suggested that you set a lower 349 | # limit for maxmemory so that there is some free RAM on the system for slave 350 | # output buffers (but this is not needed if the policy is 'noeviction'). 351 | # 352 | # maxmemory 353 | 354 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 355 | # is reached. You can select among five behaviors: 356 | # 357 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 358 | # allkeys-lru -> remove any key accordingly to the LRU algorithm 359 | # volatile-random -> remove a random key with an expire set 360 | # allkeys-random -> remove a random key, any key 361 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 362 | # noeviction -> don't expire at all, just return an error on write operations 363 | # 364 | # Note: with any of the above policies, Redis will return an error on write 365 | # operations, when there are not suitable keys for eviction. 
366 | # 367 | # At the date of writing this commands are: set setnx setex append 368 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 369 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 370 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 371 | # getset mset msetnx exec sort 372 | # 373 | # The default is: 374 | # 375 | # maxmemory-policy volatile-lru 376 | 377 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 378 | # algorithms (in order to save memory), so you can select as well the sample 379 | # size to check. For instance for default Redis will check three keys and 380 | # pick the one that was used less recently, you can change the sample size 381 | # using the following configuration directive. 382 | # 383 | # maxmemory-samples 3 384 | 385 | ############################## APPEND ONLY MODE ############################### 386 | 387 | # By default Redis asynchronously dumps the dataset on disk. This mode is 388 | # good enough in many applications, but an issue with the Redis process or 389 | # a power outage may result into a few minutes of writes lost (depending on 390 | # the configured save points). 391 | # 392 | # The Append Only File is an alternative persistence mode that provides 393 | # much better durability. For instance using the default data fsync policy 394 | # (see later in the config file) Redis can lose just one second of writes in a 395 | # dramatic event like a server power outage, or a single write if something 396 | # wrong with the Redis process itself happens, but the operating system is 397 | # still running correctly. 398 | # 399 | # AOF and RDB persistence can be enabled at the same time without problems. 400 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 401 | # with the better durability guarantees. 402 | # 403 | # Please check http://redis.io/topics/persistence for more information. 404 | 405 | appendonly no 406 | 407 | # The name of the append only file (default: "appendonly.aof") 408 | 409 | appendfilename "appendonly.aof" 410 | 411 | # The fsync() call tells the Operating System to actually write data on disk 412 | # instead to wait for more data in the output buffer. Some OS will really flush 413 | # data on disk, some other OS will just try to do it ASAP. 414 | # 415 | # Redis supports three different modes: 416 | # 417 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 418 | # always: fsync after every write to the append only log . Slow, Safest. 419 | # everysec: fsync only one time every second. Compromise. 420 | # 421 | # The default is "everysec", as that's usually the right compromise between 422 | # speed and data safety. It's up to you to understand if you can relax this to 423 | # "no" that will let the operating system flush the output buffer when 424 | # it wants, for better performances (but if you can live with the idea of 425 | # some data loss consider the default persistence mode that's snapshotting), 426 | # or on the contrary, use "always" that's very slow but a bit safer than 427 | # everysec. 428 | # 429 | # More details please check the following article: 430 | # http://antirez.com/post/redis-persistence-demystified.html 431 | # 432 | # If unsure, use "everysec". 
433 | 434 | # appendfsync always 435 | appendfsync everysec 436 | # appendfsync no 437 | 438 | # When the AOF fsync policy is set to always or everysec, and a background 439 | # saving process (a background save or AOF log background rewriting) is 440 | # performing a lot of I/O against the disk, in some Linux configurations 441 | # Redis may block too long on the fsync() call. Note that there is no fix for 442 | # this currently, as even performing fsync in a different thread will block 443 | # our synchronous write(2) call. 444 | # 445 | # In order to mitigate this problem it's possible to use the following option 446 | # that will prevent fsync() from being called in the main process while a 447 | # BGSAVE or BGREWRITEAOF is in progress. 448 | # 449 | # This means that while another child is saving, the durability of Redis is 450 | # the same as "appendfsync none". In practical terms, this means that it is 451 | # possible to lose up to 30 seconds of log in the worst scenario (with the 452 | # default Linux settings). 453 | # 454 | # If you have latency problems turn this to "yes". Otherwise leave it as 455 | # "no" that is the safest pick from the point of view of durability. 456 | 457 | no-appendfsync-on-rewrite no 458 | 459 | # Automatic rewrite of the append only file. 460 | # Redis is able to automatically rewrite the log file implicitly calling 461 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 462 | # 463 | # This is how it works: Redis remembers the size of the AOF file after the 464 | # latest rewrite (if no rewrite has happened since the restart, the size of 465 | # the AOF at startup is used). 466 | # 467 | # This base size is compared to the current size. If the current size is 468 | # bigger than the specified percentage, the rewrite is triggered. Also 469 | # you need to specify a minimal size for the AOF file to be rewritten, this 470 | # is useful to avoid rewriting the AOF file even if the percentage increase 471 | # is reached but it is still pretty small. 472 | # 473 | # Specify a percentage of zero in order to disable the automatic AOF 474 | # rewrite feature. 475 | 476 | auto-aof-rewrite-percentage 100 477 | auto-aof-rewrite-min-size 64mb 478 | 479 | ################################ LUA SCRIPTING ############################### 480 | 481 | # Max execution time of a Lua script in milliseconds. 482 | # 483 | # If the maximum execution time is reached Redis will log that a script is 484 | # still in execution after the maximum allowed time and will start to 485 | # reply to queries with an error. 486 | # 487 | # When a long running script exceed the maximum execution time only the 488 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 489 | # used to stop a script that did not yet called write commands. The second 490 | # is the only way to shut down the server in the case a write commands was 491 | # already issue by the script but the user don't want to wait for the natural 492 | # termination of the script. 493 | # 494 | # Set it to 0 or a negative value for unlimited execution without warnings. 495 | lua-time-limit 5000 496 | 497 | ################################## SLOW LOG ################################### 498 | 499 | # The Redis Slow Log is a system to log queries that exceeded a specified 500 | # execution time. 
The execution time does not include the I/O operations 501 | # like talking with the client, sending the reply and so forth, 502 | # but just the time needed to actually execute the command (this is the only 503 | # stage of command execution where the thread is blocked and can not serve 504 | # other requests in the meantime). 505 | # 506 | # You can configure the slow log with two parameters: one tells Redis 507 | # what is the execution time, in microseconds, to exceed in order for the 508 | # command to get logged, and the other parameter is the length of the 509 | # slow log. When a new command is logged the oldest one is removed from the 510 | # queue of logged commands. 511 | 512 | # The following time is expressed in microseconds, so 1000000 is equivalent 513 | # to one second. Note that a negative number disables the slow log, while 514 | # a value of zero forces the logging of every command. 515 | slowlog-log-slower-than 10000 516 | 517 | # There is no limit to this length. Just be aware that it will consume memory. 518 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 519 | slowlog-max-len 128 520 | 521 | ############################# Event notification ############################## 522 | 523 | # Redis can notify Pub/Sub clients about events happening in the key space. 524 | # This feature is documented at http://redis.io/topics/keyspace-events 525 | # 526 | # For instance if keyspace events notification is enabled, and a client 527 | # performs a DEL operation on key "foo" stored in the Database 0, two 528 | # messages will be published via Pub/Sub: 529 | # 530 | # PUBLISH __keyspace@0__:foo del 531 | # PUBLISH __keyevent@0__:del foo 532 | # 533 | # It is possible to select the events that Redis will notify among a set 534 | # of classes. Every class is identified by a single character: 535 | # 536 | # K Keyspace events, published with __keyspace@__ prefix. 537 | # E Keyevent events, published with __keyevent@__ prefix. 538 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 539 | # $ String commands 540 | # l List commands 541 | # s Set commands 542 | # h Hash commands 543 | # z Sorted set commands 544 | # x Expired events (events generated every time a key expires) 545 | # e Evicted events (events generated when a key is evicted for maxmemory) 546 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 547 | # 548 | # The "notify-keyspace-events" takes as argument a string that is composed 549 | # by zero or multiple characters. The empty string means that notifications 550 | # are disabled at all. 551 | # 552 | # Example: to enable list and generic events, from the point of view of the 553 | # event name, use: 554 | # 555 | # notify-keyspace-events Elg 556 | # 557 | # Example 2: to get the stream of the expired keys subscribing to channel 558 | # name __keyevent@0__:expired use: 559 | # 560 | # notify-keyspace-events Ex 561 | # 562 | # By default all notifications are disabled because most users don't need 563 | # this feature and the feature has some overhead. Note that if you don't 564 | # specify at least one of K or E, no events will be delivered. 565 | notify-keyspace-events "" 566 | 567 | ############################### ADVANCED CONFIG ############################### 568 | 569 | # Hashes are encoded using a memory efficient data structure when they have a 570 | # small number of entries, and the biggest entry does not exceed a given 571 | # threshold. 
These thresholds can be configured using the following directives. 572 | hash-max-ziplist-entries 512 573 | hash-max-ziplist-value 64 574 | 575 | # Similarly to hashes, small lists are also encoded in a special way in order 576 | # to save a lot of space. The special representation is only used when 577 | # you are under the following limits: 578 | list-max-ziplist-entries 512 579 | list-max-ziplist-value 64 580 | 581 | # Sets have a special encoding in just one case: when a set is composed 582 | # of just strings that happens to be integers in radix 10 in the range 583 | # of 64 bit signed integers. 584 | # The following configuration setting sets the limit in the size of the 585 | # set in order to use this special memory saving encoding. 586 | set-max-intset-entries 512 587 | 588 | # Similarly to hashes and lists, sorted sets are also specially encoded in 589 | # order to save a lot of space. This encoding is only used when the length and 590 | # elements of a sorted set are below the following limits: 591 | zset-max-ziplist-entries 128 592 | zset-max-ziplist-value 64 593 | 594 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 595 | # order to help rehashing the main Redis hash table (the one mapping top-level 596 | # keys to values). The hash table implementation Redis uses (see dict.c) 597 | # performs a lazy rehashing: the more operation you run into a hash table 598 | # that is rehashing, the more rehashing "steps" are performed, so if the 599 | # server is idle the rehashing is never complete and some more memory is used 600 | # by the hash table. 601 | # 602 | # The default is to use this millisecond 10 times every second in order to 603 | # active rehashing the main dictionaries, freeing memory when possible. 604 | # 605 | # If unsure: 606 | # use "activerehashing no" if you have hard latency requirements and it is 607 | # not a good thing in your environment that Redis can reply form time to time 608 | # to queries with 2 milliseconds delay. 609 | # 610 | # use "activerehashing yes" if you don't have such hard requirements but 611 | # want to free memory asap when possible. 612 | activerehashing yes 613 | 614 | # The client output buffer limits can be used to force disconnection of clients 615 | # that are not reading data from the server fast enough for some reason (a 616 | # common reason is that a Pub/Sub client can't consume messages as fast as the 617 | # publisher can produce them). 618 | # 619 | # The limit can be set differently for the three different classes of clients: 620 | # 621 | # normal -> normal clients 622 | # slave -> slave clients and MONITOR clients 623 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 624 | # 625 | # The syntax of every client-output-buffer-limit directive is the following: 626 | # 627 | # client-output-buffer-limit 628 | # 629 | # A client is immediately disconnected once the hard limit is reached, or if 630 | # the soft limit is reached and remains reached for the specified number of 631 | # seconds (continuously). 632 | # So for instance if the hard limit is 32 megabytes and the soft limit is 633 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 634 | # if the size of the output buffers reach 32 megabytes, but will also get 635 | # disconnected if the client reaches 16 megabytes and continuously overcomes 636 | # the limit for 10 seconds. 
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard or the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

# Redis calls an internal function to perform many background tasks, like
# closing connections of clients in timeout, purging expired keys that are
# never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform accordingly to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500, however a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

--------------------------------------------------------------------------------
/apps/redis/start.sh:
--------------------------------------------------------------------------------
docker stop redis
docker rm redis
docker run -idt --net imooc-network -p 6379:6379 -v `pwd`/data:/data --name redis -v `pwd`/redis.conf:/etc/redis/redis_default.conf hub.c.163.com/public/redis:2.8.4

--------------------------------------------------------------------------------
/apps/zookeeper/start.sh:
--------------------------------------------------------------------------------
docker rm -f imooc-zookeeper
docker run --net imooc-network --name imooc-zookeeper -p 2181:2181 -d zookeeper:3.5

--------------------------------------------------------------------------------
/config.properties:
--------------------------------------------------------------------------------
#kubernetes binaries directory, e.g. /home/kubernetes/bin
BIN_PATH=

#ip of the current node, e.g. 192.168.1.102
NODE_IP=

#etcd cluster endpoints, e.g. http://192.168.1.102:2379
#if you already have an etcd cluster you can fill it in here; otherwise use http://"<ip of the node running etcd>":2379 (replace MASTER_IP with your own master node ip)
ETCD_ENDPOINTS=

#kubernetes master node ip, e.g. 192.168.1.102
MASTER_IP=
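As an illustration, for the single-master layout used in the docs below (master 192.168.1.102, etcd on the master) the file could be filled in like this on the master node — the values are assumptions, adjust them to your own machines:

```bash
# write an example config.properties for the master node
cat > config.properties <<EOF
BIN_PATH=/home/kubernetes/bin
NODE_IP=192.168.1.102
ETCD_ENDPOINTS=http://192.168.1.102:2379
MASTER_IP=192.168.1.102
EOF
```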
--------------------------------------------------------------------------------
/docs/1-pre-centos.md:
--------------------------------------------------------------------------------
# I. Preparing the Environment
## 1. Prepare the servers
Three centos virtual machines are prepared here, each with one CPU core and 2G of memory, a configured root account and docker installed; all subsequent operations are performed as root. The virtual machines are:

| OS type | IP address | Node role | CPU | Memory | Hostname |
| :------: | :--------: | :-------: | :-----: | :---------: | :-----: |
| centos-64 | 192.168.1.101 | worker | 1 | 2G | server01 |
| centos-64 | 192.168.1.102 | master | 1 | 2G | server02 |
| centos-64 | 192.168.1.103 | worker | 1 | 2G | server03 |

> ubuntu users can follow the ubuntu environment preparation instead

## 2. Install docker (all nodes)

#### 2.1 Update the yum sources
```bash
yum -y update
```
#### 2.2 Install netstat (to inspect port usage on the machine)
```bash
#install
yum install net-tools
#usage
netstat -ntlp
```
#### 2.3 Add the yum repo
```bash
cat >/etc/yum.repos.d/docker.repo <<-EOF
[dockerrepo]
name=Docker Repository
baseurl=https://yum.dockerproject.org/repo/main/centos/7
enabled=1
gpgcheck=1
gpgkey=https://yum.dockerproject.org/gpg
EOF
```
#### 2.4 Install docker
```bash
yum install -y docker-selinux
```
```bash
yum install -y docker-engine
```
#### 2.5 Configure docker

- Forward packets from all ips
```bash
vi /lib/systemd/system/docker.service

#find ExecStart=xxx and add a line below it with the following content (needed by the k8s network)
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
```
- Start the service
```bash
#start the docker service on boot
systemctl enable docker.service

#start the docker service now
systemctl start docker.service
```

If you run into problems, see the [official guide][1]

## 3. System settings (all nodes)
#### 3.1 Stop and disable the firewall (so all machines can connect to each other on any port)
```bash
#check the firewall status
firewall-cmd --state
#stop the firewall
systemctl stop firewalld.service
#disable the firewall (it will not start on boot)
systemctl disable firewalld.service
```
#### 3.2 Set system parameters - allow routing/forwarding, do not process bridged traffic
```bash
#write the configuration file
$ cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

#apply the configuration file
$ sysctl -p /etc/sysctl.d/k8s.conf
```

#### 3.3 Configure the hosts file
```bash
#configure hosts so every Node can resolve the others by name
$ vi /etc/hosts
#append the following (replace the ips and server names with your own); a machine needs no hosts entry for itself
192.168.1.101 server01
192.168.1.102 server02
192.168.1.103 server03
```

#### 3.4 Install lrzsz (drag-and-drop file upload)
```bash
#command (best installed on a single machine only)
$ yum install lrzsz
```

## 4. Prepare the binaries (all nodes)
#### [Download link (kubernetes 1.9.0)][2], or fetch the latest release from github yourself
```bash
#unpack the download in some directory
tar -zxvf xxxx.gz
#rename the unpacked directory to bin
mv kubernetes-bins bin
#configure environment variables
vi ~/.bashrc
#append the following
export KUBERNETES_HOME=/home/kubernetes
export PATH=${KUBERNETES_HOME}/bin:$PATH
#apply the changes
source ~/.bashrc
#test that the kubernetes environment variables are configured correctly
kubectl -h
```
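Besides kubectl -h it does no harm to confirm that the other components resolve through $PATH as well; a small sketch, assuming the download bundle contains these binaries:

```bash
# every name should resolve to a path under /home/kubernetes/bin
for b in etcd kube-apiserver kube-controller-manager kube-scheduler kubelet kube-proxy; do
  which "$b" || echo "$b not found in PATH"
done
```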
## 5. Prepare the configuration files (all nodes)
In the previous step we downloaded the binaries of the kubernetes components. Running these executables requires quite a few arguments, and some also depend on configuration files. In this step we prepare all the arguments and configuration files they need.
#### 5.1 Download the configuration files
```bash
#clone the project into the home directory (git is best installed on one machine only)
$ yum -y install git
$ cd
$ git clone https://github.com/chiangfire/kubernetes-starter.git
#inspect the contents
$ cd ~/kubernetes-starter && ls
```
#### 5.2 File descriptions
- **gen-config.sh**
> A shell script that, from the templates below, generates configuration files suited to your own cluster environment (ip, hostname, etc.). The generated files are placed in the target folder.

- **kubernetes-simple**
> Simplified kubernetes configuration templates (authentication/authorization stripped out).
> Suitable for first contact with kubernetes: the first impression will not be too bad (it is complex!), and it is easier to grasp the core of kubernetes, focusing attention on the core components and how they relate, to understand how kubernetes works as a whole.

- **kubernetes-with-ca**
> Adds the authentication/authorization parts on top of simple. Compare the generated files with the simple versions yourself; this makes the authentication/authorization mechanism easier to understand (it is also an important reason why the kubernetes learning curve is steep).

- **service-config**
> Ignore this for now; these are the configurations of the microservices we developed earlier.
> They are used for practice once we are familiar with kubernetes, to run all our microservices on the kubernetes cluster.

#### 5.3 Generate the configuration
Here the configuration files needed during the kubernetes deployment are generated for your own environment.
Run this once on every node, generating all configurations; later the relevant ones are used depending on the node type.
```bash
#cd into the git repository cloned earlier
$ cd ~/kubernetes-starter
#edit the properties (fill in every key-value following the comments in the file)
$ vi config.properties
#generate the configuration files, making sure no errors occur
$ ./gen-config.sh simple
#inspect the generated files to make sure the script succeeded
$ find target/ -type f
target/all-node/kube-calico.service
target/master-node/kube-controller-manager.service
target/master-node/kube-apiserver.service
target/master-node/etcd.service
target/master-node/kube-scheduler.service
target/worker-node/kube-proxy.kubeconfig
target/worker-node/kubelet.service
target/worker-node/10-calico.conf
target/worker-node/kubelet.kubeconfig
target/worker-node/kube-proxy.service
target/services/kube-dns.yaml
```
> **Common problems running gen-config.sh:**
> 1. gen-config.sh: 3: gen-config.sh: Syntax error: "(" unexpected
>    - the bash version is too old; run bash --version to check, and upgrade if it is below 4
>    - do not run it as sh gen-config.sh (sh and bash may differ)
> 2. config.properties was filled in incorrectly and the files must be regenerated
>    Just run ./gen-config.sh simple again; there is no need to delete target manually
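One extra pre-flight check that catches the most common gen-config.sh complaint — keys left empty in config.properties; a sketch, where the grep pattern simply looks for empty values:

```bash
# warn about any key that is still blank
grep -E '^(BIN_PATH|NODE_IP|ETCD_ENDPOINTS|MASTER_IP)=$' config.properties \
  && echo "fill in the keys above before running ./gen-config.sh"
```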
[1]: https://docs.docker.com/install/linux/docker-ce/centos/
[2]: https://pan.baidu.com/s/1bMnqWY

--------------------------------------------------------------------------------
/docs/1-pre-ubuntu.md:
--------------------------------------------------------------------------------
# I. Preparing the Environment
## 1. Prepare the servers
Three ubuntu virtual machines are prepared here, each with one CPU core and 2G of memory, a configured root account and docker installed; all subsequent operations are performed as root. The virtual machines are:

| OS type | IP address | Node role | CPU | Memory | Hostname |
| :------: | :--------: | :-------: | :-----: | :---------: | :-----: |
| ubuntu16.04 | 192.168.1.101 | worker | 1 | 2G | server01 |
| ubuntu16.04 | 192.168.1.102 | master | 1 | 2G | server02 |
| ubuntu16.04 | 192.168.1.103 | worker | 1 | 2G | server03 |

> centos users can also follow this document; just take care to replace the system-specific commands

## 2. Install docker (all nodes)
In most cases the installation method below is all you need

#### 2.1 Remove old versions (if any)
```bash
$ apt-get remove docker docker-engine docker.io
```
#### 2.2 Update the apt-get sources
```bash
$ add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
```
```bash
$ apt-get update
```
(If update complains about a missing signing key, do step 2.3 first and update again.)
#### 2.3 Install apt's https support packages and add the gpg key
```bash
$ apt-get install \
    apt-transport-https \
    ca-certificates \
    curl \
    software-properties-common
$ curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
```

#### 2.4 Install docker-ce

- Install the latest stable version
```bash
$ apt-get install -y docker-ce
```
- Install a specific version
```bash
#list the available versions
$ apt-cache madison docker-ce

#install a given version (e.g. 17.09.1~ce-0~ubuntu)
$ apt-get install -y docker-ce=17.09.1~ce-0~ubuntu

```
- Forward packets from all ips
```bash
$ vi /lib/systemd/system/docker.service

#find ExecStart=xxx and add a line above it with the following content (needed by the k8s network)
ExecStartPost=/sbin/iptables -I FORWARD -s 0.0.0.0/0 -j ACCEPT
```
- Start the service
```bash
$ systemctl daemon-reload
$ service docker start
```

If you run into problems, see the [official guide][1]

## 3. System settings (all nodes)
#### 3.1 Stop and disable the firewall (so all machines can connect to each other on any port)
```bash
$ ufw disable
#check the status
$ ufw status
```
#### 3.2 Set system parameters - allow routing/forwarding, do not process bridged traffic
```bash
#write the configuration file
$ cat <<EOF > /etc/sysctl.d/k8s.conf
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF

#apply the configuration file
$ sysctl -p /etc/sysctl.d/k8s.conf
```

#### 3.3 Configure the hosts file
```bash
#configure hosts so every Node can resolve the others by name
$ vi /etc/hosts
#append the following (replace the ips and server names with your own)
192.168.1.101 server01
192.168.1.102 server02
192.168.1.103 server03
```

## 4. Prepare the binaries (all nodes)
There are several ways to install kubernetes; whether kube-admin or community-contributed deployment schemes, they all come down to one of these:
- **Use ready-made binaries**
> Download directly from the official site or a third party: the executables of the kubernetes components, ready to run. Whether on centos, ubuntu or another linux distribution, as long as the gcc build environment does not differ too much they run directly. On reasonably recent systems there are usually no cross-platform problems.

- **Build from source**
> The build output is again the component binaries, so if the needed binaries can simply be downloaded there is little point in compiling.

- **Run as images**
> The same services the binaries provide can also be run from images. Just like nginx or mysql, you can install an executable and run it, or run their images, providing the same service. kubernetes is no different: the services provided by the binaries can equally be provided by images.

Of the three, images are actually the more elegant option; the benefits of containers need no explanation. But from a beginner's point of view the container route looks complicated and less pure: there are many container configuration files, and questions about how the services the binaries provide are provided inside containers, so it is easy to get sidetracked.
So here we deploy from the binaries. The binaries are already prepared for download; put the downloaded files on every node, in whatever directory you like, **and preferably add that directory to $PATH afterwards** so the commands can be used directly later. (Those with unrestricted internet access can also look for them on the official site.)
#### [Download link (kubernetes 1.9.0)][2]
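This document does not repeat the $PATH setup shown in the centos document; the same approach works here — a sketch, assuming the binaries were unpacked to /home/kubernetes/bin:

```bash
# make the kubernetes binaries available on PATH
echo 'export KUBERNETES_HOME=/home/kubernetes' >> ~/.bashrc
echo 'export PATH=${KUBERNETES_HOME}/bin:$PATH' >> ~/.bashrc
source ~/.bashrc
kubectl -h   # verify the setup
```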
## 5. Prepare the configuration files (all nodes)
In the previous step we downloaded the binaries of the kubernetes components. Running these executables requires quite a few arguments, and some also depend on configuration files. In this step we prepare all the arguments and configuration files they need.
#### 5.1 Download the configuration files
```bash
#clone the project into the home directory
$ cd
$ git clone https://github.com/chiangfire/kubernetes-starter.git
#inspect the contents
$ cd ~/kubernetes-starter && ls
```
#### 5.2 File descriptions
- **gen-config.sh**
> A shell script that, from the templates below, generates configuration files suited to your own cluster environment (ip, hostname, etc.). The generated files are placed in the target folder.

- **kubernetes-simple**
> Simplified kubernetes configuration templates (authentication/authorization stripped out).
> Suitable for first contact with kubernetes: the first impression will not be too bad (it is complex!), and it is easier to grasp the core of kubernetes, focusing attention on the core components and how they relate, to understand how kubernetes works as a whole.

- **kubernetes-with-ca**
> Adds the authentication/authorization parts on top of simple. Compare the generated files with the simple versions yourself; this makes the authentication/authorization mechanism easier to understand (it is also an important reason why the kubernetes learning curve is steep).

- **service-config**
> Ignore this for now; these are the configurations of the microservices we developed earlier.
> They are used for practice once we are familiar with kubernetes, to run all our microservices on the kubernetes cluster.

#### 5.3 Generate the configuration
Here the configuration files needed during the kubernetes deployment are generated for your own environment.
Run this once on every node, generating all configurations; later the relevant ones are used depending on the node type.
```bash
#cd into the git repository cloned earlier
$ cd ~/kubernetes-starter
#edit the properties (fill in every key-value following the comments in the file)
$ vi config.properties
#generate the configuration files, making sure no errors occur
$ ./gen-config.sh simple
#inspect the generated files to make sure the script succeeded
$ find target/ -type f
target/all-node/kube-calico.service
target/master-node/kube-controller-manager.service
target/master-node/kube-apiserver.service
target/master-node/etcd.service
target/master-node/kube-scheduler.service
target/worker-node/kube-proxy.kubeconfig
target/worker-node/kubelet.service
target/worker-node/10-calico.conf
target/worker-node/kubelet.kubeconfig
target/worker-node/kube-proxy.service
target/services/kube-dns.yaml
```
> **Common problems running gen-config.sh:**
> 1. gen-config.sh: 3: gen-config.sh: Syntax error: "(" unexpected
>    - the bash version is too old; run bash --version to check, and upgrade if it is below 4
>    - do not run it as sh gen-config.sh (sh and bash may differ)
> 2. config.properties was filled in incorrectly and the files must be regenerated
>    Just run ./gen-config.sh simple again; there is no need to delete target manually

[1]: https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/
[2]: https://pan.baidu.com/s/1bMnqWY

--------------------------------------------------------------------------------
/docs/2-kubernetes-simple.md:
--------------------------------------------------------------------------------
# II. Basic Cluster Deployment - kubernetes-simple
## 1. Deploy ETCD (master node)
#### 1.1 Overview
kubernetes needs to store a lot of state: its own node information, component information, plus the pods, deployments, services and so on that run on it. All of it needs persistence. etcd is its data store. In production, to keep the data store highly available and the data consistent, at least three nodes are usually deployed. Since this is for learning, we deploy a single instance on the master node only.
> If your environment already has an etcd service (single node or cluster), you can skip this step — provided you filled in your own etcd endpoint when generating the configuration!

#### 1.2 Deployment
**The etcd binary and the service configuration are already prepared; all that is left is to register it as a system service and start it.**

```bash
#copy the service configuration into the system service directory
$ cp ./target/master-node/etcd.service /lib/systemd/system/
#enable the service
$ systemctl enable etcd.service
#create the working directory (where the data is kept)
$ mkdir -p /var/lib/etcd
# start the service
$ service etcd start
# watch the service log for error messages to make sure the service is healthy
$ journalctl -f -u etcd.service
```
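Before moving on it is worth confirming that etcd actually answers on its client port; a sketch — replace the ip with your own MASTER_IP, the /health endpoint is served by etcd itself:

```bash
# expect a response like {"health": "true"}
curl http://192.168.1.102:2379/health
```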
## 2. Deploy the APIServer (master node)
#### 2.1 Overview
kube-apiserver is one of the most important core components of Kubernetes. It mainly provides:
- the REST API for cluster management, including authentication/authorization (not used here yet), data validation and cluster state changes
- the hub for data exchange and communication between the other modules (other modules query or modify data through the API Server; only the API Server operates on etcd directly)

> In production, for high availability, usually 2+ apiserver nodes are deployed behind a load balancer such as haproxy. Since a single node and multiple nodes look the same at the apiserver layer, deploying one node is enough for learning.

#### 2.2 Deployment
The APIServer is also deployed as a system service. The procedure is exactly the same as for etcd, so the commands are not annotated again.
```bash
$ cp ./target/master-node/kube-apiserver.service /lib/systemd/system/
$ systemctl enable kube-apiserver.service
$ service kube-apiserver start
$ journalctl -f -u kube-apiserver
```

#### 2.3 Key configuration notes
> [Unit]
> Description=Kubernetes API Server
> ...
> [Service]
> \#path of the executable
> ExecStart=/home/michael/bin/kube-apiserver \\
> \#bind address of the insecure port (8080); this listens on all addresses
> --insecure-bind-address=0.0.0.0 \\
> \#do not use https
> --kubelet-https=false \\
> \#virtual ip range of the kubernetes cluster services
> --service-cluster-ip-range=10.68.0.0/16 \\
> \#allowed nodeport range for services
> --service-node-port-range=20000-40000 \\
> \#etcd is needed in many places; this is the only module that operates on etcd directly
> --etcd-servers=http://192.168.1.102:2379 \\
> ...

## 3. Deploy the ControllerManager (master node)
#### 3.1 Overview
The Controller Manager consists of kube-controller-manager and cloud-controller-manager. It is the brain of Kubernetes: it watches the state of the whole cluster through the apiserver and makes sure the cluster stays in the desired state.
kube-controller-manager is made up of a series of controllers: the Replication Controller manages replicas, the Node Controller manages nodes, the Deployment Controller manages deployments, and so on.
cloud-controller-manager is only needed when Kubernetes enables a Cloud Provider, to integrate with the cloud vendor's controls.
> controller-manager, scheduler and apiserver are tightly related in function and usually run on the same machine, so they can be treated as one unit: making the apiserver highly available makes all three highly available. Several controller-manager processes can also run at the same time, but only one is elected leader and serves.

#### 3.2 Deployment
**Deployed as a system service**
```bash
$ cp ./target/master-node/kube-controller-manager.service /lib/systemd/system/
$ systemctl enable kube-controller-manager.service
$ service kube-controller-manager start
$ journalctl -f -u kube-controller-manager
```
#### 3.3 Key configuration notes
> [Unit]
> Description=Kubernetes Controller Manager
> ...
> [Service]
> ExecStart=/home/michael/bin/kube-controller-manager \\
> \#listen address; here only local programs may access it
> --address=127.0.0.1 \\
> \#url of the apiserver
> --master=http://127.0.0.1:8080 \\
> \#service virtual ip range, same as in the apiserver configuration
> --service-cluster-ip-range=10.68.0.0/16 \\
> \#pod ip range
> --cluster-cidr=172.20.0.0/16 \\
> \#the next two mean no certificates are used; the empty values override the defaults
> --cluster-signing-cert-file= \\
> --cluster-signing-key-file= \\
> ...

## 4. Deploy the Scheduler (master node)
#### 4.1 Overview
kube-scheduler assigns Pods to nodes in the cluster: it watches kube-apiserver, finds Pods not yet assigned a Node, and assigns nodes to them according to the scheduling policies. The various kubernetes scheduling strategies mentioned earlier are implemented by it.

#### 4.2 Deployment
**Deployed as a system service**
```bash
$ cp ./target/master-node/kube-scheduler.service /lib/systemd/system/
$ systemctl enable kube-scheduler.service
$ service kube-scheduler start
$ journalctl -f -u kube-scheduler
```

#### 4.3 Key configuration notes
> [Unit]
> Description=Kubernetes Scheduler
> ...
> [Service]
> ExecStart=/home/michael/bin/kube-scheduler \\
> \#listen address; here only local programs may access it
> --address=127.0.0.1 \\
> \#url of the apiserver
> --master=http://127.0.0.1:8080 \\
> ...
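With apiserver, controller-manager and scheduler all running, a quick overall check can be done from the master through the insecure 8080 port configured above (a sketch):

```bash
# scheduler, controller-manager and etcd should all report Healthy
kubectl -s http://127.0.0.1:8080 get componentstatuses
```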
部署CalicoNode(所有节点)
119 | #### 5.1 简介
120 | Calico实现了CNI接口,是kubernetes网络方案的一种选择,它是一个纯三层的数据中心网络方案(不需要Overlay),并且与OpenStack、Kubernetes、AWS、GCE等IaaS和容器平台都有良好的集成。
121 | Calico在每一个计算节点利用Linux Kernel实现了一个高效的vRouter来负责数据转发,而每个vRouter通过BGP协议负责把自己节点上运行的workload的路由信息向整个Calico网络内传播:小规模部署可以直接互联,大规模下可通过指定的BGP route reflector来完成。这样保证最终所有workload之间的数据流量都是通过IP路由的方式完成互联的。
122 | #### 5.2 部署
123 | **calico是通过系统服务+docker方式完成的**
124 | ```bash
125 | $ cp ./target/all-node/kube-calico.service /lib/systemd/system/
126 | $ systemctl enable kube-calico.service
127 | $ service kube-calico start
128 | $ journalctl -f -u kube-calico
129 | ```
130 | #### 5.3 calico可用性验证
131 | **查看容器运行情况(可能没有那么快)**
132 | ```bash
133 | $ docker ps
134 | CONTAINER ID IMAGE COMMAND CREATED ...
135 | 4d371b58928b calico/node:v2.6.2 "start_runit" 3 hours ago...
136 | ```
137 | **安装 calicoctl(可以不安装)**
138 | ```bash
139 | wget -O /usr/local/bin/calicoctl https://github.com/projectcalico/calicoctl/releases/download/v3.3.0/calicoctl -- 下载安装包
140 | chmod +x /usr/local/bin/calicoctl -- 赋予可执行权限
141 | ```
142 | **查看节点运行情况**
143 | ```bash
144 | $ calicoctl node status
145 | Calico process is running.
146 | IPv4 BGP status
147 | +---------------+-------------------+-------+----------+-------------+
148 | | PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
149 | +---------------+-------------------+-------+----------+-------------+
150 | | 192.168.1.103 | node-to-node mesh | up | 13:13:13 | Established |
151 | +---------------+-------------------+-------+----------+-------------+
152 | IPv6 BGP status
153 | No IPv6 peers found.
154 | ```
155 | **查看端口:BGP协议是通过TCP连接来建立邻居的,因此可以用netstat命令验证BGP Peer**
156 | ```bash
157 | $ netstat -natp|grep ESTABLISHED|grep 179
158 | tcp 0 0 192.168.1.102:60959 192.168.1.103:179 ESTABLISHED 29680/bird
159 | ```
160 | **查看集群ipPool情况(因为我们使用k8s去控制,所以可以不单独配置ipPool网络资源;如果要用calicoctl配置ipPool,再自行查阅)**
161 | ```bash
162 | $ calicoctl get ipPool -o yaml
163 | - apiVersion: v1
164 | kind: ipPool
165 | metadata:
166 | cidr: 172.20.0.0/16
167 | spec:
168 | nat-outgoing: true
169 | ```
170 | #### 5.4 重点配置说明
171 | > [Unit]
172 | > Description=calico node
173 | > ...
174 | > [Service]
175 | > \#以docker方式运行
176 | > ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \\
177 | > \#指定etcd endpoints(这里主要负责网络元数据一致性,确保Calico网络状态的准确性)
178 | > -e ETCD_ENDPOINTS=http://192.168.1.102:2379 \\
179 | > \#网络地址范围(同上面ControllerManager)
180 | > -e CALICO_IPV4POOL_CIDR=172.20.0.0/16 \\
181 | > \#镜像名,为了加快大家的下载速度,镜像都放到了阿里云上
182 | > registry.cn-hangzhou.aliyuncs.com/imooc/calico-node:v2.6.2
183 |
184 | ## 6. 配置kubectl命令(任意节点)
185 | #### 6.1 简介
186 | kubectl是Kubernetes的命令行工具,是Kubernetes用户和管理员必备的管理工具。
187 | kubectl提供了大量的子命令,方便管理Kubernetes集群中的各种功能。
188 | #### 6.2 初始化
189 | 使用kubectl的第一步是配置Kubernetes集群以及认证方式,包括:
190 | - cluster信息:api-server地址
191 | - 用户信息:用户名、密码或密钥
192 | - Context:cluster、用户信息以及Namespace的组合
193 |
194 | 我们这里没有安全相关的东西,只需要设置好api-server和上下文就好啦:
195 | ```bash
196 | #指定apiserver地址(ip替换为你自己的api-server地址)
197 | kubectl config set-cluster kubernetes --server=http://192.168.1.102:8080
198 | #设置上下文,指定cluster
199 | kubectl config set-context kubernetes --cluster=kubernetes
200 | #选择默认的上下文
201 | kubectl config use-context kubernetes
202 | #测试kubectl,获取所有的pod(还没有开始使用,应该是没有pod的)
203 | kubectl get pods
204 | ```
205 | > 通过上面的设置,最终目的是生成一个配置文件:~/.kube/config;当然你也可以手写或复制一个文件放在那,就不需要上面的命令了。
206 |
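执行完上面的命令,可以看一眼生成的配置文件,确认server地址正确(示例输出,ip以你自己的环境为准):

```bash
$ cat ~/.kube/config
apiVersion: v1
clusters:
- cluster:
    server: http://192.168.1.102:8080
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
  name: kubernetes
current-context: kubernetes
kind: Config
preferences: {}
users: []
```

207 | ## 7. 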
配置kubelet(工作节点)
208 | #### 7.1 简介
209 | 每个工作节点上都运行一个kubelet服务进程,默认监听10250端口,接收并执行master发来的指令,管理Pod及Pod中的容器。每个kubelet进程会在API Server上注册节点自身信息,定期向master节点汇报节点的资源使用情况,并通过cAdvisor监控节点和容器的资源。
210 | #### 7.2 部署
211 | **通过系统服务方式部署,但步骤会多一些,具体如下:**
212 | ```bash
213 | #确保相关目录存在
214 | $ mkdir -p /var/lib/kubelet
215 | $ mkdir -p /etc/kubernetes
216 | $ mkdir -p /etc/cni/net.d
217 |
218 | #复制kubelet服务配置文件
219 | $ cp target/worker-node/kubelet.service /lib/systemd/system/
220 | #复制kubelet依赖的配置文件
221 | $ cp target/worker-node/kubelet.kubeconfig /etc/kubernetes/
222 | #复制kubelet用到的cni插件配置文件
223 | $ cp target/worker-node/10-calico.conf /etc/cni/net.d/
224 |
225 | $ systemctl enable kubelet.service
226 | $ service kubelet start
227 | $ journalctl -f -u kubelet
228 | ```
229 | #### 7.3 重点配置说明
230 | **kubelet.service**
231 | > [Unit]
232 | Description=Kubernetes Kubelet
233 | [Service]
234 | \#kubelet工作目录,存储当前节点容器、pod等信息
235 | WorkingDirectory=/var/lib/kubelet
236 | ExecStart=/home/michael/bin/kubelet \\
237 | \#对外服务的监听地址
238 | --address=192.168.1.103 \\
239 | \#指定基础容器的镜像,负责创建Pod内部共享的网络、文件系统等。这个基础容器非常重要:K8S每一个运行的POD里面必然包含这个基础容器,如果它没有运行起来,那么你的POD肯定创建不了
240 | --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/imooc/pause-amd64:3.0 \\
241 | \#访问集群方式的配置,如api-server地址等
242 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
243 | \#声明cni网络插件
244 | --network-plugin=cni \\
245 | \#cni网络配置目录,kubelet会读取该目录下的网络配置
246 | --cni-conf-dir=/etc/cni/net.d \\
247 | \#指定 kubedns 的 Service IP(可以先分配,后续创建 kubedns 服务时指定该 IP),--cluster-domain 指定域名后缀,这两个参数同时指定后才会生效
248 | --cluster-dns=10.68.0.2 \\
249 | ...
250 |
251 | **kubelet.kubeconfig**
252 | kubelet依赖的一个配置,也是我们后面经常遇到的yaml格式,描述了kubelet访问apiserver的方式。
253 | > apiVersion: v1
254 | > clusters:
255 | > \- cluster:
256 | > \#跳过tls证书校验
257 | > insecure-skip-tls-verify: true
258 | > \#api-server地址
259 | > server: http://192.168.1.102:8080
260 | > ...
261 |
262 | **10-calico.conf**
263 | calico作为kubernetes的CNI插件的配置
264 | ```json
265 | {
266 | "name": "calico-k8s-network",
267 | "cniVersion": "0.1.0",
268 | "type": "calico",
269 |
270 | "etcd_endpoints": "http://192.168.1.102:2379",
271 | "log_level": "info",
272 | "ipam": {
273 | "type": "calico-ipam"
274 | },
275 | "kubernetes": {
276 |
277 | "k8s_api_root": "http://192.168.1.102:8080"
278 | }
279 | }
280 | ```
281 |
282 |
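kubelet启动正常后,工作节点会自动注册到集群,可以到配置过kubectl的节点上确认一下(示例命令):

```bash
#能看到刚加入的工作节点,且状态为Ready,说明kubelet部署成功
$ kubectl get nodes
```

283 | ## 8. 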
小试牛刀
284 | ```bash
285 | journalctl -f --查看当前系统日志
286 | kubectl version -- 查看kubernetes的一些版本信息
287 | kubectl get --help -- 获取 get 命令的使用方法
288 | kubectl get nodes -- 获取所有的节点信息
289 | kubectl run '部署名称' --image='镜像名称' --port=9090 --创建一个部署<默认一个pod>
290 | kubectl run kubernetes-bootcamp --image=jocatalin/kubernetes-bootcamp:v2 --port=9090 -- 测试创建一个部署
291 | kubectl get deployments -- 获取所有的部署,数据如下:
292 | 部署名称 期望 pod数 当前pod数 最新pod数 可用pod数
293 | | | | | |
294 | NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE
295 | kubernetes-bootcamp 1 1 1 1 3m
296 |
297 | kubectl get pods -- 获取所有的 pod 信息
298 | kubectl get pods -o wide -- 获取所有的 pod,显示更多的pod信息
299 | kubectl get pods -l app=nginx -- 获取 '部署' 里 labels 里面 app=nginx的pod
300 |
301 | kubectl delete deployments '部署名字' -- 删除一个部署
302 | kubectl describe deployment '部署名称' -- 描述一个部署的详细信息
303 | kubectl describe pods 'pod名称' -- 描述一个 pod 的详细信息<名字可以使用:kubectl get pods 命令查看>
304 | kubectl proxy -- 在当前机器上起一个 8001 的代理
305 | curl http://localhost:8001/api/v1/proxy/namespaces/default/pods/pod名称/ --另开一个窗口,执行这条命令来测试pod是否启动
306 |
307 | kubectl scale deployments '部署名称' --replicas=4 -- 为某个部署扩缩容数量
308 | kubectl set image deployment '部署名称' '容器名称'='新的镜像名称' -- 修改某个部署的镜像
309 | kubectl set image deployment kubernetes-bootcamp kubernetes-bootcamp=jocatalin/kubernetes-bootcamp:v2 -- 测试修改命令
310 | kubectl rollout status deployment '部署名称' -- 查看上面的修改是否成功
311 |
312 | kubectl rollout undo deployment '部署名称' -- 回滚上一步对某个部署的操作 <比如上面我更新了部署的镜像>
313 |
314 | 使用配置文件的方式创建 pod,需要创建配置文件,具体如下:
315 | nginx-pod.yaml -- 文件名,内容如下
316 | apiVersion: v1
317 | kind: Pod #类型
318 | metadata: #元数据
319 | name: nginx
320 | spec: #说明
321 | containers: #容器
322 | - name: nginx #容器名称
323 | image: nginx:1.7.9 #镜像
324 | ports:
325 | - containerPort: 80 #容器端口
326 |
327 | kubectl create -f 'yaml文件名称' -- 使用配置文件创建 pod
328 | kubectl get pods -- 查看刚刚创建的pod是否成功
329 |
330 |
331 | 使用配置文件的方式创建 '部署',需要创建配置文件,具体如下:
332 | nginx-deployment.yaml -- 文件名,内容如下
333 | apiVersion: apps/v1beta1
334 | kind: Deployment #类型
335 | metadata: #元数据
336 | name: nginx-deployment
337 | spec:
338 | replicas: 3 #副本数<就是部署时有几个实例,启动几个服务>
339 | template: #对什么进行副本,根据什么去创建副本
340 | metadata:
341 | labels:
342 | app: nginx
343 | spec: #说明
344 | containers: #容器
345 | - name: nginx #容器名称
346 | image: nginx:1.7.9 #镜像
347 | ports:
348 | - containerPort: 80 #容器端口
349 |
350 | kubectl create -f 'yaml文件名称' -- 使用配置文件创建 '部署'
351 | kubectl get deployments -- 查看刚刚创建的 '部署' 是否成功
352 | kubectl get pods -l app=nginx -- 获取 '部署' 里 labels 里面 app=nginx的pod
353 | ```
354 |
355 | ## 9. 为集群增加service功能 - kube-proxy(工作节点)
356 | #### 9.1 简介
357 | 每台工作节点上都应该运行一个kube-proxy服务,它监听API server中service和endpoint的变化情况,并通过iptables等来为服务配置负载均衡,是让我们的服务在集群外可以被访问到的重要方式。
358 | #### 9.2 部署
359 | **通过系统服务方式部署:**
360 | ```bash
361 | #确保工作目录存在
362 | $ mkdir -p /var/lib/kube-proxy
363 | #复制kube-proxy服务配置文件
364 | $ cp ./target/worker-node/kube-proxy.service /lib/systemd/system/
365 | #复制kube-proxy依赖的配置文件
366 | $ cp ./target/worker-node/kube-proxy.kubeconfig /etc/kubernetes/
367 |
368 | $ systemctl enable kube-proxy.service
369 | $ service kube-proxy start
370 | $ journalctl -f -u kube-proxy
371 | ```
372 | #### 9.3 重点配置说明
373 | **kube-proxy.service**
374 | > [Unit]
375 | Description=Kubernetes Kube-Proxy Server
376 | ...
377 | [Service]
378 | \#工作目录
379 | WorkingDirectory=/var/lib/kube-proxy
380 | ExecStart=/home/michael/bin/kube-proxy \\
381 | \#监听地址
382 | --bind-address=192.168.1.103 \\
383 | \#依赖的配置文件,描述了kube-proxy如何访问api-server
384 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \\
385 | ...
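顺带一提,kube-proxy启动后会在节点上维护一批KUBE-开头的iptables规则,service的转发就是靠它们实现的,可以粗略看一眼(示例命令,输出因环境而异,创建service后规则会更多):

```bash
$ iptables-save | grep KUBE
```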
386 |
387 | **kube-proxy.kubeconfig**
388 | 配置了kube-proxy如何访问api-server,内容与kubelet雷同,不再赘述。
389 |
390 | #### 9.4 操练service
391 | ```bash
392 | kubectl get services -- 在装有apiServer的节点执行,查看当前服务
393 | kubectl describe service '服务名字' -- 查看服务详细信息(服务名称可通过 kubectl get service 获取)
394 |
395 | "暴露" 一个 "部署" 部署的名称 "暴露类型" "目标的端口(容器的端口)" "服务的端口"(内网服务的端口,和 CLUSTER-IP(看下面)绑定的端口,仅限内网使用)
396 | | | | | | |
397 | kubectl expose deploy nginx-deployment --type="NodePort" --target-port=80 --port=80 #暴露(创建)服务(Service),随机生成端口
398 |
399 | kubectl get services -- 验证上面的命令是否创建service成功,显示数据如下
400 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
401 | kubernetes ClusterIP 10.68.0.1 443/TCP 1d
402 | nginx-deployment NodePort 10.68.193.138 80:38661/TCP 9s
403 |
404 | 第二条数据看 "PORT(S)"项,从80(内网服务的端口)映射到了38661(随机生成的)端口,可使用如下命令查看端口监听情况:
405 | netstat -ntlp|grep 38661
406 | -- 如果当前机器起了 kube-proxy 是会有监听的,而这个端口实际是kube-proxy在节点上启动的一个端口,
407 | -- 可通过这台机器的ip+这个端口(38661)来访问我们的服务(前提是该机器跑有 kube-proxy)
408 | -- 到服务(service)里面的容器中,使用 CLUSTER-IP(看上面)+(内网服务的端口)是可以访问的
409 | -- 在节点内部使用pod的ip+容器的端口是可以访问的,或者使用 CLUSTER-IP(看上面)+(内网服务的端口)也是可以访问的
410 |
411 |
412 | 使用配置文件的方式创建 service,创建文件 nginx-service.yaml 内容如下:
413 | apiVersion: v1
414 | kind: Service
415 | metadata:
416 | name: nginx-service
417 | spec:
418 | ports:
419 | - port: 80 #服务的端口(内网服务的端口,和 CLUSTER-IP(看下面)绑定的端口,仅限内网使用)
420 | targetPort: 80 #目标的端口(容器的端口)
421 | nodePort: 20000 #node节点绑定的端口,就是机器对外提供服务的端口
422 | selector:
423 | app: nginx #选择给谁提供服务,这里选的是 deployments(部署) 里 labels 下 app=nginx 的部署。(可以看上面我们创建了一个 deployments(部署)里面有 app:nginx)
424 | type: NodePort
425 |
426 |
427 | kubectl create -f nginx-service.yaml -- 创建 service
428 | kubectl get services -- 看看创建的 service 是否成功
429 | ```
430 |
431 | ## 10. 为集群增加dns功能 - kube-dns(app<和普通应用差不多>)
432 | #### 10.1 简介
433 | kube-dns为Kubernetes集群提供命名服务,主要用来解析集群服务名和Pod的hostname。目的是让pod可以通过名字访问到集群内服务。它通过添加A记录的方式实现名字和service的解析:普通的service会解析到service-ip,headless service会解析到pod列表。
434 | #### 10.2 部署
435 | **通过kubernetes应用的方式部署**
436 | kube-dns.yaml文件基本与官方一致(除了镜像名不同外)。
437 | 里面配置了多个组件,之间使用"---"分隔
438 | ```bash
439 | #到kubernetes-starter目录执行命令
440 | $ kubectl create -f ./target/services/kube-dns.yaml -- 这个是官方提供的创建 dns 服务的配置(里面有注释,可以看看)
441 | $ kubectl -n kube-system get services -- 查看kube-dns 服务是否创建成功,-n 是指定命名空间,kube-system 是 kubernetes 系统内部的命名空间
442 | $ kubectl -n kube-system get deployments -- 查看kube-dns 部署是否创建成功
443 | $ kubectl -n kube-system get pods -o wide -- 查看kube-dns 的 pod是否运行
444 | $ docker ps|grep dns -- 到运行 dns pod 的节点上执行,查看运行了哪些容器
445 | 一般会运行如下几个容器:
446 | k8s-dns-sidecar:用于监控其他几个容器的健康状态
447 | k8s-dns-dnsmasq:用于 dns 缓存,来提升效率
448 | k8s-dns-kube-dns:真正提供 dns 服务的容器
449 | pause-amd64:pod 容器
450 | ```
451 | #### 10.3 重点配置说明
452 | 请直接参考配置文件中的注释。
453 |
454 | #### 10.4 通过dns访问服务,到主节点上执行如下操作
455 | ```bash
456 | kubectl get services -- 查看所有的 services
457 | kubectl get pods -o wide -- 找一个装有 curl 的容器
458 | docker exec -it '容器的ID(docker ps 查看)' sh -- 进入容器内部,执行如下命令:
459 | curl '服务的名称(kubectl get services)':内网端口 -- 验证使用名称是否可以访问服务
460 | cat /etc/resolv.conf -- 查看当前容器的 dns 配置
461 |
462 | kubectl delete -f ./target/services/kube-dns.yaml -- 删除 dns 服务(不要做这一步)
463 | ```
464 |
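也可以起一个临时的busybox,用nslookup直接验证dns解析(示例命令,服务名以你实际创建的为准):

```bash
#能解析出服务对应的CLUSTER-IP,说明kube-dns工作正常
$ kubectl run busybox --rm=true -i --tty --image=busybox --restart=Never -- nslookup nginx-service
```

-------------------------------------------------------------------------------- /docs/3-kubernetes-with-ca.md: --------------------------------------------------------------------------------
1 | # 三、完整集群部署 - kubernetes-with-ca
2 | ## 1. 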
理解认证授权 3 | #### 1.1 为什么要认证 4 | 想理解认证,我们得从认证解决什么问题、防止什么问题的发生入手。 5 | 防止什么问题呢?是防止有人入侵你的集群,root你的机器后让我们集群依然安全吗?不是吧,root都到手了,那就为所欲为,防不胜防了。 6 | 其实网络安全本身就是为了解决在某些假设成立的条件下如何防范的问题。比如一个非常重要的假设就是两个节点或者ip之间的通讯网络是不可信任的,可能会被第三方窃取,也可能会被第三方篡改。就像我们上学时候给心仪的女孩传纸条,传送的过程可能会被别的同学偷看,甚至内容可能会从我喜欢你修改成我不喜欢你了。当然这种假设不是随便想出来的,而是从网络技术现状和实际发生的问题中发现、总结出来的。kubernetes的认证也是从这个问题出发来实现的。 7 | #### 1.2 概念梳理 8 | 为了解决上面说的问题,kubernetes并不需要自己想办法,毕竟是网络安全层面的问题,是每个服务都会遇到的问题,业内也有成熟的方案来解决。这里我们一起了解一下业内方案和相关的概念。 9 | - **对称加密/非对称加密** 10 | 这两个概念属于密码学的东西,对于没接触过的同学不太容易理解。可以参考知乎大神的生动讲解:[《如何用通俗易懂的话来解释非对称加密》][1] 11 | - **SSL/TLS** 12 | 了解了对称加密和非对称加密后,我们就可以了解一下SSL/TLS了。同样,已经有大神总结了非常好的入门文章:[《SSL/TLS协议运行机制的概述》][2] 13 | 14 | #### 1.3 什么是授权 15 | 授权的概念就简单多了,就是什么人具有什么样的权限,一般通过角色作为纽带把他们组合在一起。也就是一个角色一边拥有多种权限,一边拥有多个人。这样就把人和权限建立了一个关系。 16 | ## 2. kubernetes的认证授权 17 | Kubernetes集群的所有操作基本上都是通过kube-apiserver这个组件进行的,它提供HTTP RESTful形式的API供集群内外客户端调用。需要注意的是:认证授权过程只存在HTTPS形式的API中。也就是说,如果客户端使用HTTP连接到kube-apiserver,那么是不会进行认证授权的。所以说,可以这么设置,在集群内部组件间通信使用HTTP,集群外部就使用HTTPS,这样既增加了安全性,也不至于太复杂。 18 | 对APIServer的访问要经过的三个步骤,前面两个是认证和授权,第三个是 Admission Control,它也能在一定程度上提高安全性,不过更多是资源管理方面的作用。 19 | #### 2.1 kubernetes的认证 20 | kubernetes提供了多种认证方式,比如客户端证书、静态token、静态密码文件、ServiceAccountTokens等等。你可以同时使用一种或多种认证方式。只要通过任何一个都被认作是认证通过。下面我们就认识几个常见的认证方式。 21 | - **客户端证书认证** 22 | 客户端证书认证叫作TLS双向认证,也就是服务器客户端互相验证证书的正确性,在都正确的情况下协调通信加密方案。 23 | 为了使用这个方案,api-server需要用--client-ca-file选项来开启。 24 | - **引导Token** 25 | 当我们有非常多的node节点时,手动为每个node节点配置TLS认证比较麻烦,这时就可以用到引导token的认证方式,前提是需要在api-server开启 experimental-bootstrap-token-auth 特性,客户端的token信息与预先定义的token匹配认证通过后,自动为node颁发证书。当然引导token是一种机制,可以用到各种场景中。 26 | - **Service Account Tokens 认证** 27 | 有些情况下,我们希望在pod内部访问api-server,获取集群的信息,甚至对集群进行改动。针对这种情况,kubernetes提供了一种特殊的认证方式:Service Account。 Service Account 和 pod、service、deployment 一样是 kubernetes 集群中的一种资源,用户也可以创建自己的 Service Account。 28 | ServiceAccount 主要包含了三个内容:namespace、Token 和 CA。namespace 指定了 pod 所在的 namespace,CA 用于验证 apiserver 的证书,token 用作身份验证。它们都通过 mount 的方式保存在 pod 的文件系统中。 29 | 30 | #### 2.2 kubernetes的授权 31 | 在Kubernetes1.6版本中新增角色访问控制机制(Role-Based Access,RBAC)让集群管理员可以针对特定使用者或服务账号的角色,进行更精确的资源访问控制。在RBAC中,权限与角色相关联,用户通过成为适当角色的成员而得到这些角色的权限。这就极大地简化了权限的管理。在一个组织中,角色是为了完成各种工作而创造,用户则依据它的责任和资格来被指派相应的角色,用户可以很容易地从一个角色被指派到另一个角色。 32 | 目前 Kubernetes 中有一系列的鉴权机制,因为Kubernetes社区的投入和偏好,相对于其它鉴权机制而言,RBAC是更好的选择。具体RBAC是如何体现在kubernetes系统中的我们会在后面的部署中逐步的深入了解。 33 | #### 2.3 kubernetes的AdmissionControl 34 | AdmissionControl - 准入控制本质上为一段准入代码,在对kubernetes api的请求过程中,顺序为:先经过认证 & 授权,然后执行准入操作,最后对目标对象进行操作。这个准入代码在api-server中,而且必须被编译到二进制文件中才能被执行。 35 | 在对集群进行请求时,每个准入控制代码都按照一定顺序执行。如果有一个准入控制拒绝了此次请求,那么整个请求的结果将会立即返回,并提示用户相应的error信息。 36 | 常用组件(控制代码)如下: 37 | - AlwaysAdmit:允许所有请求 38 | - AlwaysDeny:禁止所有请求,多用于测试环境 39 | - ServiceAccount:它将serviceAccounts实现了自动化,它会辅助serviceAccount做一些事情,比如如果pod没有serviceAccount属性,它会自动添加一个default,并确保pod的serviceAccount始终存在 40 | - LimitRanger:他会观察所有的请求,确保没有违反已经定义好的约束条件,这些条件定义在namespace中LimitRange对象中。如果在kubernetes中使用LimitRange对象,则必须使用这个插件。 41 | - NamespaceExists:它会观察所有的请求,如果请求尝试创建一个不存在的namespace,则这个请求被拒绝。 42 | 43 | ## 3. 
环境准备 44 | #### 3.1 停止原有kubernetes相关服务 45 | 开始之前我们要先把基础版本的集群停掉,包括service,deployments,pods以及运行的所有kubernetes组件 46 | 注:kubernetes 服务不需要删除它是 ApiServer自动创建的。 47 | ```bash 48 | #删除services 49 | $ kubectl delete services nginx-service 50 | 51 | #删除deployments 52 | $ kubectl delete deploy kubernetes-bootcamp 53 | $ kubectl delete deploy nginx-deployment 54 | 55 | #停掉worker节点的服务 56 | $ service kubelet stop && rm -fr /var/lib/kubelet/* 57 | $ service kube-proxy stop && rm -fr /var/lib/kube-proxy/* 58 | $ service kube-calico stop 59 | 60 | #停掉master节点的服务 61 | $ service kube-calico stop 62 | $ service kube-scheduler stop 63 | $ service kube-controller-manager stop 64 | $ service kube-apiserver stop 65 | $ service etcd stop && rm -fr /var/lib/etcd/* 66 | #所有节点(查看是否删除干净) 67 | $ netstat -ntlp 68 | ``` 69 | #### 3.2 生成配置(所有节点) 70 | 跟基础环境搭建一样,我们需要生成kubernetes-with-ca的所有相关配置文件 71 | ```bash 72 | $ cd ~/kubernetes-starter 73 | #按照配置文件的提示编辑好配置(将 ETCD 改为 https,其它应该不用改) 74 | $ vi config.properties 75 | #生成配置 76 | $ ./gen-config.sh with-ca 77 | ``` 78 | #### 3.3 安装cfssl(所有节点) 79 | cfssl是非常好用的CA工具,我们用它来生成证书和秘钥文件 80 | 安装过程比较简单,如下: 81 | ```bash 82 | #查看是否安装了 wget 83 | $ yum list installed wget | grep wget 84 | #安装 wget 85 | $ yum -y install wget 86 | #下载(如果报 "--show-progress" 等参数不可用,直接下载那两个文件即可) 87 | $ wget -q --show-progress --https-only --timestamping \ 88 | https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 \ 89 | https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 90 | #修改为可执行权限 91 | $ chmod +x cfssl_linux-amd64 cfssljson_linux-amd64 92 | #移动到bin目录 93 | $ mv cfssl_linux-amd64 /usr/local/bin/cfssl 94 | $ mv cfssljson_linux-amd64 /usr/local/bin/cfssljson 95 | #验证 96 | $ cfssl version 97 | ``` 98 | #### 3.4 生成根证书(主节点) 99 | 根证书是证书信任链的根,各个组件通讯的前提是有一份大家都信任的证书(根证书),每个人使用的证书都是由这个根证书签发的。 100 | ```bash 101 | #所有证书相关的东西都放在这 102 | $ mkdir -p /etc/kubernetes/ca 103 | #准备生成证书的配置文件 104 | $ cp ./kubernetes-starter/target/ca/ca-config.json /etc/kubernetes/ca 105 | $ cp ./kubernetes-starter/target/ca/ca-csr.json /etc/kubernetes/ca 106 | #生成证书和秘钥 107 | $ cd /etc/kubernetes/ca 108 | $ cfssl gencert -initca ca-csr.json | cfssljson -bare ca 109 | #生成完成后会有以下文件(我们最终想要的就是ca-key.pem和ca.pem,一个秘钥,一个证书) 110 | $ ls 111 | ca-config.json ca.csr ca-csr.json ca-key.pem ca.pem 112 | ``` 113 | 114 | ## 4. 
改造etcd 115 | 116 | #### 4.1 准备证书 117 | etcd节点需要提供给其他服务访问,就要验证其他服务的身份,所以需要一个标识自己监听服务的server证书,当有多个etcd节点的时候也需要client证书与etcd集群其他节点交互,当然也可以client和server使用同一个证书因为它们本质上没有区别。 118 | ```bash 119 | #etcd证书放在这 120 | $ mkdir -p /etc/kubernetes/ca/etcd 121 | #准备etcd证书配置 122 | $ cp ./kubernetes-starter/target/ca/etcd/etcd-csr.json /etc/kubernetes/ca/etcd/ 123 | $ cd /etc/kubernetes/ca/etcd/ 124 | #使用根证书(ca.pem)签发etcd证书 125 | $ cfssl gencert \ 126 | -ca=/etc/kubernetes/ca/ca.pem \ 127 | -ca-key=/etc/kubernetes/ca/ca-key.pem \ 128 | -config=/etc/kubernetes/ca/ca-config.json \ 129 | -profile=kubernetes etcd-csr.json | cfssljson -bare etcd 130 | #跟之前类似生成三个文件etcd.csr是个中间证书请求文件,我们最终要的是etcd-key.pem和etcd.pem 131 | $ ls 132 | etcd.csr etcd-csr.json etcd-key.pem etcd.pem 133 | ``` 134 | #### 4.2 改造etcd服务 135 | 建议大家先比较一下增加认证的etcd配置与原有配置的区别,做到心中有数。 136 | 可以使用命令比较: 137 | ```bash 138 | $ cd ./kubernetes-starter/ 139 | #centos使用 140 | $ diff kubernetes-simple/master-node/etcd.service kubernetes-with-ca/master-node/etcd.service 141 | #ubuntu使用 142 | $ vimdiff kubernetes-simple/master-node/etcd.service kubernetes-with-ca/master-node/etcd.service 143 | ``` 144 | **更新etcd服务:** 145 | ```bash 146 | $ cp ./kubernetes-starter/target/master-node/etcd.service /lib/systemd/system/ 147 | $ systemctl daemon-reload 148 | $ service etcd start 149 | # 查看服务日志,看是否有错误信息,确保服务正常 150 | $ journalctl -f -u etcd.service 151 | #验证etcd服务(endpoints自行替换) 152 | $ ETCDCTL_API=3 etcdctl \ 153 | --endpoints=https://192.168.78.128:2379 \ 154 | --cacert=/etc/kubernetes/ca/ca.pem \ 155 | --cert=/etc/kubernetes/ca/etcd/etcd.pem \ 156 | --key=/etc/kubernetes/ca/etcd/etcd-key.pem \ 157 | endpoint health 158 | ``` 159 | 160 | ## 5. 改造api-server 161 | #### 5.1 准备证书 162 | ```bash 163 | #api-server证书放在这,api-server是核心,文件夹叫kubernetes吧,如果想叫apiserver也可以,不过相关的地方都需要修改哦 164 | $ mkdir -p /etc/kubernetes/ca/kubernetes 165 | #准备apiserver证书配置 166 | $ cp ./kubernetes-starter/target/ca/kubernetes/kubernetes-csr.json /etc/kubernetes/ca/kubernetes/ 167 | $ cd /etc/kubernetes/ca/kubernetes/ 168 | #使用根证书(ca.pem)签发kubernetes证书 169 | $ cfssl gencert \ 170 | -ca=/etc/kubernetes/ca/ca.pem \ 171 | -ca-key=/etc/kubernetes/ca/ca-key.pem \ 172 | -config=/etc/kubernetes/ca/ca-config.json \ 173 | -profile=kubernetes kubernetes-csr.json | cfssljson -bare kubernetes 174 | #跟之前类似生成三个文件kubernetes.csr是个中间证书请求文件,我们最终要的是kubernetes-key.pem和kubernetes.pem 175 | $ ls 176 | kubernetes.csr kubernetes-csr.json kubernetes-key.pem kubernetes.pem 177 | ``` 178 | #### 5.2 改造api-server服务 179 | **比较查看改造前和改造后配置文件的差异(diff)** 180 | ```bash 181 | $ cd ./kubernetes-starter 182 | #centos使用 183 | $ diff kubernetes-simple/master-node/kube-apiserver.service kubernetes-with-ca/master-node/kube-apiserver.service 184 | #ubuntu使用 185 | $ vimdiff kubernetes-simple/master-node/kube-apiserver.service kubernetes-with-ca/master-node/kube-apiserver.service 186 | ``` 187 | **生成token认证文件** 188 | ```bash 189 | #生成随机token 190 | $ head -c 16 /dev/urandom | od -An -t x | tr -d ' ' 191 | c5bea714f1c6c6e8175787b0224b739a 192 | 193 | #按照固定格式写入token.csv,注意替换token内容 194 | $ echo "c5bea714f1c6c6e8175787b0224b739a,kubelet-bootstrap,10001,\"system:kubelet-bootstrap\"" > /etc/kubernetes/ca/kubernetes/token.csv 195 | ``` 196 | **更新api-server服务** 197 | ```bash 198 | $ cp ./kubernetes-starter/target/master-node/kube-apiserver.service /lib/systemd/system/ 199 | $ systemctl daemon-reload 200 | $ service kube-apiserver start 201 | 202 | #检查日志 203 | $ journalctl -f -u kube-apiserver 204 | ``` 205 | 206 | ## 6. 
改造controller-manager
207 | controller-manager一般与api-server在同一台机器上,所以可以使用非安全端口与api-server通讯,不需要生成证书和私钥。
208 | #### 6.1 改造controller-manager服务
209 | **查看diff**
210 | ```bash
211 | $ cd ./kubernetes-starter/
212 | #centos使用
213 | $ diff kubernetes-simple/master-node/kube-controller-manager.service kubernetes-with-ca/master-node/kube-controller-manager.service
214 | #ubuntu使用
215 | $ vimdiff kubernetes-simple/master-node/kube-controller-manager.service kubernetes-with-ca/master-node/kube-controller-manager.service
216 | ```
217 | **更新controller-manager服务**
218 | ```bash
219 | $ cp ./kubernetes-starter/target/master-node/kube-controller-manager.service /lib/systemd/system/
220 | $ systemctl daemon-reload
221 | $ service kube-controller-manager start
222 |
223 | #检查日志
224 | $ journalctl -f -u kube-controller-manager
225 | ```
226 |
227 | ## 7. 改造scheduler
228 | scheduler一般与apiserver在同一台机器上,所以可以使用非安全端口与apiserver通讯,不需要生成证书和私钥。
229 | #### 7.1 改造scheduler服务
230 | **查看diff**
231 | 比较会发现两个文件并没有区别,不需要改造
232 | ```bash
233 | $ cd ./kubernetes-starter/
234 | #centos使用
235 | $ diff kubernetes-simple/master-node/kube-scheduler.service kubernetes-with-ca/master-node/kube-scheduler.service
236 | #ubuntu使用
237 | $ vimdiff kubernetes-simple/master-node/kube-scheduler.service kubernetes-with-ca/master-node/kube-scheduler.service
238 | ```
239 | **启动服务**
240 | ```bash
241 | $ service kube-scheduler start
242 | #检查日志
243 | $ journalctl -f -u kube-scheduler
244 | ```
245 | ## 8. 改造kubectl
246 | #### 8.0 先测试
```bash
247 | #看看会不会报错
248 | $ kubectl get nodes
249 | #到根目录
250 | $ cd
251 | #查看kube配置中 server 的监听地址是不是和 ApiServer配置的一致(ApiServer配置的很有可能是127.0.0.1)
252 | $ more .kube/config
253 | #如果不一致,修改
254 | $ vi .kube/config
255 | #再看看会不会报错
256 | $ kubectl get nodes
```
257 | #### 8.1 准备证书(如果是在主节点上可以不改造,但最好还是改造一下)
258 | ```bash
259 | #kubectl证书放在这,由于kubectl相当于系统管理员,我们使用admin命名
260 | $ mkdir -p /etc/kubernetes/ca/admin
261 | #准备admin证书配置 - kubectl只需客户端证书,因此证书请求中 hosts 字段可以为空
262 | $ cp ./kubernetes-starter/target/ca/admin/admin-csr.json /etc/kubernetes/ca/admin/
263 | $ cd /etc/kubernetes/ca/admin/
264 | #使用根证书(ca.pem)签发admin证书
265 | $ cfssl gencert \
266 | -ca=/etc/kubernetes/ca/ca.pem \
267 | -ca-key=/etc/kubernetes/ca/ca-key.pem \
268 | -config=/etc/kubernetes/ca/ca-config.json \
269 | -profile=kubernetes admin-csr.json | cfssljson -bare admin
270 | #我们最终要的是admin-key.pem和admin.pem
271 | $ ls
272 | admin.csr admin-csr.json admin-key.pem admin.pem
273 | ```
274 |
275 | #### 8.2 配置kubectl
276 | ```bash
277 | #指定apiserver的地址和证书位置(ip自行修改)
278 | $ kubectl config set-cluster kubernetes \
279 | --certificate-authority=/etc/kubernetes/ca/ca.pem \
280 | --embed-certs=true \
281 | --server=https://192.168.78.128:6443
282 | #设置客户端认证参数,指定admin证书和秘钥
283 | $ kubectl config set-credentials admin \
284 | --client-certificate=/etc/kubernetes/ca/admin/admin.pem \
285 | --embed-certs=true \
286 | --client-key=/etc/kubernetes/ca/admin/admin-key.pem
287 | #关联用户和集群
288 | $ kubectl config set-context kubernetes \
289 | --cluster=kubernetes --user=admin
290 | #设置当前上下文
291 | $ kubectl config use-context kubernetes
292 |
293 | #设置结果就是一个配置文件,可以看看内容
294 | $ cat ~/.kube/config
295 | ```
296 |
297 | **验证master节点**
298 | ```bash
299 | #可以使用刚配置好的kubectl查看一下组件状态
300 | $ kubectl get componentstatus
301 | NAME STATUS MESSAGE ERROR
302 | scheduler Healthy ok
303 | controller-manager Healthy ok
304 | etcd-0 Healthy {"health": "true"}
305 | ```
306 |
307 |
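由于admin证书请求中的O字段是system:masters,这个用户拥有集群管理员权限;也可以用kubectl粗略验证一下认证授权是否生效(示例命令,较老版本的kubectl可能没有这个子命令):

```bash
#询问当前用户是否有操作所有资源的权限,返回yes说明认证授权已正常工作
$ kubectl auth can-i '*' '*'
yes
```

308 | ## 9. 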
改造calico-node
309 | #### 9.1 准备证书(先在一个节点上生成,然后再拷贝证书)
310 | 后续可以看到calico证书用在四个地方:
311 | * calico/node 这个docker 容器运行时访问 etcd 使用证书
312 | * cni 配置文件中,cni 插件需要访问 etcd 使用证书
313 | * calicoctl 操作集群网络时访问 etcd 使用证书
314 | * calico/kube-controllers 同步集群网络策略时访问 etcd 使用证书
315 | ```bash
316 | #calico证书放在这
317 | $ mkdir -p /etc/kubernetes/ca/calico
318 | #准备calico证书配置 - calico只需客户端证书,因此证书请求中 hosts 字段可以为空
319 | $ cp ./kubernetes-starter/target/ca/calico/calico-csr.json /etc/kubernetes/ca/calico/
320 | $ cd /etc/kubernetes/ca/calico/
321 | #使用根证书(ca.pem)签发calico证书
322 | $ cfssl gencert \
323 | -ca=/etc/kubernetes/ca/ca.pem \
324 | -ca-key=/etc/kubernetes/ca/ca-key.pem \
325 | -config=/etc/kubernetes/ca/ca-config.json \
326 | -profile=kubernetes calico-csr.json | cfssljson -bare calico
327 | #我们最终要的是calico-key.pem和calico.pem
328 | $ ls
329 | calico.csr calico-csr.json calico-key.pem calico.pem
330 | ```
331 |
332 | #### 9.2 改造calico服务
333 | **查看diff**
334 | ```bash
335 | $ cd ./kubernetes-starter
336 | #centos使用
337 | $ diff kubernetes-simple/all-node/kube-calico.service kubernetes-with-ca/all-node/kube-calico.service
338 | #ubuntu使用
339 | $ vimdiff kubernetes-simple/all-node/kube-calico.service kubernetes-with-ca/all-node/kube-calico.service
340 | ```
341 | 通过diff会发现,calico多了几个认证相关的文件:
342 | - /etc/kubernetes/ca/ca.pem
343 | - /etc/kubernetes/ca/calico/calico.pem
344 | - /etc/kubernetes/ca/calico/calico-key.pem
345 | (拷贝证书)由于calico服务是所有节点都需要启动的,大家需要把这几个文件拷贝到每台服务器上(注意拷贝后目录保持一致):
```bash
346 | $ scp -r /etc/kubernetes/ca/ root@192.168.78.129:/etc/kubernetes/
347 | $ scp -r /etc/kubernetes/ca/ root@192.168.78.130:/etc/kubernetes/
```
349 | **更新calico服务(所有工作节点)**
350 | ```bash
351 | $ cp ./kubernetes-starter/target/all-node/kube-calico.service /lib/systemd/system/
352 | $ systemctl daemon-reload
353 | $ service kube-calico start
354 | #检查日志
355 | $ journalctl -f -u kube-calico
356 | #验证calico(能看到其他节点的列表就对啦)
357 | $ calicoctl node status
358 | ```
359 |
360 |
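启用TLS后,如果想在宿主机上直接使用calicoctl,它访问etcd同样需要带上证书。老版本calicoctl(etcd后端)支持用环境变量传入(示例,endpoint替换为你自己的):

```bash
$ export ETCD_ENDPOINTS=https://192.168.78.128:2379
$ export ETCD_CA_CERT_FILE=/etc/kubernetes/ca/ca.pem
$ export ETCD_CERT_FILE=/etc/kubernetes/ca/calico/calico.pem
$ export ETCD_KEY_FILE=/etc/kubernetes/ca/calico/calico-key.pem
$ calicoctl node status
```

361 | ## 10. 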
改造kubelet 362 | 我们这里让kubelet使用引导token的方式认证,所以认证方式跟之前的组件不同,它的证书不是手动生成,而是由工作节点TLS BootStrap 向api-server请求,由主节点的controller-manager 自动签发。 363 | #### 10.1 创建角色绑定(主节点) 364 | 引导token的方式要求客户端向api-server发起请求时告诉他你的用户名和token,并且这个用户是具有一个特定的角色:system:node-bootstrapper,所以需要先将 bootstrap token 文件中的 kubelet-bootstrap 用户赋予这个特定角色,然后 kubelet 才有权限发起创建认证请求。 365 | **在主节点执行下面命令** 366 | ```bash 367 | #可以通过下面命令查询clusterrole(集群的角色)列表 368 | $ kubectl -n kube-system get clusterrole 369 | 370 | #可以回顾一下token文件的内容 371 | $ cat /etc/kubernetes/ca/kubernetes/token.csv 372 | c5bea714f1c6c6e8175787b0224b739a,kubelet-bootstrap,10001,"system:kubelet-bootstrap" 373 | 374 | #创建角色绑定(将用户kubelet-bootstrap与角色system:node-bootstrapper绑定) 375 | $ kubectl create clusterrolebinding kubelet-bootstrap \ 376 | --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap 377 | ``` 378 | #### 10.2 创建bootstrap.kubeconfig(工作节点) 379 | 这个配置是用来完成bootstrap token认证的,保存了像用户,token等重要的认证信息,这个文件可以借助kubectl命令生成:(也可以自己写配置) 380 | ```bash 381 | #设置集群参数(注意替换ip) 382 | $ kubectl config set-cluster kubernetes \ 383 | --certificate-authority=/etc/kubernetes/ca/ca.pem \ 384 | --embed-certs=true \ 385 | --server=https://192.168.78.128:6443 \ 386 | --kubeconfig=bootstrap.kubeconfig 387 | #设置客户端认证参数(注意替换token) 388 | $ kubectl config set-credentials kubelet-bootstrap \ 389 | --token=c5bea714f1c6c6e8175787b0224b739a \ 390 | --kubeconfig=bootstrap.kubeconfig 391 | #设置上下文 392 | $ kubectl config set-context default \ 393 | --cluster=kubernetes \ 394 | --user=kubelet-bootstrap \ 395 | --kubeconfig=bootstrap.kubeconfig 396 | #选择上下文 397 | $ kubectl config use-context default --kubeconfig=bootstrap.kubeconfig 398 | #将刚生成的文件移动到合适的位置 399 | $ mv bootstrap.kubeconfig /etc/kubernetes/ 400 | ``` 401 | #### 10.3 准备cni配置 402 | **查看diff** 403 | ```bash 404 | $ cd ./kubernetes-starter 405 | #centos使用 406 | $ diff kubernetes-simple/worker-node/10-calico.conf kubernetes-with-ca/worker-node/10-calico.conf 407 | #ubuntu使用 408 | $ vimdiff kubernetes-simple/worker-node/10-calico.conf kubernetes-with-ca/worker-node/10-calico.conf 409 | ``` 410 | **copy配置** 411 | ```bash 412 | $ cp ./kubernetes-starter/target/worker-node/10-calico.conf /etc/cni/net.d/ 413 | ``` 414 | #### 10.4 改造kubelet服务 415 | **查看diff** 416 | ```bash 417 | $ cd ./kubernetes-starter 418 | #centos使用 419 | $ diff kubernetes-simple/worker-node/kubelet.service kubernetes-with-ca/worker-node/kubelet.service 420 | #ubuntu使用 421 | $ vimdiff kubernetes-simple/worker-node/kubelet.service kubernetes-with-ca/worker-node/kubelet.service 422 | ``` 423 | 424 | **更新服务** 425 | ```bash 426 | $ cp ./kubernetes-starter/target/worker-node/kubelet.service /lib/systemd/system/ 427 | $ systemctl daemon-reload 428 | $ service kubelet start 429 | 430 | #启动kubelet之后到master节点允许worker加入(批准worker的tls证书请求) 431 | #--------*在主节点执行*--------- 432 | #看看是不是有 Pending 状态的请求 433 | $ kubectl get csr 434 | #批准worker的tls证书请求 435 | $ kubectl get csr|grep 'Pending' | awk '{print $1}'| xargs kubectl certificate approve 436 | #----------------------------- 437 | 438 | #检查日志 439 | $ journalctl -f -u kubelet 440 | ``` 441 | 442 | ## 11. 
改造kube-proxy
443 | #### 11.1 准备证书
444 | ```bash
445 | #proxy证书放在这
446 | $ mkdir -p /etc/kubernetes/ca/kube-proxy
447 |
448 | #准备proxy证书配置 - proxy只需客户端证书,因此证书请求中 hosts 字段可以为空。
449 | #CN 指定该证书的 User 为 system:kube-proxy,预定义的 ClusterRoleBinding system:node-proxier 将 User system:kube-proxy 与 ClusterRole system:node-proxier 绑定,授予了调用 kube-apiserver proxy 相关 API 的权限
450 | $ cp ./kubernetes-starter/target/ca/kube-proxy/kube-proxy-csr.json /etc/kubernetes/ca/kube-proxy/
451 | $ cd /etc/kubernetes/ca/kube-proxy/
452 |
453 | #使用根证书(ca.pem)签发kube-proxy证书
454 | $ cfssl gencert \
455 | -ca=/etc/kubernetes/ca/ca.pem \
456 | -ca-key=/etc/kubernetes/ca/ca-key.pem \
457 | -config=/etc/kubernetes/ca/ca-config.json \
458 | -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
459 | #我们最终要的是kube-proxy-key.pem和kube-proxy.pem
460 | $ ls
461 | kube-proxy.csr kube-proxy-csr.json kube-proxy-key.pem kube-proxy.pem
462 | ```
463 |
464 | #### 11.2 生成kube-proxy.kubeconfig配置
465 | ```bash
466 | #设置集群参数(注意替换ip)
467 | $ kubectl config set-cluster kubernetes \
468 | --certificate-authority=/etc/kubernetes/ca/ca.pem \
469 | --embed-certs=true \
470 | --server=https://192.168.78.128:6443 \
471 | --kubeconfig=kube-proxy.kubeconfig
472 | #设置客户端认证参数
473 | $ kubectl config set-credentials kube-proxy \
474 | --client-certificate=/etc/kubernetes/ca/kube-proxy/kube-proxy.pem \
475 | --client-key=/etc/kubernetes/ca/kube-proxy/kube-proxy-key.pem \
476 | --embed-certs=true \
477 | --kubeconfig=kube-proxy.kubeconfig
478 | #设置上下文参数
479 | $ kubectl config set-context default \
480 | --cluster=kubernetes \
481 | --user=kube-proxy \
482 | --kubeconfig=kube-proxy.kubeconfig
483 | #选择上下文
484 | $ kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
485 | #移动到合适位置
486 | $ mv kube-proxy.kubeconfig /etc/kubernetes/kube-proxy.kubeconfig
487 |
488 | ```
489 | #### 11.3 改造kube-proxy服务
490 | **查看diff**
491 | ```bash
492 | $ cd ./kubernetes-starter
493 | #centos使用
494 | $ diff kubernetes-simple/worker-node/kube-proxy.service kubernetes-with-ca/worker-node/kube-proxy.service
495 | #ubuntu使用
496 | $ vimdiff kubernetes-simple/worker-node/kube-proxy.service kubernetes-with-ca/worker-node/kube-proxy.service
497 | ```
498 | > 经过diff你应该发现kube-proxy.service没有变化
499 |
500 | **启动服务**
501 | ```bash
502 | #如果之前的配置没有了,可以重新复制一份过去
503 | $ cp ./kubernetes-starter/target/worker-node/kube-proxy.service /lib/systemd/system/
504 | $ systemctl daemon-reload
505 |
506 | #安装依赖软件
507 | #ubuntu使用
508 | $ apt install conntrack
509 | #centos使用
510 | $ yum install conntrack
511 |
512 | #启动服务
513 | $ service kube-proxy start
514 | #查看日志
515 | $ journalctl -f -u kube-proxy
516 | ```
517 |
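可以用kubectl确认生成的kubeconfig内容是否符合预期(嵌入的证书内容会显示为REDACTED,属正常现象):

```bash
$ kubectl config view --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig
```

518 | ## 12. 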
改造kube-dns
519 | kube-dns有些特别,因为它本身是运行在kubernetes集群中,以kubernetes应用的形式运行,所以它的认证授权方式跟之前的组件都不一样:它需要用到service account认证和RBAC授权。
520 | **service account认证:**
521 | 每个service account都会自动生成自己的secret,里面包含了ca证书、token和namespace,用于跟api-server认证
522 | **RBAC授权:**
523 | 权限、角色和角色绑定都是kubernetes自动创建好的。我们只需要创建一个叫做kube-dns的 ServiceAccount即可,官方现有的配置已经把它包含进去了。
524 |
525 | #### 12.1 准备配置文件
526 | 我们在官方配置的基础上添加了变量,生成适合我们集群的配置。直接copy就可以啦
527 | ```bash
528 | $ cd ./kubernetes-starter
529 | #centos使用
530 | $ diff kubernetes-simple/services/kube-dns.yaml kubernetes-with-ca/services/kube-dns.yaml
531 | #ubuntu使用
532 | $ vimdiff kubernetes-simple/services/kube-dns.yaml kubernetes-with-ca/services/kube-dns.yaml
533 | ```
534 | > 大家可以看到diff只有一处,新的配置没有设定api-server。不访问api-server,它是怎么知道每个服务的cluster ip和pod的endpoints的呢?这是因为kubernetes在启动每个pod的时候,会以环境变量的方式把所有服务的ip、端口等信息注入进来。
535 |
536 | #### 12.2 创建kube-dns
537 | ```bash
538 | $ kubectl create -f ./kubernetes-starter/target/services/kube-dns.yaml
539 | #看看启动是否成功
540 | $ kubectl -n kube-system get pods -o wide -- 查看kube-dns 的 pod是否运行
541 | $ kubectl -n kube-system get services -- 查看kube-dns 服务是否创建成功,-n 是指定命名空间,kube-system 是 kubernetes 系统内部的命名空间
542 | $ kubectl -n kube-system get deployments -- 查看kube-dns 部署是否创建成功
543 | $ docker ps|grep dns -- 到运行 dns pod 的节点上执行,查看运行了哪些容器
544 | 一般会运行如下几个容器:
545 | k8s-dns-sidecar:用于监控其他几个容器的健康状态
546 | k8s-dns-dnsmasq:用于 dns 缓存,来提升效率
547 | k8s-dns-kube-dns:真正提供 dns 服务的容器
548 | pause-amd64:pod 容器
549 | ```
550 |
551 | ## 13. 再试牛刀
552 | 终于,安全版的kubernetes集群我们部署完成了。
553 | 下面我们使用新集群先温习一下之前学习过的命令,然后再认识一些新的命令、新的参数、新的功能。
554 | ```bash
555 | kubectl version #查看集群版本
556 | kubectl get nodes #查看所有节点
557 | kubectl get pods #查看所有pod
558 | #创建一个 deployments
559 | kubectl run kubernetes-bootcamp --image=jocatalin/kubernetes-bootcamp:v1 --port=8080
560 | kubectl get deployments #查看 deployments 是否创建成功
561 | kubectl get pods #查看所有 pod
562 | kubectl logs 'pod的名称' #查看 pod 的日志,如果想跟随日志,就在命令后面加一个 -f
563 | kubectl describe pods 'pod名称' #查看 pod 详细信息
564 | kubectl exec -it 'pod名称' bash #进入 pod 内部
565 | #查看某个pod的挂载点文件目录(使用 kubectl describe pods 'pod名称' 可以看到这个目录)
566 | cd /var/run/secrets/kubernetes.io/serviceaccount
567 | exit #退出 pod
568 | #上面我们看的那个目录是和 serviceaccount 相关
569 | kubectl get serviceaccount #查看serviceaccount,也可以简写:kubectl get sa
570 | kubectl get serviceaccount -o yaml #查看 serviceaccount 详细信息,-o 表示输出格式
571 | #执行上面命令我们会看到 yaml 文件里有个 secrets
572 | kubectl get secrets #查看是不是有这个 secret
573 | kubectl get secrets -o yaml #查看 secrets 详细信息,-o 是输出格式
574 | ```
575 | > 注:ApiServer如果开启了ServiceAccount,它会在default这个命名空间下创建一个默认的serviceaccount;
576 | > 然后在每个pod启动的时候,会把serviceaccount里面的 secret 以文件的形式挂载到每个pod里面,有了这些证书文件之后,
577 | > 我们的pod就可以以https的形式访问我们的ApiServer,也就是说pod可以通过apiServer的认证。
578 | ```bash
579 | kubectl apply -f 'yaml文件' #(建议创建都用 apply)创建pod或deployments或service(这个创建命令和create有区别:它会把每次的配置都保存起来,create只有把应用删除了才可以重新创建,apply则是在现有的基础之上做修改)
580 | kubectl describe pods 'pod名称' #查看pod的详细信息
581 | kubectl get pods 'pod名称' -o json #查看pod的详细信息,-o 是输出格式
582 | #我们可以将nginx容器的版本修改为1.13来测试 apply 命令是不是在现有的基础之上做修改的
583 | kubectl apply -f 'yaml文件'
584 | kubectl describe pods 'pod名称' #查看pod的详细信息,主要查看nginx容器的版本是不是变成1.13了
585 | #修改pod容器的版本(这里以上面创建的nginx pod为例)
586 | kubectl set image pods 'pod名称' '容器的名称'=nginx:1.7.9
587 | #创建一个 service,先创建 deployments,因为 service 是建立在 deployments 之上
588 | kubectl apply -f nginx-deployment.yaml
589 | kubectl apply -f nginx-service.yaml
590 | kubectl get service #查看所有service
591 | #开启一个用于测试集群的容器 --rm=true就是退出后自动删除,--restart=Never是重启策略,这里是不重启,--tty 开启一个终端,-i 拿到输入
592 | kubectl run 
busybox --rm=true --image=busybox --restart=Never --tty -i 593 | wget -q0- 'get service查看到的端口':80 #这里其实是测试在集群内是否可以访问上面创建的nginx这个service 594 | ``` 595 | 596 | 597 | 598 | [1]: https://www.zhihu.com/question/33645891/answer/57721969 599 | [2]: http://www.ruanyifeng.com/blog/2014/02/ssl_tls.html 600 | 601 | -------------------------------------------------------------------------------- /docs/4-microservice-deploy.md: -------------------------------------------------------------------------------- 1 | # 四、kubernetes集群部署微服务 2 | ## 1. 微服务部署方案 - 思路整理 3 | ##### 我们有如下微服务: 4 | - 消息服务:message-service 5 | - 课程dubbo服务:course-dubbo-service 6 | - 课程web服务:course-edge-service 7 | - 用户thrift服务:user-thrift-service 8 | - 用户web服务:user-edge-service 9 | - API网关:api-gateway 10 | 11 | ##### 把它们放到kubernetes集群运行我们要考虑什么问题? 12 | - 哪些服务适合单独成为一个pod?哪些服务适合在一个pod中? 13 | - 在一个pod里面的服务如何彼此访问?他们的服务如何对外提供服务? 14 | - 单独的pod如何对外提供服务? 15 | - 哪个服务作为整个服务的入口,入口服务如何对外提供服务? 16 | 17 | 18 | ## 2. 搞定配置 19 | 配置的模板已经为大家准备好了,但是还需要大家做一下处理才能使用哦,参考下面脚本: 20 | ```bash 21 | $ cd ~/kubernetes-starter/service-config/ 22 | $ ls 23 | api-gateway.yaml message-service.yaml 24 | course-service.yaml user-service.yaml 25 | #替换变量 - (hub.mooc.com:8080是我的环境的镜像仓库地址,大家修改为各自的仓库) 26 | $ sed -i 's/{{HUB}}/hub.mooc.com:8080/g' * 27 | ``` 28 | ## 3. 部署服务 29 | ##### 部署前准备: 30 | - **要过一遍我们现有的代码配置,看看是否有需要修改的,修改后需要新生成镜像** 31 | - **要启动好微服务依赖的服务,像zookeeper,mysql,registry等** 32 | 33 | -------------------------------------------------------------------------------- /gen-config.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | declare -A kvs=() 4 | 5 | function replace_files() { 6 | local file=$1 7 | if [ -f $file ];then 8 | echo "$file" 9 | for key in ${!kvs[@]} 10 | do 11 | value=${kvs[$key]} 12 | value=${value//\//\\\/} 13 | sed -i "s/{{$key}}/${value}/g" $file 14 | done 15 | return 0 16 | fi 17 | if [ -d $file ];then 18 | for f in `ls $file` 19 | do 20 | replace_files "${file}/${f}" 21 | done 22 | fi 23 | return 0 24 | } 25 | 26 | target=$1 27 | 28 | if [ "$target" != "simple" -a "$target" != "with-ca" ];then 29 | echo -e "Usage:\n\t sh gen-config.sh (simple / with-ca)" 30 | exit 1 31 | fi 32 | 33 | if [ "$target" == "simple" ];then 34 | folder="kubernetes-simple" 35 | else 36 | folder="kubernetes-with-ca" 37 | fi 38 | 39 | target="target" 40 | rm -fr $target 41 | cp -r $folder $target 42 | cd $target 43 | 44 | echo "====替换变量列表====" 45 | while read line;do 46 | if [ "${line:0:1}" == "#" -o "${line:0:1}" == "" ];then 47 | continue; 48 | fi 49 | key=${line/=*/} 50 | value=${line#*=} 51 | echo "$key=$value" 52 | kvs["$key"]="$value" 53 | done < ../config.properties 54 | echo "====================" 55 | 56 | echo "====替换配置文件====" 57 | for element in `ls` 58 | do 59 | dir_or_file=$element 60 | if [ ! 
-d $dir_or_file ];then 61 | continue 62 | fi 63 | replace_files $dir_or_file 64 | done 65 | echo "=================" 66 | echo "配置生成成功,位置: `pwd`" 67 | -------------------------------------------------------------------------------- /images/k8s-concept.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chiangfire/kubernetes-starter/e9449ad8c91d794d70dbe7549fd734e5a957b8ba/images/k8s-concept.jpg -------------------------------------------------------------------------------- /kubernetes-simple/all-node/kube-calico.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=calico node 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | User=root 8 | PermissionsStartOnly=true 9 | ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \ 10 | -e ETCD_ENDPOINTS={{ETCD_ENDPOINTS}} \ 11 | -e CALICO_LIBNETWORK_ENABLED=true \ 12 | -e CALICO_NETWORKING_BACKEND=bird \ 13 | -e CALICO_DISABLE_FILE_LOGGING=true \ 14 | -e CALICO_IPV4POOL_CIDR=172.20.0.0/16 \ 15 | -e CALICO_IPV4POOL_IPIP=off \ 16 | -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \ 17 | -e FELIX_IPV6SUPPORT=false \ 18 | -e FELIX_LOGSEVERITYSCREEN=info \ 19 | -e FELIX_IPINIPMTU=1440 \ 20 | -e FELIX_HEALTHENABLED=true \ 21 | -e IP={{NODE_IP}} \ 22 | -v /var/run/calico:/var/run/calico \ 23 | -v /lib/modules:/lib/modules \ 24 | -v /run/docker/plugins:/run/docker/plugins \ 25 | -v /var/run/docker.sock:/var/run/docker.sock \ 26 | -v /var/log/calico:/var/log/calico \ 27 | registry.cn-hangzhou.aliyuncs.com/imooc/calico-node:v2.6.2 28 | ExecStop=/usr/bin/docker rm -f calico-node 29 | Restart=always 30 | RestartSec=10 31 | 32 | [Install] 33 | WantedBy=multi-user.target 34 | -------------------------------------------------------------------------------- /kubernetes-simple/master-node/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Etcd Server 3 | After=network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | Documentation=https://github.com/coreos 7 | 8 | [Service] 9 | Type=notify 10 | WorkingDirectory=/var/lib/etcd/ 11 | ExecStart={{BIN_PATH}}/etcd \ 12 | --name={{NODE_IP}} \ 13 | --listen-client-urls=http://{{NODE_IP}}:2379,http://127.0.0.1:2379 \ 14 | --advertise-client-urls=http://{{NODE_IP}}:2379 \ 15 | --data-dir=/var/lib/etcd 16 | Restart=on-failure 17 | RestartSec=5 18 | LimitNOFILE=65536 19 | 20 | [Install] 21 | WantedBy=multi-user.target 22 | -------------------------------------------------------------------------------- /kubernetes-simple/master-node/kube-apiserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes API Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=network.target 5 | [Service] 6 | ExecStart={{BIN_PATH}}/kube-apiserver \ 7 | --admission-control=NamespaceLifecycle,LimitRanger,DefaultStorageClass,ResourceQuota,NodeRestriction \ 8 | --insecure-bind-address=0.0.0.0 \ 9 | --kubelet-https=false \ 10 | --service-cluster-ip-range=10.68.0.0/16 \ 11 | --service-node-port-range=20000-40000 \ 12 | --etcd-servers={{ETCD_ENDPOINTS}} \ 13 | --enable-swagger-ui=true \ 14 | --allow-privileged=true \ 15 | --audit-log-maxage=30 \ 16 | --audit-log-maxbackup=3 \ 17 | --audit-log-maxsize=100 \ 18 | --audit-log-path=/var/lib/audit.log \ 19 | --event-ttl=1h \ 20 | --v=2 21 | 
Restart=on-failure 22 | RestartSec=5 23 | Type=notify 24 | LimitNOFILE=65536 25 | [Install] 26 | WantedBy=multi-user.target 27 | -------------------------------------------------------------------------------- /kubernetes-simple/master-node/kube-controller-manager.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Controller Manager 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | [Service] 5 | ExecStart={{BIN_PATH}}/kube-controller-manager \ 6 | --address=127.0.0.1 \ 7 | --master=http://127.0.0.1:8080 \ 8 | --allocate-node-cidrs=true \ 9 | --service-cluster-ip-range=10.68.0.0/16 \ 10 | --cluster-cidr=172.20.0.0/16 \ 11 | --cluster-name=kubernetes \ 12 | --leader-elect=true \ 13 | --cluster-signing-cert-file= \ 14 | --cluster-signing-key-file= \ 15 | --v=2 16 | Restart=on-failure 17 | RestartSec=5 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /kubernetes-simple/master-node/kube-scheduler.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Scheduler 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | 5 | [Service] 6 | ExecStart={{BIN_PATH}}/kube-scheduler \ 7 | --address=127.0.0.1 \ 8 | --master=http://127.0.0.1:8080 \ 9 | --leader-elect=true \ 10 | --v=2 11 | Restart=on-failure 12 | RestartSec=5 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /kubernetes-simple/services/kube-dns.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | #ConfigMap是我们见到的一个新类型,顾名思义是做配置管理的,这里用作kube-dns配置存储 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: kube-dns 7 | namespace: kube-system 8 | labels: 9 | addonmanager.kubernetes.io/mode: EnsureExists 10 | 11 | --- 12 | #认证授权使用,这里未用到 13 | apiVersion: v1 14 | kind: ServiceAccount 15 | metadata: 16 | name: kube-dns 17 | namespace: kube-system 18 | labels: 19 | addonmanager.kubernetes.io/mode: Reconcile 20 | 21 | --- 22 | #dns服务 23 | apiVersion: v1 24 | kind: Service 25 | metadata: 26 | name: kube-dns 27 | namespace: kube-system 28 | labels: 29 | k8s-app: kube-dns 30 | addonmanager.kubernetes.io/mode: Reconcile 31 | kubernetes.io/name: "KubeDNS" 32 | spec: 33 | selector: 34 | #选择器,一个服务包含了哪些pods 35 | k8s-app: kube-dns 36 | #服务的clusterip,需要跟kubelet保持一致 37 | clusterIP: 10.68.0.2 38 | ports: 39 | - name: dns 40 | port: 53 41 | protocol: UDP 42 | - name: dns-tcp 43 | port: 53 44 | protocol: TCP 45 | 46 | --- 47 | #具体的pod定义,包含了三个容器 48 | apiVersion: apps/v1 49 | kind: Deployment 50 | metadata: 51 | name: kube-dns 52 | namespace: kube-system 53 | labels: 54 | k8s-app: kube-dns 55 | addonmanager.kubernetes.io/mode: Reconcile 56 | spec: 57 | strategy: 58 | rollingUpdate: 59 | maxSurge: 10% 60 | maxUnavailable: 0 61 | selector: 62 | matchLabels: 63 | k8s-app: kube-dns 64 | template: 65 | metadata: 66 | labels: 67 | k8s-app: kube-dns 68 | annotations: 69 | scheduler.alpha.kubernetes.io/critical-pod: '' 70 | spec: 71 | tolerations: 72 | - key: "CriticalAddonsOnly" 73 | operator: "Exists" 74 | volumes: 75 | - name: kube-dns-config 76 | configMap: 77 | name: kube-dns 78 | optional: true 79 | containers: 80 | #实现dns解析功能 81 | - name: kubedns 82 | image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-kube-dns-amd64:1.14.5 83 | resources: 84 | # TODO: Set memory 
limits when we've profiled the container for large 85 | # clusters, then set request = limit to keep this container in 86 | # guaranteed class. Currently, this container falls into the 87 | # "burstable" category so the kubelet doesn't backoff from restarting it. 88 | limits: 89 | memory: 170Mi 90 | requests: 91 | cpu: 100m 92 | memory: 70Mi 93 | livenessProbe: 94 | httpGet: 95 | path: /healthcheck/kubedns 96 | port: 10054 97 | scheme: HTTP 98 | initialDelaySeconds: 60 99 | timeoutSeconds: 5 100 | successThreshold: 1 101 | failureThreshold: 5 102 | readinessProbe: 103 | httpGet: 104 | path: /readiness 105 | port: 8081 106 | scheme: HTTP 107 | # we poll on pod startup for the Kubernetes master service and 108 | # only setup the /readiness HTTP server once that's available. 109 | initialDelaySeconds: 3 110 | timeoutSeconds: 5 111 | args: 112 | - --domain=cluster.local. 113 | - --dns-port=10053 114 | - --config-dir=/kube-dns-config 115 | #访问kube-apiserver的地址 116 | - --kube-master-url=http://{{MASTER_IP}}:8080 117 | - --v=2 118 | env: 119 | - name: PROMETHEUS_PORT 120 | value: "10055" 121 | ports: 122 | - containerPort: 10053 123 | name: dns-local 124 | protocol: UDP 125 | - containerPort: 10053 126 | name: dns-tcp-local 127 | protocol: TCP 128 | - containerPort: 10055 129 | name: metrics 130 | protocol: TCP 131 | volumeMounts: 132 | - name: kube-dns-config 133 | mountPath: /kube-dns-config 134 | #dnsmasq类似一个dns缓存,用于提高访问效率 135 | - name: dnsmasq 136 | image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-dnsmasq-nanny-amd64:1.14.5 137 | livenessProbe: 138 | httpGet: 139 | path: /healthcheck/dnsmasq 140 | port: 10054 141 | scheme: HTTP 142 | initialDelaySeconds: 60 143 | timeoutSeconds: 5 144 | successThreshold: 1 145 | failureThreshold: 5 146 | args: 147 | - -v=2 148 | - -logtostderr 149 | - -configDir=/etc/k8s/dns/dnsmasq-nanny 150 | - -restartDnsmasq=true 151 | - -- 152 | - -k 153 | - --cache-size=1000 154 | - --log-facility=- 155 | - --server=/cluster.local./127.0.0.1#10053 156 | - --server=/in-addr.arpa/127.0.0.1#10053 157 | - --server=/ip6.arpa/127.0.0.1#10053 158 | ports: 159 | - containerPort: 53 160 | name: dns 161 | protocol: UDP 162 | - containerPort: 53 163 | name: dns-tcp 164 | protocol: TCP 165 | # see: https://github.com/kubernetes/kubernetes/issues/29055 for details 166 | resources: 167 | requests: 168 | cpu: 150m 169 | memory: 20Mi 170 | volumeMounts: 171 | - name: kube-dns-config 172 | mountPath: /etc/k8s/dns/dnsmasq-nanny 173 | #sidecar是一个监控功能,负责监控另外两个容器的运行 174 | - name: sidecar 175 | image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-sidecar-amd64:1.14.5 176 | livenessProbe: 177 | httpGet: 178 | path: /metrics 179 | port: 10054 180 | scheme: HTTP 181 | initialDelaySeconds: 60 182 | timeoutSeconds: 5 183 | successThreshold: 1 184 | failureThreshold: 5 185 | args: 186 | - --v=2 187 | - --logtostderr 188 | - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A 189 | - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A 190 | ports: 191 | - containerPort: 10054 192 | name: metrics 193 | protocol: TCP 194 | resources: 195 | requests: 196 | memory: 20Mi 197 | cpu: 10m 198 | dnsPolicy: Default # Don't use cluster DNS. 
199 | serviceAccountName: kube-dns 200 | -------------------------------------------------------------------------------- /kubernetes-simple/worker-node/10-calico.conf: -------------------------------------------------------------------------------- 1 | { 2 | "name": "calico-k8s-network", 3 | "cniVersion": "0.1.0", 4 | "type": "calico", 5 | "etcd_endpoints": "{{ETCD_ENDPOINTS}}", 6 | "log_level": "info", 7 | "ipam": { 8 | "type": "calico-ipam" 9 | }, 10 | "kubernetes": { 11 | "k8s_api_root": "http://{{MASTER_IP}}:8080" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /kubernetes-simple/worker-node/kube-proxy.kubeconfig: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | server: http://{{MASTER_IP}}:8080 5 | name: kubernetes 6 | contexts: 7 | - context: 8 | cluster: kubernetes 9 | name: default 10 | current-context: default 11 | kind: Config 12 | preferences: {} 13 | users: [] 14 | -------------------------------------------------------------------------------- /kubernetes-simple/worker-node/kube-proxy.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kube-Proxy Server 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=network.target 5 | [Service] 6 | WorkingDirectory=/var/lib/kube-proxy 7 | ExecStart={{BIN_PATH}}/kube-proxy \ 8 | --bind-address={{NODE_IP}} \ 9 | --hostname-override={{NODE_IP}} \ 10 | --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \ 11 | --logtostderr=true \ 12 | --v=2 13 | Restart=on-failure 14 | RestartSec=5 15 | LimitNOFILE=65536 16 | 17 | [Install] 18 | WantedBy=multi-user.target 19 | -------------------------------------------------------------------------------- /kubernetes-simple/worker-node/kubelet.kubeconfig: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | clusters: 3 | - cluster: 4 | insecure-skip-tls-verify: true 5 | server: http://{{MASTER_IP}}:8080 6 | name: kubernetes 7 | contexts: 8 | - context: 9 | cluster: kubernetes 10 | user: "" 11 | name: system:node:kube-master 12 | current-context: system:node:kube-master 13 | kind: Config 14 | preferences: {} 15 | users: [] 16 | -------------------------------------------------------------------------------- /kubernetes-simple/worker-node/kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Kubernetes Kubelet 3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes 4 | After=docker.service 5 | Requires=docker.service 6 | 7 | [Service] 8 | WorkingDirectory=/var/lib/kubelet 9 | ExecStart={{BIN_PATH}}/kubelet \ 10 | --address={{NODE_IP}} \ 11 | --hostname-override={{NODE_IP}} \ 12 | --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/imooc/pause-amd64:3.0 \ 13 | --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \ 14 | --network-plugin=cni \ 15 | --cni-conf-dir=/etc/cni/net.d \ 16 | --cni-bin-dir={{BIN_PATH}} \ 17 | --cluster-dns=10.68.0.2 \ 18 | --cluster-domain=cluster.local. 
\ 19 | --allow-privileged=true \ 20 | --fail-swap-on=false \ 21 | --logtostderr=true \ 22 | --v=2 23 | #kubelet cAdvisor 默认在所有接口监听 4194 端口的请求, 以下iptables限制内网访问 24 | ExecStartPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT 25 | ExecStartPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT 26 | ExecStartPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT 27 | ExecStartPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP 28 | Restart=on-failure 29 | RestartSec=5 30 | 31 | [Install] 32 | WantedBy=multi-user.target 33 | -------------------------------------------------------------------------------- /kubernetes-with-ca/all-node/kube-calico.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=calico node 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | User=root 8 | PermissionsStartOnly=true 9 | ExecStart=/usr/bin/docker run --net=host --privileged --name=calico-node \ 10 | -e ETCD_ENDPOINTS={{ETCD_ENDPOINTS}} \ 11 | -e ETCD_CA_CERT_FILE=/etc/kubernetes/ca/ca.pem \ 12 | -e ETCD_CERT_FILE=/etc/kubernetes/ca/calico/calico.pem \ 13 | -e ETCD_KEY_FILE=/etc/kubernetes/ca/calico/calico-key.pem \ 14 | -e CALICO_LIBNETWORK_ENABLED=true \ 15 | -e CALICO_NETWORKING_BACKEND=bird \ 16 | -e CALICO_DISABLE_FILE_LOGGING=true \ 17 | -e CALICO_IPV4POOL_CIDR=172.20.0.0/16 \ 18 | -e CALICO_IPV4POOL_IPIP=off \ 19 | -e FELIX_DEFAULTENDPOINTTOHOSTACTION=ACCEPT \ 20 | -e FELIX_IPV6SUPPORT=false \ 21 | -e FELIX_LOGSEVERITYSCREEN=info \ 22 | -e FELIX_IPINIPMTU=1440 \ 23 | -e FELIX_HEALTHENABLED=true \ 24 | -e IP={{NODE_IP}} \ 25 | -v /etc/kubernetes/ca:/etc/kubernetes/ca \ 26 | -v /var/run/calico:/var/run/calico \ 27 | -v /lib/modules:/lib/modules \ 28 | -v /run/docker/plugins:/run/docker/plugins \ 29 | -v /var/run/docker.sock:/var/run/docker.sock \ 30 | -v /var/log/calico:/var/log/calico \ 31 | registry.cn-hangzhou.aliyuncs.com/imooc/calico-node:v2.6.2 32 | ExecStop=/usr/bin/docker rm -f calico-node 33 | Restart=always 34 | RestartSec=10 35 | 36 | [Install] 37 | WantedBy=multi-user.target 38 | -------------------------------------------------------------------------------- /kubernetes-with-ca/ca/admin/admin-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "admin", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "Beijing", 12 | "L": "XS", 13 | "O": "system:masters", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /kubernetes-with-ca/ca/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "87600h" 5 | }, 6 | "profiles": { 7 | "kubernetes": { 8 | "usages": [ 9 | "signing", 10 | "key encipherment", 11 | "server auth", 12 | "client auth" 13 | ], 14 | "expiry": "87600h" 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /kubernetes-with-ca/ca/ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | }, 7 | "names": [ 8 | { 9 | "C": "CN", 10 | "ST": "Beijing", 11 | "L": "XS", 12 | "O": "k8s", 13 | "OU": "System" 14 | } 15 | ] 16 | } 17 | 
-------------------------------------------------------------------------------- /kubernetes-with-ca/ca/calico/calico-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "calico", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "Beijing", 12 | "L": "XS", 13 | "O": "k8s", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /kubernetes-with-ca/ca/etcd/etcd-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "etcd", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "{{NODE_IP}}" 6 | ], 7 | "key": { 8 | "algo": "rsa", 9 | "size": 2048 10 | }, 11 | "names": [ 12 | { 13 | "C": "CN", 14 | "ST": "Beijing", 15 | "L": "XS", 16 | "O": "k8s", 17 | "OU": "System" 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /kubernetes-with-ca/ca/kube-proxy/kube-proxy-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "system:kube-proxy", 3 | "hosts": [], 4 | "key": { 5 | "algo": "rsa", 6 | "size": 2048 7 | }, 8 | "names": [ 9 | { 10 | "C": "CN", 11 | "ST": "Beijing", 12 | "L": "XS", 13 | "O": "k8s", 14 | "OU": "System" 15 | } 16 | ] 17 | } 18 | -------------------------------------------------------------------------------- /kubernetes-with-ca/ca/kubernetes/kubernetes-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "kubernetes", 3 | "hosts": [ 4 | "127.0.0.1", 5 | "{{MASTER_IP}}", 6 | "10.68.0.1", 7 | "kubernetes", 8 | "kubernetes.default", 9 | "kubernetes.default.svc", 10 | "kubernetes.default.svc.cluster", 11 | "kubernetes.default.svc.cluster.local" 12 | ], 13 | "key": { 14 | "algo": "rsa", 15 | "size": 2048 16 | }, 17 | "names": [ 18 | { 19 | "C": "CN", 20 | "ST": "Beijing", 21 | "L": "XS", 22 | "O": "k8s", 23 | "OU": "System" 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /kubernetes-with-ca/master-node/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Etcd Server 3 | After=network.target 4 | After=network-online.target 5 | Wants=network-online.target 6 | Documentation=https://github.com/coreos 7 | 8 | [Service] 9 | Type=notify 10 | WorkingDirectory=/var/lib/etcd/ 11 | ExecStart={{BIN_PATH}}/etcd \ 12 | --name={{NODE_IP}} \ 13 | --listen-client-urls=https://{{NODE_IP}}:2379,http://127.0.0.1:2379 \ 14 | --advertise-client-urls=https://{{NODE_IP}}:2379 \ 15 | --data-dir=/var/lib/etcd \ 16 | --listen-peer-urls=https://{{NODE_IP}}:2380 \ 17 | --initial-advertise-peer-urls=https://{{NODE_IP}}:2380 \ 18 | --cert-file=/etc/kubernetes/ca/etcd/etcd.pem \ 19 | --key-file=/etc/kubernetes/ca/etcd/etcd-key.pem \ 20 | --peer-cert-file=/etc/kubernetes/ca/etcd/etcd.pem \ 21 | --peer-key-file=/etc/kubernetes/ca/etcd/etcd-key.pem \ 22 | --trusted-ca-file=/etc/kubernetes/ca/ca.pem \ 23 | --peer-trusted-ca-file=/etc/kubernetes/ca/ca.pem 24 | Restart=on-failure 25 | RestartSec=5 26 | LimitNOFILE=65536 27 | 28 | [Install] 29 | WantedBy=multi-user.target 30 | -------------------------------------------------------------------------------- /kubernetes-with-ca/master-node/kube-apiserver.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | 
--------------------------------------------------------------------------------
/kubernetes-with-ca/master-node/kube-apiserver.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes API Server
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | After=network.target
5 | [Service]
6 | ExecStart={{BIN_PATH}}/kube-apiserver \
7 |   --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
8 |   --insecure-bind-address=127.0.0.1 \
9 |   --kubelet-https=true \
10 |   --bind-address={{NODE_IP}} \
11 |   --authorization-mode=Node,RBAC \
12 |   --runtime-config=rbac.authorization.k8s.io/v1 \
13 |   --enable-bootstrap-token-auth \
14 |   --token-auth-file=/etc/kubernetes/ca/kubernetes/token.csv \
15 |   --tls-cert-file=/etc/kubernetes/ca/kubernetes/kubernetes.pem \
16 |   --tls-private-key-file=/etc/kubernetes/ca/kubernetes/kubernetes-key.pem \
17 |   --client-ca-file=/etc/kubernetes/ca/ca.pem \
18 |   --service-account-key-file=/etc/kubernetes/ca/ca-key.pem \
19 |   --etcd-cafile=/etc/kubernetes/ca/ca.pem \
20 |   --etcd-certfile=/etc/kubernetes/ca/kubernetes/kubernetes.pem \
21 |   --etcd-keyfile=/etc/kubernetes/ca/kubernetes/kubernetes-key.pem \
22 |   --service-cluster-ip-range=10.68.0.0/16 \
23 |   --service-node-port-range=20000-40000 \
24 |   --etcd-servers={{ETCD_ENDPOINTS}} \
25 |   --enable-swagger-ui=true \
26 |   --allow-privileged=true \
27 |   --audit-log-maxage=30 \
28 |   --audit-log-maxbackup=3 \
29 |   --audit-log-maxsize=100 \
30 |   --audit-log-path=/var/lib/audit.log \
31 |   --event-ttl=1h \
32 |   --v=2
33 | Restart=on-failure
34 | RestartSec=5
35 | Type=notify
36 | LimitNOFILE=65536
37 | [Install]
38 | WantedBy=multi-user.target
39 | 
--------------------------------------------------------------------------------
/kubernetes-with-ca/master-node/kube-controller-manager.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Controller Manager
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | [Service]
5 | ExecStart={{BIN_PATH}}/kube-controller-manager \
6 |   --address=127.0.0.1 \
7 |   --master=http://127.0.0.1:8080 \
8 |   --allocate-node-cidrs=true \
9 |   --service-cluster-ip-range=10.68.0.0/16 \
10 |   --cluster-cidr=172.20.0.0/16 \
11 |   --cluster-name=kubernetes \
12 |   --leader-elect=true \
13 |   --cluster-signing-cert-file=/etc/kubernetes/ca/ca.pem \
14 |   --cluster-signing-key-file=/etc/kubernetes/ca/ca-key.pem \
15 |   --service-account-private-key-file=/etc/kubernetes/ca/ca-key.pem \
16 |   --root-ca-file=/etc/kubernetes/ca/ca.pem \
17 |   --v=2
18 | Restart=on-failure
19 | RestartSec=5
20 | [Install]
21 | WantedBy=multi-user.target
22 | 
--------------------------------------------------------------------------------
/kubernetes-with-ca/master-node/kube-scheduler.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Scheduler
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | 
5 | [Service]
6 | ExecStart={{BIN_PATH}}/kube-scheduler \
7 |   --address=127.0.0.1 \
8 |   --master=http://127.0.0.1:8080 \
9 |   --leader-elect=true \
10 |   --v=2
11 | Restart=on-failure
12 | RestartSec=5
13 | [Install]
14 | WantedBy=multi-user.target
15 | 
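With etcd healthy, the three master units above can be brought up together. A minimal sketch, assuming the rendered unit files have already been copied into systemd's unit directory:
```bash
systemctl daemon-reload
systemctl enable kube-apiserver kube-controller-manager kube-scheduler
systemctl start kube-apiserver kube-controller-manager kube-scheduler

# tail a component's log if anything fails to come up
journalctl -u kube-apiserver -f

# the insecure port bound to 127.0.0.1:8080 should now answer
kubectl --server=http://127.0.0.1:8080 get componentstatuses
```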
--------------------------------------------------------------------------------
/kubernetes-with-ca/services/kube-dashboard.yaml:
--------------------------------------------------------------------------------
1 | # Copyright 2017 The Kubernetes Authors.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | #     http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 | 
15 | # Configuration to deploy release version of the Dashboard UI compatible with
16 | # Kubernetes 1.8.
17 | #
18 | # Example usage: kubectl create -f <this_file>
19 | 
20 | # ------------------- Dashboard Secret ------------------- #
21 | 
22 | apiVersion: v1
23 | kind: Secret
24 | metadata:
25 |   labels:
26 |     k8s-app: kubernetes-dashboard
27 |   name: kubernetes-dashboard-certs
28 |   namespace: kube-system
29 | type: Opaque
30 | 
31 | ---
32 | # ------------------- Dashboard Service Account ------------------- #
33 | 
34 | apiVersion: v1
35 | kind: ServiceAccount
36 | metadata:
37 |   labels:
38 |     k8s-app: kubernetes-dashboard
39 |   name: kubernetes-dashboard
40 |   namespace: kube-system
41 | 
42 | ---
43 | # ------------------- Dashboard Role & Role Binding ------------------- #
44 | 
45 | kind: Role
46 | apiVersion: rbac.authorization.k8s.io/v1
47 | metadata:
48 |   name: kubernetes-dashboard-minimal
49 |   namespace: kube-system
50 | rules:
51 |   # Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
52 | - apiGroups: [""]
53 |   resources: ["secrets"]
54 |   verbs: ["create"]
55 |   # Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
56 | - apiGroups: [""]
57 |   resources: ["configmaps"]
58 |   verbs: ["create"]
59 |   # Allow Dashboard to get, update and delete Dashboard exclusive secrets.
60 | - apiGroups: [""]
61 |   resources: ["secrets"]
62 |   resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
63 |   verbs: ["get", "update", "delete"]
64 |   # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
65 | - apiGroups: [""]
66 |   resources: ["configmaps"]
67 |   resourceNames: ["kubernetes-dashboard-settings"]
68 |   verbs: ["get", "update"]
69 |   # Allow Dashboard to get metrics from heapster.
70 | - apiGroups: [""]
71 |   resources: ["services"]
72 |   resourceNames: ["heapster"]
73 |   verbs: ["proxy"]
74 | - apiGroups: [""]
75 |   resources: ["services/proxy"]
76 |   resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
77 |   verbs: ["get"]
78 | 
79 | ---
80 | apiVersion: rbac.authorization.k8s.io/v1
81 | kind: RoleBinding
82 | metadata:
83 |   name: kubernetes-dashboard-minimal
84 |   namespace: kube-system
85 | roleRef:
86 |   apiGroup: rbac.authorization.k8s.io
87 |   kind: Role
88 |   name: kubernetes-dashboard-minimal
89 | subjects:
90 | - kind: ServiceAccount
91 |   name: kubernetes-dashboard
92 |   namespace: kube-system
93 | 
94 | ---
95 | # ------------------- Dashboard Deployment ------------------- #
96 | 
97 | kind: Deployment
98 | apiVersion: apps/v1beta2
99 | metadata:
100 |   labels:
101 |     k8s-app: kubernetes-dashboard
102 |   name: kubernetes-dashboard
103 |   namespace: kube-system
104 | spec:
105 |   replicas: 1
106 |   revisionHistoryLimit: 10
107 |   selector:
108 |     matchLabels:
109 |       k8s-app: kubernetes-dashboard
110 |   template:
111 |     metadata:
112 |       labels:
113 |         k8s-app: kubernetes-dashboard
114 |     spec:
115 |       containers:
116 |       - name: kubernetes-dashboard
117 |         image: registry.cn-hangzhou.aliyuncs.com/imooc/kubernetes-dashboard-amd64:v1.8.1
118 |         ports:
119 |         - containerPort: 8443
120 |           protocol: TCP
121 |         args:
122 |           - --auto-generate-certificates
123 |           # Uncomment the following line to manually specify Kubernetes API server Host
124 |           # If not specified, Dashboard will attempt to auto discover the API server and connect
125 |           # to it. Uncomment only if the default does not work.
126 |           # - --apiserver-host=http://my-address:port
127 |         volumeMounts:
128 |         - name: kubernetes-dashboard-certs
129 |           mountPath: /certs
130 |           # Create on-disk volume to store exec logs
131 |         - mountPath: /tmp
132 |           name: tmp-volume
133 |         livenessProbe:
134 |           httpGet:
135 |             scheme: HTTPS
136 |             path: /
137 |             port: 8443
138 |           initialDelaySeconds: 30
139 |           timeoutSeconds: 30
140 |       volumes:
141 |       - name: kubernetes-dashboard-certs
142 |         secret:
143 |           secretName: kubernetes-dashboard-certs
144 |       - name: tmp-volume
145 |         emptyDir: {}
146 |       serviceAccountName: kubernetes-dashboard
147 |       # Comment the following tolerations if Dashboard must not be deployed on master
148 |       tolerations:
149 |       - key: node-role.kubernetes.io/master
150 |         effect: NoSchedule
151 | 
152 | ---
153 | # ------------------- Dashboard Service ------------------- #
154 | 
155 | kind: Service
156 | apiVersion: v1
157 | metadata:
158 |   labels:
159 |     k8s-app: kubernetes-dashboard
160 |   name: kubernetes-dashboard
161 |   namespace: kube-system
162 | spec:
163 |   ports:
164 |   - port: 443
165 |     targetPort: 8443
166 |     nodePort: 9443  # note: lies outside the --service-node-port-range (20000-40000) set in kube-apiserver.service; adjust one or the other
167 |   selector:
168 |     k8s-app: kubernetes-dashboard
169 |   type: NodePort
170 | 
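A quick sketch of deploying and reaching the dashboard defined above, assuming the file is applied from this directory and the NodePort stays at 9443 (the node IP is a placeholder):
```bash
kubectl create -f kube-dashboard.yaml
kubectl -n kube-system get pods -l k8s-app=kubernetes-dashboard   # wait for Running
kubectl -n kube-system get svc kubernetes-dashboard               # confirm the NodePort

# then browse to https://<node-ip>:9443 and sign in, e.g. with a service-account token
```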
--------------------------------------------------------------------------------
/kubernetes-with-ca/services/kube-dns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 |   name: kube-dns
6 |   namespace: kube-system
7 |   labels:
8 |     addonmanager.kubernetes.io/mode: EnsureExists
9 | 
10 | ---
11 | apiVersion: v1
12 | kind: ServiceAccount
13 | metadata:
14 |   name: kube-dns
15 |   namespace: kube-system
16 |   labels:
17 |     addonmanager.kubernetes.io/mode: Reconcile
18 | 
19 | ---
20 | apiVersion: v1
21 | kind: Service
22 | metadata:
23 |   name: kube-dns
24 |   namespace: kube-system
25 |   labels:
26 |     k8s-app: kube-dns
27 |     addonmanager.kubernetes.io/mode: Reconcile
28 |     kubernetes.io/name: "KubeDNS"
29 | spec:
30 |   selector:
31 |     k8s-app: kube-dns
32 |   clusterIP: 10.68.0.2
33 |   ports:
34 |   - name: dns
35 |     port: 53
36 |     protocol: UDP
37 |   - name: dns-tcp
38 |     port: 53
39 |     protocol: TCP
40 | 
41 | ---
42 | apiVersion: apps/v1
43 | kind: Deployment
44 | metadata:
45 |   name: kube-dns
46 |   namespace: kube-system
47 |   labels:
48 |     k8s-app: kube-dns
49 |     addonmanager.kubernetes.io/mode: Reconcile
50 | spec:
51 |   strategy:
52 |     rollingUpdate:
53 |       maxSurge: 10%
54 |       maxUnavailable: 0
55 |   selector:
56 |     matchLabels:
57 |       k8s-app: kube-dns
58 |   template:
59 |     metadata:
60 |       labels:
61 |         k8s-app: kube-dns
62 |       annotations:
63 |         scheduler.alpha.kubernetes.io/critical-pod: ''
64 |     spec:
65 |       tolerations:
66 |       - key: "CriticalAddonsOnly"
67 |         operator: "Exists"
68 |       volumes:
69 |       - name: kube-dns-config
70 |         configMap:
71 |           name: kube-dns
72 |           optional: true
73 |       containers:
74 |       - name: kubedns
75 |         image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-kube-dns-amd64:1.14.5
76 |         resources:
77 |           # TODO: Set memory limits when we've profiled the container for large
78 |           # clusters, then set request = limit to keep this container in
79 |           # guaranteed class. Currently, this container falls into the
80 |           # "burstable" category so the kubelet doesn't backoff from restarting it.
81 |           limits:
82 |             memory: 170Mi
83 |           requests:
84 |             cpu: 100m
85 |             memory: 70Mi
86 |         livenessProbe:
87 |           httpGet:
88 |             path: /healthcheck/kubedns
89 |             port: 10054
90 |             scheme: HTTP
91 |           initialDelaySeconds: 60
92 |           timeoutSeconds: 5
93 |           successThreshold: 1
94 |           failureThreshold: 5
95 |         readinessProbe:
96 |           httpGet:
97 |             path: /readiness
98 |             port: 8081
99 |             scheme: HTTP
100 |           # we poll on pod startup for the Kubernetes master service and
101 |           # only setup the /readiness HTTP server once that's available.
102 |           initialDelaySeconds: 3
103 |           timeoutSeconds: 5
104 |         args:
105 |         - --domain=cluster.local.
106 |         - --dns-port=10053
107 |         - --config-dir=/kube-dns-config
108 |         - --v=2
109 |         env:
110 |         - name: PROMETHEUS_PORT
111 |           value: "10055"
112 |         ports:
113 |         - containerPort: 10053
114 |           name: dns-local
115 |           protocol: UDP
116 |         - containerPort: 10053
117 |           name: dns-tcp-local
118 |           protocol: TCP
119 |         - containerPort: 10055
120 |           name: metrics
121 |           protocol: TCP
122 |         volumeMounts:
123 |         - name: kube-dns-config
124 |           mountPath: /kube-dns-config
125 |       - name: dnsmasq
126 |         image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-dnsmasq-nanny-amd64:1.14.5
127 |         livenessProbe:
128 |           httpGet:
129 |             path: /healthcheck/dnsmasq
130 |             port: 10054
131 |             scheme: HTTP
132 |           initialDelaySeconds: 60
133 |           timeoutSeconds: 5
134 |           successThreshold: 1
135 |           failureThreshold: 5
136 |         args:
137 |         - -v=2
138 |         - -logtostderr
139 |         - -configDir=/etc/k8s/dns/dnsmasq-nanny
140 |         - -restartDnsmasq=true
141 |         - --
142 |         - -k
143 |         - --cache-size=1000
144 |         - --log-facility=-
145 |         - --server=/cluster.local./127.0.0.1#10053
146 |         - --server=/in-addr.arpa/127.0.0.1#10053
147 |         - --server=/ip6.arpa/127.0.0.1#10053
148 |         ports:
149 |         - containerPort: 53
150 |           name: dns
151 |           protocol: UDP
152 |         - containerPort: 53
153 |           name: dns-tcp
154 |           protocol: TCP
155 |         # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
156 |         resources:
157 |           requests:
158 |             cpu: 150m
159 |             memory: 20Mi
160 |         volumeMounts:
161 |         - name: kube-dns-config
162 |           mountPath: /etc/k8s/dns/dnsmasq-nanny
163 |       - name: sidecar
164 |         image: registry.cn-hangzhou.aliyuncs.com/imooc/k8s-dns-sidecar-amd64:1.14.5
165 |         livenessProbe:
166 |           httpGet:
167 |             path: /metrics
168 |             port: 10054
169 |             scheme: HTTP
170 |           initialDelaySeconds: 60
171 |           timeoutSeconds: 5
172 |           successThreshold: 1
173 |           failureThreshold: 5
174 |         args:
175 |         - --v=2
176 |         - --logtostderr
177 |         - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A
178 |         - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A
179 |         ports:
180 |         - containerPort: 10054
181 |           name: metrics
182 |           protocol: TCP
183 |         resources:
184 |           requests:
185 |             memory: 20Mi
186 |             cpu: 10m
187 |       dnsPolicy: Default  # Don't use cluster DNS.
188 |       serviceAccountName: kube-dns
189 | 
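Once applied, kube-dns should answer on the cluster IP 10.68.0.2 configured above (the same address the kubelet passes via --cluster-dns). A minimal verification sketch; busybox:1.28 is an assumption, chosen because its nslookup behaves well for this test:
```bash
kubectl create -f kube-dns.yaml
kubectl -n kube-system get pods -l k8s-app=kube-dns   # wait for 3/3 Running

# resolve the API server's service name from inside the cluster
kubectl run dns-test -it --rm --restart=Never --image=busybox:1.28 \
  -- nslookup kubernetes.default.svc.cluster.local
```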
--------------------------------------------------------------------------------
/kubernetes-with-ca/worker-node/10-calico.conf:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "calico-k8s-network",
3 |   "cniVersion": "0.1.0",
4 |   "type": "calico",
5 |   "etcd_endpoints": "{{ETCD_ENDPOINTS}}",
6 |   "etcd_key_file": "/etc/kubernetes/ca/calico/calico-key.pem",
7 |   "etcd_cert_file": "/etc/kubernetes/ca/calico/calico.pem",
8 |   "etcd_ca_cert_file": "/etc/kubernetes/ca/ca.pem",
9 |   "log_level": "info",
10 |   "ipam": {
11 |     "type": "calico-ipam"
12 |   },
13 |   "kubernetes": {
14 |     "kubeconfig": "/etc/kubernetes/kubelet.kubeconfig"
15 |   }
16 | }
17 | 
--------------------------------------------------------------------------------
/kubernetes-with-ca/worker-node/kube-proxy.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Kube-Proxy Server
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | After=network.target
5 | [Service]
6 | WorkingDirectory=/var/lib/kube-proxy
7 | ExecStart={{BIN_PATH}}/kube-proxy \
8 |   --bind-address={{NODE_IP}} \
9 |   --hostname-override={{NODE_IP}} \
10 |   --kubeconfig=/etc/kubernetes/kube-proxy.kubeconfig \
11 |   --logtostderr=true \
12 |   --v=2
13 | Restart=on-failure
14 | RestartSec=5
15 | LimitNOFILE=65536
16 | 
17 | [Install]
18 | WantedBy=multi-user.target
19 | 
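The kubelet unit below bootstraps its client certificate via --experimental-bootstrap-kubeconfig, so the bootstrap token's user needs node-bootstrapper rights and the node's CSR must be approved on the master. A minimal sketch; the kubelet-bootstrap user name is an assumption matching the usual token.csv convention:
```bash
# on the master: allow the bootstrap token user to request node certificates
kubectl create clusterrolebinding kubelet-bootstrap \
  --clusterrole=system:node-bootstrapper \
  --user=kubelet-bootstrap

# after the worker's kubelet starts, approve its pending CSR
kubectl get csr
kubectl certificate approve <csr-name>
```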
--------------------------------------------------------------------------------
/kubernetes-with-ca/worker-node/kubelet.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Kubelet
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | After=docker.service
5 | Requires=docker.service
6 | 
7 | [Service]
8 | WorkingDirectory=/var/lib/kubelet
9 | ExecStart={{BIN_PATH}}/kubelet \
10 |   --address={{NODE_IP}} \
11 |   --hostname-override={{NODE_IP}} \
12 |   --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/imooc/pause-amd64:3.0 \
13 |   --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
14 |   --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
15 |   --cert-dir=/etc/kubernetes/ca \
16 |   --hairpin-mode hairpin-veth \
17 |   --network-plugin=cni \
18 |   --cni-conf-dir=/etc/cni/net.d \
19 |   --cni-bin-dir={{BIN_PATH}} \
20 |   --cluster-dns=10.68.0.2 \
21 |   --cluster-domain=cluster.local. \
22 |   --allow-privileged=true \
23 |   --fail-swap-on=false \
24 |   --logtostderr=true \
25 |   --v=2
26 | # kubelet's cAdvisor listens on port 4194 on all interfaces by default; the iptables rules below restrict access to internal networks only
27 | ExecStartPost=/sbin/iptables -A INPUT -s 10.0.0.0/8 -p tcp --dport 4194 -j ACCEPT
28 | ExecStartPost=/sbin/iptables -A INPUT -s 172.16.0.0/12 -p tcp --dport 4194 -j ACCEPT
29 | ExecStartPost=/sbin/iptables -A INPUT -s 192.168.0.0/16 -p tcp --dport 4194 -j ACCEPT
30 | ExecStartPost=/sbin/iptables -A INPUT -p tcp --dport 4194 -j DROP
31 | Restart=on-failure
32 | RestartSec=5
33 | 
34 | [Install]
35 | WantedBy=multi-user.target
36 | 
--------------------------------------------------------------------------------
/service-config/api-gateway.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app: api-gateway
6 |   name: api-gateway
7 | spec:
8 |   ports:
9 |   - port: 80
10 |     protocol: TCP
11 |     targetPort: 8080
12 |     nodePort: 80  # note: lies outside the --service-node-port-range (20000-40000) set in kube-apiserver.service; adjust one or the other
13 |   selector:
14 |     app: api-gateway
15 |   type: NodePort
16 | ---
17 | apiVersion: apps/v1beta1
18 | kind: Deployment
19 | metadata:
20 |   name: api-gateway-deployment
21 | spec:
22 |   replicas: 1
23 |   template:
24 |     metadata:
25 |       labels:
26 |         app: api-gateway
27 |     spec:
28 |       containers:
29 |       - name: api-gateway
30 |         image: {{HUB}}/micro-service/api-gateway-zuul:latest
31 |         ports:
32 |         - containerPort: 8080
33 | 
34 | 
--------------------------------------------------------------------------------
/service-config/course-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app: course-service
6 |   name: course-service
7 | spec:
8 |   ports:
9 |   - port: 8081
10 |     protocol: TCP
11 |     targetPort: 8081
12 |   selector:
13 |     app: course-service
14 |   type: ClusterIP
15 | ---
16 | apiVersion: apps/v1beta1
17 | kind: Deployment
18 | metadata:
19 |   name: course-service-deployment
20 | spec:
21 |   replicas: 1
22 |   template:
23 |     metadata:
24 |       labels:
25 |         app: course-service
26 |     spec:
27 |       containers:
28 |       - name: course-service
29 |         image: {{HUB}}/micro-service/course-service:latest
30 |         ports:
31 |         - containerPort: 20880
32 |       - name: course-edge-service
33 |         image: {{HUB}}/micro-service/course-edge-service:latest
34 |         ports:
35 |         - containerPort: 8081
36 | 
37 | 
--------------------------------------------------------------------------------
/service-config/message-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app: message-service
6 |   name: message-service
7 | spec:
8 |   ports:
9 |   - port: 9090
10 |     protocol: TCP
11 |     targetPort: 9090
12 |   selector:
13 |     app: message-service
14 |   type: ClusterIP
15 | ---
16 | apiVersion: apps/v1beta1
17 | kind: Deployment
18 | metadata:
19 |   name: message-service-deployment
20 | spec:
21 |   replicas: 1
22 |   template:
23 |     metadata:
24 |       labels:
25 |         app: message-service
26 |     spec:
27 |       containers:
28 |       - name: message-service
29 |         image: {{HUB}}/micro-service/message-service:latest
30 |         ports:
31 |         - containerPort: 9090
32 | 
33 | 
--------------------------------------------------------------------------------
/service-config/user-service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 |   labels:
5 |     app: user-service
6 |   name: user-service
7 |   namespace: default
8 | spec:
9 |   ports:
10 |   - name: user-edge-service-port
11 |     port: 8082
12 |     protocol: TCP
13 |     targetPort: 8082
14 |   - name: user-service-port
15 |     port: 7911
16 |     protocol: TCP
17 |     targetPort: 7911
18 |   selector:
19 |     app: user-service
20 |   sessionAffinity: None
21 |   type: ClusterIP
22 | ---
23 | apiVersion: apps/v1beta1
24 | kind: Deployment
25 | metadata:
26 |   name: user-service-deployment
27 | spec:
28 |   replicas: 1
29 |   template:
30 |     metadata:
31 |       labels:
32 |         app: user-service
33 |     spec:
34 |       containers:
35 |       - name: user-service
36 |         image: {{HUB}}/micro-service/user-service:latest
37 |         ports:
38 |         - containerPort: 7911
39 |       - name: user-edge-service
40 |         image: {{HUB}}/micro-service/user-edge-service:latest
41 |         ports:
42 |         - containerPort: 8082
43 | 
44 | 
--------------------------------------------------------------------------------
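Once the {{HUB}} placeholders have been substituted (the assumption here is that gen-config.sh fills them from config.properties, as it does for the other templates), the four service configs can be applied in dependency order. A minimal sketch, assuming a kubeconfig that reaches the cluster; docs/4-microservice-deploy.md covers the full walkthrough:
```bash
kubectl create -f service-config/message-service.yaml
kubectl create -f service-config/user-service.yaml
kubectl create -f service-config/course-service.yaml
kubectl create -f service-config/api-gateway.yaml

kubectl get pods -o wide   # wait until every pod reports Running
kubectl get svc            # note the api-gateway NodePort for external access
```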