├── .gitignore ├── README.md ├── cluster ├── etcd.sh ├── halm │ └── README.md ├── master │ ├── kube-apiserver.sh │ ├── kube-controller-manager.sh │ ├── kube-install.sh │ ├── kube-master.sh │ ├── kube-scheduler.sh │ └── kube-ssl-master.sh └── node │ ├── docker.sh │ ├── flannel.sh │ ├── kube-install.sh │ ├── kube-node.sh │ ├── kube-proxy.sh │ ├── kube-ssl-node.sh │ └── kubelet.sh ├── demo ├── kubernetes │ ├── host-storage.yaml │ ├── mysql-service.yaml │ ├── mysql-statefulset.yaml │ ├── redis-deployment.yaml │ ├── redis-service.yaml │ ├── regsecret.md │ ├── web-deployment.yaml │ └── web-service.yaml ├── pom.xml ├── thrift-server │ ├── pom.xml │ └── src │ │ └── thrift │ │ └── Hello.thrift └── web │ ├── docker-compose.yml │ ├── pom.xml │ ├── sql │ └── db.sql │ └── src │ └── main │ ├── docker │ └── Dockerfile │ ├── java │ └── com │ │ └── szss │ │ └── demo │ │ ├── Application.java │ │ ├── config │ │ └── WebConfig.java │ │ ├── controller │ │ ├── IndexController.java │ │ └── LoginController.java │ │ ├── entity │ │ └── User.java │ │ ├── init │ │ └── DBInitializer.java │ │ ├── interceptor │ │ └── AuthInterceptor.java │ │ ├── mapper │ │ └── UserMapper.java │ │ └── service │ │ └── UserService.java │ └── resources │ ├── application.yaml │ ├── mybatis-config.xml │ └── templates │ ├── index.ftl │ └── login.ftl ├── kubeadm ├── v.1.7.2 │ ├── README.md │ ├── image.sh │ ├── kubeadm-master.sh │ └── kubeadm-node.sh ├── v1.11.1 │ ├── README.md │ ├── image.sh │ └── kubeadm.sh ├── v1.8.4 │ ├── README.md │ ├── image.sh │ ├── kubeadm-reset.sh │ └── kubeadm.sh └── v1.9.1 │ ├── README.md │ ├── image.sh │ └── kubeadm.sh ├── kubespray └── README.md ├── rancher ├── README.md ├── nginx.conf ├── reset.sh ├── setup-db.sh ├── setup-ha-nginx-lb.sh ├── setup-ha.sh └── setup.sh ├── rke └── README.md └── yaml ├── aliyun-nas └── README.md ├── apollo └── README.md ├── busybox └── busybox.yaml ├── ceph ├── README.md ├── ceph-install.sh ├── ceph-secret.yaml └── cephfs-with-secret.yaml ├── cluster-monitoring-for-rancher ├── README.md ├── grafana.yaml ├── heapster.yaml └── influxdb.yaml ├── cluster-monitoring ├── README.md ├── grafana.yaml ├── heapster.yaml └── influxdb.yaml ├── dashboard ├── README.md └── kubernetes-dashboard.yaml ├── efk ├── README.md ├── es-controller.yaml ├── es-service.yaml ├── fluentd-es-ds.yaml ├── kibana-controller.yaml └── kibana-service.yaml ├── example ├── demo.yaml ├── demo1.yaml ├── hpe-k8s-mysql.yaml ├── nginx-deploy.yaml ├── nginx-pod.yaml ├── nginx-rc.yaml ├── nginx.yaml ├── rc-frontend.yaml ├── rc-redis-master.yaml ├── rc-redis-slave.yaml ├── redis.yaml ├── service-frontend.yaml ├── service-redis-master.yaml └── service-redis-slave.yaml ├── glusterfs ├── glusterfs-endpoints.yaml └── glusterfs-service.yaml ├── harbor └── README.md ├── ingress-nginx ├── app-deployment.yaml ├── app-ingress.yaml ├── app-service.yaml ├── default-backend-deployment.yaml ├── default-backend-service.yaml ├── nginx-ingress-controller-config-map.yaml ├── nginx-ingress-controller-deployment.yaml ├── nginx-ingress-controller-roles.yaml ├── nginx-ingress-controller-service.yaml ├── nginx-ingress-namespace.yaml └── nginx-ingress.yaml ├── kubedns ├── README.md ├── kubedns-cm.yaml ├── kubedns-controller.yaml ├── kubedns-sa.yaml └── kubedns-svc.yaml ├── mongo ├── mongo-standalone-cephfs.yaml └── mongo-standalone.yaml ├── mysql └── mysql.yaml ├── nginx ├── nginx-s.yaml ├── nginx.yaml └── test.yaml ├── rabbitmq └── cluster │ ├── README.md │ ├── docker │ ├── Dockerfile │ └── plugins │ │ ├── autocluster-0.10.0.ez │ │ └── 
rabbitmq_aws-0.10.0.ez │ └── yaml │ ├── rabbitmq-deploy.yaml │ └── rabbitmq-svc.yaml ├── redis └── redis-sentinel │ ├── README.md │ ├── image │ ├── Dockerfile │ ├── redis-master.conf │ ├── redis-slave.conf │ └── run.sh │ ├── redis-master-pod.yaml │ ├── redis-sentinel-deploy.yaml │ ├── redis-sentinel-svc.yaml │ ├── redis-slave-statefulset.yaml │ └── redis-slave-svc.yaml ├── rocketmq └── README.md ├── storage-class ├── README.md ├── aliyun-nas-storageclass.yaml ├── clusterrole.yaml ├── clusterrolebinding.yaml ├── nfs-client-deploy.yaml └── serviceaccount.yaml ├── traefik ├── README.md ├── traefik-ds.yaml ├── traefik-ingress.yaml ├── traefik-rbac.yaml └── ui.yaml └── zookeeper └── README.md /.gitignore: --------------------------------------------------------------------------------
1 | # Compiled class file
2 | *.class
3 | 
4 | # Log file
5 | *.log
6 | 
7 | # BlueJ files
8 | *.ctxt
9 | 
10 | # Mobile Tools for Java (J2ME)
11 | .mtj.tmp/
12 | 
13 | # Package Files #
14 | *.jar
15 | *.war
16 | *.ear
17 | *.zip
18 | *.tar.gz
19 | *.rar
20 | 
21 | # virtual machine crash logs, see http://www.java.com/en/download/help/error_hotspot.xml
22 | hs_err_pid*
23 | 
24 | .idea
25 | *.iml
26 | *.ipr
27 | *.iws
28 | target
29 | 
-------------------------------------------------------------------------------- /README.md: --------------------------------------------------------------------------------
1 | # 1 Environment
2 | 
3 | At least three CentOS 7 virtual machines:
4 | 
5 | | VM name | IP |
6 | | ------------- |:-------------:|
7 | | node01 | 172.16.120.151|
8 | | node02 | 172.16.120.152|
9 | | node03 | 172.16.120.153|
10 | 
11 | 
12 | # 2 Install etcd
13 | 
14 | etcd.sh disables selinux and firewalld, downloads and installs etcd v3.2.4, and adds etcd to the PATH environment variable.
15 | 
16 | Run on node01:
17 | ```bash
18 | sh etcd.sh "node01" "172.16.120.151" "node01=http://172.16.120.151:2380,node02=http://172.16.120.152:2380,node03=http://172.16.120.153:2380"
19 | ```
20 | 
21 | Run on node02:
22 | ```bash
23 | sh etcd.sh "node02" "172.16.120.152" "node01=http://172.16.120.151:2380,node02=http://172.16.120.152:2380,node03=http://172.16.120.153:2380"
24 | ```
25 | 
26 | Run on node03:
27 | ```bash
28 | sh etcd.sh "node03" "172.16.120.153" "node01=http://172.16.120.151:2380,node02=http://172.16.120.152:2380,node03=http://172.16.120.153:2380"
29 | ```
30 | 
31 | - Argument 1 is the name of the current etcd node, ETCD_NAME
32 | - Argument 2 is the IP address of the current etcd node, ETCD_LISTEN_IP
33 | - Argument 3 is the etcd cluster address list, ETCD_INITIAL_CLUSTER
34 | 
35 | >Note: if the etcd download is slow, you can download etcd in advance and place it in the directory containing etcd.sh. Download URL: https://github.com/coreos/etcd/releases/download/v3.2.4/etcd-v3.2.4-linux-amd64.tar.gz
36 | 
37 | 
38 | Verify the installation:
39 | ```bash
40 | etcdctl member list
41 | etcdctl cluster-health
42 | ```
43 | If the etcdctl command cannot be found, run the command below so that ~/.bash_profile takes effect:
44 | ```bash
45 | source ~/.bash_profile
46 | ```
47 | 
48 | # 3 Install the kubernetes master
49 | master/kube-master.sh disables selinux and firewalld, downloads and installs kubernetes v1.6.7, generates service certificates for the apiserver, controller manager and kube-scheduler, and enables TLS for kubernetes.
50 | 
51 | Run on node01:
52 | ```bash
53 | sh kube-master.sh "172.16.120.151" "k8s-node01" "http://172.16.120.151:2379,http://172.16.120.152:2379,http://172.16.120.153:2379"
54 | ```
55 | 
56 | - Argument 1, MASTER_ADDRESS, is the address of the master node
57 | - Argument 2, MASTER_DNS, is the DNS name of the master
58 | - Argument 3, ETCD_SERVERS, is the etcd cluster address list
59 | - Argument 4, SERVICE_CLUSTER_IP_RANGE, is the cluster IP range kubernetes allocates service IPs from; the default is 10.0.0.0/24
60 | - Argument 5, MASTER_CLUSTER_IP, is the cluster IP assigned to the master; the default is 10.0.0.1
61 | 
62 | Verify the installation:
63 | ```bash
64 | # kubectl get componentstatuses
65 | NAME                 STATUS    MESSAGE              ERROR
66 | scheduler            Healthy   ok
67 | controller-manager   Healthy   ok
68 | etcd-0               Healthy   {"health": "true"}
69 | etcd-2               Healthy   {"health": "true"}
70 | etcd-1               Healthy   {"health": "true"}
71 | ```
72 | 
73 | 
74 | >Note: if the kubernetes download is slow, you can download kubernetes in advance and place it in the directory containing master/kube-master.sh. Download URL: https://github.com/kubernetes/kubernetes/releases/download/v1.6.7/kubernetes.tar.gz
75 | 
76 | # 4 Install kubernetes nodes
77 | node/kube-node.sh disables selinux and firewalld, downloads and installs kubernetes v1.6.7, installs the flannel, docker, kubelet and kube-proxy services, fetches the root certificate from the master node, generates service certificates for the kubelet and kube-proxy, and enables TLS for kubernetes.
78 | 
79 | Run on node02:
80 | ```bash
81 | sh kube-node.sh "172.16.120.152" "172.16.120.151" "root" "123456" "http://172.16.120.151:2379,http://172.16.120.152:2379,http://172.16.120.153:2379"
82 | ```
83 | 
84 | Run on node03:
85 | ```bash
86 | sh kube-node.sh "172.16.120.153" "172.16.120.151" "root" "123456" "http://172.16.120.151:2379,http://172.16.120.152:2379,http://172.16.120.153:2379"
87 | ```
88 | 
89 | - Argument 1, NODE_ADDRESS, is the node IP
90 | - Argument 2, MASTER_ADDRESS, is the master node IP
91 | - Argument 3, MASTER_USER, is the master node username
92 | - Argument 4, MASTER_PASSWORD, is the master node login password
93 | - Argument 5, ETCD_SERVERS, is the etcd cluster address list
94 | - Argument 6, FLANNEL_NET, is the flannel subnet; the default is 172.18.0.0/16
95 | - Argument 7, DOCKER_OPTS, is the docker daemon option string
96 | - Argument 8, KUBELET_POD_INFRA_CONTAINER, is the image name of the kubelet pod infrastructure container; the default is hub.c.163.com/k8s163/pause-amd64:3.0
97 | 
98 | Verify the installation:
99 | ```bash
100 | # kubectl get nodes
101 | NAME             STATUS    AGE       VERSION
102 | 172.16.120.152   Ready     8h        v1.6.7
103 | 172.16.120.153   Ready     5h        v1.6.7
104 | ```
105 | 
106 | >Note: if the kubernetes and flannel downloads are slow, you can download them in advance and place them in the directory containing node/kube-node.sh. Flannel download URL: https://github.com/coreos/flannel/releases/download/v0.7.1/flannel-v0.7.1-linux-amd64.tar.gz
107 | 
108 | Because kubernetes manages Pods rather than individual docker containers, when the kubelet creates a Pod it also starts an image named google_containers/pause to implement the Pod abstraction. That image lives in Google's registry http://gcr.io, so it normally has to be pulled and pushed to a private Docker Registry; these scripts instead use the setting --pod-infra-container-image=hub.c.163.com/k8s163/pause-amd64:3.0.
109 | 
110 | 
111 | References:
112 | http://tonybai.com/2017/07/20/fix-cannot-access-dashboard-in-k8s-1-6-4/
113 | 
114 | https://stackoverflow.com/questions/44469277/var-log-kube-apiserver-log-not-work-for-kubernetes1-6
115 | 
116 | https://github.com/opsnull/follow-me-install-kubernetes-cluster
117 | 
118 | http://www.cnblogs.com/breg/p/5923604.html
119 | 
120 | https://wiki.shileizcc.com/display/KUB/Kubernetes+HA+Cluster+Build#KubernetesHAClusterBuild-配置CA证书和私钥
121 | 
-------------------------------------------------------------------------------- /cluster/etcd.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | #Argument 1 is the name of the current etcd node, ETCD_NAME
4 | #Argument 2 is the IP address of the current etcd node, ETCD_LISTEN_IP
5 | #Argument 3 is the etcd cluster address list, ETCD_INITIAL_CLUSTER
6 | #Examples:
7 | #sh etcd.sh "node01" "172.16.120.151" "node01=http://172.16.120.151:2380,node02=http://172.16.120.152:2380,node03=http://172.16.120.153:2380"
8 | #sh etcd.sh "node02" "172.16.120.152" "node01=http://172.16.120.151:2380,node02=http://172.16.120.152:2380,node03=http://172.16.120.153:2380"
9 | #sh etcd.sh "node03" "172.16.120.153" "node01=http://172.16.120.151:2380,node02=http://172.16.120.152:2380,node03=http://172.16.120.153:2380"
10 | 
11 | echo '============================================================'
12 | echo '====================Disable selinux and firewalld...========'
13 | echo '============================================================'
14 | if [ "$(getenforce)" = "Enforcing" ]; then
15 | setenforce 0
16 | fi
17 | systemctl disable firewalld
18 | systemctl stop firewalld
19 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
20 | 
21 | echo "Disable selinux and firewalld success!"
22 | 
23 | echo '============================================================'
24 | echo '====================Download etcd... ======================='
25 | echo '============================================================'
26 | ETCD_VERSION=v3.2.4
27 | echo "etcd version is $ETCD_VERSION"
28 | ETCD_FILE=etcd-$ETCD_VERSION-linux-amd64
29 | echo "etcd archive is $ETCD_FILE"
30 | 
31 | if [ ! -f "./$ETCD_FILE.tar.gz" ]; then
32 | wget https://github.com/coreos/etcd/releases/download/$ETCD_VERSION/$ETCD_FILE.tar.gz
33 | fi
34 | 
35 | 
36 | echo '============================================================'
37 | echo '=====================Unpack etcd archive... ================'
38 | echo '============================================================'
39 | tar xzvf $ETCD_FILE.tar.gz
40 | 
41 | ETCD_BIN_DIR=/opt/kubernetes/bin
42 | ETCD_CFG_DIR=/opt/kubernetes/cfg
43 | mkdir -p $ETCD_BIN_DIR
44 | mkdir -p $ETCD_CFG_DIR
45 | 
46 | echo '============================================================'
47 | echo '=====================Install etcd... ======================='
48 | echo '============================================================'
49 | cp $ETCD_FILE/etcd $ETCD_BIN_DIR
50 | cp $ETCD_FILE/etcdctl $ETCD_BIN_DIR
51 | rm -rf $ETCD_FILE
52 | 
53 | sed -i 's/$PATH:/$PATH:\/opt\/kubernetes\/bin:/g' ~/.bash_profile
54 | # reload ~/.bash_profile so the new PATH entry takes effect in this shell
55 | source ~/.bash_profile
56 | 
57 | ETCD_DATA_DIR=/var/lib/etcd
58 | mkdir -p ${ETCD_DATA_DIR}
59 | 
60 | 
61 | ETCD_NAME=${1:-"default"}
62 | ETCD_LISTEN_IP=${2:-"0.0.0.0"}
63 | ETCD_INITIAL_CLUSTER=${3:-}
64 | 
65 | echo 'Create /opt/kubernetes/cfg/etcd.conf ...'
66 | cat <<EOF >/opt/kubernetes/cfg/etcd.conf
67 | # [member]
68 | ETCD_NAME="${ETCD_NAME}"
69 | ETCD_DATA_DIR="${ETCD_DATA_DIR}/default.etcd"
70 | #ETCD_SNAPSHOT_COUNTER="10000"
71 | #ETCD_HEARTBEAT_INTERVAL="100"
72 | #ETCD_ELECTION_TIMEOUT="1000"
73 | ETCD_LISTEN_PEER_URLS="http://0.0.0.0:2380"
74 | ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"
75 | #ETCD_MAX_SNAPSHOTS="5"
76 | #ETCD_MAX_WALS="5"
77 | #ETCD_CORS=""
78 | #
79 | #[cluster]
80 | ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${ETCD_LISTEN_IP}:2380"
81 | # if you use different ETCD_NAME (e.g. test),
82 | # set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
83 | ETCD_INITIAL_CLUSTER="${ETCD_INITIAL_CLUSTER}"
84 | ETCD_INITIAL_CLUSTER_STATE="new"
85 | ETCD_INITIAL_CLUSTER_TOKEN="k8s-etcd-cluster"
86 | ETCD_ADVERTISE_CLIENT_URLS="http://${ETCD_LISTEN_IP}:2379"
87 | #ETCD_DISCOVERY=""
88 | #ETCD_DISCOVERY_SRV=""
89 | #ETCD_DISCOVERY_FALLBACK="proxy"
90 | #ETCD_DISCOVERY_PROXY=""
91 | #
92 | #[proxy]
93 | #ETCD_PROXY="off"
94 | #
95 | #[security]
96 | #ETCD_CA_FILE=""
97 | #ETCD_CERT_FILE=""
98 | #ETCD_KEY_FILE=""
99 | #ETCD_PEER_CA_FILE=""
100 | #ETCD_PEER_CERT_FILE=""
101 | #ETCD_PEER_KEY_FILE=""
102 | EOF
103 | 
104 | echo 'Create /usr/lib/systemd/system/etcd.service ...'
105 | cat <<EOF >/usr/lib/systemd/system/etcd.service
106 | [Unit]
107 | Description=Etcd Server
108 | After=network.target
109 | 
110 | [Service]
111 | Type=notify
112 | WorkingDirectory=${ETCD_DATA_DIR}
113 | EnvironmentFile=-/opt/kubernetes/cfg/etcd.conf
114 | # set GOMAXPROCS to number of processors
115 | ExecStart=/bin/bash -c "GOMAXPROCS=\$(nproc) /opt/kubernetes/bin/etcd"
116 | 
117 | 
118 | [Install]
119 | WantedBy=multi-user.target
120 | EOF
121 | 
122 | echo '============================================================'
123 | echo '===================Start etcd service... ==================='
124 | echo '============================================================'
125 | systemctl daemon-reload
126 | systemctl enable etcd
127 | systemctl restart etcd
128 | 
129 | echo 'The etcd service is started!'
130 | 
-------------------------------------------------------------------------------- /cluster/halm/README.md: --------------------------------------------------------------------------------
1 | # 1 Helm
2 | Helm manages packages of pre-configured Kubernetes resources.
3 | 
4 | # 2 Install and initialize
5 | ```
6 | wget https://storage.googleapis.com/kubernetes-helm/helm-v2.5.1-linux-amd64.tar.gz
7 | rm -rf linux-amd64
8 | tar zxvf helm-v2.5.1-linux-amd64.tar.gz
9 | rm -f /opt/kubernetes/bin/helm
10 | cp linux-amd64/helm /opt/kubernetes/bin/helm
11 | cd ~
12 | rm -rf ~/.helm
13 | helm init --tiller-image=sapcc/tiller:v2.5.1 --kube-context my-context
14 | ```
15 | # 3 Verify
16 | The helm client must be able to communicate with the tiller running inside a k8s pod:
17 | 
18 | ```
19 | helm version
20 | helm search
21 | ```
22 | 
23 | # 4 Add the fabric8 repository
24 | ```
25 | helm repo add fabric8 https://fabric8.io/helm
26 | helm search fabric8
27 | ```
28 | 
29 | 
30 | # 5 Others
31 | Delete tiller:
32 | ```
33 | kubectl --namespace=kube-system delete deployment tiller-deploy
34 | ```
35 | 
36 | Reference: http://blog.csdn.net/wzp1986/article/details/71910335?utm_source=itdadao&utm_medium=referral
37 | 
38 | 
-------------------------------------------------------------------------------- /cluster/master/kube-apiserver.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 | 
7 | 
8 | MASTER_ADDRESS=${1:-"127.0.0.1"}
9 | ETCD_SERVERS=${2:-"http://127.0.0.1:2379"}
10 | SERVICE_CLUSTER_IP_RANGE=${3:-"10.0.0.0/24"}
11 | KUBE_BIN_DIR=${4:-"/opt/kubernetes/bin"}
12 | KUBE_CFG_DIR=${5:-"/opt/kubernetes/cfg"}
13 | KUBE_LOG_DIR=${6:-"/opt/kubernetes/logs"}
14 | 
15 | echo '============================================================'
16 | echo '===================Config kube-apiserver... 
================' 17 | echo '============================================================' 18 | 19 | echo "Create /srv/kubernetes/token_auth_file.csv" 20 | export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ') 21 | cat </srv/kubernetes/token_auth_file.csv 22 | ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap" 23 | EOF 24 | 25 | echo "Create /srv/kubernetes/basic_auth_file.csv" 26 | cat </srv/kubernetes/basic_auth_file.csv 27 | admin,admin,1 28 | system,system,2 29 | EOF 30 | 31 | echo "Create ${KUBE_CFG_DIR}/kubeconfig.yaml" 32 | cat <${KUBE_CFG_DIR}/kubeconfig.yaml 33 | apiVersion: v1 34 | kind: Config 35 | users: 36 | - name: controllermanager 37 | user: 38 | client-certificate: /srv/kubernetes/cs_client.crt 39 | client-key: /srv/kubernetes/cs_client.key 40 | clusters: 41 | - name: local 42 | cluster: 43 | certificate-authority: /srv/kubernetes/ca.crt 44 | contexts: 45 | - context: 46 | cluster: local 47 | user: controllermanager 48 | name: my-context 49 | current-context: my-context 50 | EOF 51 | 52 | 53 | 54 | #公共配置该配置文件同时被kube-apiserver、kube-controller-manager、kube-scheduler、kubelet、kube-proxy使用 55 | echo "Create ${KUBE_CFG_DIR}/config file" 56 | cat <${KUBE_CFG_DIR}/config 57 | ### 58 | # kubernetes system config 59 | # 60 | # The following values are used to configure various aspects of all 61 | # kubernetes services, including 62 | # 63 | # kube-apiserver.service 64 | # kube-controller-manager.service 65 | # kube-scheduler.service 66 | # kubelet.service 67 | # kube-proxy.service 68 | # logging to stderr means we get it in the systemd journal,设置为false输出日志到目录 69 | KUBE_LOGTOSTDERR="--logtostderr=false" 70 | 71 | # journal message level, 0 is debug 72 | KUBE_LOG_LEVEL="--v=0" 73 | 74 | # Should this cluster be allowed to run privileged docker containers 75 | KUBE_ALLOW_PRIV="--allow-privileged=true" 76 | 77 | # How the controller-manager, scheduler, and proxy find the apiserver 78 | KUBE_MASTER="--master=https://${MASTER_ADDRESS}:6443" 79 | EOF 80 | 81 | 82 | #kube-apiserver配置 83 | echo "Create ${KUBE_CFG_DIR}/kube-apiserver file" 84 | cat <${KUBE_CFG_DIR}/kube-apiserver 85 | # 86 | # kubernetes system config 87 | # 88 | # The following values are used to configure the kube-apiserver 89 | 90 | # --etcd-servers=[]: List of etcd servers to watch (http://ip:port), 91 | # comma separated. Mutually exclusive with -etcd-config 92 | KUBE_ETCD_SERVERS="--etcd-servers=${ETCD_SERVERS}" 93 | 94 | # --insecure-bind-address=127.0.0.1: The IP address on which to serve the --insecure-port. 95 | KUBE_API_ADDRESS="--bind-address=${MASTER_ADDRESS}" 96 | KUBE_API_INSECURE_ADDRESS="--insecure-bind-address=${MASTER_ADDRESS}" 97 | 98 | # --insecure-port=8080: The port on which to serve unsecured, unauthenticated access. 99 | KUBE_API_PORT="--secure-port=6443" 100 | 101 | # --kubelet-port=10250: Kubelet port 102 | NODE_PORT="--kubelet-port=10250" 103 | 104 | # --advertise-address=: The IP address on which to advertise 105 | # the apiserver to members of the cluster. 106 | KUBE_ADVERTISE_ADDR="--advertise-address=${MASTER_ADDRESS}" 107 | 108 | # --service-cluster-ip-range=: A CIDR notation IP range from which to assign service cluster IPs. 109 | # This must not overlap with any IP ranges assigned to nodes for pods. 110 | KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" 111 | 112 | # --admission-control="AlwaysAdmit": Ordered list of plug-ins 113 | # to do admission control of resources into cluster. 
114 | # Comma-delimited list of: 115 | # LimitRanger, AlwaysDeny, SecurityContextDeny, NamespaceExists, 116 | # NamespaceLifecycle, NamespaceAutoProvision, AlwaysAdmit, 117 | # ServiceAccount, DefaultStorageClass, DefaultTolerationSeconds, ResourceQuota 118 | KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota" 119 | 120 | # --client-ca-file="": If set, any request presenting a client certificate signed 121 | # by one of the authorities in the client-ca-file is authenticated with an identity 122 | # corresponding to the CommonName of the client certificate. 123 | KUBE_API_CLIENT_CA_FILE="--client-ca-file=/srv/kubernetes/ca.crt" 124 | 125 | # --service-account-key-file="":服务账号文件,包含x509公私钥 126 | KUBE_SERVICE_ACCOUNT_KEY_FILE="--service-account-key-file=/srv/kubernetes/ca.key" 127 | 128 | # --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, 129 | # concatenated after server cert). If HTTPS serving is enabled, and --tls-cert-file 130 | # and --tls-private-key-file are not provided, a self-signed certificate and key are 131 | # generated for the public address and saved to /var/run/kubernetes. 132 | KUBE_API_TLS_CERT_FILE="--tls-cert-file=/srv/kubernetes/server.crt" 133 | 134 | # --tls-private-key-file="": File containing x509 private key matching --tls-cert-file. 135 | KUBE_API_TLS_PRIVATE_KEY_FILE="--tls-private-key-file=/srv/kubernetes/server.key" 136 | 137 | # --authorization-mode=RBAC 138 | KUBE_AUTHORIZATION_MODE="--authorization-mode=RBAC" 139 | 140 | #--experimental-bootstrap-token-auth 141 | KUBE_BOOTSTRAP_TOKEN_AUTH="--experimental-bootstrap-token-auth" 142 | 143 | #--token-auth-file=/srv/kubernetes/token_auth_file.csv 144 | KUBE_TOKEN_AUTH_FILE="--token-auth-file=/srv/kubernetes/token_auth_file.csv" 145 | 146 | #--basic-auth-file=/srv/kubernetes/basic_auth_file.csv 147 | KUBE_BASIC_AUTH_FILE="--basic-auth-file=/srv/kubernetes/basic_auth_file.csv" 148 | 149 | #log dir 150 | KUBE_LOG_DIR="--log-dir=${KUBE_LOG_DIR}" 151 | 152 | # Add your own! 
153 | KUBE_API_ARGS="--runtime-config=rbac.authorization.k8s.io/v1beta1" 154 | EOF 155 | 156 | echo "Create /usr/lib/systemd/system/kube-apiserver.service file" 157 | cat </usr/lib/systemd/system/kube-apiserver.service 158 | [Unit] 159 | Description=Kubernetes API Server 160 | Documentation=https://github.com/kubernetes/kubernetes 161 | After=network.target 162 | After=etcd.service 163 | 164 | [Service] 165 | EnvironmentFile=-${KUBE_CFG_DIR}/config 166 | EnvironmentFile=-${KUBE_CFG_DIR}/kube-apiserver 167 | ExecStart=${KUBE_BIN_DIR}/kube-apiserver \\ 168 | \${KUBE_LOGTOSTDERR} \\ 169 | \${KUBE_LOG_LEVEL} \\ 170 | \${KUBE_ETCD_SERVERS} \\ 171 | \${KUBE_API_ADDRESS} \\ 172 | \${KUBE_API_INSECURE_ADDRESS} \\ 173 | \${KUBE_API_PORT} \\ 174 | \${NODE_PORT} \\ 175 | \${KUBE_ADVERTISE_ADDR} \\ 176 | \${KUBE_ALLOW_PRIV} \\ 177 | \${KUBE_SERVICE_ADDRESSES} \\ 178 | \${KUBE_ADMISSION_CONTROL} \\ 179 | \${KUBE_API_CLIENT_CA_FILE} \\ 180 | \${KUBE_API_TLS_CERT_FILE} \\ 181 | \${KUBE_API_TLS_PRIVATE_KEY_FILE} \\ 182 | \${KUBE_SERVICE_ACCOUNT_KEY_FILE} \\ 183 | \${KUBE_AUTHORIZATION_MODE} \\ 184 | \${KUBE_TOKEN_AUTH_FILE} \\ 185 | \${KUBE_BASIC_AUTH_FILE} \\ 186 | \${KUBE_BOOTSTRAP_TOKEN_AUTH} \\ 187 | \${KUBE_LOG_DIR} \\ 188 | \${KUBE_API_ARGS} 189 | Restart=on-failure 190 | 191 | [Install] 192 | WantedBy=multi-user.target 193 | EOF 194 | 195 | echo '============================================================' 196 | echo '===================Start kube-apiserver... =================' 197 | echo '============================================================' 198 | systemctl daemon-reload 199 | systemctl enable kube-apiserver 200 | systemctl restart kube-apiserver 201 | 202 | echo "Start kube-apiserver success!" 203 | -------------------------------------------------------------------------------- /cluster/master/kube-controller-manager.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | 8 | MASTER_ADDRESS=${1:-"127.0.0.1"} 9 | KUBE_BIN_DIR=${2:-"/opt/kubernetes/bin"} 10 | KUBE_CFG_DIR=${3:-"/opt/kubernetes/cfg"} 11 | KUBE_LOG_DIR=${6:-"/opt/kubernetes/logs"} 12 | 13 | echo '============================================================' 14 | echo '===================Config kube-controller-manager...========' 15 | echo '============================================================' 16 | 17 | echo "Create ${KUBE_CFG_DIR}/kube-controller-manager file" 18 | cat </opt/kubernetes/cfg/kube-controller-manager 19 | # --root-ca-file="": If set, this root certificate authority will be included in 20 | # service account's token secret. This must be a valid PEM-encoded CA bundle. 21 | KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE="--root-ca-file=/srv/kubernetes/ca.crt" 22 | 23 | # --service-account-private-key-file="": Filename containing a PEM-encoded private 24 | # RSA key used to sign service account tokens. 
25 | KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE="--service-account-private-key-file=/srv/kubernetes/server.key" 26 | 27 | #--cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem 28 | KUBE_CLUSTER_SIGNING_CERT_FILE="--cluster-signing-cert-file=/srv/kubernetes/ca.crt" 29 | 30 | #--cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem 31 | KUBE_CLUSTER_SIGNING_KEY_FILE="--cluster-signing-key-file=/srv/kubernetes/ca.key" 32 | 33 | # --leader-elect 34 | KUBE_LEADER_ELECT="--leader-elect=true" 35 | 36 | #log dir 37 | KUBE_LOG_DIR="--log-dir=${KUBE_LOG_DIR}" 38 | 39 | KUBE_CONFIG="--kubeconfig=${KUBE_CFG_DIR}/kubeconfig.yaml" 40 | EOF 41 | 42 | 43 | echo "Create /usr/lib/systemd/system/kube-controller-manager.service file" 44 | cat </usr/lib/systemd/system/kube-controller-manager.service 45 | [Unit] 46 | Description=Kubernetes Controller Manager 47 | Documentation=https://github.com/kubernetes/kubernetes 48 | After=kube-apiserver.service 49 | Requires=kube-apiserver.service 50 | 51 | [Service] 52 | EnvironmentFile=-${KUBE_CFG_DIR}/config 53 | EnvironmentFile=-${KUBE_CFG_DIR}/kube-controller-manager 54 | 55 | ExecStart=${KUBE_BIN_DIR}/kube-controller-manager \\ 56 | \${KUBE_LOGTOSTDERR} \\ 57 | \${KUBE_LOG_LEVEL} \\ 58 | \${KUBE_MASTER} \\ 59 | \${KUBE_CLUSTER_SIGNING_CERT_FILE} \\ 60 | \${KUBE_CLUSTER_SIGNING_KEY_FILE} \\ 61 | \${KUBE_CONTROLLER_MANAGER_ROOT_CA_FILE} \\ 62 | \${KUBE_CONTROLLER_MANAGER_SERVICE_ACCOUNT_PRIVATE_KEY_FILE} \\ 63 | \${KUBE_CONFIG} \\ 64 | \${KUBE_LOG_DIR} \\ 65 | \${KUBE_LEADER_ELECT} 66 | Restart=on-failure 67 | 68 | [Install] 69 | WantedBy=multi-user.target 70 | EOF 71 | 72 | echo '============================================================' 73 | echo '===================Start kube-controller-manager... ========' 74 | echo '============================================================' 75 | 76 | systemctl daemon-reload 77 | systemctl enable kube-controller-manager 78 | systemctl restart kube-controller-manager 79 | 80 | echo "Start kube-controller-manager success!" 81 | 82 | -------------------------------------------------------------------------------- /cluster/master/kube-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | INSTALL_MASTER=${1:-"true"} 8 | KUBE_BIN_DIR=${2:-"/opt/kubernetes/bin"} 9 | KUBE_CFG_DIR=${3:-"/opt/kubernetes/cfg"} 10 | KUBE_LOG_DIR=${4:-"/opt/kubernetes/logs"} 11 | KUBE_VERSION=${5:-"v1.6.7"} 12 | 13 | echo '============================================================' 14 | echo '===================Downland kubernetes... ==================' 15 | echo '============================================================' 16 | 17 | mkdir -p ${KUBE_BIN_DIR} 18 | mkdir -p ${KUBE_CFG_DIR} 19 | mkdir -p ${KUBE_LOG_DIR} 20 | 21 | if [ ! -f "./kubernetes.tar.gz" ]; then 22 | echo "downland kubernetes.tar.gz file" 23 | wget https://github.com/kubernetes/kubernetes/releases/download/${KUBE_VERSION}/kubernetes.tar.gz 24 | else 25 | echo "kubernetes.tar.gz file already exists" 26 | fi 27 | 28 | if [ ! -d "./kubernetes" ]; then 29 | echo "unzip kubernetes.tar.gz file" 30 | tar zxvf kubernetes.tar.gz 31 | sh ./kubernetes/cluster/get-kube-binaries.sh 32 | tar zxvf ./kubernetes/server/kubernetes-server-linux-amd64.tar.gz 33 | else 34 | echo "kubernetes directory already exists" 35 | fi 36 | 37 | 38 | echo '============================================================' 39 | echo '===================Install kubernetes... 
===================' 40 | echo '============================================================' 41 | 42 | if [ ${INSTALL_MASTER} = "true" ]; then 43 | echo "This node is a master node!" 44 | echo "Copy kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet to ${KUBE_BIN_DIR} " 45 | cp ./kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} ${KUBE_BIN_DIR} 46 | else 47 | echo "This node is a slave node!" 48 | echo "Copy kubectl,kube-proxy,kubelet to ${KUBE_BIN_DIR} " 49 | cp ./kubernetes/server/bin/{kubectl,kube-proxy,kubelet} ${KUBE_BIN_DIR} 50 | cp ./kubernetes/cluster/centos/node/bin/{mk-docker-opts.sh,remove-docker0.sh} ${KUBE_BIN_DIR} 51 | fi 52 | -------------------------------------------------------------------------------- /cluster/master/kube-master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | #sh kube-master.sh "172.16.120.151" "k8s-node01" "http://172.16.120.151:2379,http://172.16.120.152:2379,http://172.16.120.153:2379" 5 | # 6 | #systemctl stop kube-apiserver 7 | #systemctl stop kube-controller-manager 8 | #systemctl stop kube-scheduler 9 | 10 | set -o errexit 11 | set -o nounset 12 | set -o pipefail 13 | 14 | #kubernetes安装版本 15 | export KUBE_VERSION=v1.6.7 16 | 17 | #kubernetes执行和配置文件目录 18 | export KUBE_BIN_DIR=/opt/kubernetes/bin 19 | export KUBE_CFG_DIR=/opt/kubernetes/cfg 20 | export KUBE_LOG_DIR=/opt/kubernetes/logs 21 | 22 | export MASTER_ADDRESS=${1:-} 23 | export MASTER_DNS=${2:-} 24 | export ETCD_SERVERS=${3:-} 25 | export SERVICE_CLUSTER_IP_RANGE=${4:-"10.0.0.0/24"} 26 | export MASTER_CLUSTER_IP=${5:-"10.0.0.1"} 27 | 28 | echo '============================================================' 29 | echo '====================Disable selinux and firewalld...========' 30 | echo '============================================================' 31 | if [ $(getenforce) = "Enabled" ]; then 32 | setenforce 0 33 | fi 34 | systemctl disable firewalld 35 | systemctl stop firewalld 36 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 37 | 38 | 39 | #创建证书 40 | sh kube-ssl-master.sh ${MASTER_ADDRESS} ${MASTER_DNS} ${MASTER_CLUSTER_IP} ${KUBE_CFG_DIR} 41 | 42 | #安装kubernetes 43 | sh kube-install.sh "true" ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} ${KUBE_LOG_DIR} ${KUBE_VERSION} 44 | 45 | #配置kube api server,并启动服务 46 | sh kube-apiserver.sh ${MASTER_ADDRESS} ${ETCD_SERVERS} ${SERVICE_CLUSTER_IP_RANGE} ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} ${KUBE_LOG_DIR} 47 | 48 | #配置kube controller manager,并启动服务 49 | sh kube-controller-manager.sh ${MASTER_ADDRESS} ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} 50 | 51 | #配置kube scheduler,并启动服务 52 | sh kube-scheduler.sh ${MASTER_ADDRESS} ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} 53 | -------------------------------------------------------------------------------- /cluster/master/kube-scheduler.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | 8 | MASTER_ADDRESS=${1:-"127.0.0.1"} 9 | KUBE_BIN_DIR=${2:-"/opt/kubernetes/bin"} 10 | KUBE_CFG_DIR=${3:-"/opt/kubernetes/cfg"} 11 | KUBE_LOG_DIR=${6:-"/opt/kubernetes/logs"} 12 | 13 | 14 | echo '============================================================' 15 | echo '===================Config kube-scheduler...=================' 16 | echo '============================================================' 17 | 18 | echo "Create ${KUBE_CFG_DIR}/kube-scheduler file" 19 | cat 
<${KUBE_CFG_DIR}/kube-scheduler 20 | ### 21 | # kubernetes scheduler config 22 | 23 | # --leader-elect 24 | KUBE_LEADER_ELECT="--leader-elect=true" 25 | 26 | #log dir 27 | KUBE_LOG_DIR="--log-dir=${KUBE_LOG_DIR}" 28 | 29 | # Add your own! 30 | KUBE_SCHEDULER_ARGS="--kubeconfig=${KUBE_CFG_DIR}/kubeconfig.yaml" 31 | 32 | EOF 33 | 34 | echo "Create ${KUBE_CFG_DIR}/kube-scheduler.service file" 35 | cat </usr/lib/systemd/system/kube-scheduler.service 36 | [Unit] 37 | Description=Kubernetes Scheduler 38 | Documentation=https://github.com/kubernetes/kubernetes 39 | After=kube-apiserver.service 40 | Requires=kube-apiserver.service 41 | 42 | [Service] 43 | EnvironmentFile=-${KUBE_CFG_DIR}/config 44 | EnvironmentFile=-${KUBE_CFG_DIR}/kube-scheduler 45 | ExecStart=${KUBE_BIN_DIR}/kube-scheduler \\ 46 | \${KUBE_LOGTOSTDERR} \\ 47 | \${KUBE_LOG_LEVEL} \\ 48 | \${KUBE_MASTER} \\ 49 | \${KUBE_LEADER_ELECT} \\ 50 | \${KUBE_LOG_DIR} \\ 51 | \${KUBE_SCHEDULER_ARGS} 52 | Restart=on-failure 53 | 54 | [Install] 55 | WantedBy=multi-user.target 56 | EOF 57 | 58 | echo '============================================================' 59 | echo '===================Start kube-scheduler... =================' 60 | echo '============================================================' 61 | 62 | systemctl daemon-reload 63 | systemctl enable kube-scheduler 64 | systemctl restart kube-scheduler 65 | 66 | echo "Start kube-scheduler success!" 67 | -------------------------------------------------------------------------------- /cluster/master/kube-ssl-master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | 8 | KUBE_MASTER_IP=${1:-} 9 | KUBE_MASTER_DNS=${2:-} 10 | KUBE_CLUSTER_IP=${3:-"10.0.0.1"} 11 | KUBE_CFG_DIR=${4:-"/opt/kubernetes/cfg"} 12 | 13 | 14 | echo '============================================================' 15 | echo '===================Create ssl for kube master node...=======' 16 | echo '============================================================' 17 | 18 | #创建证书存放目录 19 | rm -rf /srv/kubernetes 20 | mkdir /srv/kubernetes 21 | 22 | ###############生成根证书################ 23 | #创建CA私钥 24 | openssl genrsa -out /srv/kubernetes/ca.key 2048 25 | #自签CA 26 | openssl req -x509 -new -nodes -key /srv/kubernetes/ca.key -subj "/CN=kubernetes/O=k8s/OU=System" -days 10000 -out /srv/kubernetes/ca.crt 27 | ###############生成 API Server 服务端证书和私钥############### 28 | 29 | cat </srv/kubernetes/master_ssl.cnf 30 | [req] 31 | req_extensions = v3_req 32 | distinguished_name = req_distinguished_name 33 | [req_distinguished_name] 34 | [ v3_req ] 35 | basicConstraints = CA:FALSE 36 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 37 | subjectAltName = @alt_names 38 | [alt_names] 39 | DNS.1 = kubernetes 40 | DNS.2 = kubernetes.default 41 | DNS.3 = kubernetes.default.svc 42 | DNS.4 = kubernetes.default.svc.cluster.local 43 | DNS.5 = ${KUBE_MASTER_DNS} 44 | IP.1 = ${KUBE_CLUSTER_IP} 45 | IP.2 = ${KUBE_MASTER_IP} 46 | EOF 47 | 48 | #生成apiserver私钥 49 | echo "Create kubernetes api server ssl key..." 
50 | openssl genrsa -out /srv/kubernetes/server.key 2048 51 | 52 | #生成签署请求 53 | openssl req -new -key /srv/kubernetes/server.key -subj "/CN=kubernetes/O=k8s/OU=System" -config /srv/kubernetes/master_ssl.cnf -out /srv/kubernetes/server.csr 54 | 55 | #使用自建CA签署 56 | openssl x509 -req -in /srv/kubernetes/server.csr -CA /srv/kubernetes/ca.crt -CAkey /srv/kubernetes/ca.key -CAcreateserial -days 10000 -extensions v3_req -extfile /srv/kubernetes/master_ssl.cnf -out /srv/kubernetes/server.crt 57 | 58 | #生成 Controller Manager 与 Scheduler 进程共用的证书和私钥 59 | echo "Create kubernetes controller manager and scheduler server ssl key..." 60 | openssl genrsa -out /srv/kubernetes/cs_client.key 2048 61 | 62 | #生成签署请求 63 | openssl req -new -key /srv/kubernetes/cs_client.key -subj "/CN=admin/O=system:masters/OU=System" -out /srv/kubernetes/cs_client.csr 64 | 65 | #使用自建CA签署 66 | openssl x509 -req -in /srv/kubernetes/cs_client.csr -CA /srv/kubernetes/ca.crt -CAkey /srv/kubernetes/ca.key -CAcreateserial -out /srv/kubernetes/cs_client.crt -days 10000 67 | -------------------------------------------------------------------------------- /cluster/node/docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | KUBE_DOCKER_OPTS=${1:-""} 8 | KUBE_CFG_DIR=${2:-"/opt/kubernetes/cfg"} 9 | 10 | echo '============================================================' 11 | echo '===================Install docker ==========================' 12 | echo '============================================================' 13 | 14 | yum install -y yum-utils 15 | 16 | yum-config-manager \ 17 | --add-repo \ 18 | https://docs.docker.com/v1.13/engine/installation/linux/repo_files/centos/docker.repo 19 | 20 | yum makecache fast 21 | 22 | yum list docker-engine.x86_64 --showduplicates |sort -r 23 | 24 | yum install -y docker-engine-1.12.6 25 | 26 | echo '============================================================' 27 | echo '===================Config docker ===========================' 28 | echo '============================================================' 29 | 30 | echo "Create ${KUBE_CFG_DIR}/docker file" 31 | cat <${KUBE_CFG_DIR}/docker 32 | #--selinux-enabled=false 33 | DOCKER_OPTS="${KUBE_DOCKER_OPTS}" 34 | EOF 35 | 36 | echo "Create /usr/lib/systemd/system/docker.service file" 37 | cat </usr/lib/systemd/system/docker.service 38 | [Unit] 39 | Description=Docker Application Container Engine 40 | Documentation=http://docs.docker.com 41 | After=network.target flannel.service 42 | Requires=flannel.service 43 | 44 | [Service] 45 | Type=notify 46 | EnvironmentFile=-/run/flannel/docker 47 | EnvironmentFile=-${KUBE_CFG_DIR}/docker 48 | ExecStart=/usr/bin/dockerd \${DOCKER_OPT_BIP} \${DOCKER_OPT_MTU} \${DOCKER_OPTS} 49 | ExecReload=/bin/kill -s HUP \$MAINPID 50 | LimitNOFILE=infinity 51 | LimitNPROC=infinity 52 | LimitCORE=infinity 53 | # Uncomment TasksMax if your systemd version supports it. 54 | # Only systemd 226 and above support this version. 55 | #TasksMax=infinity 56 | TimeoutStartSec=0 57 | # set delegate yes so that systemd does not reset the cgroups of docker containers 58 | Delegate=yes 59 | # kill only the docker process, not all processes in the cgroup 60 | KillMode=process 61 | 62 | [Install] 63 | WantedBy=multi-user.target 64 | EOF 65 | 66 | echo '============================================================' 67 | echo '===================Start docker... 
=========================' 68 | echo '============================================================' 69 | 70 | systemctl daemon-reload 71 | systemctl enable docker 72 | systemctl restart docker 73 | 74 | echo "Start docker success!" 75 | -------------------------------------------------------------------------------- /cluster/node/flannel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ETCD_SERVERS=${1:-"http://8.8.8.18:2379"} 4 | KUBE_MASTER_ADDRESS=${2:-} 5 | KUBE_MASTER_USER=${3:-"root"} 6 | KUBE_MASTER_PASSWORD=${4:-"123456"} 7 | FLANNEL_NET=${5:-"172.18.0.0/16"} 8 | FLANNEL_VERSION=${6:-"v0.7.1"} 9 | KUBE_BIN_DIR=${7:-"/opt/kubernetes/bin"} 10 | KUBE_CFG_DIR=${8:-"/opt/kubernetes/cfg"} 11 | 12 | 13 | echo '============================================================' 14 | echo '===================Install flannel =========================' 15 | echo '============================================================' 16 | FLANNEL_FILE_NAME="flannel-${FLANNEL_VERSION}-linux-amd64.tar.gz" 17 | 18 | if [ ! -f "./$FLANNEL_FILE_NAME" ]; then 19 | wget -c https://github.com/coreos/flannel/releases/download/${FLANNEL_VERSION}/${FLANNEL_FILE_NAME} 20 | fi 21 | tar zxvf ${FLANNEL_FILE_NAME} 22 | cp flanneld ${KUBE_BIN_DIR} 23 | 24 | echo '============================================================' 25 | echo '===================Config flannel ==========================' 26 | echo '============================================================' 27 | 28 | echo "Create ${KUBE_CFG_DIR}/flannel file" 29 | cat <${KUBE_CFG_DIR}/flannel 30 | FLANNEL_ETCD="-etcd-endpoints=${ETCD_SERVERS}" 31 | FLANNEL_ETCD_KEY="-etcd-prefix=/coreos.com/network" 32 | EOF 33 | 34 | echo "Create /usr/lib/systemd/system/flannel.service file" 35 | cat </usr/lib/systemd/system/flannel.service 36 | [Unit] 37 | Description=Flanneld overlay address etcd agent 38 | After=network.target 39 | Before=docker.service 40 | 41 | [Service] 42 | EnvironmentFile=-${KUBE_CFG_DIR}/flannel 43 | ExecStartPre=${KUBE_BIN_DIR}/remove-docker0.sh 44 | ExecStart=${KUBE_BIN_DIR}/flanneld --ip-masq \${FLANNEL_ETCD} \${FLANNEL_ETCD_KEY} 45 | ExecStartPost=${KUBE_BIN_DIR}/mk-docker-opts.sh -d /run/flannel/docker 46 | 47 | Type=notify 48 | 49 | [Install] 50 | WantedBy=multi-user.target 51 | RequiredBy=docker.service 52 | EOF 53 | 54 | 55 | echo '============================================================' 56 | echo '===================Store FLANNEL_NET to etcd ===============' 57 | echo '============================================================' 58 | # Store FLANNEL_NET to etcd. 59 | if [ ! -f "$KUBE_BIN_DIR/etcdctl" ]; then 60 | yum install -y expect 61 | 62 | echo "Copy etcdctl from master node!" 63 | expect -c "spawn scp ${KUBE_MASTER_USER}@${KUBE_MASTER_ADDRESS}:${KUBE_BIN_DIR}/etcdctl ${KUBE_BIN_DIR} 64 | set timeout 3 65 | expect \"${KUBE_MASTER_USER}@${KUBE_MASTER_ADDRESS} password:\" 66 | exec sleep 2 67 | send \"${KUBE_MASTER_PASSWORD}\r\" 68 | interact" 69 | fi 70 | 71 | attempt=0 72 | while true; do 73 | ${KUBE_BIN_DIR}/etcdctl --no-sync -C ${ETCD_SERVERS} \ 74 | get /coreos.com/network/config >/dev/null 2>&1 75 | if [[ "$?" 
== 0 ]]; then 76 | break 77 | else 78 | if (( attempt > 600 )); then 79 | echo "timeout for waiting network config" > ~/kube/err.log 80 | exit 2 81 | fi 82 | 83 | ${KUBE_BIN_DIR}/etcdctl --no-sync -C ${ETCD_SERVERS} \ 84 | mk /coreos.com/network/config "{\"Network\":\"${FLANNEL_NET}\"}" >/dev/null 2>&1 85 | attempt=$((attempt+1)) 86 | sleep 3 87 | echo "Store FLANNEL_NET to etcd success!" 88 | fi 89 | done 90 | wait 91 | 92 | echo '============================================================' 93 | echo '===================Start flannel... ========================' 94 | echo '============================================================' 95 | systemctl daemon-reload 96 | systemctl enable flannel 97 | systemctl restart flannel 98 | 99 | echo "Start flannel success!" 100 | -------------------------------------------------------------------------------- /cluster/node/kube-install.sh: -------------------------------------------------------------------------------- 1 | set -o errexit 2 | set -o nounset 3 | set -o pipefail 4 | 5 | INSTALL_MASTER=${1:-"true"} 6 | KUBE_BIN_DIR=${2:-"/opt/kubernetes/bin"} 7 | KUBE_CFG_DIR=${3:-"/opt/kubernetes/cfg"} 8 | KUBE_LOG_DIR=${4:-"/opt/kubernetes/logs"} 9 | KUBE_VERSION=${5:-"v1.6.7"} 10 | 11 | echo '============================================================' 12 | echo '===================Downland kubernetes... ==================' 13 | echo '============================================================' 14 | 15 | mkdir -p ${KUBE_BIN_DIR} 16 | mkdir -p ${KUBE_CFG_DIR} 17 | mkdir -p ${KUBE_LOG_DIR} 18 | 19 | if [ ! -f "./kubernetes.tar.gz" ]; then 20 | echo "downland kubernetes.tar.gz file" 21 | wget https://github.com/kubernetes/kubernetes/releases/download/${KUBE_VERSION}/kubernetes.tar.gz 22 | else 23 | echo "kubernetes.tar.gz file already exists" 24 | fi 25 | 26 | if [ ! -d "./kubernetes" ]; then 27 | echo "unzip kubernetes.tar.gz file" 28 | tar zxvf kubernetes.tar.gz 29 | sh ./kubernetes/cluster/get-kube-binaries.sh 30 | tar zxvf ./kubernetes/server/kubernetes-server-linux-amd64.tar.gz 31 | else 32 | echo "kubernetes directory already exists" 33 | fi 34 | 35 | 36 | echo '============================================================' 37 | echo '===================Install kubernetes... ===================' 38 | echo '============================================================' 39 | 40 | if [ ${INSTALL_MASTER} = "true" ]; then 41 | echo "This node is a master node!" 42 | echo "Copy kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet to ${KUBE_BIN_DIR} " 43 | cp ./kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} ${KUBE_BIN_DIR} 44 | else 45 | echo "This node is a slave node!" 
46 | echo "Copy kubectl,kube-proxy,kubelet to ${KUBE_BIN_DIR} " 47 | cp ./kubernetes/server/bin/{kubectl,kube-proxy,kubelet} ${KUBE_BIN_DIR} 48 | cp ./kubernetes/cluster/centos/node/bin/{mk-docker-opts.sh,remove-docker0.sh} ${KUBE_BIN_DIR} 49 | fi 50 | -------------------------------------------------------------------------------- /cluster/node/kube-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #example: 4 | #sh kube-node.sh "172.16.120.152" "172.16.120.151" "root" "123456" "http://172.16.120.151:2379,http://172.16.120.152:2379,http://172.16.120.153:2379" 5 | #sh kube-node.sh "172.16.120.153" "172.16.120.151" "root" "123456" "http://172.16.120.151:2379,http://172.16.120.152:2379,http://172.16.120.153:2379" 6 | # 7 | #systemctl stop kubelet 8 | #systemctl stop kube-proxy 9 | #systemctl stop flannel 10 | #systemctl stop docker 11 | # 12 | 13 | set -o errexit 14 | set -o nounset 15 | set -o pipefail 16 | 17 | #kubernetes安装版本 18 | export KUBE_VERSION=v1.6.7 19 | export FLANNEL_VERSION=v0.7.1 20 | 21 | #kubernetes执行和配置文件目录 22 | export KUBE_BIN_DIR=/opt/kubernetes/bin 23 | export KUBE_CFG_DIR=/opt/kubernetes/cfg 24 | export KUBE_LOG_DIR=/opt/kubernetes/logs 25 | 26 | 27 | export NODE_ADDRESS=${1:-} 28 | export MASTER_ADDRESS=${2:-} 29 | export MASTER_USER=${3:-} 30 | export MASTER_PASSWORD=${4:-} 31 | export ETCD_SERVERS=${5:-} 32 | export FLANNEL_NET=${6:-"172.18.0.0/16"} 33 | export DOCKER_OPTS=${7:-"--registry-mirror=https://5md0553g.mirror.aliyuncs.com"} 34 | export KUBELET_POD_INFRA_CONTAINER=${8:-"hub.c.163.com/k8s163/pause-amd64:3.0"} 35 | export DNS_SERVER_IP=${9:-"10.0.0.10"} 36 | 37 | echo '============================================================' 38 | echo '====================Disable selinux and firewalld...========' 39 | echo '============================================================' 40 | if [ $(getenforce) = "Enabled" ]; then 41 | setenforce 0 42 | fi 43 | systemctl disable firewalld 44 | systemctl stop firewalld 45 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 46 | 47 | echo "Disable selinux and firewalld success!" 
48 | 49 | #创建证书 50 | sh kube-ssl-node.sh ${NODE_ADDRESS} ${MASTER_ADDRESS} ${MASTER_USER} ${MASTER_PASSWORD} 51 | 52 | #安装kubernetes 53 | sh kube-install.sh "false" ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} ${KUBE_LOG_DIR} ${KUBE_VERSION} 54 | 55 | #安装顺序为 flannel docker kubelet kube-proxy 56 | #安装和配置flannel 57 | sh flannel.sh ${ETCD_SERVERS} ${MASTER_ADDRESS} ${MASTER_USER} ${MASTER_PASSWORD} ${FLANNEL_NET} ${FLANNEL_VERSION} ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} 58 | 59 | #安装和配置docker 60 | sh docker.sh ${DOCKER_OPTS} 61 | 62 | #配置kube api server,并启动服务 63 | sh kubelet.sh ${MASTER_ADDRESS} ${NODE_ADDRESS} ${KUBELET_POD_INFRA_CONTAINER} ${DNS_SERVER_IP} "cluster.local" ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} 64 | 65 | #配置kube controller manager,并启动服务 66 | sh kube-proxy.sh ${NODE_ADDRESS} ${MASTER_ADDRESS} ${KUBE_BIN_DIR} ${KUBE_CFG_DIR} 67 | -------------------------------------------------------------------------------- /cluster/node/kube-proxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | 8 | NODE_ADDRESS=${1:-"8.8.8.20"} 9 | MASTER_ADDRESS=${2:-"8.8.8.18"} 10 | KUBE_BIN_DIR=${3:-"/opt/kubernetes/bin"} 11 | KUBE_CFG_DIR=${4:-"/opt/kubernetes/cfg"} 12 | 13 | echo '============================================================' 14 | echo '===================Config kube-proxy... ====================' 15 | echo '============================================================' 16 | 17 | echo "Create ${KUBE_CFG_DIR}/kube-proxy file" 18 | cat <${KUBE_CFG_DIR}/kube-proxy 19 | # --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname. 20 | NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}" 21 | 22 | # Add your own! 23 | KUBE_PROXY_ARGS="--kubeconfig=${KUBE_CFG_DIR}/kubeconfig.yaml" 24 | EOF 25 | 26 | echo "Create /usr/lib/systemd/system/kube-proxy.service file" 27 | cat </usr/lib/systemd/system/kube-proxy.service 28 | [Unit] 29 | Description=Kubernetes Proxy 30 | After=network.target 31 | 32 | [Service] 33 | EnvironmentFile=-${KUBE_CFG_DIR}/config 34 | EnvironmentFile=-${KUBE_CFG_DIR}/kube-proxy 35 | ExecStart=${KUBE_BIN_DIR}/kube-proxy \ 36 | \${KUBE_LOGTOSTDERR} \ 37 | \${KUBE_LOG_LEVEL} \ 38 | \${NODE_HOSTNAME} \ 39 | \${KUBE_MASTER} \ 40 | \${KUBE_PROXY_ARGS} 41 | Restart=on-failure 42 | 43 | [Install] 44 | WantedBy=multi-user.target 45 | EOF 46 | 47 | echo "============================================================" 48 | echo "===================Start kube-proxy... =====================" 49 | echo "============================================================" 50 | 51 | systemctl daemon-reload 52 | systemctl enable kube-proxy 53 | systemctl restart kube-proxy 54 | 55 | echo "Start kube proxy success!" 
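
Not part of the original scripts — a minimal post-install check for a worker node, assuming the systemd unit names created by flannel.sh, docker.sh, kubelet.sh and kube-proxy.sh above:

```bash
#!/bin/bash
# Quick health check after kube-node.sh finishes on a worker node.
# Unit names follow the scripts above; adjust if you renamed them.
for svc in flannel docker kubelet kube-proxy; do
  if systemctl is-active --quiet "$svc"; then
    echo "$svc: active"
  else
    echo "$svc: NOT active"
  fi
done

# Show the most recent kube-proxy log lines if something is not active.
journalctl -u kube-proxy -n 20 --no-pager
```
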
56 | -------------------------------------------------------------------------------- /cluster/node/kube-ssl-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | 8 | KUBE_NODE_ADDRESS=${1:-} 9 | KUBE_MASTER_ADDRESS=${2:-} 10 | KUBE_MASTER_USER=${3:-"root"} 11 | KUBE_MASTER_PASSWORD=${4:-"123456"} 12 | 13 | 14 | 15 | echo '============================================================' 16 | echo '===================Create ssl for kube node...==============' 17 | echo '============================================================' 18 | 19 | #创建证书存放目录 20 | rm -rf /srv/kubernetes 21 | mkdir /srv/kubernetes 22 | 23 | ###############生成node端证书################ 24 | openssl genrsa -out /srv/kubernetes/kubelet_client.key 2048 25 | 26 | openssl req -new -key /srv/kubernetes/kubelet_client.key -subj "/CN=${KUBE_NODE_ADDRESS}" -out /srv/kubernetes/kubelet_client.csr 27 | 28 | #从master节点获取根证书 29 | yum install -y expect 30 | 31 | expect -c "spawn scp ${KUBE_MASTER_USER}@${KUBE_MASTER_ADDRESS}:/srv/kubernetes/ca.* /srv/kubernetes/ 32 | set timeout 3 33 | expect \"${KUBE_MASTER_USER}@${KUBE_MASTER_ADDRESS} password:\" 34 | exec sleep 2 35 | send \"${KUBE_MASTER_PASSWORD}\r\" 36 | interact" 37 | 38 | openssl x509 -req -in /srv/kubernetes/kubelet_client.csr -CA /srv/kubernetes/ca.crt -CAkey /srv/kubernetes/ca.key -CAcreateserial -out /srv/kubernetes/kubelet_client.crt -days 10000 39 | -------------------------------------------------------------------------------- /cluster/node/kubelet.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | MASTER_ADDRESS=${1:-""} 8 | NODE_ADDRESS=${2:-""} 9 | KUBELET_POD_INFRA_CONTAINER=${3:-"hub.c.163.com/k8s163/pause-amd64:3.0"} 10 | DNS_SERVER_IP=${4:-"10.0.0.10"} 11 | DNS_DOMAIN=${5:-"cluster.local"} 12 | KUBE_BIN_DIR=${6:-"/opt/kubernetes/bin"} 13 | KUBE_CFG_DIR=${7:-"/opt/kubernetes/cfg"} 14 | 15 | echo '============================================================' 16 | echo '===================Config kubelet... =======================' 17 | echo '============================================================' 18 | echo "Create ${KUBE_CFG_DIR}/kubeconfig.yaml" 19 | cat <${KUBE_CFG_DIR}/kubeconfig.yaml 20 | apiVersion: v1 21 | kind: Config 22 | users: 23 | - name: kubelet 24 | user: 25 | client-certificate: /srv/kubernetes/kubelet_client.crt 26 | client-key: /srv/kubernetes/kubelet_client.key 27 | clusters: 28 | - name: local 29 | cluster: 30 | certificate-authority: /srv/kubernetes/ca.crt 31 | contexts: 32 | - context: 33 | cluster: local 34 | user: kubelet 35 | name: my-context 36 | current-context: my-context 37 | EOF 38 | 39 | 40 | 41 | if [ ! 
-f ".${KUBE_CFG_DIR}/config" ]; then 42 | echo "Create ${KUBE_CFG_DIR}/config file" 43 | cat <${KUBE_CFG_DIR}/config 44 | ### 45 | # kubernetes system config 46 | # 47 | # The following values are used to configure various aspects of all 48 | # kubernetes services, including 49 | # 50 | # kube-apiserver.service 51 | # kube-controller-manager.service 52 | # kube-scheduler.service 53 | # kubelet.service 54 | # kube-proxy.service 55 | # logging to stderr means we get it in the systemd journal 56 | KUBE_LOGTOSTDERR="--logtostderr=true" 57 | 58 | # journal message level, 0 is debug 59 | KUBE_LOG_LEVEL="--v=0" 60 | 61 | # Should this cluster be allowed to run privileged docker containers 62 | KUBE_ALLOW_PRIV="--allow-privileged=true" 63 | 64 | # How the controller-manager, scheduler, and proxy find the apiserver 65 | KUBE_MASTER="--master=https://${MASTER_ADDRESS}:6443" 66 | EOF 67 | fi 68 | 69 | echo "Create ${KUBE_CFG_DIR}/kubelet file" 70 | cat <${KUBE_CFG_DIR}/kubelet 71 | 72 | # --address=0.0.0.0: The IP address for the Kubelet to serve on (set to 0.0.0.0 for all interfaces) 73 | NODE_ADDRESS="--address=${NODE_ADDRESS}" 74 | 75 | # --port=10250: The port for the Kubelet to serve on. Note that "kubectl logs" will not work if you set this flag. 76 | NODE_PORT="--port=10250" 77 | 78 | # --hostname-override="": If non-empty, will use this string as identification instead of the actual hostname. 79 | NODE_HOSTNAME="--hostname-override=${NODE_ADDRESS}" 80 | 81 | # --api-servers=[]: List of Kubernetes API servers for publishing events, 82 | # and reading pods and services. (ip:port), comma separated. 83 | KUBELET_API_SERVER="--api-servers=https://${MASTER_ADDRESS}:6443" 84 | 85 | # DNS info 86 | KUBELET_DNS_IP="--cluster-dns=${DNS_SERVER_IP}" 87 | KUBELET_DNS_DOMAIN="--cluster-domain=${DNS_DOMAIN}" 88 | 89 | #kubelet pod infra container 90 | KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=${KUBELET_POD_INFRA_CONTAINER}" 91 | 92 | # Add your own! 93 | KUBELET_ARGS="--kubeconfig=${KUBE_CFG_DIR}/kubeconfig.yaml" 94 | 95 | EOF 96 | 97 | 98 | echo "Create /usr/lib/systemd/system/kubelet.service file" 99 | cat </usr/lib/systemd/system/kubelet.service 100 | [Unit] 101 | Description=Kubernetes Kubelet 102 | After=docker.service 103 | Requires=docker.service 104 | 105 | [Service] 106 | EnvironmentFile=-${KUBE_CFG_DIR}/config 107 | EnvironmentFile=-${KUBE_CFG_DIR}/kubelet 108 | ExecStart=${KUBE_BIN_DIR}/kubelet \ 109 | \${KUBE_LOGTOSTDERR} \ 110 | \${KUBE_LOG_LEVEL} \ 111 | \${NODE_ADDRESS} \ 112 | \${NODE_PORT} \ 113 | \${NODE_HOSTNAME} \ 114 | \${KUBELET_API_SERVER} \ 115 | \${KUBE_ALLOW_PRIV} \ 116 | \${KUBELET_DNS_IP} \ 117 | \${KUBELET_DNS_DOMAIN} \ 118 | \${KUBELET_POD_INFRA_CONTAINER} \ 119 | \${KUBELET_ARGS} 120 | 121 | Restart=on-failure 122 | KillMode=process 123 | 124 | [Install] 125 | WantedBy=multi-user.target 126 | EOF 127 | 128 | 129 | 130 | echo '============================================================' 131 | echo '===================Start kubelet... ========================' 132 | echo '============================================================' 133 | 134 | systemctl daemon-reload 135 | systemctl enable kubelet 136 | systemctl restart kubelet 137 | 138 | echo "Start kubelet success!" 
139 | -------------------------------------------------------------------------------- /demo/kubernetes/host-storage.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: host-storage 5 | provisioner: kubernetes.io/host-path 6 | -------------------------------------------------------------------------------- /demo/kubernetes/mysql-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: mysql 5 | spec: 6 | selector: 7 | app: mysql 8 | type: NodePort 9 | ports: 10 | - port: 3306 11 | name: mysql-port 12 | nodePort: 30006 13 | targetPort: 3306 14 | 15 | -------------------------------------------------------------------------------- /demo/kubernetes/mysql-statefulset.yaml: -------------------------------------------------------------------------------- 1 | kind: StatefulSet 2 | apiVersion: apps/v1beta1 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: mysql 7 | spec: 8 | replicas: 1 9 | serviceName: mysql 10 | template: 11 | metadata: 12 | name: mysql 13 | labels: 14 | app: mysql 15 | spec: 16 | restartPolicy: Always 17 | containers: 18 | - name: mysql 19 | image: mariadb:10.1.26 20 | ports: 21 | - containerPort: 3306 22 | env: 23 | - name: MYSQL_ROOT_PASSWORD 24 | value: "123456" 25 | volumeMounts: 26 | - name: mysql-storage 27 | mountPath: /var/lib/mysql 28 | volumeClaimTemplates: 29 | - metadata: 30 | name: mysql-storage 31 | annotations: 32 | volume.beta.kubernetes.io/storage-class: host-storage 33 | spec: 34 | accessModes: 35 | - ReadWriteOnce 36 | resources: 37 | requests: 38 | storage: 1Gi 39 | -------------------------------------------------------------------------------- /demo/kubernetes/redis-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: redis 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | name: redis 10 | labels: 11 | app: redis 12 | spec: 13 | restartPolicy: Always 14 | containers: 15 | - name: redis 16 | image: redis:4.0.1-alpine 17 | ports: 18 | - containerPort: 6379 19 | -------------------------------------------------------------------------------- /demo/kubernetes/redis-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: redis 5 | spec: 6 | selector: 7 | app: redis 8 | type: NodePort 9 | ports: 10 | - port: 6379 11 | name: redis-port 12 | nodePort: 30007 13 | targetPort: 6379 14 | -------------------------------------------------------------------------------- /demo/kubernetes/regsecret.md: -------------------------------------------------------------------------------- 1 | kubectl create secret docker-registry regsecret --docker-server=172.16.120.153 --docker-username=admin --docker-password=Harbor12345 --docker-email=admin@example.com 2 | -------------------------------------------------------------------------------- /demo/kubernetes/web-deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: web-deployment 5 | spec: 6 | replicas: 2 7 | template: 8 | metadata: 9 | name: web 10 | labels: 11 | app: web 12 | spec: 13 | restartPolicy: Always 14 | containers: 15 | - name: web 16 | image: 
172.16.120.153/test/web:v1.0 17 | imagePullPolicy: Always 18 | ports: 19 | - containerPort: 8080 20 | env: 21 | - name: "MY_REDIS_HOST" 22 | value: "redis" 23 | - name: "MY_REDIS_PORT" 24 | value: "6379" 25 | - name: "MY_DATASOURCE_URL" 26 | value: "jdbc:mariadb://mysql:3306/test?characterEncoding=utf-8" 27 | - name: "MY_DATASOURCE_USERNAME" 28 | value: "root" 29 | - name: "MY_DATASOURCE_PASSWORD" 30 | value: "123456" 31 | imagePullSecrets: 32 | - name: regsecret 33 | 34 | -------------------------------------------------------------------------------- /demo/kubernetes/web-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: web-service 5 | spec: 6 | selector: 7 | app: web 8 | type: NodePort 9 | ports: 10 | - port: 8080 11 | targetPort: 8080 12 | nodePort: 30010 13 | 14 | -------------------------------------------------------------------------------- /demo/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 4.0.0 6 | 7 | com.szss 8 | demo 9 | 1.0-SNAPSHOT 10 | pom 11 | 12 | web 13 | thrift-server 14 | 15 | 16 | 17 | UTF-8 18 | 1.8 19 | 2.6 20 | 0.4.14 21 | szss 22 | 23 | 24 | 25 | 26 | 27 | org.springframework.boot 28 | spring-boot-starter-parent 29 | 1.3.8.RELEASE 30 | pom 31 | import 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | org.apache.maven.plugins 41 | maven-compiler-plugin 42 | 3.5.1 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | aliyun 51 | aliyun 52 | http://maven.aliyun.com/nexus/content/groups/public/ 53 | default 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /demo/thrift-server/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | demo 7 | com.szss 8 | 1.0-SNAPSHOT 9 | 10 | 4.0.0 11 | 12 | thrift-server 13 | 14 | 15 | 16 | -------------------------------------------------------------------------------- /demo/thrift-server/src/thrift/Hello.thrift: -------------------------------------------------------------------------------- 1 | namespace java service.demo 2 | service Hello{ 3 | string hello(1:string param) 4 | i32 helloInt(1:i32 param) 5 | bool helloBoolean(1:bool param) 6 | void helloVoid() 7 | string helloNull() 8 | } 9 | -------------------------------------------------------------------------------- /demo/web/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | mariadb: 4 | image: mariadb 5 | restart: always 6 | ports: 7 | - "3306:3306" 8 | environment: 9 | - MYSQL_ROOT_PASSWORD=123456 10 | - MYSQL_DATABASE=test 11 | redis: 12 | image: redis:4.0.5 13 | restart: always 14 | ports: 15 | - "6379:6379" 16 | demo-web: 17 | image: szss/web 18 | restart: always 19 | ports: 20 | - "8080:8080" 21 | environment: 22 | - MY_REDIS_HOST=redis 23 | - MY_REDIS_PORT=6379 24 | - MY_DATASOURCE_URL=jdbc:mariadb://mariadb:3306/test?characterEncoding=utf-8 25 | - MY_DATASOURCE_USERNAME=root 26 | - MY_DATASOURCE_PASSWORD=123456 27 | 28 | -------------------------------------------------------------------------------- /demo/web/pom.xml: -------------------------------------------------------------------------------- 1 | 2 | 5 | 6 | demo 7 | com.szss 8 | 1.0-SNAPSHOT 9 | 10 | 4.0.0 11 | 12 | web 13 | 1.0-SNAPSHOT 14 | 15 | 16 | 17 | org.springframework.boot 18 | spring-boot-starter 19 | 20 | 21 | org.springframework.boot 22 | spring-boot-starter-undertow 23 | 
24 | 25 | org.springframework.boot 26 | spring-boot-starter-web 27 | 28 | 29 | org.springframework.boot 30 | spring-boot-starter-tomcat 31 | 32 | 33 | 34 | 35 | org.springframework.boot 36 | spring-boot-starter-freemarker 37 | 38 | 39 | org.springframework.boot 40 | spring-boot-starter-redis 41 | 42 | 43 | org.springframework.session 44 | spring-session-data-redis 45 | 46 | 47 | org.springframework.boot 48 | spring-boot-starter-jdbc 49 | 50 | 51 | org.mybatis.spring.boot 52 | mybatis-spring-boot-starter 53 | 1.1.1 54 | 55 | 56 | org.mariadb.jdbc 57 | mariadb-java-client 58 | 1.5.4 59 | 60 | 61 | org.projectlombok 62 | lombok 63 | 1.16.6 64 | 65 | 66 | org.webjars.bower 67 | adminlte 68 | 2.3.2 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | src/main/resources 77 | true 78 | 79 | 80 | 81 | 82 | 83 | org.springframework.boot 84 | spring-boot-maven-plugin 85 | 86 | 87 | 88 | repackage 89 | 90 | 91 | 92 | 93 | 94 | com.spotify 95 | docker-maven-plugin 96 | ${docker.plugin.version} 97 | 98 | 99 | install 100 | 101 | build 102 | 103 | 104 | 105 | 106 | ${docker.image.prefix}/${project.artifactId} 107 | ${project.basedir}/src/main/docker 108 | 109 | 110 | / 111 | ${project.build.directory} 112 | ${project.build.finalName}.jar 113 | 114 | 115 | 116 | 117 | 118 | org.apache.maven.plugins 119 | maven-compiler-plugin 120 | 121 | 1.8 122 | 1.8 123 | 124 | 125 | 126 | 127 | 128 | 129 | -------------------------------------------------------------------------------- /demo/web/sql/db.sql: -------------------------------------------------------------------------------- 1 | CREATE TABLE t_user ( 2 | id INT PRIMARY KEY, 3 | username VARCHAR(20), 4 | password VARCHAR(20), 5 | name VARCHAR(20) 6 | ) 7 | -------------------------------------------------------------------------------- /demo/web/src/main/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | #使用daocloud的java8镜像 2 | FROM java:8 3 | #镜像创建人 4 | MAINTAINER sxt i_sxtian@3songshu.com 5 | #附加卷 6 | VOLUME /tmp 7 | #添加jar包 8 | ADD web*.jar app.jar 9 | #修改jar包日期 10 | RUN bash -c "touch app.jar" 11 | #并指定端口号 12 | EXPOSE 8080 13 | #环境变量 14 | ENV TZ Asia/Shanghai 15 | #设置时区 16 | RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone 17 | #运行脚步 18 | ENTRYPOINT java -jar /app.jar -Djava.security.egd=file:/dev/./urandom 19 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/Application.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo; 2 | 3 | import org.springframework.boot.SpringApplication; 4 | import org.springframework.boot.autoconfigure.SpringBootApplication; 5 | import org.springframework.session.data.redis.config.annotation.web.http.EnableRedisHttpSession; 6 | 7 | /** 8 | * Created by zcg on 2017/8/15. 
9 | */ 10 | @SpringBootApplication 11 | @EnableRedisHttpSession(maxInactiveIntervalInSeconds= 60) 12 | public class Application { 13 | public static void main(String[] args) { 14 | SpringApplication.run(Application.class, args); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/config/WebConfig.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.config; 2 | 3 | import com.szss.demo.interceptor.AuthInterceptor; 4 | import org.springframework.context.annotation.Configuration; 5 | import org.springframework.web.servlet.config.annotation.InterceptorRegistry; 6 | import org.springframework.web.servlet.config.annotation.WebMvcConfigurerAdapter; 7 | 8 | /** 9 | * Created by zcg on 2017/8/15. 10 | */ 11 | @Configuration 12 | public class WebConfig extends WebMvcConfigurerAdapter { 13 | @Override 14 | public void addInterceptors(InterceptorRegistry registry) { 15 | registry.addInterceptor(new AuthInterceptor()).addPathPatterns("/**").excludePathPatterns("/login"); 16 | super.addInterceptors(registry); 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/controller/IndexController.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.controller; 2 | 3 | import com.szss.demo.interceptor.AuthInterceptor; 4 | import lombok.extern.slf4j.Slf4j; 5 | import org.springframework.stereotype.Controller; 6 | import org.springframework.ui.Model; 7 | import org.springframework.web.bind.annotation.RequestMapping; 8 | import org.springframework.web.bind.annotation.RequestMethod; 9 | 10 | import javax.servlet.http.HttpServletRequest; 11 | import java.net.InetAddress; 12 | 13 | /** 14 | * Created by zcg on 2017/8/15. 15 | */ 16 | @Slf4j 17 | @Controller 18 | public class IndexController { 19 | 20 | @RequestMapping(value = "/index", method = RequestMethod.GET) 21 | public String index(HttpServletRequest request, Model model) throws Exception { 22 | String ip = InetAddress.getLocalHost().getHostAddress(); 23 | model.addAttribute("serverIP", ip); 24 | model.addAttribute("user", request.getSession().getAttribute(AuthInterceptor.USER_KEY)); 25 | return "index"; 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/controller/LoginController.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.controller; 2 | 3 | import com.szss.demo.entity.User; 4 | import com.szss.demo.interceptor.AuthInterceptor; 5 | import com.szss.demo.service.UserService; 6 | import lombok.extern.slf4j.Slf4j; 7 | import org.springframework.beans.factory.annotation.Autowired; 8 | import org.springframework.stereotype.Controller; 9 | import org.springframework.web.bind.annotation.RequestMapping; 10 | import org.springframework.web.bind.annotation.RequestMethod; 11 | import org.springframework.web.bind.annotation.RequestParam; 12 | 13 | import javax.servlet.http.HttpServletRequest; 14 | import javax.servlet.http.HttpServletResponse; 15 | import javax.servlet.http.HttpSession; 16 | 17 | /** 18 | * Created by zcg on 2017/8/15. 
19 | */ 20 | @Slf4j 21 | @Controller 22 | public class LoginController { 23 | 24 | @Autowired 25 | private UserService userService; 26 | 27 | @RequestMapping(value = "/login", method = RequestMethod.GET) 28 | public String login() { 29 | return "login"; 30 | } 31 | 32 | @RequestMapping(value = "/login", method = RequestMethod.POST) 33 | public String login(@RequestParam("username") String username, 34 | @RequestParam("password") String password, 35 | HttpServletRequest request, 36 | HttpServletResponse response) throws Exception{ 37 | log.debug("username:{} password:{}", username, password); 38 | User user = userService.find(username, password); 39 | if (user != null) { 40 | HttpSession session = request.getSession(); 41 | session.setAttribute(AuthInterceptor.USER_KEY, user); 42 | response.sendRedirect("/index"); 43 | } 44 | return "login"; 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/entity/User.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.entity; 2 | 3 | import lombok.AllArgsConstructor; 4 | import lombok.Data; 5 | import lombok.NoArgsConstructor; 6 | 7 | import java.io.Serializable; 8 | 9 | /** 10 | * Created by zcg on 2017/8/15. 11 | */ 12 | @Data 13 | @NoArgsConstructor 14 | @AllArgsConstructor 15 | public class User implements Serializable { 16 | private long id; 17 | private String username; 18 | private String password; 19 | private String name; 20 | } 21 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/init/DBInitializer.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.init; 2 | 3 | import com.szss.demo.entity.User; 4 | import com.szss.demo.service.UserService; 5 | import org.apache.tomcat.jdbc.pool.DataSource; 6 | import org.springframework.context.ApplicationListener; 7 | import org.springframework.context.event.ContextRefreshedEvent; 8 | import org.springframework.stereotype.Component; 9 | 10 | import java.sql.*; 11 | import java.util.List; 12 | 13 | /** 14 | * Created by zcg on 2017/8/15. 
15 | */ 16 | @Component 17 | public class DBInitializer implements ApplicationListener { 18 | 19 | @Override 20 | public void onApplicationEvent(ContextRefreshedEvent event) { 21 | if (event.getApplicationContext().getDisplayName().contains("AnnotationConfigEmbeddedWebApplicationContext")) { 22 | UserService service = event.getApplicationContext().getBean(UserService.class); 23 | DataSource dataSource = event.getApplicationContext().getBean(DataSource.class); 24 | Connection connection = null; 25 | try { 26 | connection = dataSource.getConnection(); 27 | DatabaseMetaData meta = connection.getMetaData(); 28 | ResultSet rsTables = meta.getTables("test", null, "t_user", 29 | new String[]{"TABLE"}); 30 | if (!rsTables.next()) { 31 | Statement stmt = connection.createStatement(); 32 | 33 | String sql = "CREATE TABLE t_user ( " + 34 | " id INT PRIMARY KEY," + 35 | " username VARCHAR(20)," + 36 | " password VARCHAR(20)," + 37 | " name VARCHAR(20))"; 38 | 39 | stmt.executeUpdate(sql); 40 | } 41 | rsTables.close(); 42 | } catch (SQLException e) { 43 | e.printStackTrace(); 44 | } 45 | List list = service.findAll(); 46 | if (list == null || list.isEmpty()) { 47 | service.save(new User(1, "admin", "123456", "admin")); 48 | } 49 | } 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/interceptor/AuthInterceptor.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.interceptor; 2 | 3 | import com.szss.demo.entity.User; 4 | import lombok.extern.slf4j.Slf4j; 5 | import org.springframework.web.servlet.HandlerInterceptor; 6 | import org.springframework.web.servlet.ModelAndView; 7 | 8 | import javax.servlet.http.HttpServletRequest; 9 | import javax.servlet.http.HttpServletResponse; 10 | import javax.servlet.http.HttpSession; 11 | 12 | /** 13 | * Created by zcg on 2017/8/15. 14 | */ 15 | @Slf4j 16 | public class AuthInterceptor implements HandlerInterceptor { 17 | 18 | public static final String USER_KEY = "USER_KEY"; 19 | 20 | @Override 21 | public boolean preHandle(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object o) throws Exception { 22 | HttpSession session = httpServletRequest.getSession(); 23 | User user = (User) session.getAttribute(USER_KEY); 24 | if (user == null) { 25 | httpServletResponse.sendRedirect(httpServletRequest.getContextPath() + "/login"); 26 | log.debug("redirect to /login page!"); 27 | return false; 28 | } 29 | return true; 30 | } 31 | 32 | @Override 33 | public void postHandle(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object o, ModelAndView modelAndView) throws Exception { 34 | 35 | } 36 | 37 | @Override 38 | public void afterCompletion(HttpServletRequest httpServletRequest, HttpServletResponse httpServletResponse, Object o, Exception e) throws Exception { 39 | 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/mapper/UserMapper.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.mapper; 2 | 3 | import com.szss.demo.entity.User; 4 | import org.apache.ibatis.annotations.Insert; 5 | import org.apache.ibatis.annotations.Mapper; 6 | import org.apache.ibatis.annotations.Param; 7 | import org.apache.ibatis.annotations.Select; 8 | 9 | import java.util.List; 10 | 11 | /** 12 | * Created by zcg on 2017/8/15. 
13 | */ 14 | @Mapper 15 | public interface UserMapper { 16 | @Select("select id,username,name,password from t_user") 17 | List findAll(); 18 | 19 | @Select("select id,username,name,password from t_user where username=#{username} and password=#{password}") 20 | User find(@Param("username") String username, @Param("password") String password); 21 | 22 | @Insert("insert into t_user(id,username,name,password) values(#{id},#{username},#{name},#{password})") 23 | void save(User user); 24 | } 25 | -------------------------------------------------------------------------------- /demo/web/src/main/java/com/szss/demo/service/UserService.java: -------------------------------------------------------------------------------- 1 | package com.szss.demo.service; 2 | 3 | import com.szss.demo.entity.User; 4 | import com.szss.demo.mapper.UserMapper; 5 | import org.springframework.beans.factory.annotation.Autowired; 6 | import org.springframework.stereotype.Service; 7 | 8 | import java.util.List; 9 | 10 | /** 11 | * Created by zcg on 2017/8/15. 12 | */ 13 | @Service 14 | public class UserService { 15 | 16 | @Autowired 17 | public UserMapper userMapper; 18 | 19 | public void save(User user) { 20 | userMapper.save(user); 21 | } 22 | 23 | public User find(String username, String password) { 24 | return userMapper.find(username, password); 25 | } 26 | 27 | public List findAll() { 28 | return userMapper.findAll(); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /demo/web/src/main/resources/application.yaml: -------------------------------------------------------------------------------- 1 | #server: 2 | # port: 8080 3 | # 4 | #spring: 5 | # application: 6 | # name: demo-web 7 | # freemarker: 8 | # expose-request-attributes: true 9 | # expose-spring-macro-helpers: true 10 | # request-context-attribute: request 11 | # cache: false 12 | # settings: 13 | # auto_import: /spring.ftl as spring 14 | # resources: 15 | # chain: 16 | # enabled: true 17 | # redis: 18 | # host: 172.16.120.153 19 | # port: 30007 20 | # datasource: 21 | # url: jdbc:mariadb://172.16.120.153:30006/test?characterEncoding=utf-8 22 | # username: root 23 | # password: 123456 24 | # driver-class-name: org.mariadb.jdbc.Driver 25 | # initialSize: 10 26 | # maxActive: 20 27 | # minIdle: 1 28 | # maxWait: 60000 29 | # timeBetweenEvictionRunsMillis: 60000 30 | # minEvictableIdleTimeMillis: 300000 31 | # validation-query: select 1 32 | # test-while-idle: true 33 | # 34 | # 35 | #mybatis: 36 | # config-location: classpath:mybatis-config.xml 37 | # 38 | # 39 | #logging: 40 | # level: 41 | # root: info 42 | # com.szss: debug 43 | 44 | 45 | 46 | server: 47 | port: 8080 48 | 49 | spring: 50 | application: 51 | name: demo-web 52 | freemarker: 53 | expose-request-attributes: true 54 | expose-spring-macro-helpers: true 55 | request-context-attribute: request 56 | cache: false 57 | settings: 58 | auto_import: /spring.ftl as spring 59 | resources: 60 | chain: 61 | enabled: true 62 | redis: 63 | host: ${MY_REDIS_HOST} 64 | port: ${MY_REDIS_PORT} 65 | datasource: 66 | url: ${MY_DATASOURCE_URL} 67 | username: ${MY_DATASOURCE_USERNAME} 68 | password: ${MY_DATASOURCE_PASSWORD} 69 | driver-class-name: org.mariadb.jdbc.Driver 70 | initialSize: 10 71 | maxActive: 20 72 | minIdle: 1 73 | maxWait: 60000 74 | timeBetweenEvictionRunsMillis: 60000 75 | minEvictableIdleTimeMillis: 300000 76 | validation-query: select 1 77 | test-while-idle: true 78 | 79 | 80 | mybatis: 81 | config-location: classpath:mybatis-config.xml 82 | 83 | 
84 | logging: 85 | level: 86 | root: info 87 | com.szss: debug 88 | 89 | 90 | #server: 91 | # port: 8080 92 | # 93 | #spring: 94 | # application: 95 | # name: demo-web 96 | # freemarker: 97 | # expose-request-attributes: true 98 | # expose-spring-macro-helpers: true 99 | # request-context-attribute: request 100 | # cache: false 101 | # settings: 102 | # auto_import: /spring.ftl as spring 103 | # resources: 104 | # chain: 105 | # enabled: true 106 | # redis: 107 | # host: redis 108 | # datasource: 109 | # url: jdbc:mariadb://mysql:3306/test?characterEncoding=utf-8 110 | # username: root 111 | # password: 123456 112 | # driver-class-name: org.mariadb.jdbc.Driver 113 | # initialSize: 10 114 | # maxActive: 20 115 | # minIdle: 1 116 | # maxWait: 60000 117 | # timeBetweenEvictionRunsMillis: 60000 118 | # minEvictableIdleTimeMillis: 300000 119 | # validation-query: select 1 120 | # test-while-idle: true 121 | # 122 | # 123 | #mybatis: 124 | # config-location: classpath:mybatis-config.xml 125 | # 126 | # 127 | #logging: 128 | # level: 129 | # root: info 130 | 131 | -------------------------------------------------------------------------------- /demo/web/src/main/resources/mybatis-config.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | -------------------------------------------------------------------------------- /demo/web/src/main/resources/templates/index.ftl: -------------------------------------------------------------------------------- 1 | Hello ${user.name!""}!My ip is ${serverIP!""}! 2 | -------------------------------------------------------------------------------- /demo/web/src/main/resources/templates/login.ftl: -------------------------------------------------------------------------------- 1 | <#assign base=request.contextPath+"/webjars/adminlte/2.3.2"/> 2 | 3 | 4 | 5 | 6 | 7 | Genisys | 登录 8 | 9 | 10 | 11 | 12 | <#----> 13 | 14 | <#----> 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 34 | 35 | 36 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 93 | 94 | 95 | -------------------------------------------------------------------------------- /kubeadm/v.1.7.2/README.md: -------------------------------------------------------------------------------- 1 | # 1 环境 2 | 3 | 1~2台centos 7虚机 4 | 5 | | 虚机名称 | IP | 6 | | ------------- |:-------------:| 7 | | master | 172.16.120.151| 8 | | node01 | 172.16.120.152| 9 | 10 | kubeadm-master.sh和kubeadm-node.sh脚本采用kubeadm进行安装,采用国内镜像,安装简单快速。 11 | 12 | # 2 master节点安装 13 | ```bash 14 | sh kubeadm-master.sh 172.16.120.151 15 | ``` 16 | 17 | # 3 node节点安装 18 | ```bash 19 | sh kubeadm-node.sh 172.16.120.151 20 | ``` 21 | -------------------------------------------------------------------------------- /kubeadm/v.1.7.2/image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | #查看gcr.io镜像 5 | #https://console.cloud.google.com/kubernetes/images/list?location=GLOBAL&project=google-containers 6 | #通过下面的网址查看依赖镜像的版本号: 7 | #https://kubernetes.io/docs/admin/kubeadm/ 8 | set -o errexit 9 | set -o nounset 10 | set -o pipefail 11 | 12 | KUBE_VERSION=v1.7.2 13 | KUBE_PAUSE_VERSION=3.0 14 | ETCD_VERSION=3.0.17 15 | DNS_VERSION=1.14.4 16 | 17 | GCR_URL=gcr.io/google_containers 18 | ALIYUN_URL=registry.cn-hangzhou.aliyuncs.com/szss_k8s 
19 | 20 | images=(kube-proxy-amd64:${KUBE_VERSION} 21 | kube-scheduler-amd64:${KUBE_VERSION} 22 | kube-controller-manager-amd64:${KUBE_VERSION} 23 | kube-apiserver-amd64:${KUBE_VERSION} 24 | pause-amd64:${KUBE_PAUSE_VERSION} 25 | etcd-amd64:${ETCD_VERSION} 26 | k8s-dns-sidecar-amd64:${DNS_VERSION} 27 | k8s-dns-kube-dns-amd64:${DNS_VERSION} 28 | k8s-dns-dnsmasq-nanny-amd64:${DNS_VERSION}) 29 | 30 | 31 | for imageName in ${images[@]} ; do 32 | docker pull $GCR_URL/$imageName 33 | docker tag $GCR_URL/$imageName $ALIYUN_URL/$imageName 34 | docker push $ALIYUN_URL/$imageName 35 | docker rmi $ALIYUN_URL/$imageName 36 | done 37 | -------------------------------------------------------------------------------- /kubeadm/v.1.7.2/kubeadm-master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sh kubeadm-master.sh 172.16.120.151 4 | 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | 9 | MASTER_ADDRESS=${1:-"127.0.0.1"} 10 | KUBE_TOKEN=${2:-"863f67.19babbff7bfe8543"} 11 | DOCKER_MIRRORS=${3:-"https://5md0553g.mirror.aliyuncs.com"} 12 | KUBE_VERSION=1.7.2 13 | KUBE_PAUSE_VERSION=3.0 14 | KUBE_CNI_VERSION=0.5.1 15 | ETCD_VERSION=3.0.17 16 | FLANNEL_VERSION=v0.8.0 17 | 18 | echo '============================================================' 19 | echo '====================Disable selinux and firewalld...========' 20 | echo '============================================================' 21 | if [ $(getenforce) = "Enabled" ]; then 22 | setenforce 0 23 | fi 24 | systemctl disable firewalld 25 | systemctl stop firewalld 26 | 27 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 28 | 29 | cat >> /etc/sysctl.d/k8s.conf <> /etc/yum.repos.d/docker.repo <> /etc/yum.repos.d/kubernetes.repo < /etc/docker/daemon.json < /etc/systemd/system/kubelet.service.d/20-pod-infra-image.conf <.<16 character string>,指定token后可以通过cat /etc/kubernetes/pki/tokens.csv查看 124 | kubeadm init --apiserver-advertise-address=${MASTER_ADDRESS} --kubernetes-version=v${KUBE_VERSION} --token=${KUBE_TOKEN} --pod-network-cidr=10.244.0.0/16 125 | 126 | #查看token的命令 127 | echo "you can use this order to query the token: kubeadm token list" 128 | 129 | echo '============================================================' 130 | echo '=====================Config admin...========================' 131 | echo '============================================================' 132 | mkdir -p $HOME/.kube 133 | cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 134 | chown $(id -u):$(id -g) $HOME/.kube/config 135 | echo "Config admin success!" 136 | 137 | echo '============================================================' 138 | echo '==============Create flannel service...=====================' 139 | echo '============================================================' 140 | kubectl --namespace kube-system apply -f https://raw.githubusercontent.com/coreos/flannel/${FLANNEL_VERSION}/Documentation/kube-flannel-rbac.yml 141 | rm -rf ./kube-flannel.yml 142 | wget https://raw.githubusercontent.com/coreos/flannel/${FLANNEL_VERSION}/Documentation/kube-flannel.yml 143 | sed -i 's/quay.io\/coreos\/flannel/registry.cn-hangzhou.aliyuncs.com\/szss_k8s\/flannel/g' ./kube-flannel.yml 144 | kubectl --namespace kube-system apply -f ./kube-flannel.yml 145 | echo "Flannel created!" 
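# --- Optional verification (added sketch, not part of the original script) ---
# After kubeadm init and the flannel apply above, the control-plane pods should
# reach Running within a few minutes; '|| true' keeps errexit from aborting.
kubectl get pods --namespace kube-system -o wide || true
kubectl get nodes || true
# Worker nodes are added with kubeadm-node.sh; for kubeadm v1.7.x that is
# roughly a join against the same token:
#   kubeadm join --token ${KUBE_TOKEN} ${MASTER_ADDRESS}:6443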
146 | -------------------------------------------------------------------------------- /kubeadm/v.1.7.2/kubeadm-node.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sh kubeadm-node.sh 172.16.120.151 4 | 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | 9 | MASTER_ADDRESS=${1:-"127.0.0.1"} 10 | KUBE_TOKEN=${2:-"863f67.19babbff7bfe8543"} 11 | DOCKER_MIRRORS=${3:-"https://5md0553g.mirror.aliyuncs.com"} 12 | KUBE_VERSION=1.7.2 13 | KUBE_PAUSE_VERSION=3.0 14 | KUBE_CNI_VERSION=0.5.1 15 | ETCD_VERSION=3.0.17 16 | 17 | echo '============================================================' 18 | echo '====================Disable selinux and firewalld...========' 19 | echo '============================================================' 20 | if [ $(getenforce) = "Enabled" ]; then 21 | setenforce 0 22 | fi 23 | systemctl disable firewalld 24 | systemctl stop firewalld 25 | 26 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 27 | 28 | cat >> /etc/sysctl.d/k8s.conf <> /etc/yum.repos.d/docker.repo <> /etc/yum.repos.d/kubernetes.repo < /etc/docker/daemon.json < /etc/systemd/system/kubelet.service.d/20-pod-infra-image.conf < 注意:192.168.0.1为master节点IP 24 | 25 | slave节点执行下面命令,slave节点只安装docker环境: 26 | ```bash 27 | sh setup-db.sh slave 28 | ``` 29 | 30 | # 3 rancher HA 31 | rancher高可用至少要3个节点。 32 | ```bash 33 | sh setup-db.sh master 192.168.0.1 192.168.0.10 34 | ``` 35 | 192.168.0.1为当前节点IP 36 | 192.168.0.10为mysql数据库IP 37 | 38 | # 4 rancher HA模式添加rancher主节点作为host需要配置负载均衡 39 | 使用setup-db.sh构建一个rancher集群,如果需要将rancher主节点添加为host,需要在每个主节点在部署一个nginx用于负载3个主节点,rancher界面添加每个rancher节点之前需要修改主机注册URL,该URL为该主机的IP加本机运行的nginx端口号。 40 | 删除添加的主节点时不要点击疏散该节点,这样会导致rancher-server被停止。 41 | 42 | 添加子节点也可以使用上面的方式,如果在阿里云,可以使用SLB服务对三个主节点做内网负载,但是SLB的地址服务用来添加rancher主节点作为host. 
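A minimal sketch of the per-node nginx load balancer described above, using the nginx.conf in this directory. The container name rancher-server and host port 8081 are assumptions for illustration (use whatever name the local rancher container actually has); the host registration URL for that node then becomes http://<node-ip>:8081.
```bash
docker run -d --name rancher-lb \
  --link rancher-server:rancher-server \
  -p 8081:80 \
  -v $(pwd)/nginx.conf:/etc/nginx/nginx.conf:ro \
  nginx:alpine
```
setup-ha-nginx-lb.sh in this directory appears to script the same step; the command above is only an illustration.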
43 | # 5 rancher上部署kubernetes 44 | 45 | master节点安装完成后,添加节点时主机连接Rancher API的Base URL请使用内网地址,参考【原生加速中国区Kubernetes安装】https://www.cnrancher.com/kubernetes-installation/ 安装kubernetes。 46 | 47 | rancher提供关闭自动安装kubernetes addones的选项,dashboard、dns等组件需要自行安装。 48 | 49 | 从rancher页面生成kubeconfig配置文件,保存到master节点~/.kube/config文件中,下载kubectl文件到master节点/usr/bin目录,设置文件为可执行文件 chmoc a+x /usr/bin/kubectl。kubectl已经上传到百度网盘 https://pan.baidu.com/s/1jH9dsF0 50 | 51 | 参考: 52 | 53 | 【原生加速中国区Kubernetes安装】https://www.cnrancher.com/kubernetes-installation/ 54 | 55 | 【Installing Rancher Server】http://rancher.com/docs/rancher/latest/en/installing-rancher/installing-server/#multi-nodes 56 | 57 | # 6 安装重置 58 | 先在rancher界面删除所有节点,再在每个节点执行reset.sh脚步。 59 | 60 | # 7 主节点不做pod调度 61 | ```bash 62 | kubectl label nodes k8s-master01 node-role.kubernetes.io/master= 63 | kubectl taint nodes k8s-master01 node-role.kubernetes.io/master=:NoSchedule 64 | ``` 65 | 66 | 参考: 67 | 【rancher server的安装】http://rancher.com/docs/rancher/v1.6/zh/installing-rancher/installing-server/ 68 | 69 | 【rancher server的升级】http://rancher.com/docs/rancher/v1.6/zh/upgrading/ 70 | -------------------------------------------------------------------------------- /rancher/nginx.conf: -------------------------------------------------------------------------------- 1 | #user nobody; 2 | worker_processes 4; 3 | 4 | #error_log logs/error.log; 5 | #error_log logs/error.log notice; 6 | #error_log logs/error.log info; 7 | 8 | #pid logs/nginx.pid; 9 | #pid /usr/local/nginx/nginx.pid; 10 | 11 | events { 12 | #use epoll; 13 | worker_connections 102400; 14 | } 15 | 16 | 17 | http { 18 | include mime.types; 19 | default_type application/octet-stream; 20 | 21 | fastcgi_intercept_errors on; 22 | charset utf-8; 23 | server_names_hash_bucket_size 128; 24 | client_header_buffer_size 4k; 25 | large_client_header_buffers 4 32k; 26 | client_max_body_size 300m; 27 | sendfile on; 28 | tcp_nopush on; 29 | keepalive_timeout 60; 30 | tcp_nodelay on; 31 | client_body_buffer_size 512k; 32 | proxy_connect_timeout 5; 33 | proxy_read_timeout 60; 34 | proxy_send_timeout 5; 35 | proxy_buffer_size 16k; 36 | proxy_buffers 4 64k; 37 | proxy_busy_buffers_size 128k; 38 | proxy_temp_file_write_size 128k; 39 | gzip on; 40 | gzip_min_length 1k; 41 | gzip_buffers 4 16k; 42 | gzip_http_version 1.1; 43 | gzip_comp_level 2; 44 | gzip_types text/plain application/x-javascript text/css application/xml application/json; 45 | gzip_vary on; 46 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 47 | '$status $body_bytes_sent "$http_referer" ' 48 | '"$http_user_agent" "$http_x_forwarded_for"'; 49 | 50 | 51 | upstream rancher { 52 | server rancher-server:8080; 53 | } 54 | 55 | map $http_upgrade $connection_upgrade { 56 | default Upgrade; 57 | '' close; 58 | } 59 | 60 | server { 61 | listen 80; 62 | server_name rancher-server-nginx; 63 | 64 | location / { 65 | proxy_set_header Host $host; 66 | proxy_set_header X-Forwarded-Proto $scheme; 67 | proxy_set_header X-Forwarded-Port $server_port; 68 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 69 | proxy_pass http://rancher; 70 | proxy_http_version 1.1; 71 | proxy_set_header Upgrade $http_upgrade; 72 | proxy_set_header Connection $connection_upgrade; 73 | # This allows the ability for the execute shell window to remain open for up to 15 minutes. Without this parameter, the default is 1 minute and will automatically close. 
74 | proxy_read_timeout 10s; 75 | } 76 | } 77 | 78 | } 79 | 80 | 81 | 82 | -------------------------------------------------------------------------------- /rancher/reset.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | docker stop $(docker ps -q) 8 | docker rm $(docker ps -a -q) 9 | if [ -d "/var/etcd/backups" ]; then 10 | rm -rf /var/etcd/backups 11 | fi 12 | if [ -d "/mnt/data/rancher-mariadb" ]; then 13 | rm -rf /mnt/data/rancher-mariadb 14 | fi 15 | 16 | -------------------------------------------------------------------------------- /rancher/setup-db.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #sh setup-db.sh master 192.168.0.1 4 | 5 | set -o errexit 6 | set -o nounset 7 | set -o pipefail 8 | 9 | # NODE_TYPE表示节点类型,取值为master,slave 10 | NODE_TYPE=${1:-"master"} 11 | MASTER_ADDRESS=${2:-"127.0.0.1"} 12 | DOCKER_MIRRORS=${3:-"https://5md0553g.mirror.aliyuncs.com"} 13 | DOCKER_GRAPH=${4:-"/mnt/docker"} 14 | 15 | RANCHER_VERSION=v1.6.11 16 | 17 | echo '============================================================' 18 | echo '====================Disable selinux and firewalld...========' 19 | echo '============================================================' 20 | # 关闭selinux 21 | if [ $(getenforce) = "Enabled" ]; then 22 | setenforce 0 23 | fi 24 | 25 | # 关闭防火墙 26 | systemctl disable firewalld 27 | systemctl stop firewalld 28 | 29 | # selinux设置为disabled 30 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 31 | 32 | # Kubernetes 1.8开始要求关闭系统的Swap,如果不关闭,默认配置下kubelet将无法启动。可以通过kubelet的启动参数–fail-swap-on=false更改这个限制。 33 | # 修改 /etc/fstab 文件,注释掉 SWAP 的自动挂载,使用free -m确认swap已经关闭。 34 | swapoff -a 35 | 36 | echo "Disable selinux and firewalld success!" 37 | 38 | echo '============================================================' 39 | echo '====================Add docker yum repo...==================' 40 | echo '============================================================' 41 | #aliyun docker yum源 42 | #cat >> /etc/yum.repos.d/docker.repo <> /etc/yum.repos.d/docker.repo < /etc/docker/daemon.json < $HOME/cattle.sql < /etc/nginx/nginx.conf <> /etc/yum.repos.d/docker.repo <> /etc/yum.repos.d/docker.repo < /etc/docker/daemon.json <> /etc/yum.repos.d/docker.repo <> /etc/yum.repos.d/docker.repo < /etc/docker/daemon.json <> /etc/yum.repos.d/docker.repo <> /etc/yum.repos.d/docker.repo < /etc/docker/daemon.json </usr/lib/systemd/system/ceph-monitor.service 108 | [Unit] 109 | Description=ceph monitor 110 | Documentation=https://github.com/ceph/ceph 111 | After=docker.service 112 | Requires=docker.service 113 | [Service] 114 | EnvironmentFile=-${CEPH_CFG_DIR}/config 115 | ExecStart=docker run -d --name=mon --net=host \\ 116 | --restart=always \\ 117 | -v /etc/ceph:/etc/ceph \\ 118 | -v /var/lib/ceph:/var/lib/ceph \\ 119 | -e MON_IP=192.168.0.1 \ 120 | -e CEPH_PUBLIC_NETWORK=192.168.0.0/24 \\ 121 | ceph/daemon:tag-build-master-jewel-centos-7 mon 122 | Restart=on-failure 123 | [Install] 124 | WantedBy=multi-user.target 125 | EOF 126 | 127 | echo "Rancher server start success!" 
128 | fi 129 | -------------------------------------------------------------------------------- /yaml/ceph/ceph-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: ceph-secret 5 | data: 6 | key: QVFEUWpQNVpGRE5WR0JBQWlFRG9tUG5lelJZYWdyTHB1QzlJS0E9PQo= 7 | -------------------------------------------------------------------------------- /yaml/ceph/cephfs-with-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: cephfs2 5 | spec: 6 | containers: 7 | - name: cephfs-rw 8 | image: nginx 9 | volumeMounts: 10 | - mountPath: "/mnt/cephfs" 11 | name: cephfs 12 | volumes: 13 | - name: cephfs 14 | cephfs: 15 | monitors: 16 | - 10.135.204.131:6789 17 | - 10.135.204.157:6789 18 | user: admin 19 | secretRef: 20 | name: ceph-secret 21 | readOnly: false 22 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring-for-rancher/README.md: -------------------------------------------------------------------------------- 1 | rancher上部署heapster,如果直接使用kubernetes的默认脚本,会出现下面的错误: 2 | ```bash 3 | https://kubernetes.default/api/v1/nodes: x509: certificate is valid for 10.43.0.1, kubernetes.default.svc.cluster.local, kubernetes, kubernetes.kubernetes, kubernetes.kubernetes.rancher.internal, not kubernetes.default 4 | ``` 5 | 出现上面错误的原因是rancher生成证书的时候,没有添加kubernetes.default这个dns名称,选取以上任何一种都是可以的。 6 | 7 | 下面需要修改heapster的source参数,内容为 --source=kubernetes:https://kubernetes.kubernetes:6443?inClusterConfig=true&insecure=true,详细配置如下: 8 | ```bash 9 | apiVersion: extensions/v1beta1 10 | kind: Deployment 11 | metadata: 12 | name: heapster 13 | namespace: kube-system 14 | spec: 15 | replicas: 1 16 | template: 17 | metadata: 18 | labels: 19 | task: monitoring 20 | k8s-app: heapster 21 | spec: 22 | serviceAccountName: heapster 23 | containers: 24 | - name: heapster 25 | image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-amd64:v1.4.2 26 | imagePullPolicy: IfNotPresent 27 | command: 28 | - /heapster 29 | - --source=kubernetes:https://kubernetes.kubernetes:6443?inClusterConfig=true&insecure=true 30 | - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086 31 | ``` 32 | 33 | 由于rancher对kubernetes的接口重新进行了封装,rancher上是无法通过apiserver的方式访问grafana的,所以我们把grafana通过nodePort的方式对外提供服务。 34 | 35 | 安装完成后,如果kubernetes dashboard中没有显示cpu和内存信息,重新安装dashboard即可。 36 | 37 | 参考: 38 | kubernetes安装heapster、influxdb及grafana: 39 | http://www.jianshu.com/p/60069089c981 40 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring-for-rancher/grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: monitoring-grafana 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | task: monitoring 13 | k8s-app: grafana 14 | spec: 15 | containers: 16 | - name: grafana 17 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/heapster-grafana-amd64:v4.4.1 18 | ports: 19 | - containerPort: 3000 20 | protocol: TCP 21 | volumeMounts: 22 | - mountPath: /var 23 | name: grafana-storage 24 | env: 25 | - name: INFLUXDB_HOST 26 | value: monitoring-influxdb 27 | - name: GF_SERVER_HTTP_PORT 28 | value: "3000" 29 | # The following env variables are required to make Grafana accessible via 30 | # the 
kubernetes api-server proxy. On production clusters, we recommend 31 | # removing these env variables, setup auth for grafana, and expose the grafana 32 | # service using a LoadBalancer or a public IP. 33 | - name: GF_AUTH_BASIC_ENABLED 34 | value: "false" 35 | - name: GF_AUTH_ANONYMOUS_ENABLED 36 | value: "true" 37 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 38 | value: Admin 39 | - name: GF_SERVER_ROOT_URL 40 | # If you're only using the API Server proxy, set this value instead: 41 | # value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ 42 | value: / 43 | volumes: 44 | - name: grafana-storage 45 | emptyDir: {} 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | labels: 51 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 52 | # If you are NOT using this as an addon, you should comment out this line. 53 | kubernetes.io/cluster-service: 'true' 54 | kubernetes.io/name: monitoring-grafana 55 | name: monitoring-grafana 56 | namespace: kube-system 57 | spec: 58 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer 59 | # or through a public IP. 60 | # type: LoadBalancer 61 | # You could also use NodePort to expose the service at a randomly-generated port 62 | # type: NodePort 63 | type: NodePort 64 | ports: 65 | - port: 80 66 | targetPort: 3000 67 | nodePort: 30000 68 | selector: 69 | k8s-app: grafana 70 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring-for-rancher/heapster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: heapster 6 | namespace: kube-system 7 | --- 8 | apiVersion: extensions/v1beta1 9 | kind: Deployment 10 | metadata: 11 | name: heapster 12 | namespace: kube-system 13 | spec: 14 | replicas: 1 15 | template: 16 | metadata: 17 | labels: 18 | task: monitoring 19 | k8s-app: heapster 20 | spec: 21 | serviceAccountName: heapster 22 | containers: 23 | - name: heapster 24 | image: registry.cn-hangzhou.aliyuncs.com/google-containers/heapster-amd64:v1.4.2 25 | imagePullPolicy: IfNotPresent 26 | command: 27 | - /heapster 28 | - --source=kubernetes:https://kubernetes.kubernetes:6443?inClusterConfig=true&insecure=true 29 | - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | labels: 35 | task: monitoring 36 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 37 | # If you are NOT using this as an addon, you should comment out this line. 
38 | kubernetes.io/cluster-service: 'true' 39 | kubernetes.io/name: Heapster 40 | name: heapster 41 | namespace: kube-system 42 | spec: 43 | ports: 44 | - port: 80 45 | targetPort: 8082 46 | selector: 47 | k8s-app: heapster 48 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring-for-rancher/influxdb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: monitoring-influxdb 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | task: monitoring 13 | k8s-app: influxdb 14 | spec: 15 | containers: 16 | - name: influxdb 17 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/heapster-influxdb-amd64:v1.1.1 18 | volumeMounts: 19 | - mountPath: /data 20 | name: influxdb-storage 21 | volumes: 22 | - name: influxdb-storage 23 | emptyDir: {} 24 | --- 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | labels: 29 | task: monitoring 30 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 31 | # If you are NOT using this as an addon, you should comment out this line. 32 | kubernetes.io/cluster-service: 'true' 33 | kubernetes.io/name: monitoring-influxdb 34 | name: monitoring-influxdb 35 | namespace: kube-system 36 | spec: 37 | ports: 38 | - port: 8086 39 | targetPort: 8086 40 | selector: 41 | k8s-app: influxdb 42 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring/README.md: -------------------------------------------------------------------------------- 1 | 安装完成后,如果kubernetes dashboard中没有显示cpu和内存信息,重新安装dashboard即可。 2 | 3 | 参考: 4 | kubernetes安装heapster、influxdb及grafana: 5 | http://www.jianshu.com/p/60069089c981 6 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring/grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: monitoring-grafana 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | task: monitoring 13 | k8s-app: grafana 14 | spec: 15 | containers: 16 | - name: grafana 17 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/heapster-grafana-amd64:v4.4.1 18 | ports: 19 | - containerPort: 3000 20 | protocol: TCP 21 | volumeMounts: 22 | - mountPath: /var 23 | name: grafana-storage 24 | env: 25 | - name: INFLUXDB_HOST 26 | value: monitoring-influxdb 27 | - name: GF_SERVER_HTTP_PORT 28 | value: "3000" 29 | # The following env variables are required to make Grafana accessible via 30 | # the kubernetes api-server proxy. On production clusters, we recommend 31 | # removing these env variables, setup auth for grafana, and expose the grafana 32 | # service using a LoadBalancer or a public IP. 
33 | - name: GF_AUTH_BASIC_ENABLED 34 | value: "false" 35 | - name: GF_AUTH_ANONYMOUS_ENABLED 36 | value: "true" 37 | - name: GF_AUTH_ANONYMOUS_ORG_ROLE 38 | value: Admin 39 | - name: GF_SERVER_ROOT_URL 40 | # If you're only using the API Server proxy, set this value instead: 41 | # value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ 42 | value: / 43 | volumes: 44 | - name: grafana-storage 45 | emptyDir: {} 46 | --- 47 | apiVersion: v1 48 | kind: Service 49 | metadata: 50 | labels: 51 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 52 | # If you are NOT using this as an addon, you should comment out this line. 53 | kubernetes.io/cluster-service: 'true' 54 | kubernetes.io/name: monitoring-grafana 55 | name: monitoring-grafana 56 | namespace: kube-system 57 | spec: 58 | # In a production setup, we recommend accessing Grafana through an external Loadbalancer 59 | # or through a public IP. 60 | # type: LoadBalancer 61 | # You could also use NodePort to expose the service at a randomly-generated port 62 | # type: NodePort 63 | ports: 64 | - port: 80 65 | targetPort: 3000 66 | selector: 67 | k8s-app: grafana 68 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring/heapster.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: heapster 6 | namespace: kube-system 7 | --- 8 | apiVersion: extensions/v1beta1 9 | kind: Deployment 10 | metadata: 11 | name: heapster 12 | namespace: kube-system 13 | spec: 14 | replicas: 1 15 | template: 16 | metadata: 17 | labels: 18 | task: monitoring 19 | k8s-app: heapster 20 | spec: 21 | serviceAccountName: heapster 22 | containers: 23 | - name: heapster 24 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/heapster-amd64:v1.3.0 25 | imagePullPolicy: IfNotPresent 26 | command: 27 | - /heapster 28 | - --source=kubernetes:https://kubernetes.default 29 | - --sink=influxdb:http://monitoring-influxdb.kube-system.svc:8086 30 | --- 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | labels: 35 | task: monitoring 36 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 37 | # If you are NOT using this as an addon, you should comment out this line. 
38 | kubernetes.io/cluster-service: 'true' 39 | kubernetes.io/name: Heapster 40 | name: heapster 41 | namespace: kube-system 42 | spec: 43 | ports: 44 | - port: 80 45 | targetPort: 8082 46 | selector: 47 | k8s-app: heapster 48 | -------------------------------------------------------------------------------- /yaml/cluster-monitoring/influxdb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: extensions/v1beta1 3 | kind: Deployment 4 | metadata: 5 | name: monitoring-influxdb 6 | namespace: kube-system 7 | spec: 8 | replicas: 1 9 | template: 10 | metadata: 11 | labels: 12 | task: monitoring 13 | k8s-app: influxdb 14 | spec: 15 | containers: 16 | - name: influxdb 17 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/heapster-influxdb-amd64:v1.1.1 18 | volumeMounts: 19 | - mountPath: /data 20 | name: influxdb-storage 21 | volumes: 22 | - name: influxdb-storage 23 | emptyDir: {} 24 | --- 25 | apiVersion: v1 26 | kind: Service 27 | metadata: 28 | labels: 29 | task: monitoring 30 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 31 | # If you are NOT using this as an addon, you should comment out this line. 32 | kubernetes.io/cluster-service: 'true' 33 | kubernetes.io/name: monitoring-influxdb 34 | name: monitoring-influxdb 35 | namespace: kube-system 36 | spec: 37 | ports: 38 | - port: 8086 39 | targetPort: 8086 40 | selector: 41 | k8s-app: influxdb 42 | -------------------------------------------------------------------------------- /yaml/dashboard/README.md: -------------------------------------------------------------------------------- 1 | 在master节点使用kubectl proxy命令就可以使API server监听在本地的8001端口上: 2 | ```bash 3 | kubectl proxy --address='0.0.0.0' --accept-hosts='^*$' 4 | ``` 5 | 后台执行: 6 | ```bash 7 | nohup kubectl proxy --address='0.0.0.0' --accept-hosts='^*$' >/dev/null 2>&1 & 8 | ``` 9 | 10 | 在浏览器访问master节点:http://master-ip:8001/ui 11 | 12 | 13 | 参考:http://blog.csdn.net/cuipengchong/article/details/72459299 14 | 15 | -------------------------------------------------------------------------------- /yaml/dashboard/kubernetes-dashboard.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2015 Google Inc. All Rights Reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Configuration to deploy release version of the Dashboard UI compatible with 16 | # Kubernetes 1.6 (RBAC enabled). 
17 | # 18 | # Example usage: kubectl create -f 19 | 20 | apiVersion: v1 21 | kind: ServiceAccount 22 | metadata: 23 | labels: 24 | k8s-app: kubernetes-dashboard 25 | name: kubernetes-dashboard 26 | namespace: kube-system 27 | --- 28 | apiVersion: rbac.authorization.k8s.io/v1beta1 29 | kind: ClusterRoleBinding 30 | metadata: 31 | name: kubernetes-dashboard 32 | labels: 33 | k8s-app: kubernetes-dashboard 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: cluster-admin 38 | subjects: 39 | - kind: ServiceAccount 40 | name: kubernetes-dashboard 41 | namespace: kube-system 42 | --- 43 | kind: Deployment 44 | apiVersion: extensions/v1beta1 45 | metadata: 46 | labels: 47 | k8s-app: kubernetes-dashboard 48 | name: kubernetes-dashboard 49 | namespace: kube-system 50 | spec: 51 | replicas: 1 52 | revisionHistoryLimit: 10 53 | selector: 54 | matchLabels: 55 | k8s-app: kubernetes-dashboard 56 | template: 57 | metadata: 58 | labels: 59 | k8s-app: kubernetes-dashboard 60 | spec: 61 | containers: 62 | - name: kubernetes-dashboard 63 | image: registry.cn-hangzhou.aliyuncs.com/google-containers/kubernetes-dashboard-amd64:v1.6.3 64 | ports: 65 | - containerPort: 9090 66 | protocol: TCP 67 | args: 68 | # Uncomment the following line to manually specify Kubernetes API server Host 69 | # If not specified, Dashboard will attempt to auto discover the API server and connect 70 | # to it. Uncomment only if the default does not work. 71 | # - --apiserver-host=http://my-address:port 72 | livenessProbe: 73 | httpGet: 74 | path: / 75 | port: 9090 76 | initialDelaySeconds: 30 77 | timeoutSeconds: 30 78 | serviceAccountName: kubernetes-dashboard 79 | # Comment the following tolerations if Dashboard must not be deployed on master 80 | tolerations: 81 | - key: node-role.kubernetes.io/master 82 | effect: NoSchedule 83 | --- 84 | kind: Service 85 | apiVersion: v1 86 | metadata: 87 | labels: 88 | k8s-app: kubernetes-dashboard 89 | name: kubernetes-dashboard 90 | namespace: kube-system 91 | spec: 92 | ports: 93 | - port: 80 94 | targetPort: 9090 95 | selector: 96 | k8s-app: kubernetes-dashboard 97 | -------------------------------------------------------------------------------- /yaml/efk/README.md: -------------------------------------------------------------------------------- 1 | # kibana和elasticsearch访问地址 2 | 由于rancher对kubernetes的接口重新进行了封装,rancher上是无法通过apiserver的方式访问kibana和elasticsearch的,所以我们把kibana和elasticsearch通过nodePort的方式对外提供服务。 3 | 4 | # 开启节点日志收集 5 | 脚步已经在kubespray 2.1.2版本测试通过。 6 | 7 | 在需要收集日志的节点上打上beta.kubernetes.io/fluentd-ds-ready=true的标签。 8 | ```bash 9 | kubectl label nodes beta.kubernetes.io/fluentd-ds-ready=true 10 | ``` 11 | # 注意事项 12 | fluentd的sepc.template.spec.volumes.hostPath配置的路径必须和sepc.template.spec.containers.volumeMounts.mountPath配置的路径一致。更多内容请参考:https://github.com/kubernetes/minikube/issues/876 13 | 14 | 15 | 参考: 16 | https://my.oschina.net/newlife111/blog/714574 17 | 18 | 19 | http://tonybai.com/2017/03/03/implement-kubernetes-cluster-level-logging-with-fluentd-and-elasticsearch-stack/ 20 | -------------------------------------------------------------------------------- /yaml/efk/es-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | --- 11 | kind: ClusterRole 12 | 
apiVersion: rbac.authorization.k8s.io/v1 13 | metadata: 14 | name: elasticsearch-logging 15 | labels: 16 | k8s-app: elasticsearch-logging 17 | kubernetes.io/cluster-service: "true" 18 | addonmanager.kubernetes.io/mode: Reconcile 19 | rules: 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - "services" 24 | - "namespaces" 25 | - "endpoints" 26 | verbs: 27 | - "get" 28 | --- 29 | kind: ClusterRoleBinding 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | metadata: 32 | namespace: kube-system 33 | name: elasticsearch-logging 34 | labels: 35 | k8s-app: elasticsearch-logging 36 | kubernetes.io/cluster-service: "true" 37 | addonmanager.kubernetes.io/mode: Reconcile 38 | subjects: 39 | - kind: ServiceAccount 40 | name: elasticsearch-logging 41 | namespace: kube-system 42 | apiGroup: "" 43 | roleRef: 44 | kind: ClusterRole 45 | name: elasticsearch-logging 46 | apiGroup: "" 47 | --- 48 | apiVersion: v1 49 | kind: ReplicationController 50 | metadata: 51 | name: elasticsearch-logging-v1 52 | namespace: kube-system 53 | labels: 54 | k8s-app: elasticsearch-logging 55 | version: v1 56 | kubernetes.io/cluster-service: "true" 57 | addonmanager.kubernetes.io/mode: Reconcile 58 | spec: 59 | replicas: 2 60 | selector: 61 | k8s-app: elasticsearch-logging 62 | version: v1 63 | template: 64 | metadata: 65 | labels: 66 | k8s-app: elasticsearch-logging 67 | version: v1 68 | kubernetes.io/cluster-service: "true" 69 | spec: 70 | serviceAccountName: elasticsearch-logging 71 | containers: 72 | - image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/elasticsearch:v2.4.1-2 73 | name: elasticsearch-logging 74 | resources: 75 | # need more cpu upon initialization, therefore burstable class 76 | limits: 77 | cpu: 1000m 78 | requests: 79 | cpu: 100m 80 | ports: 81 | - containerPort: 9200 82 | name: db 83 | protocol: TCP 84 | - containerPort: 9300 85 | name: transport 86 | protocol: TCP 87 | volumeMounts: 88 | - name: es-persistent-storage 89 | mountPath: /data 90 | env: 91 | - name: "NAMESPACE" 92 | valueFrom: 93 | fieldRef: 94 | fieldPath: metadata.namespace 95 | volumes: 96 | - name: es-persistent-storage 97 | emptyDir: {} 98 | -------------------------------------------------------------------------------- /yaml/efk/es-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: elasticsearch-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: elasticsearch-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | kubernetes.io/name: "Elasticsearch" 11 | spec: 12 | type: NodePort 13 | ports: 14 | - port: 9200 15 | protocol: TCP 16 | nodePort: 30011 17 | targetPort: db 18 | selector: 19 | k8s-app: elasticsearch-logging 20 | -------------------------------------------------------------------------------- /yaml/efk/fluentd-es-ds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: fluentd-es 5 | namespace: kube-system 6 | labels: 7 | k8s-app: fluentd-es 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | --- 11 | kind: ClusterRole 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | metadata: 14 | name: fluentd-es 15 | labels: 16 | k8s-app: fluentd-es 17 | kubernetes.io/cluster-service: "true" 18 | addonmanager.kubernetes.io/mode: Reconcile 19 | rules: 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - "namespaces" 24 | - "pods" 25 | verbs: 26 
| - "get" 27 | - "watch" 28 | - "list" 29 | --- 30 | kind: ClusterRoleBinding 31 | apiVersion: rbac.authorization.k8s.io/v1 32 | metadata: 33 | name: fluentd-es 34 | labels: 35 | k8s-app: fluentd-es 36 | kubernetes.io/cluster-service: "true" 37 | addonmanager.kubernetes.io/mode: Reconcile 38 | subjects: 39 | - kind: ServiceAccount 40 | name: fluentd-es 41 | namespace: kube-system 42 | apiGroup: "" 43 | roleRef: 44 | kind: ClusterRole 45 | name: fluentd-es 46 | apiGroup: "" 47 | --- 48 | apiVersion: extensions/v1beta1 49 | kind: DaemonSet 50 | metadata: 51 | name: fluentd-es-v1.22 52 | namespace: kube-system 53 | labels: 54 | k8s-app: fluentd-es 55 | kubernetes.io/cluster-service: "true" 56 | addonmanager.kubernetes.io/mode: Reconcile 57 | version: v1.22 58 | spec: 59 | template: 60 | metadata: 61 | labels: 62 | k8s-app: fluentd-es 63 | kubernetes.io/cluster-service: "true" 64 | version: v1.22 65 | # This annotation ensures that fluentd does not get evicted if the node 66 | # supports critical pod annotation based priority scheme. 67 | # Note that this does not guarantee admission on the nodes (#40573). 68 | annotations: 69 | scheduler.alpha.kubernetes.io/critical-pod: '' 70 | spec: 71 | serviceAccountName: fluentd-es 72 | containers: 73 | - name: fluentd-es 74 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/fluentd-elasticsearch:1.22 75 | command: 76 | - '/bin/sh' 77 | - '-c' 78 | - '/usr/sbin/td-agent 2>&1 >> /var/log/fluentd.log' 79 | resources: 80 | limits: 81 | memory: 200Mi 82 | requests: 83 | cpu: 100m 84 | memory: 200Mi 85 | volumeMounts: 86 | - name: varlog 87 | mountPath: /var/log 88 | - name: varlibdockercontainers 89 | mountPath: /mnt/docker/containers 90 | readOnly: true 91 | nodeSelector: 92 | beta.kubernetes.io/fluentd-ds-ready: "true" 93 | tolerations: 94 | - key : "node.alpha.kubernetes.io/ismaster" 95 | effect: "NoSchedule" 96 | terminationGracePeriodSeconds: 30 97 | volumes: 98 | - name: varlog 99 | hostPath: 100 | path: /var/log 101 | - name: varlibdockercontainers 102 | hostPath: 103 | path: /mnt/docker/containers 104 | -------------------------------------------------------------------------------- /yaml/efk/kibana-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | k8s-app: kibana-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | spec: 11 | replicas: 1 12 | selector: 13 | matchLabels: 14 | k8s-app: kibana-logging 15 | template: 16 | metadata: 17 | labels: 18 | k8s-app: kibana-logging 19 | spec: 20 | containers: 21 | - name: kibana-logging 22 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/kibana:v4.6.1-1 23 | resources: 24 | # keep request = limit to keep this container in guaranteed class 25 | limits: 26 | cpu: 100m 27 | requests: 28 | cpu: 100m 29 | env: 30 | - name: "ELASTICSEARCH_URL" 31 | value: "http://elasticsearch-logging:9200" 32 | - name: "KIBANA_BASE_URL" 33 | value: "/api/v1/proxy/namespaces/kube-system/services/kibana-logging" 34 | ports: 35 | - containerPort: 5601 36 | name: ui 37 | protocol: TCP 38 | -------------------------------------------------------------------------------- /yaml/efk/kibana-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kibana-logging 5 | namespace: kube-system 6 | labels: 7 | 
k8s-app: kibana-logging 8 | kubernetes.io/cluster-service: "true" 9 | addonmanager.kubernetes.io/mode: Reconcile 10 | kubernetes.io/name: "Kibana" 11 | spec: 12 | type: NodePort 13 | ports: 14 | - port: 5601 15 | protocol: TCP 16 | nodePort: 30010 17 | targetPort: ui 18 | selector: 19 | k8s-app: kibana-logging 20 | -------------------------------------------------------------------------------- /yaml/example/demo.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: hpe-lession-web 5 | spec: 6 | type: NodePort 7 | #sessionAffinity: ClientIP 8 | selector: 9 | app: hpe-lession-web 10 | ports: 11 | - port: 8080 12 | targetPort: 8080 13 | nodePort: 30082 14 | --- 15 | kind: ReplicationController 16 | apiVersion: v1 17 | metadata: 18 | name: hpe-lession-web 19 | spec: 20 | replicas: 3 21 | template: 22 | metadata: 23 | labels: 24 | app: hpe-lession-web 25 | spec: 26 | containers: 27 | - name: hpe-lesssion-web 28 | image: hpe_k8s_lession_image 29 | imagePullPolicy: Never 30 | ports: 31 | - containerPort: 8080 32 | env: 33 | - name: mysql_ip 34 | value: "hpe-k8s-mysql" 35 | - name: user 36 | value: "lession" 37 | - name: password 38 | value: "mypass" 39 | -------------------------------------------------------------------------------- /yaml/example/demo1.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: hpe-k8s-mysql 5 | spec: 6 | selector: 7 | app: hpe-k8s-mysql 8 | ports: 9 | - port: 3306 10 | --- 11 | kind: Pod 12 | apiVersion: v1 13 | metadata: 14 | name: hpe-k8s-mysql 15 | labels: 16 | app: hpe-k8s-mysql 17 | spec: 18 | containers: 19 | - name: hpe-k8s-mysql 20 | image: mysql 21 | volumeMounts: 22 | - mountPath: /var/lib/mysql 23 | name: datavolume 24 | env: 25 | - name: MYSQL_ROOT_PASSWORD 26 | value: "123456" 27 | - name: MYSQL_DATABASE 28 | value: "HPE_APP" 29 | - name: MYSQL_USER 30 | value: "lession" 31 | - name: MYSQL_PASSWORD 32 | value: "mypass" 33 | volumes: 34 | - name: datavolume 35 | hostPath: 36 | path: /mysqldata 37 | -------------------------------------------------------------------------------- /yaml/example/hpe-k8s-mysql.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: my-storage 5 | provisioner: kubernetes.io/host-path 6 | --- 7 | kind: PersistentVolumeClaim 8 | apiVersion: v1 9 | metadata: 10 | name: my-claim 11 | annotations: 12 | volume.beta.kubernetes.io/storage-class: my-storage 13 | spec: 14 | accessModes: 15 | - ReadWriteOnce 16 | resources: 17 | requests: 18 | storage: 1Gi 19 | --- 20 | kind: Service 21 | apiVersion: v1 22 | metadata: 23 | name: hpe-k8s-mysql 24 | spec: 25 | selector: 26 | app: hpe-k8s-mysql 27 | ports: 28 | - port: 3306 29 | --- 30 | kind: Pod 31 | apiVersion: v1 32 | metadata: 33 | name: hpe-k8s-mysql 34 | labels: 35 | app: hpe-k8s-mysql 36 | spec: 37 | containers: 38 | - name: hpe-k8s-mysql 39 | image: mysql 40 | volumeMounts: 41 | - mountPath: /var/lib/mysql 42 | name: datavolume 43 | env: 44 | - name: MYSQL_ROOT_PASSWORD 45 | value: "123456" 46 | - name: MYSQL_DATABASE 47 | value: "HPE_APP" 48 | - name: MYSQL_USER 49 | value: "lession" 50 | - name: MYSQL_PASSWORD 51 | value: "mypass" 52 | volumes: 53 | - name: datavolume 54 | persistentVolumeClaim: 55 | claimName: my-claim 56 | 
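>注意:hpe-k8s-mysql.yaml 通过 StorageClass(provisioner: kubernetes.io/host-path)做动态供给,据我了解这种 host-path 动态供给一般只在 controller-manager 开启 --enable-hostpath-provisioner 的单机/测试集群上生效。下面是一个简单的验证思路(文件路径按本仓库目录假设):

```bash
kubectl apply -f yaml/example/hpe-k8s-mysql.yaml
# PVC 从 Pending 变为 Bound 说明动态供给生效,否则 mysql Pod 会一直 Pending
kubectl get pvc my-claim
kubectl get pv
kubectl describe pod hpe-k8s-mysql
```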
-------------------------------------------------------------------------------- /yaml/example/nginx-deploy.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | replicas: 2 7 | template: 8 | metadata: 9 | name: nginx-pod 10 | labels: 11 | app: nginx 12 | spec: 13 | restartPolicy: Always 14 | containers: 15 | - name: nginx-container 16 | image: nginx 17 | ports: 18 | - containerPort: 80 19 | -------------------------------------------------------------------------------- /yaml/example/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-pod 5 | labels: 6 | app: nginx 7 | spec: 8 | restartPolicy: Always 9 | containers: 10 | - image: nginx 11 | name: nginx 12 | ports: 13 | - containerPort: 80 14 | 15 | --- 16 | kind: Pod 17 | apiVersion: v1 18 | metadata: 19 | name: httpd-pod 20 | labels: 21 | app: nginx 22 | spec: 23 | restartPolicy: Always 24 | containers: 25 | - name: httpd 26 | image: httpd 27 | ports: 28 | - containerPort: 80 29 | 30 | --- 31 | kind: Service 32 | apiVersion: v1 33 | metadata: 34 | name: nginx-service 35 | namespace: default 36 | spec: 37 | selector: 38 | app: nginx 39 | type: NodePort 40 | sessionAffinity: None 41 | ports: 42 | - port: 80 43 | targetPort: 80 44 | nodePort: 30000 45 | protocol: TCP 46 | 47 | -------------------------------------------------------------------------------- /yaml/example/nginx-rc.yaml: -------------------------------------------------------------------------------- 1 | kind: ReplicationController 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-rc 5 | labels: 6 | app: nginx-rc-label 7 | spec: 8 | replicas: 2 9 | selector: 10 | app: nginx 11 | template: 12 | metadata: 13 | name: nginx-pod 14 | labels: 15 | app: nginx 16 | spec: 17 | restartPolicy: Always 18 | containers: 19 | - name: nginx 20 | image: nginx 21 | ports: 22 | - containerPort: 80 23 | --- 24 | kind: Service 25 | apiVersion: v1 26 | metadata: 27 | name: nginx-service 28 | spec: 29 | selector: 30 | app: nginx 31 | type: NodePort 32 | ports: 33 | - port: 80 34 | targetPort: 80 35 | nodePort: 30000 36 | -------------------------------------------------------------------------------- /yaml/example/nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: nginx 5 | labels: 6 | k8s-app: nginx 7 | spec: 8 | replicas: 1 9 | selector: 10 | name: nginx 11 | template: 12 | metadata: 13 | labels: 14 | name: nginx 15 | spec: 16 | containers: 17 | - name: nginx 18 | image: nginx 19 | ports: 20 | - containerPort: 80 21 | --- 22 | apiVersion: v1 23 | kind: Service 24 | metadata: 25 | name: nginx 26 | labels: 27 | name: nginx 28 | spec: 29 | type: NodePort 30 | ports: 31 | - port: 80 32 | nodePort: 30001 33 | selector: 34 | name: nginx 35 | -------------------------------------------------------------------------------- /yaml/example/rc-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: frontend 5 | labels: 6 | name: frontend 7 | spec: 8 | replicas: 3 9 | selector: 10 | name: frontend 11 | template: 12 | metadata: 13 | labels: 14 | name: frontend 15 | spec: 16 | containers: 17 | - name: frontend 18 | image: 
kubeguide/guestbook-php-frontend 19 | env: 20 | - name: GET_HOSTS_FROM 21 | value: env 22 | ports: 23 | - containerPort: 80 24 | -------------------------------------------------------------------------------- /yaml/example/rc-redis-master.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: redis-master 5 | labels: 6 | name: redis-master 7 | spec: 8 | replicas: 1 9 | selector: 10 | name: redis-master 11 | template: 12 | metadata: 13 | labels: 14 | name: redis-master 15 | spec: 16 | containers: 17 | - name: master 18 | image: kubeguide/redis-master 19 | ports: 20 | - containerPort: 6379 21 | -------------------------------------------------------------------------------- /yaml/example/rc-redis-slave.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: redis-slave 5 | labels: 6 | name: redis-slave 7 | spec: 8 | replicas: 2 9 | selector: 10 | name: redis-slave 11 | template: 12 | metadata: 13 | labels: 14 | name: redis-slave 15 | spec: 16 | containers: 17 | - name: slave 18 | image: kubeguide/guestbook-redis-slave 19 | env: 20 | - name: GET_HOST_FROM 21 | value: env 22 | ports: 23 | - containerPort: 6379 24 | 25 | 26 | 27 | 28 | -------------------------------------------------------------------------------- /yaml/example/redis.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: redis-client 5 | spec: 6 | containers: 7 | - name: redis 8 | image: redis:4.0.1-alpine 9 | ports: 10 | - containerPort: 6379 11 | 12 | -------------------------------------------------------------------------------- /yaml/example/service-frontend.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: frontend 5 | labels: 6 | name: frontend 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 80 11 | nodePort: 30001 12 | selector: 13 | name: frontend 14 | -------------------------------------------------------------------------------- /yaml/example/service-redis-master.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis-master 5 | labels: 6 | name: redis-master 7 | spec: 8 | ports: 9 | - port: 6379 10 | targetPort: 6379 11 | selector: 12 | name: redis-master 13 | -------------------------------------------------------------------------------- /yaml/example/service-redis-slave.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis-slave 5 | labels: 6 | name: redis-slave 7 | spec: 8 | ports: 9 | - port: 6379 10 | selector: 11 | name: redis-slave 12 | -------------------------------------------------------------------------------- /yaml/glusterfs/glusterfs-endpoints.yaml: -------------------------------------------------------------------------------- 1 | kind: Endpoints 2 | apiVersion: v1 3 | metadata: 4 | name: glusterfs-cluster 5 | subsets: 6 | - addresses: 7 | - ip: 10.0.0.4 8 | - ports: 9 | - port: 1000 10 | - addresses: 11 | - ip: 10.0.0.5 12 | - ports: 13 | - port: 1000 14 | - addresses: 15 | - ip: 10.0.0.6 16 | - ports: 17 | - port: 1000 18 | 19 | 
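>注意:上面 glusterfs-endpoints.yaml 把 ports 写成了 subsets 下独立的列表项,这样带 addresses 的 subset 并没有端口,与官方 glusterfs 示例的写法不一致;通常 ports 应与 addresses 放在同一个 subset 中,下面是按这种写法整理的示例(IP 和端口沿用上面的示例值):

```yaml
kind: Endpoints
apiVersion: v1
metadata:
  name: glusterfs-cluster
subsets:
- addresses:
  - ip: 10.0.0.4
  ports:
  - port: 1000
- addresses:
  - ip: 10.0.0.5
  ports:
  - port: 1000
- addresses:
  - ip: 10.0.0.6
  ports:
  - port: 1000
```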
-------------------------------------------------------------------------------- /yaml/glusterfs/glusterfs-service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: glusterfs-cluster 5 | spec: 6 | ports: 7 | - protocol: TCP 8 | port: 1000 9 | -------------------------------------------------------------------------------- /yaml/harbor/README.md: -------------------------------------------------------------------------------- 1 | [TOC] 2 | 3 | # 搭建docker私有仓库harbor 4 | 5 | 6 | 7 | # 1 安装docker-compose 8 | 使用docker-compose安装harbor。 9 | 10 | 安装epel。epel是centos等衍生发行版,用来弥补centos内容更新有时比较滞后或是一些扩展的源没有。 11 | ```bash 12 | yum -y install epel-release 13 | ``` 14 | 15 | 安装pip。pip 是一个Python包管理工具。 16 | ```bash 17 | yum install -y python-pip 18 | ``` 19 | 20 | 对安装好的pip进行升级。 21 | ```bash 22 | pip install --upgrade pip 23 | ``` 24 | 25 | 升级python。 26 | ```bash 27 | yum upgrade -y python* 28 | ``` 29 | 30 | centos 7下使用pip安装docker-compse。 31 | ```bash 32 | pip install docker-compose 33 | ``` 34 | 35 | 验证: 36 | ```bash 37 | docker-compose --version 38 | ``` 39 | 40 | # 2 使用docker compose安装harbor 41 | 下载在线安装包harbor-online-installer-v1.1.2.tgz: 42 | ```bash 43 | wget https://github.com/vmware/harbor/releases/download/v1.1.2/harbor-online-installer-v1.1.2.tgz 44 | ``` 45 | 46 | 解压: 47 | ```bash 48 | tar xvf harbor-online-installer-v1.1.2.tgz 49 | ``` 50 | 51 | docker-compose.yaml文件nginx端口号修改: 52 | ```yaml 53 | proxy: 54 | image: vmware/nginx:1.11.5-patched 55 | container_name: nginx 56 | restart: always 57 | volumes: 58 | - ./common/config/nginx:/etc/nginx:z 59 | networks: 60 | - harbor 61 | ports: 62 | - 80:80 63 | ``` 64 | 如将80端端口修改8080,配置如下: 65 | ```yaml 66 | proxy: 67 | image: vmware/nginx:1.11.5-patched 68 | container_name: nginx 69 | restart: always 70 | volumes: 71 | - ./common/config/nginx:/etc/nginx:z 72 | networks: 73 | - harbor 74 | ports: 75 | - 8080:80 76 | ``` 77 | 78 | 79 | 配置主机名称: 80 | ```bash 81 | vi harbor/barbor.cfg 82 | ``` 83 | 修改内容如下: 84 | ```bash 85 | hostname=172.16.120.153:8080 86 | ``` 87 | hostname设置运行主机的IP,或者是域名。其他配置可以进行更改,或使用默认配置,登录页面后部分参数可在页面修改。 88 | 89 | 安装: 90 | ```bash 91 | sh harbor/install.sh 92 | ``` 93 | 安装成功后,通过之前在harbor.cfg配置的hostname即可以访问到前端了,默认登陆用户名密码是admin/Harbor12345 94 | 95 | 96 | # 3 客户端配置 97 | 因为docker客户端默认采用https访问docker registry,而我们默认安装的Harbor并没有启用https。 可以在Docker客户端所在的机器修改/etc/docker/daemon.json: 98 | ```json 99 | { 100 | "insecure-registries": ["172.16.120.153:8080"] 101 | } 102 | ``` 103 | 配置非安全的docker registry。 104 | 105 | # 4 客户端推送镜像 106 | 107 | ```bash 108 | docker login -u admin -p Harbor12345 172.16.120.153:8090 109 | Login Succeeded 110 | 111 | docker pull nginx 112 | docker tag nginx 172.16.120.153:8080/library/nginx 113 | 114 | docker push 172.16.120.153:8080/library/nginx 115 | ``` 116 | 117 | # 5 配置https访问 118 | ## 5.1 SAN 证书扩展域名配置 119 | 默认的OpenSSL生成的签名请求只适用于生成时填写的域名,即Common Name填的是哪个域名,证书就只能应用于哪个域名, 120 | 但是一般内网都是以IP方式部署,所以需要添加SAN(Subject Alternative Name)扩展信息,以支持多域名和IP。 121 | 122 | 完整的配置文件如下: 123 | ``` 124 | [ req ] 125 | distinguished_name = req_distinguished_name 126 | req_extensions = v3_req # The extensions to add to a certificate request 127 | [ v3_req ] 128 | 129 | # Extensions to add to a certificate request 130 | 131 | basicConstraints = CA:FALSE 132 | keyUsage = nonRepudiation, digitalSignature, keyEncipherment 133 | subjectAltName = @alt_names 134 | 135 | [ alt_names ] 136 | IP.1=172.16.120.153 137 | DNS.1=*.3songshu.com 138 | ``` 139 | 140 | ## 
5.2 创建CA及自签名 141 | ```bash 142 | rm -rf ~/crt 143 | mkdir ~/crt 144 | # 创建私钥 145 | openssl genrsa -out ~/crt/ca.key 2048 146 | openssl req -x509 -new -nodes -key ~/crt/ca.key -days 10000 -out ~/crt/ca.crt -subj "/CN=harbor" 147 | # 生成带有 SAN 的证书请求 148 | openssl req -newkey rsa:4096 -nodes -sha256 -keyout ~/crt/server.key -out ~/crt/server.csr -subj "/C=CN/ST=Anhui/L=Wuhu/O=organization/OU=IT/CN=harbor/emailAddress=example@example.com" 149 | # 签名带有 SAN 的证书 150 | openssl x509 -req -in ~/crt/server.csr -CA ~/crt/ca.crt -CAkey ~/crt/ca.key -CAcreateserial -out ~/crt/server.crt -days 365 -extensions v3_req -extfile openssl.cnf 151 | ``` 152 | 根据harbor/harbor.cfg中有关证书的内容,将server.crt和server.key复制到/data/cert文件夹下: 153 | ```bash 154 | mkdir -p /data/cert 155 | cp ~/crt/{server.crt,server.key} /data/cert 156 | ``` 157 | 158 | ## 5.3 配置证书 159 | ```bash 160 | vi harbor/harbor.cfg 161 | ``` 162 | 163 | ``` 164 | # 访问UI与token/notification服务的协议,默认为http。 165 | # 如果在nginx中开启了ssl,可以设置为https 166 | ui_url_protocol = https 167 | ``` 168 | 修改配置后重新执行 harbor/install.sh 安装。 169 | 170 | ## 5.4 配置docker客户端 171 | 如果使用的是自签名证书,需要配置docker客户端。 172 | ```bash 173 | # 如果如下目录不存在,请创建,如果有域名请按此格式依次创建 174 | mkdir -p /etc/docker/certs.d/172.16.120.153:8080 175 | # mkdir -p /etc/docker/certs.d/[IP2] 176 | # mkdir -p /etc/docker/certs.d/[example1.com] 177 | # 如果端口为443,则不需要指定。如果为自定义端口,请指定端口 178 | # /etc/docker/certs.d/yourdomain.com:port 179 | 180 | # 将ca根证书依次复制到上述创建的目录中 181 | cp ca.crt /etc/docker/certs.d/172.16.120.153:8080/ 182 | ``` 183 | 重启docker,到此,harbor的https访问已经可以使用。 184 | 185 | # 6 参考 186 | http://blog.frognew.com/2017/06/install-harbor.html 187 | 188 | https://www.digitalocean.com/community/tutorials/how-to-install-and-use-docker-compose-on-centos-7 189 | 190 | http://blog.csdn.net/yulei_qq/article/details/52984334 191 | 192 | http://blog.csdn.net/yulei_qq/article/details/52985550 193 | 194 | http://blog.csdn.net/shenshouer/article/details/53390581 195 | 196 | https://github.com/vmware/harbor/issues/2452 197 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/app-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: app1 5 | spec: 6 | replicas: 2 7 | template: 8 | metadata: 9 | labels: 10 | app: app1 11 | spec: 12 | containers: 13 | - name: app1 14 | image: dockersamples/static-site 15 | env: 16 | - name: AUTHOR 17 | value: app1 18 | ports: 19 | - containerPort: 80 20 | --- 21 | apiVersion: extensions/v1beta1 22 | kind: Deployment 23 | metadata: 24 | name: app2 25 | spec: 26 | replicas: 2 27 | template: 28 | metadata: 29 | labels: 30 | app: app2 31 | spec: 32 | containers: 33 | - name: app2 34 | image: dockersamples/static-site 35 | env: 36 | - name: AUTHOR 37 | value: app2 38 | ports: 39 | - containerPort: 80 40 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/app-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | ingress.kubernetes.io/rewrite-target: / 6 | name: app-ingress 7 | spec: 8 | rules: 9 | - host: crontestsite01.com 10 | http: 11 | paths: 12 | - backend: 13 | serviceName: appsvc1 14 | servicePort: 80 15 | path: /app1 16 | - backend: 17 | serviceName: appsvc2 18 | servicePort: 80 19 | path: /app2 20 | --------------------------------------------------------------------------------
/yaml/ingress-nginx/app-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: appsvc1 5 | spec: 6 | ports: 7 | - port: 80 8 | protocol: TCP 9 | targetPort: 80 10 | selector: 11 | app: app1 12 | --- 13 | apiVersion: v1 14 | kind: Service 15 | metadata: 16 | name: appsvc2 17 | spec: 18 | ports: 19 | - port: 80 20 | protocol: TCP 21 | targetPort: 80 22 | selector: 23 | app: app2 24 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/default-backend-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: default-backend 5 | namespace: nginx-ingress 6 | spec: 7 | replicas: 2 8 | template: 9 | metadata: 10 | labels: 11 | app: default-backend 12 | spec: 13 | terminationGracePeriodSeconds: 60 14 | containers: 15 | - name: default-backend 16 | image: registry.cn-hangzhou.aliyuncs.com/google-containers/defaultbackend:1.0 17 | livenessProbe: 18 | httpGet: 19 | path: /healthz 20 | port: 8080 21 | scheme: HTTP 22 | initialDelaySeconds: 30 23 | timeoutSeconds: 5 24 | ports: 25 | - containerPort: 8080 26 | resources: 27 | limits: 28 | cpu: 10m 29 | memory: 20Mi 30 | requests: 31 | cpu: 10m 32 | memory: 20Mi 33 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/default-backend-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: default-backend 5 | namespace: nginx-ingress 6 | spec: 7 | ports: 8 | - port: 80 9 | protocol: TCP 10 | targetPort: 8080 11 | selector: 12 | app: default-backend 13 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/nginx-ingress-controller-config-map.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: nginx-ingress-controller-conf 5 | namespace: nginx-ingress 6 | labels: 7 | app: nginx-ingress-lb 8 | data: 9 | enable-vts-status: 'true' 10 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/nginx-ingress-controller-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-ingress-controller 5 | namespace: nginx-ingress 6 | spec: 7 | replicas: 1 8 | revisionHistoryLimit: 3 9 | template: 10 | metadata: 11 | labels: 12 | app: nginx-ingress-lb 13 | spec: 14 | terminationGracePeriodSeconds: 60 15 | serviceAccount: nginx 16 | containers: 17 | - name: nginx-ingress-controller 18 | image: registry.cn-hangzhou.aliyuncs.com/google-containers/nginx-ingress-controller:0.8.3 19 | imagePullPolicy: Always 20 | readinessProbe: 21 | httpGet: 22 | path: /healthz 23 | port: 10254 24 | scheme: HTTP 25 | livenessProbe: 26 | httpGet: 27 | path: /healthz 28 | port: 10254 29 | scheme: HTTP 30 | initialDelaySeconds: 10 31 | timeoutSeconds: 5 32 | args: 33 | - /nginx-ingress-controller 34 | - --default-backend-service=$(POD_NAMESPACE)/default-backend 35 | - --nginx-configmap=$(POD_NAMESPACE)/nginx-ingress-controller-conf 36 | - --v=2 37 | env: 38 | - name: POD_NAME 39 | valueFrom: 40 | fieldRef: 41 | fieldPath: metadata.name 42 | - name: POD_NAMESPACE 43 | 
valueFrom: 44 | fieldRef: 45 | fieldPath: metadata.namespace 46 | ports: 47 | - containerPort: 80 48 | - containerPort: 18080 49 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/nginx-ingress-controller-roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nginx 5 | namespace: nginx-ingress 6 | --- 7 | kind: ClusterRole 8 | apiVersion: rbac.authorization.k8s.io/v1beta1 9 | metadata: 10 | name: nginx-role 11 | rules: 12 | - apiGroups: 13 | - "" 14 | - "extensions" 15 | resources: 16 | - configmaps 17 | - secrets 18 | - endpoints 19 | - ingresses 20 | - nodes 21 | - pods 22 | verbs: 23 | - list 24 | - watch 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - services 29 | verbs: 30 | - list 31 | - watch 32 | - get 33 | - update 34 | - apiGroups: 35 | - "extensions" 36 | resources: 37 | - ingresses 38 | verbs: 39 | - get 40 | - apiGroups: 41 | - "" 42 | resources: 43 | - events 44 | verbs: 45 | - create 46 | - apiGroups: 47 | - "extensions" 48 | resources: 49 | - ingresses/status 50 | verbs: 51 | - update 52 | - apiGroups: 53 | - "" 54 | resources: 55 | - configmaps 56 | verbs: 57 | - get 58 | --- 59 | kind: ClusterRoleBinding 60 | apiVersion: rbac.authorization.k8s.io/v1beta1 61 | metadata: 62 | name: nginx-role 63 | roleRef: 64 | apiGroup: rbac.authorization.k8s.io 65 | kind: ClusterRole 66 | name: nginx-role 67 | subjects: 68 | - kind: ServiceAccount 69 | name: nginx 70 | namespace: nginx-ingress 71 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/nginx-ingress-controller-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-ingress 5 | namespace: nginx-ingress 6 | spec: 7 | type: NodePort 8 | ports: 9 | - port: 80 10 | nodePort: 30009 11 | name: http 12 | - port: 18080 13 | nodePort: 32000 14 | name: http-mgmt 15 | selector: 16 | app: nginx-ingress-lb 17 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/nginx-ingress-namespace.yaml: -------------------------------------------------------------------------------- 1 | kind: Namespace 2 | apiVersion: v1 3 | metadata: 4 | name: nginx-ingress 5 | -------------------------------------------------------------------------------- /yaml/ingress-nginx/nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: nginx-ingress 5 | namespace: nginx-ingress 6 | spec: 7 | rules: 8 | - host: crontestsite01.com 9 | http: 10 | paths: 11 | - backend: 12 | serviceName: nginx-ingress 13 | servicePort: 18080 14 | path: /nginx_status 15 | -------------------------------------------------------------------------------- /yaml/kubedns/README.md: -------------------------------------------------------------------------------- 1 | 下载kubernetes安装包https://github.com/kubernetes/kubernetes/releases/download/v1.6.7/kubernetes.tar.gz, 2 | 在kubernetes/cluster/addons/dns目录下有kubedns的安装脚本。 3 | 4 | # 1.kubedns-cm.yaml和kubedns-sa.yaml 5 | kubedns-cm.yaml和kubedns-sa.yaml不需要进行修改,直接使用。 6 | 7 | # 2.kubedns-svc.yaml 8 | kubedns-svc.yaml有三种类型的模板文件,我们使用kubedns-svc.yaml.sed文件来生成kubedns-svc.yaml文件,替换$DNS_SERVER_IP为指定IP,我们这里使用10.0.0.10。 9 | ``` 10 | cp kubedns-svc.yaml.sed kubedns-svc.yaml 11 | sed -i 
's/$DNS_SERVER_IP/10.0.0.10/g' kubedns-svc.yaml 12 | ``` 13 | 14 | # 3.kubedns-controller.yaml 15 | kubedns-controller.yaml有三种类型的模板文件,我们使用kubedns-controller.yaml.sed文件来生成kubedns-controller.yaml文件,替换$DNS_DOMAIN为cluster.local.。 16 | ``` 17 | cp kubedns-controller.yaml.sed kubedns-controller.yaml 18 | sed -i 's/$DNS_DOMAIN/cluster.local./g' kubedns-controller.yaml 19 | ``` 20 | 由于gcr.io进行下载问题,对kubedns-controller.yaml使用的docker镜像进行了替换, 21 | - gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4镜像改为registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-kube-dns-amd64:1.14.5 22 | - gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.4镜像改为registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-dnsmasq-nanny-amd64:1.14.5 23 | - gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.4镜像改为registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-sidecar-amd64:1.14.5 24 | 25 | 26 | # 4.启动kubedns服务 27 | ``` 28 | kubectl create -f kubedns-cm.yaml 29 | kubectl create -f kubedns-sa.yaml 30 | kubectl create -f kubedns-svc.yaml 31 | kubectl create -f kubedns-controller.yaml 32 | ``` 33 | >注意:需要配置kubelet的启动参数--cluster-dns=10.0.0.10 --cluster-domain=cluster.local 34 | 35 | # 5.验证 36 | 创建pod,pod-busybox.yaml 37 | ``` 38 | apiVersion: v1 39 | kind: Pod 40 | metadata: 41 | name: busybox 42 | namespace: default 43 | spec: 44 | containers: 45 | - image: busybox 46 | command: 47 | - sleep 48 | - "3600" 49 | imagePullPolicy: IfNotPresent 50 | name: busybox 51 | restartPolicy: Always 52 | ``` 53 | 54 | 登录busybox容器内部 55 | ``` 56 | kubectl exec -it busybox -- /bin/sh 57 | ``` 58 | 59 | 输入命令认证 60 | ``` 61 | nslookup kubernetes 62 | ``` 63 | 输出结果为: 64 | ``` 65 | Server: 10.0.0.10 66 | Address 1: 10.0.0.10 kube-dns.kube-system.svc.cluster.local 67 | 68 | Name: kubernetes 69 | Address 1: 10.0.0.1 kubernetes.default.svc.cluster.local 70 | ``` 71 | -------------------------------------------------------------------------------- /yaml/kubedns/kubedns-cm.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | apiVersion: v1 16 | kind: ConfigMap 17 | metadata: 18 | name: kube-dns 19 | namespace: kube-system 20 | labels: 21 | addonmanager.kubernetes.io/mode: EnsureExists 22 | -------------------------------------------------------------------------------- /yaml/kubedns/kubedns-controller.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 
5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml 16 | # in sync with this file. 17 | 18 | # __MACHINE_GENERATED_WARNING__ 19 | 20 | apiVersion: extensions/v1beta1 21 | kind: Deployment 22 | metadata: 23 | name: kube-dns 24 | namespace: kube-system 25 | labels: 26 | k8s-app: kube-dns 27 | kubernetes.io/cluster-service: "true" 28 | addonmanager.kubernetes.io/mode: Reconcile 29 | spec: 30 | # replicas: not specified here: 31 | # 1. In order to make Addon Manager do not reconcile this replicas parameter. 32 | # 2. Default is 1. 33 | # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. 34 | strategy: 35 | rollingUpdate: 36 | maxSurge: 10% 37 | maxUnavailable: 0 38 | selector: 39 | matchLabels: 40 | k8s-app: kube-dns 41 | template: 42 | metadata: 43 | labels: 44 | k8s-app: kube-dns 45 | annotations: 46 | scheduler.alpha.kubernetes.io/critical-pod: '' 47 | spec: 48 | tolerations: 49 | - key: "CriticalAddonsOnly" 50 | operator: "Exists" 51 | volumes: 52 | - name: kube-dns-config 53 | configMap: 54 | name: kube-dns 55 | optional: true 56 | containers: 57 | - name: kubedns 58 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-kube-dns-amd64:1.14.5 59 | resources: 60 | # TODO: Set memory limits when we've profiled the container for large 61 | # clusters, then set request = limit to keep this container in 62 | # guaranteed class. Currently, this container falls into the 63 | # "burstable" category so the kubelet doesn't backoff from restarting it. 64 | limits: 65 | memory: 170Mi 66 | requests: 67 | cpu: 100m 68 | memory: 70Mi 69 | livenessProbe: 70 | httpGet: 71 | path: /healthcheck/kubedns 72 | port: 10054 73 | scheme: HTTP 74 | initialDelaySeconds: 60 75 | timeoutSeconds: 5 76 | successThreshold: 1 77 | failureThreshold: 5 78 | readinessProbe: 79 | httpGet: 80 | path: /readiness 81 | port: 8081 82 | scheme: HTTP 83 | # we poll on pod startup for the Kubernetes master service and 84 | # only setup the /readiness HTTP server once that's available. 85 | initialDelaySeconds: 3 86 | timeoutSeconds: 5 87 | args: 88 | - --domain=cluster.local. 
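        # 注:--domain 需与 kubelet 启动参数 --cluster-domain 保持一致(见本目录 README 中 --cluster-dns/--cluster-domain 的说明)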
89 | - --dns-port=10053 90 | - --config-dir=/kube-dns-config 91 | - --v=2 92 | 93 | env: 94 | - name: PROMETHEUS_PORT 95 | value: "10055" 96 | ports: 97 | - containerPort: 10053 98 | name: dns-local 99 | protocol: UDP 100 | - containerPort: 10053 101 | name: dns-tcp-local 102 | protocol: TCP 103 | - containerPort: 10055 104 | name: metrics 105 | protocol: TCP 106 | volumeMounts: 107 | - name: kube-dns-config 108 | mountPath: /kube-dns-config 109 | - name: dnsmasq 110 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-dnsmasq-nanny-amd64:1.14.5 111 | livenessProbe: 112 | httpGet: 113 | path: /healthcheck/dnsmasq 114 | port: 10054 115 | scheme: HTTP 116 | initialDelaySeconds: 60 117 | timeoutSeconds: 5 118 | successThreshold: 1 119 | failureThreshold: 5 120 | args: 121 | - -v=2 122 | - -logtostderr 123 | - -configDir=/etc/k8s/dns/dnsmasq-nanny 124 | - -restartDnsmasq=true 125 | - -- 126 | - -k 127 | - --cache-size=1000 128 | - --log-facility=- 129 | - --server=/cluster.local./127.0.0.1#10053 130 | - --server=/in-addr.arpa/127.0.0.1#10053 131 | - --server=/ip6.arpa/127.0.0.1#10053 132 | ports: 133 | - containerPort: 53 134 | name: dns 135 | protocol: UDP 136 | - containerPort: 53 137 | name: dns-tcp 138 | protocol: TCP 139 | # see: https://github.com/kubernetes/kubernetes/issues/29055 for details 140 | resources: 141 | requests: 142 | cpu: 150m 143 | memory: 20Mi 144 | volumeMounts: 145 | - name: kube-dns-config 146 | mountPath: /etc/k8s/dns/dnsmasq-nanny 147 | - name: sidecar 148 | image: registry.cn-hangzhou.aliyuncs.com/szss_k8s/k8s-dns-sidecar-amd64:1.14.5 149 | livenessProbe: 150 | httpGet: 151 | path: /metrics 152 | port: 10054 153 | scheme: HTTP 154 | initialDelaySeconds: 60 155 | timeoutSeconds: 5 156 | successThreshold: 1 157 | failureThreshold: 5 158 | args: 159 | - --v=2 160 | - --logtostderr 161 | - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local.,5,A 162 | - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local.,5,A 163 | ports: 164 | - containerPort: 10054 165 | name: metrics 166 | protocol: TCP 167 | resources: 168 | requests: 169 | memory: 20Mi 170 | cpu: 10m 171 | dnsPolicy: Default # Don't use cluster DNS. 172 | serviceAccountName: kube-dns 173 | -------------------------------------------------------------------------------- /yaml/kubedns/kubedns-sa.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: kube-dns 5 | namespace: kube-system 6 | labels: 7 | kubernetes.io/cluster-service: "true" 8 | addonmanager.kubernetes.io/mode: Reconcile 9 | -------------------------------------------------------------------------------- /yaml/kubedns/kubedns-svc.yaml: -------------------------------------------------------------------------------- 1 | # Copyright 2016 The Kubernetes Authors. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 
14 | 15 | # __MACHINE_GENERATED_WARNING__ 16 | 17 | apiVersion: v1 18 | kind: Service 19 | metadata: 20 | name: kube-dns 21 | namespace: kube-system 22 | labels: 23 | k8s-app: kube-dns 24 | kubernetes.io/cluster-service: "true" 25 | addonmanager.kubernetes.io/mode: Reconcile 26 | kubernetes.io/name: "KubeDNS" 27 | spec: 28 | selector: 29 | k8s-app: kube-dns 30 | clusterIP: 10.0.0.10 31 | ports: 32 | - name: dns 33 | port: 53 34 | protocol: UDP 35 | - name: dns-tcp 36 | port: 53 37 | protocol: TCP 38 | -------------------------------------------------------------------------------- /yaml/mongo/mongo-standalone-cephfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongo 5 | labels: 6 | name: mongo 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 27017 11 | targetPort: 27017 12 | nodePort: 30040 13 | selector: 14 | app: mongo-standalone-ceph 15 | --- 16 | apiVersion: apps/v1beta1 17 | kind: StatefulSet 18 | metadata: 19 | name: mongo 20 | labels: 21 | app: mongo-standalone-ceph 22 | spec: 23 | serviceName: "mongo" 24 | replicas: 1 25 | template: 26 | metadata: 27 | labels: 28 | app: mongo-standalone-ceph 29 | spec: 30 | terminationGracePeriodSeconds: 10 31 | containers: 32 | - name: mongo 33 | image: mongo 34 | ports: 35 | - containerPort: 27017 36 | volumeMounts: 37 | - mountPath: "/data/db" 38 | name: cephfs 39 | volumes: 40 | - name: cephfs 41 | cephfs: 42 | monitors: 43 | - 10.80.0.1:6789 44 | - 10.80.0.2:6789 45 | - 10.80.0.3:6789 46 | user: admin 47 | path: /mongo 48 | secretRef: 49 | name: ceph-secret 50 | readOnly: false 51 | -------------------------------------------------------------------------------- /yaml/mongo/mongo-standalone.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongo 5 | labels: 6 | name: mongo 7 | spec: 8 | type: NodePort 9 | ports: 10 | - port: 27017 11 | targetPort: 27017 12 | nodePort: 30040 13 | selector: 14 | app: mongo-standalone 15 | --- 16 | apiVersion: apps/v1beta1 17 | kind: StatefulSet 18 | metadata: 19 | name: mongo 20 | labels: 21 | app: mongo-standalone 22 | spec: 23 | serviceName: "mongo" 24 | replicas: 1 25 | template: 26 | metadata: 27 | labels: 28 | app: mongo-standalone 29 | spec: 30 | terminationGracePeriodSeconds: 10 31 | containers: 32 | - name: mongo 33 | image: mongo 34 | ports: 35 | - containerPort: 27017 36 | -------------------------------------------------------------------------------- /yaml/mysql/mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ReplicationController 3 | metadata: 4 | name: mysql 5 | labels: 6 | k8s-app: mysql 7 | spec: 8 | replicas: 1 9 | selector: 10 | name: mysql 11 | template: 12 | metadata: 13 | name: mysql 14 | labels: 15 | name: mysql 16 | spec: 17 | containers: 18 | - image: mysql:5.7 19 | name: mysql 20 | resources: 21 | limits: 22 | cpu: 100m 23 | memory: 500Mi 24 | requests: 25 | cpu: 100m 26 | memory: 500Mi 27 | ports: 28 | - containerPort: 3306 29 | volumeMounts: 30 | - name: mysql-data-storage 31 | mountPath: /data 32 | env: 33 | - name: MYSQL_ROOT_PASSWORD 34 | value: "123456" 35 | volumes: 36 | - name: mysql-data-storage 37 | emptyDir: {} 38 | --- 39 | apiVersion: v1 40 | kind: Service 41 | metadata: 42 | name: mysql 43 | labels: 44 | name: mysql 45 | spec: 46 | selector: 47 | name: mysql 48 | type: NodePort 49 | ports: 50 | - 
port: 3306 51 | targetPort: 3306 52 | nodePort: 30001 53 | 54 | -------------------------------------------------------------------------------- /yaml/nginx/nginx-s.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: host-storage 5 | provisioner: kubernetes.io/host-path 6 | --- 7 | kind: Service 8 | apiVersion: v1 9 | metadata: 10 | name: nginx 11 | spec: 12 | selector: 13 | app: nginx 14 | ports: 15 | - port: 80 16 | name: web 17 | clusterIP: None 18 | --- 19 | kind: StatefulSet 20 | apiVersion: apps/v1beta1 21 | metadata: 22 | name: nginx-statefule 23 | labels: 24 | app: nginx 25 | spec: 26 | replicas: 2 27 | serviceName: nginx 28 | template: 29 | metadata: 30 | name: nginx 31 | labels: 32 | app: nginx 33 | spec: 34 | containers: 35 | - name: nginx 36 | imagePullPolicy: Always 37 | image: nginx 38 | ports: 39 | - containerPort: 80 40 | volumeMounts: 41 | - name: www 42 | mountPath: /usr/share/nginx/html 43 | volumeClaimTemplates: 44 | - metadata: 45 | name: www 46 | annotations: 47 | volume.beta.kubernetes.io/storage-class: host-storage 48 | spec: 49 | accessModes: 50 | - ReadWriteOnce 51 | resources: 52 | requests: 53 | storage: 1Gi 54 | 55 | 56 | 57 | 58 | -------------------------------------------------------------------------------- /yaml/nginx/nginx.yaml: -------------------------------------------------------------------------------- 1 | kind: Pod 2 | apiVersion: v1 3 | metadata: 4 | name: nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | restartPolicy: Always 9 | containers: 10 | - name: nginx 11 | image: nginx 12 | imagePullPolicy: Always 13 | ports: 14 | - containerPort: 80 15 | volumeMounts: 16 | - name: nginx-storage 17 | mountPath: /data/nginx 18 | volumes: 19 | - name: nginx-storage 20 | emptyDir: {} 21 | -------------------------------------------------------------------------------- /yaml/nginx/test.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: host-storage 5 | provisioner: kubernetes.io/host-path 6 | -------------------------------------------------------------------------------- /yaml/rabbitmq/cluster/README.md: -------------------------------------------------------------------------------- 1 | # 构建镜像 2 | ```bash 3 | cd docker 4 | docker build -t szss/kubernetes-rabbitmq-autocluster:3.6.12 . 
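# 说明:Dockerfile 会把 docker/plugins/ 下的 *.ez 拷贝进镜像并离线启用 autocluster 插件,
# 构建前请确认 autocluster-0.10.0.ez 和 rabbitmq_aws-0.10.0.ez 已放在 plugins 目录中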
5 | ``` 6 | # 推送到阿里云 7 | ```bash 8 | docker tag szss/kubernetes-rabbitmq-autocluster:3.6.12 registry.cn-hangzhou.aliyuncs.com/szss/kubernetes-rabbitmq-autocluster:3.6.12 9 | docker push registry.cn-hangzhou.aliyuncs.com/szss/kubernetes-rabbitmq-autocluster:3.6.12 10 | ``` 11 | 12 | 部署: 13 | ```bash 14 | echo $(openssl rand -base64 32) > erlang.cookie 15 | kubectl -n default create secret generic erlang.cookie --from-file=erlang.cookie 16 | kubectl create -f ./ 17 | ``` 18 | 验证: 19 | 20 | ```bash 21 | FIRST_POD=$(kubectl get pods -n cat -l 'app=rabbitmq' -o jsonpath='{.items[0].metadata.name }') 22 | kubectl -n default exec -ti $FIRST_POD rabbitmqctl cluster_status 23 | ``` 24 | 25 | 参考: 26 | https://github.com/kuberstack/kubernetes-rabbitmq-autocluster 27 | 28 | https://segmentfault.com/a/1190000009733119 29 | -------------------------------------------------------------------------------- /yaml/rabbitmq/cluster/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rabbitmq:3.6.12-management 2 | 3 | ENV RABBITMQ_USE_LONGNAME=true \ 4 | AUTOCLUSTER_LOG_LEVEL=debug \ 5 | AUTOCLUSTER_CLEANUP=true \ 6 | CLEANUP_INTERVAL=60 \ 7 | CLEANUP_WARN_ONLY=false \ 8 | AUTOCLUSTER_TYPE=k8s \ 9 | LANG=en_US.UTF-8 10 | 11 | ADD plugins/*.ez /usr/lib/rabbitmq/lib/rabbitmq_server-3.6.12/plugins/ 12 | RUN rabbitmq-plugins enable --offline autocluster 13 | -------------------------------------------------------------------------------- /yaml/rabbitmq/cluster/docker/plugins/autocluster-0.10.0.ez: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhuchuangang/k8s-install-scripts/3209bc16d1d0a8b96c797f4796a2ff9a981e94fa/yaml/rabbitmq/cluster/docker/plugins/autocluster-0.10.0.ez -------------------------------------------------------------------------------- /yaml/rabbitmq/cluster/docker/plugins/rabbitmq_aws-0.10.0.ez: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zhuchuangang/k8s-install-scripts/3209bc16d1d0a8b96c797f4796a2ff9a981e94fa/yaml/rabbitmq/cluster/docker/plugins/rabbitmq_aws-0.10.0.ez -------------------------------------------------------------------------------- /yaml/rabbitmq/cluster/yaml/rabbitmq-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: rabbitmq 5 | namespace: default 6 | labels: 7 | app: rabbitmq 8 | spec: 9 | replicas: 3 10 | template: 11 | metadata: 12 | labels: 13 | app: rabbitmq 14 | spec: 15 | containers: 16 | - name: rabbitmq 17 | image: registry.cn-hangzhou.aliyuncs.com/patterncat/kubernetes-rabbitmq-autocluster:mgr 18 | ports: 19 | - containerPort: 5672 20 | name: port-5672 21 | - containerPort: 4369 22 | name: port-4369 23 | - containerPort: 5671 24 | name: port-5671 25 | - containerPort: 15672 26 | name: port-15672 27 | - containerPort: 25672 28 | name: port-25672 29 | env: 30 | - name: HOSTNAME 31 | valueFrom: 32 | fieldRef: 33 | fieldPath: status.podIP 34 | - name: MY_POD_IP 35 | valueFrom: 36 | fieldRef: 37 | fieldPath: status.podIP 38 | - name: AUTOCLUSTER_CLEANUP 39 | value: "true" 40 | - name: CLEANUP_INTERVAL 41 | value: "60" 42 | - name: CLEANUP_WARN_ONLY 43 | value: "false" 44 | # start.sh will store this value to a proper location in filesystem 45 | - name: RABBITMQ_ERLANG_COOKIE 46 | valueFrom: 47 | secretKeyRef: 48 | name: erlang.cookie 49 | key: erlang.cookie 
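# 注:上面 secretKeyRef 引用的 erlang.cookie Secret 需要先创建,否则容器无法启动(命令见本目录 README):
#   echo $(openssl rand -base64 32) > erlang.cookie
#   kubectl -n default create secret generic erlang.cookie --from-file=erlang.cookie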
50 | -------------------------------------------------------------------------------- /yaml/rabbitmq/cluster/yaml/rabbitmq-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | labels: 5 | app: rabbitmq 6 | name: rabbitmq 7 | namespace: default 8 | spec: 9 | ports: 10 | - port: 5672 11 | name: port-5672 12 | protocol: TCP 13 | targetPort: 5672 14 | - port: 4369 15 | name: port-4369 16 | protocol: TCP 17 | targetPort: 4369 18 | - port: 5671 19 | name: port-5671 20 | protocol: TCP 21 | targetPort: 5671 22 | - port: 15672 23 | name: port-15672 24 | protocol: TCP 25 | targetPort: 15672 26 | - port: 25672 27 | name: port-25672 28 | protocol: TCP 29 | targetPort: 25672 30 | selector: 31 | app: rabbitmq 32 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/image/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM redis:4.0.6 2 | 3 | COPY redis-master.conf /redis-master/redis.conf 4 | COPY redis-slave.conf /redis-slave/redis.conf 5 | COPY run.sh /run.sh 6 | 7 | RUN chmod a+x /run.sh 8 | 9 | VOLUME /data 10 | 11 | CMD [ "/run.sh" ] 12 | 13 | ENTRYPOINT [ "bash", "-c" ] 14 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/image/redis-master.conf: -------------------------------------------------------------------------------- 1 | ## 以下为rdb配置 2 | #dbfilename:持久化数据存储在本地的文件 3 | dbfilename dump.rdb 4 | 5 | #dir:持久化数据存储在本地的路径,如果是在/redis/redis-3.0.6/src下启动的redis-cli,则数据会存储在当前src目录下 6 | dir /data 7 | 8 | ##snapshot触发的时机,save 9 | ##如下为900秒后,至少有一个变更操作,才会snapshot 10 | ##对于此值的设置,需要谨慎,评估系统的变更操作密集程度 11 | ##可以通过“save “””来关闭snapshot功能 12 | #save时间,以下分别表示更改了1个key时间隔900s进行持久化存储;更改了10个key300s进行存储;更改10000个key60s进行存储。 13 | save 900 1 14 | save 300 10 15 | save 60 10000 16 | 17 | ##当snapshot时出现错误无法继续时,是否阻塞客户端“变更操作”,“错误”可能因为磁盘已满/磁盘故障/OS级别异常等 18 | stop-writes-on-bgsave-error yes 19 | ##是否启用rdb文件压缩,默认为“yes”,压缩往往意味着“额外的cpu消耗”,同时也意味这较小的文件尺寸以及较短的网络传输时间 20 | rdbcompression yes 21 | 22 | ##以下为aof配置 23 | ##此选项为aof功能的开关,默认为“no”,可以通过“yes”来开启aof功能 24 | ##只有在“yes”下,aof重写/文件同步等特性才会生效 25 | appendonly yes 26 | 27 | ##指定aof文件名称 28 | appendfilename appendonly.aof 29 | 30 | ##指定aof操作中文件同步策略,有三个合法值:always everysec no,默认为everysec 31 | appendfsync everysec 32 | 33 | ##在aof-rewrite期间,appendfsync是否暂缓文件同步,"no"表示“不暂缓”,“yes”表示“暂缓”,默认为“no” 34 | no-appendfsync-on-rewrite no 35 | 36 | ##aof文件rewrite触发的最小文件尺寸(mb,gb),只有大于此aof文件大于此尺寸是才会触发rewrite,默认“64mb”,建议“512mb” 37 | auto-aof-rewrite-min-size 64mb 38 | 39 | ##相对于“上一次”rewrite,本次rewrite触发时aof文件应该增长的百分比。 40 | ##每一次rewrite之后,redis都会记录下此时“新aof”文件的大小(例如A),那么当aof文件增长到A*(1 + p)之后 41 | ##触发下一次rewrite,每一次aof记录的添加,都会检测当前aof文件的尺寸。 42 | auto-aof-rewrite-percentage 100 43 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/image/redis-slave.conf: -------------------------------------------------------------------------------- 1 | ## 以下为rdb配置 2 | #dbfilename:持久化数据存储在本地的文件 3 | dbfilename dump.rdb 4 | 5 | #dir:持久化数据存储在本地的路径,如果是在/redis/redis-3.0.6/src下启动的redis-cli,则数据会存储在当前src目录下 6 | dir /data 7 | 8 | ##snapshot触发的时机,save 9 | ##如下为900秒后,至少有一个变更操作,才会snapshot 10 | ##对于此值的设置,需要谨慎,评估系统的变更操作密集程度 11 | ##可以通过“save “””来关闭snapshot功能 12 | #save时间,以下分别表示更改了1个key时间隔900s进行持久化存储;更改了10个key300s进行存储;更改10000个key60s进行存储。 13 | save 900 1 14 | save 300 10 15 | save 60 10000 16 | 17 | 
##当snapshot时出现错误无法继续时,是否阻塞客户端“变更操作”,“错误”可能因为磁盘已满/磁盘故障/OS级别异常等 18 | stop-writes-on-bgsave-error yes 19 | ##是否启用rdb文件压缩,默认为“yes”,压缩往往意味着“额外的cpu消耗”,同时也意味这较小的文件尺寸以及较短的网络传输时间 20 | rdbcompression yes 21 | 22 | ##以下为aof配置 23 | ##此选项为aof功能的开关,默认为“no”,可以通过“yes”来开启aof功能 24 | ##只有在“yes”下,aof重写/文件同步等特性才会生效 25 | appendonly yes 26 | 27 | ##指定aof文件名称 28 | appendfilename appendonly.aof 29 | 30 | ##指定aof操作中文件同步策略,有三个合法值:always everysec no,默认为everysec 31 | appendfsync everysec 32 | 33 | ##在aof-rewrite期间,appendfsync是否暂缓文件同步,"no"表示“不暂缓”,“yes”表示“暂缓”,默认为“no” 34 | no-appendfsync-on-rewrite no 35 | 36 | ##aof文件rewrite触发的最小文件尺寸(mb,gb),只有大于此aof文件大于此尺寸是才会触发rewrite,默认“64mb”,建议“512mb” 37 | auto-aof-rewrite-min-size 64mb 38 | 39 | ##相对于“上一次”rewrite,本次rewrite触发时aof文件应该增长的百分比。 40 | ##每一次rewrite之后,redis都会记录下此时“新aof”文件的大小(例如A),那么当aof文件增长到A*(1 + p)之后 41 | ##触发下一次rewrite,每一次aof记录的添加,都会检测当前aof文件的尺寸。 42 | auto-aof-rewrite-percentage 100 43 | 44 | #指定主节点 45 | slaveof %master-ip% %master-port% 46 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/image/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright 2014 The Kubernetes Authors. 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | function launchmaster() { 18 | if [[ ! -e /data ]]; then 19 | echo "Redis master data doesn't exist, data won't be persistent!" 20 | mkdir /data 21 | fi 22 | if [[ -n ${PASSWORD} ]]; then 23 | echo "requirepass ${PASSWORD}" >> /redis-master/redis.conf 24 | echo "masterauth ${PASSWORD}" >> /redis-master/redis.conf 25 | fi 26 | redis-server /redis-master/redis.conf --protected-mode no 27 | } 28 | 29 | function launchsentinel() { 30 | while true; do 31 | # SENTINEL命令参考:http://doc.redisfans.com/topic/sentinel.html 32 | master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1) 33 | if [[ -n ${master} ]]; then 34 | master="${master//\"}" 35 | else 36 | master=$(hostname -i) 37 | fi 38 | 39 | redis-cli -h ${master} INFO 40 | if [[ "$?" == "0" ]]; then 41 | break 42 | fi 43 | echo "Connecting to master failed. Waiting..." 
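    # sentinel 可能尚未选出 master 或暂时不可达,等待 10 秒后重试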
44 | sleep 10 45 | done 46 | 47 | mkdir /redis-sentinel 48 | sentinel_conf=/redis-sentinel/sentinel.conf 49 | 50 | echo "sentinel monitor mymaster ${master} 6379 2" > ${sentinel_conf} 51 | echo "sentinel down-after-milliseconds mymaster 30000" >> ${sentinel_conf} 52 | echo "sentinel failover-timeout mymaster 180000" >> ${sentinel_conf} 53 | echo "sentinel parallel-syncs mymaster 1" >> ${sentinel_conf} 54 | echo "min-slaves-to-write 1" >> ${sentinel_conf} 55 | echo "min-slaves-max-lag 10" >> ${sentinel_conf} 56 | echo "bind 0.0.0.0" >> ${sentinel_conf} 57 | if [[ -n ${PASSWORD} ]]; then 58 | echo "sentinel auth-pass mymaster ${PASSWORD}" >> ${sentinel_conf} 59 | fi 60 | redis-sentinel ${sentinel_conf} --protected-mode no 61 | } 62 | 63 | function launchslave() { 64 | if [[ ! -e /data ]]; then 65 | echo "Redis master data doesn't exist, data won't be persistent!" 66 | mkdir /data 67 | fi 68 | while true; do 69 | master=$(redis-cli -h ${REDIS_SENTINEL_SERVICE_HOST} -p ${REDIS_SENTINEL_SERVICE_PORT} --csv SENTINEL get-master-addr-by-name mymaster | tr ',' ' ' | cut -d' ' -f1) 70 | if [[ -n ${master} ]]; then 71 | master="${master//\"}" 72 | else 73 | echo "Failed to find master." 74 | sleep 60 75 | exit 1 76 | fi 77 | redis-cli -h ${master} INFO 78 | if [[ "$?" == "0" ]]; then 79 | break 80 | fi 81 | echo "Connecting to master failed. Waiting..." 82 | sleep 10 83 | done 84 | sed -i "s/%master-ip%/${master}/" /redis-slave/redis.conf 85 | sed -i "s/%master-port%/6379/" /redis-slave/redis.conf 86 | if [[ -n ${PASSWORD} ]]; then 87 | echo "requirepass ${PASSWORD}" >> /redis-slave/redis.conf 88 | echo "masterauth ${PASSWORD}" >> /redis-slave/redis.conf 89 | fi 90 | redis-server /redis-slave/redis.conf --protected-mode no 91 | } 92 | 93 | 94 | if [[ "${MASTER}" == "true" ]]; then 95 | launchmaster 96 | exit 0 97 | fi 98 | 99 | if [[ "${SENTINEL}" == "true" ]]; then 100 | launchsentinel 101 | exit 0 102 | fi 103 | 104 | launchslave 105 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/redis-master-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: redis-master 5 | labels: 6 | redis-sentinel: "true" 7 | spec: 8 | containers: 9 | - name: redis-master 10 | image: registry.cn-hangzhou.aliyuncs.com/szss/redis:4.0.6 11 | imagePullPolicy: Always 12 | env: 13 | - name: MASTER 14 | value: "true" 15 | - name: PASSWORD 16 | value: "123456" 17 | ports: 18 | - containerPort: 6379 19 | name: redis-server 20 | readinessProbe: 21 | exec: 22 | command: ['redis-cli','-a','123456', 'info', 'server'] 23 | - name: redis-sentinel 24 | image: registry.cn-hangzhou.aliyuncs.com/szss/redis:4.0.6 25 | imagePullPolicy: Always 26 | env: 27 | - name: SENTINEL 28 | value: "true" 29 | - name: PASSWORD 30 | value: "123456" 31 | ports: 32 | - containerPort: 26379 33 | name: redis-sentinel 34 | readinessProbe: 35 | exec: 36 | command: ['redis-cli','-p','26379', 'info', 'server'] 37 | 38 | 39 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/redis-sentinel-deploy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: redis-sentinel 5 | spec: 6 | replicas: 3 7 | template: 8 | metadata: 9 | labels: 10 | app: redis-sentinel 11 | redis-sentinel: "true" 12 | spec: 13 | containers: 14 | - name: redis-sentinel 15 | image: 
registry.cn-hangzhou.aliyuncs.com/szss/redis:4.0.6 16 | imagePullPolicy: Always 17 | env: 18 | - name: SENTINEL 19 | value: "true" 20 | - name: PASSWORD 21 | value: "123456" 22 | ports: 23 | - containerPort: 26379 24 | name: redis-sentinel 25 | volumes: 26 | - name: redis-sentinel-storage 27 | affinity: 28 | podAntiAffinity: 29 | preferredDuringSchedulingIgnoredDuringExecution: 30 | - weight: 100 31 | podAffinityTerm: 32 | labelSelector: 33 | matchExpressions: 34 | - key: app 35 | operator: In 36 | values: 37 | - redis-sentinel 38 | topologyKey: kubernetes.io/hostname 39 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/redis-sentinel-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis-sentinel 5 | labels: 6 | app: redis-sentinel 7 | spec: 8 | ports: 9 | - port: 26379 10 | name: redis-sentinel 11 | selector: 12 | redis-sentinel: "true" 13 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/redis-slave-statefulset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: StatefulSet 3 | metadata: 4 | name: redis-slave 5 | spec: 6 | serviceName: redis-slave 7 | replicas: 3 8 | template: 9 | metadata: 10 | labels: 11 | app: redis-slave 12 | spec: 13 | containers: 14 | - name: redis-slave 15 | image: registry.cn-hangzhou.aliyuncs.com/szss/redis:4.0.6 16 | imagePullPolicy: Always 17 | env: 18 | - name: PASSWORD 19 | value: "123456" 20 | ports: 21 | - containerPort: 6379 22 | name: redis-slave 23 | readinessProbe: 24 | exec: 25 | command: ['redis-cli','-a','123456', 'info', 'server'] 26 | volumeMounts: 27 | - name: redis-slave-storage 28 | mountPath: /data 29 | volumes: 30 | - name: redis-slave-storage 31 | affinity: 32 | podAntiAffinity: 33 | preferredDuringSchedulingIgnoredDuringExecution: 34 | - weight: 100 35 | podAffinityTerm: 36 | labelSelector: 37 | matchExpressions: 38 | - key: app 39 | operator: In 40 | values: 41 | - redis-slave 42 | topologyKey: kubernetes.io/hostname 43 | volumeClaimTemplates: 44 | - metadata: 45 | name: redis-slave-storage 46 | annotations: 47 | volume.beta.kubernetes.io/storage-class: "aliyun-nas" 48 | spec: 49 | accessModes: [ "ReadWriteOnce" ] 50 | resources: 51 | requests: 52 | storage: 10Gi 53 | -------------------------------------------------------------------------------- /yaml/redis/redis-sentinel/redis-slave-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: redis-slave 5 | labels: 6 | app: redis-slave 7 | spec: 8 | type: NodePort 9 | ports: 10 | - name: redis-slave 11 | port: 6379 12 | nodePort: 30080 13 | selector: 14 | app: redis-slave 15 | -------------------------------------------------------------------------------- /yaml/rocketmq/README.md: -------------------------------------------------------------------------------- 1 | rocketmq kubernetes镜像和脚本请参考下面的项目: 2 | https://github.com/zhuchuangang/rocketmq-docker 3 | -------------------------------------------------------------------------------- /yaml/storage-class/README.md: -------------------------------------------------------------------------------- 1 | # kubernetes 基于 StorageClass 的 NFS 动态卷 2 | http://blog.xianshiyue.com/216 3 | 4 | # StatefulSet存储 5 | http://blog.csdn.net/styshoo/article/details/73731993 6 | 
/yaml/rocketmq/README.md: -------------------------------------------------------------------------------- 1 | For the rocketmq Kubernetes image and scripts, please refer to the project below: 2 | https://github.com/zhuchuangang/rocketmq-docker 3 | -------------------------------------------------------------------------------- /yaml/storage-class/README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes NFS dynamic volumes based on StorageClass 2 | http://blog.xianshiyue.com/216 3 | 4 | # StatefulSet storage 5 | http://blog.csdn.net/styshoo/article/details/73731993 6 | 7 | https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/ 8 | 9 | [Official scripts] https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client 10 | -------------------------------------------------------------------------------- /yaml/storage-class/aliyun-nas-storageclass.yaml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: aliyun-nas 5 | provisioner: nas.aliyun.com/nfs # or choose another name; it must match the deployment's PROVISIONER_NAME env var 6 | -------------------------------------------------------------------------------- /yaml/storage-class/clusterrole.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRole 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: nfs-client-provisioner-runner 5 | rules: 6 | - apiGroups: [""] 7 | resources: ["persistentvolumes"] 8 | verbs: ["get", "list", "watch", "create", "delete"] 9 | - apiGroups: [""] 10 | resources: ["persistentvolumeclaims"] 11 | verbs: ["get", "list", "watch", "update"] 12 | - apiGroups: ["storage.k8s.io"] 13 | resources: ["storageclasses"] 14 | verbs: ["get", "list", "watch"] 15 | - apiGroups: [""] 16 | resources: ["events"] 17 | verbs: ["list", "watch", "create", "update", "patch"] 18 | -------------------------------------------------------------------------------- /yaml/storage-class/clusterrolebinding.yaml: -------------------------------------------------------------------------------- 1 | kind: ClusterRoleBinding 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | metadata: 4 | name: run-nfs-client-provisioner 5 | subjects: 6 | - kind: ServiceAccount 7 | name: nfs-client-provisioner 8 | namespace: default 9 | roleRef: 10 | kind: ClusterRole 11 | name: nfs-client-provisioner-runner 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /yaml/storage-class/nfs-client-deploy.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: extensions/v1beta1 3 | metadata: 4 | name: nfs-client-provisioner 5 | namespace: default 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: 13 | app: nfs-client-provisioner 14 | spec: 15 | serviceAccount: nfs-client-provisioner 16 | restartPolicy: Always 17 | containers: 18 | - name: redis-nfs-client-provisioner 19 | image: registry.cn-hangzhou.aliyuncs.com/szss_quay_io/nfs-client-provisioner:v2.0.1 20 | imagePullPolicy: Always 21 | volumeMounts: 22 | - name: nfs-client-root 23 | mountPath: /persistentvolumes 24 | env: 25 | - name: PROVISIONER_NAME 26 | value: nas.aliyun.com/nfs 27 | - name: NFS_SERVER 28 | value: xxxx.cn-hangzhou.nas.aliyuncs.com 29 | - name: NFS_PATH 30 | value: / 31 | volumes: 32 | - name: nfs-client-root 33 | nfs: 34 | server: xxxx.cn-hangzhou.nas.aliyuncs.com 35 | path: / 36 | -------------------------------------------------------------------------------- /yaml/storage-class/serviceaccount.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: nfs-client-provisioner 5 | --------------------------------------------------------------------------------
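Once the ServiceAccount, RBAC objects, StorageClass and provisioner Deployment above have been applied, any PersistentVolumeClaim that references the aliyun-nas class (for example via the volume.beta.kubernetes.io/storage-class annotation used in redis-slave-statefulset.yaml) gets its volume provisioned on the NAS automatically. A rough sketch of the apply order, run from the yaml/storage-class directory, plus a throw-away test claim; the claim name test-claim is made up for illustration:

```bash
kubectl apply -f serviceaccount.yaml
kubectl apply -f clusterrole.yaml
kubectl apply -f clusterrolebinding.yaml
kubectl apply -f aliyun-nas-storageclass.yaml
kubectl apply -f nfs-client-deploy.yaml

# Create a small claim against the aliyun-nas class to verify dynamic provisioning
cat <<EOF | kubectl apply -f -
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: test-claim
  annotations:
    volume.beta.kubernetes.io/storage-class: "aliyun-nas"
spec:
  accessModes: [ "ReadWriteOnce" ]
  resources:
    requests:
      storage: 1Gi
EOF

# The claim should move from Pending to Bound once the provisioner has created the volume
kubectl get pvc test-claim
```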
/yaml/traefik/README.md: -------------------------------------------------------------------------------- 1 | # 1 Permissions 2 | Kubernetes 1.6 and later use RBAC for access control, so the traefik-rbac.yaml file needs to be applied first. 3 | 4 | # 2 Deploy the DaemonSet 5 | The official traefik documentation https://docs.traefik.io/user-guide/kubernetes/ describes two deployment options, a DaemonSet and a Deployment; here we choose the DaemonSet. 6 | The deployment manifest is the traefik-ds.yaml file. 7 | 8 | ***The traefik image binds host port 8080 by default, so running traefik on a master node would cause a port conflict. The - --web.address=:8081 setting in traefik-ds.yaml changes the default 8080 binding to avoid the conflict.*** 9 | 10 | # 3 Deploy the Ingress 11 | ```yaml 12 | apiVersion: extensions/v1beta1 13 | kind: Ingress 14 | metadata: 15 | name: traefik-ingress 16 | spec: 17 | rules: 18 | - host: registry.test.com 19 | http: 20 | paths: 21 | - path: /registry-peer01 22 | backend: 23 | serviceName: registry-peer01 24 | servicePort: 8761 25 | - path: /registry-peer02 26 | backend: 27 | serviceName: registry-peer02 28 | servicePort: 8761 29 | - path: /registry-peer03 30 | backend: 31 | serviceName: registry-peer03 32 | servicePort: 8761 33 | ``` 34 | We bind registry.test.com to reach the three services; for each backend only the service name and service port need to be specified. 35 | 36 | 37 | If the domain name is already registered, it must resolve to the external IP of any node running traefik. 38 | 39 | If the domain name is not registered and external access is required, map the hostname on the client machine to the external IP of any node running traefik, as follows: 40 | ```bash 41 | echo "10.10.0.10 registry.test.com" | sudo tee -a /etc/hosts 42 | ``` 43 | 44 | For internal access, map the hostname on the internal client machines to the internal IP of any node running traefik, as follows: 45 | ```bash 46 | echo "192.168.0.10 registry.test.com" | sudo tee -a /etc/hosts 47 | ``` 48 | 49 | # 4 Deploy the UI 50 | To expose the traefik web UI console externally, simply apply the ui.yaml configuration file. 51 | 52 | 53 | Official documentation: 54 | https://docs.traefik.io/user-guide/kubernetes/ 55 | 56 | Reference: 57 | https://mritd.me/2016/12/06/try-traefik-on-kubernetes/ 58 | 59 | HTTPS certificate configuration: 60 | https://medium.com/@patrickeasters/using-traefik-with-tls-on-kubernetes-cb67fb43a948 61 | -------------------------------------------------------------------------------- /yaml/traefik/traefik-ds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: traefik-ingress-controller 6 | namespace: kube-system 7 | --- 8 | kind: DaemonSet 9 | apiVersion: extensions/v1beta1 10 | metadata: 11 | name: traefik-ingress-controller 12 | namespace: kube-system 13 | labels: 14 | k8s-app: traefik-ingress-lb 15 | spec: 16 | template: 17 | metadata: 18 | labels: 19 | k8s-app: traefik-ingress-lb 20 | name: traefik-ingress-lb 21 | spec: 22 | serviceAccountName: traefik-ingress-controller 23 | terminationGracePeriodSeconds: 60 24 | hostNetwork: true 25 | containers: 26 | - image: traefik 27 | name: traefik-ingress-lb 28 | ports: 29 | - name: http 30 | containerPort: 80 31 | hostPort: 80 32 | - name: admin 33 | containerPort: 8081 34 | securityContext: 35 | privileged: true 36 | args: 37 | - -d 38 | - --web 39 | - --web.address=:8081 40 | - --kubernetes 41 | --- 42 | kind: Service 43 | apiVersion: v1 44 | metadata: 45 | name: traefik-ingress-service 46 | spec: 47 | selector: 48 | k8s-app: traefik-ingress-lb 49 | ports: 50 | - protocol: TCP 51 | port: 80 52 | name: web 53 | - protocol: TCP 54 | port: 8081 55 | name: admin 56 | type: NodePort 57 | -------------------------------------------------------------------------------- /yaml/traefik/traefik-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: extensions/v1beta1 2 | kind: Ingress 3 | metadata: 4 | name: traefik-ingress 5 | spec: 6 | rules: 7 | - host: traefik.www.test.com 8 | http: 9 | paths: 10 | - path: / 11 | backend: 12 | serviceName: registry-peer01 13 | servicePort: 8761 14 | --------------------------------------------------------------------------------
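If you just want to verify the Ingress rules above without editing /etc/hosts, you can send the Host header explicitly. This is only a sketch: 10.10.0.10 is the same example node IP used in the README and stands in for any node running traefik, which listens on host port 80 because traefik-ds.yaml sets hostNetwork: true.

```bash
# Route the request through traefik by supplying the Host used in the Ingress rule
curl -H "Host: registry.test.com" http://10.10.0.10/registry-peer01

# The admin endpoint configured by --web.address=:8081 should also answer on the node
curl http://10.10.0.10:8081/
```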
/yaml/traefik/traefik-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | metadata: 5 | name: traefik-ingress-controller 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - services 11 | - endpoints 12 | - secrets 13 | verbs: 14 | - get 15 | - list 16 | - watch 17 | - apiGroups: 18 | - extensions 19 | resources: 20 | - ingresses 21 | verbs: 22 | - get 23 | - list 24 | - watch 25 | --- 26 | kind: ClusterRoleBinding 27 | apiVersion: rbac.authorization.k8s.io/v1beta1 28 | metadata: 29 | name: traefik-ingress-controller 30 | roleRef: 31 | apiGroup: rbac.authorization.k8s.io 32 | kind: ClusterRole 33 | name: traefik-ingress-controller 34 | subjects: 35 | - kind: ServiceAccount 36 | name: traefik-ingress-controller 37 | namespace: kube-system 38 | -------------------------------------------------------------------------------- /yaml/traefik/ui.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: traefik-web-ui 5 | namespace: kube-system 6 | spec: 7 | selector: 8 | k8s-app: traefik-ingress-lb 9 | ports: 10 | - port: 80 11 | targetPort: 8081 12 | --- 13 | apiVersion: extensions/v1beta1 14 | kind: Ingress 15 | metadata: 16 | name: traefik-web-ui 17 | namespace: kube-system 18 | annotations: 19 | kubernetes.io/ingress.class: traefik 20 | spec: 21 | rules: 22 | - host: traefik-ui.local 23 | http: 24 | paths: 25 | - backend: 26 | serviceName: traefik-web-ui 27 | servicePort: 80 28 | -------------------------------------------------------------------------------- /yaml/zookeeper/README.md: -------------------------------------------------------------------------------- 1 | # ZooKeeper cluster 2 | 3 | References: 4 | 5 | https://kubernetes.io/docs/tutorials/stateful-application/zookeeper/ 6 | 7 | https://kubernetes.io/cn/docs/tutorials/stateful-application/zookeeper/ 8 | --------------------------------------------------------------------------------