├── .vscode └── settings.json ├── 00-sre ├── chaos ├── onekey │ ├── get-docker.sh │ ├── init_os.sh │ ├── install_docker_ce.sh │ ├── install_k8s.sh │ ├── kubeadm_init_k8s.sh │ └── update_kernel.sh └── repo.md ├── 01-troubleshooting ├── c7添加中文字体.md ├── dns.md ├── initial-c7-network-errors.md ├── install-ffmpeg.md ├── k8s │ ├── change_master_ip.md │ ├── command.md │ └── k8s_errors.md ├── log │ └── c7-install-log.io.md ├── remote-ssh-errors.md ├── tomcat-accesslog.md └── vim-use.md ├── CICD └── jenkins │ └── docker-install-jenkins.md ├── README.md ├── bigdata ├── cloudera │ └── c6-install-cdh.md └── hbase │ └── hbase-use.md ├── cloud └── aliyun │ ├── aliyun-edu-learn.md │ ├── ecs-use.md │ ├── 云安全专业 │ └── 入学测试题.md │ └── 云计算专业 │ └── 入学测试题.md ├── config manage ├── ansible │ ├── ansible-errors.md │ ├── ansible-plugins.md │ ├── ansible-tower.md │ ├── c7-install-ansible-use.md │ ├── c7-install-awx.md │ └── playbook │ │ └── initial-vm-env-for-k8s.md └── config │ ├── default.conf │ ├── ftp.aniu.so.conf │ ├── grafana.aniu.so.conf │ ├── jenkins.aniu.so.conf │ ├── jira.aniu.so.conf │ ├── jira.aniu.so.conf_bak │ ├── jumpserver.aniu.so.conf │ ├── log.aniu.so.conf │ ├── mattermost.aniu.so.conf │ ├── netdata.aniu.so.conf │ ├── oa.aniu.so.conf │ ├── piwik.aniu.so.conf │ ├── repo.aniu.so.conf │ ├── sonar.aniu.so.conf │ ├── ssl.conf │ ├── test.aniu.co.conf │ ├── virtual.conf │ ├── wiki.aniu.so.conf │ └── zabbix.aniu.so.conf ├── containers ├── docker │ ├── docker-ce │ │ └── c7-docker-turorial.md │ ├── docker-compose.md │ ├── docker-direct-lvm.md │ ├── docker-errors.md │ ├── docker-guide.md │ ├── manager-devicemapper.md │ ├── test │ └── volumes.md ├── kubernetes │ ├── dashboard.yaml │ ├── install_helm.md │ ├── install_k8s_cluster_kubeadm.md │ ├── k8s+prometheus-grafana.md │ ├── k8s-proxy.md │ ├── kubeadm-create-k8s-cluster.md │ ├── kubeadm-install-k8s.md │ ├── kubernetes-Guide..md │ ├── kubernetes-errors.md │ ├── quick-install-k8s.md │ └── tasks │ │ └── installing-kubeadm.md ├── openshift 
│ ├── c7-install-openshift.md │ ├── learn-openshift.md │ ├── openshift-error.md │ └── openshift-origin-latest │ │ ├── openshift-use.md │ │ └── 安装和配置 │ │ └── 安装集群 │ │ ├── 安装OpenShift容器注册表的独立部署.md │ │ └── 系统准备.md └── rancher │ ├── quick-start-rancher2.0.md │ └── rancher-learn.md ├── database ├── mariadb │ ├── MHA.md │ └── yum-install-mariadb.md ├── mysql │ ├── grant-privileges.md │ └── mysql-input-txt.md └── postgresql │ ├── postgresql-grant.md │ ├── postgresql-install-use.md │ └── postgresql-use.md ├── images ├── ovirt-initial-dashboard.png └── vnc-viewwr-centos7.png ├── initial ├── add-hard-drive.md ├── add_repo.md ├── config-ssh-server.md ├── firewalld-use.md ├── lvm │ ├── lvm-errors.md │ └── lvm-use.md ├── timing-contributions.md ├── virtual-machine-config.md ├── 初始化设置.md └── 系统安装.md ├── language ├── nodejs │ └── 通过包管理器安装Node.js.md └── python │ └── python-errors.md ├── load balancer ├── f5 │ └── f5-use.md ├── haproxy │ └── install-haproxy.md └── pound │ └── c7-install-pound.md ├── memory cache └── redis │ ├── 6379.conf │ ├── Redis_Cluster.pdf │ ├── cluster-slots.md │ ├── cluster │ └── cluster-command.md │ ├── create-cluster │ ├── install.md │ ├── onekey-install-redis-cluster.md │ ├── redis-cluster-data-migrate.md │ ├── redis-cluster.md │ ├── redis-errors.md │ ├── redis-master.conf │ ├── redis-optimization.md │ ├── redis-replication.md │ ├── redis-sentinel.md │ └── redis.service ├── monitoring ├── centos7+prometheus.md ├── netdata ├── netdata+prometheus+grafana.md ├── telegraf+influxdb+grafana.md └── zabbix │ ├── c7-install-zabbix-agent.md │ └── monitor-tomcat-discovery.md ├── mq └── rabbitmq │ ├── backup-restore.md │ ├── c7-install-rabbitmq.md │ ├── install-erlang.md │ ├── rabbitmq-cluster.md │ ├── rabbitmq-erros.md │ ├── rabbitmq-management.md │ ├── rabbitmq-plugins.md │ └── rabbitmq-use.md ├── network ├── bonding-config.md ├── disbale-ipv6.md ├── ntp │ └── chrony-use.md ├── packetfence │ ├── PacketFence_Installation_Guide.pdf │ └── learn-packetfence.md ├── 
route │ └── wireless-route-config.md ├── switch │ ├── Catalyst-3750-X.pdf │ ├── cisco-3750x.md │ └── config-vlan.md └── vpn │ ├── free-vpn.md │ ├── openvpn │ ├── c7-install-openvpn.md │ └── learn-openvpn.md │ └── shadowsocks │ ├── c7-install-ss.md │ ├── c7-yum-install-ss.md │ ├── errors.md │ ├── shadowsocks-libev-debian.sh │ └── shadowsocks-manager.md ├── reading notes ├── Enterprise Security │ └── opensource-network-security.md └── read-book-list.md ├── security ├── jumpserver │ ├── C7-install-jumpserver.md │ ├── restart-coco.md │ └── update.md ├── kali │ ├── Kali渗透测试技术实战.pdf │ ├── kali-learn.md │ └── vm-install-kali.md ├── set-password-rules.md └── ssl │ ├── free_ssl_create.md │ └── free_ssl_website.md ├── shell ├── cmd_track.sh ├── shell-learn-guide.md ├── ssh │ └── set-batch-cert.md └── tools ├── storage ├── glusterfs │ ├── c7-install-gluster.md │ ├── glusterfs-command-use.md │ └── glusterfs-for-ovirt.md └── nfs │ ├── c7-install-nfs.md │ └── nfs-for-ovirt.md ├── tooling ├── accet │ ├── c7-install-Snipe-IT.md │ └── opendevops.md ├── apm │ ├── free-apm.md │ └── pinpoint │ │ ├── C7-install-pinpoint.md │ │ ├── pinpoint-docker.md │ │ ├── pinpoint-overview.md │ │ └── troubleshooting.md ├── elk │ └── c7-install-elasticsearch.md ├── itil │ └── itop.md └── jvm │ └── jvm-config.md ├── version control ├── gitlab │ ├── Admin-tools.md │ ├── Administrator-documentation.md │ ├── GitLab Prometheus.md │ ├── Initial-OmniAuth.md │ ├── centos-install-gitlab.md │ ├── db-config.md │ ├── gitlab-bakcup-restore.md │ ├── gitlab-ce.md │ ├── gitlab-ci-multi-runner │ ├── gitlab-errors.md │ ├── gitlab-grant-set.md │ ├── gitlab-intergration-jira.md │ ├── gitlab-nginx-ssl.md │ ├── gitlab-use-turorial.md │ └── open-remote-connect-postgresql-with-omnibus-gitlab.md └── svn │ └── c7-install-svn.md ├── virtualization ├── cloudstack │ └── c7-install-cloudstack.md ├── kvm │ └── kvm-guests-migrate-ovirt.md ├── openstack │ └── packstack │ │ └── c7-install-packstack.md ├── ovirt │ ├── 
Red_Hat_Enterprise_Virtualization-3.4-Administration_Guide-zh-CN.pdf │ ├── create-vm.md │ ├── install-ovirt-4.2.md │ ├── install-ovirt-command.md │ ├── ovirt-backup-restore.md │ ├── ovirt-clone-vm-use.md │ ├── ovirt-errors.md │ ├── ovirt-guset-agent.md │ ├── ovirt-iso.md │ ├── ovirt-ldap-roles.md │ ├── ovirt-notifier-email.md │ ├── ovirt-remove.md │ ├── ovirt-storage.md │ ├── ovirt-use-virsh.md │ ├── ovirt-vm-attach-usb-device.md │ ├── ovirt-vm-types.md │ ├── sealing-linux-vm.md │ ├── vm-attach-disk.md │ ├── vm-netowrk-performance-test.md │ └── vm-template.md └── xen │ └── xen-migrate-ovirt.md └── web ├── jboss └── jboss-learn.md ├── nginx ├── c7-install-nginx.md └── docker-install-nginx.md ├── php ├── C7-install-php7.md └── event.md ├── resin └── resin-learn.md ├── tomcat └── c7-install-tomcat.md └── wordpress └── install-wordpress.md /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.minimap.enabled": false 3 | } -------------------------------------------------------------------------------- /00-sre/onekey/install_docker_ce.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # command from office website (https://docs.docker.com/) 4 | 5 | # remove old docker 6 | sudo yum remove docker \ 7 | docker-client \ 8 | docker-client-latest \ 9 | docker-common \ 10 | docker-latest \ 11 | docker-latest-logrotate \ 12 | docker-logrotate \ 13 | docker-engine 14 | 15 | # config docker_ce repo 16 | sudo yum install -y yum-utils device-mapper-persistent-data lvm2 17 | #sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo 18 | yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo 19 | 20 | # install latest docker ce 21 | sudo yum install docker-ce docker-ce-cli containerd.io -y 22 | sudo systemctl start docker && sudo systemctl enable docker 23 | 24 | # use office shell 25 | 
26 | #curl -fsSL https://get.docker.com -o get-docker. 27 | #sudo sh get-docker.sh 28 | 29 | 30 | 31 | docker pull prom/node-exporter 32 | docker pull grafana/grafana 33 | docker pull prom/prometheus 34 | 35 | # start node_exporter 36 | 37 | docker run -d -p 9100:9100 \ 38 | -v "/proc:/host/proc:ro" \ 39 | -v "/sys:/host/sys:ro" \ 40 | -v "/:/rootfs:ro" \ 41 | --net="host" \ 42 | prom/node-exporter 43 | 44 | 45 | 46 | # start prometheus 47 | 48 | mkdir /etc/prometheus 49 | 50 | vim prometheus.yml 51 | 52 | global: 53 | scrape_interval: 60s 54 | evaluation_interval: 60s 55 | 56 | scrape_configs: 57 | - job_name: prometheus 58 | static_configs: 59 | - targets: ['localhost:9090'] 60 | labels: 61 | instance: prometheus 62 | 63 | - job_name: linux 64 | static_configs: 65 | - targets: ['10.10.124.58:9100'] 66 | labels: 67 | instance: localhost 68 | 69 | docker run -d \ 70 | -p 9090:9090 \ 71 | -v /etc/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml \ 72 | prom/prometheus 73 | 74 | # start grafana 75 | 76 | mkdir /data/grafana-storage 77 | 78 | docker run -d \ 79 | -p 3000:3000 \ 80 | --name=grafana \ 81 | -v /data/grafana-storage:/var/lib/grafana \ 82 | grafana/grafana 83 | 84 | 85 | # start elk 86 | 87 | grafana 3000 88 | prometheus 9090 9100 89 | consul 8500 90 | elastic 9200 91 | kibana 5601 92 | rancher 80/443 93 | 94 | # start influxdb 95 | 96 | docker run -p 8086:8086 -v /data/influxdb:/var/lib/influxdb influxdb 97 | 98 | mkdir /etc/influxdb 99 | docker run -p 8086:8086 \ 100 | -v /data/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf:ro \ 101 | influxdb -config /etc/influxdb/influxdb.conf 102 | 103 | docker run --name=influxdb -d -p 8086:8086 -v /data/influxdb:/var/lib/influxdb -v /data/influxdb/influxdb.conf:/etc/influxdb/influxdb.conf:ro influxdb 104 | /data 105 | # start consul 106 | docker run -d --net=host -e 'CONSUL_LOCAL_CONFIG={"skip_leave_on_interrupt": true}' consul agent -server -bind= -retry-join= -bootstrap-expect= 107 | 108 | 109 | docker 
run -p 8500:8500 -p 8600:8600/udp -v /data/consul:/etc/consul.d --name=consul consulconsul agent -server -bootstrap -ui -client=0.0.0.0 -advertise=10.10.124.58 -bind=10.10.124.58 -data-dir=/var/lib/consul -node=consul-1 -config-dir=/etc/consul.d 110 | # dev 111 | docker run -d --name=c1 -p 8500:8500 consul agent -dev -client=0.0.0.0 -bind=0.0.0.0 112 | # prod 113 | 114 | docker run -d --name=c1 -p 8500:8500 consul agent -server -bootstrap -ui -client=0.0.0.0 -bind=0.0.0.0 -node=consul-1 115 | 116 | IP=$(docker inspect --format '{{ .NetworkSettings.IPAddress }}' c1); echo $IP 117 | 172.17.0.5 118 | 119 | docker run -d -p 8500:8500 -p 8600:8600/udp --name=consul consul agent -server -bootstrap -ui -client=0.0.0.0 120 | 121 | docker run -d -p 8500:8500 --name=consul consul agent -server -ui \ 122 | -bind="10.10.124.58" \ 123 | -client="0.0.0.0" \ 124 | -bootstrap-expect="1" 125 | 126 | docker run -d --name c2 consul agent -dev -bind=0.0.0.0 -join=$IP 127 | -------------------------------------------------------------------------------- /00-sre/onekey/install_k8s.sh: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /00-sre/onekey/kubeadm_init_k8s.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # netwoprk add-on Calico 4 | 5 | kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.15.0 --pod-network-cidr=192.168.0.0/16 --token-ttl 0 6 | 7 | kubeadm join 192.168.1.103:6443 --token dse5zn.usj4nqvu069a7sgk \ 8 | --discovery-token-ca-cert-hash sha256:4034c8478d1f39b873d5f661f156b911a39ce1d5081b88e54e1c08dda453d63d 9 | 10 | # warnings 11 | 12 | vim /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 13 | 14 | # node install 15 | 16 | docker install 17 | 18 | -------------------------------------------------------------------------------- 
/00-sre/onekey/update_kernel.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | update_kernel() 4 | { 5 | rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org 6 | rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm 7 | yum --enablerepo=elrepo-kernel install -y kernel-lt kernel-lt-devel 8 | grub2-set-default 0 9 | reboot 10 | } 11 | 12 | update_kernel -------------------------------------------------------------------------------- /00-sre/repo.md: -------------------------------------------------------------------------------- 1 | 阿里镜像:https://opsx.alibaba.com/mirror 2 | 3 | googlekubernetes源: https://hub.docker.com/u/mirrorgooglecontainers/ 4 | 5 | 企业贡献: 6 | 搜狐开源镜像站:http://mirrors.sohu.com/ 7 | 8 | 网易开源镜像站:http://mirrors.163.com/ 9 | 10 | 阿里云开源镜像站:http://mirrors.aliyun.com/ 11 | 12 | Centos各个版本下载站: http://vault.centos.org/ 13 | 14 | 大学教学: 15 | 北京理工大学: 16 | 17 | http://mirror.bit.edu.cn(IPv4 only) 18 | 19 | http://mirror.bit6.edu.cn(IPv6 only) 20 | 21 | 北京交通大学: 22 | 23 | http://mirror.bjtu.edu.cn(IPv4 only) 24 | 25 | http://mirror6.bjtu.edu.cn(IPv6 only) 26 | 27 | http://debian.bjtu.edu.cn(IPv4+IPv6) 28 | 29 | 兰州大学:http://mirror.lzu.edu.cn/ 30 | 31 | 厦门大学:http://mirrors.xmu.edu.cn/ 32 | 33 | 上海交通大学: 34 | 35 | http://ftp.sjtu.edu.cn/(IPv4 only) 36 | 37 | http://ftp6.sjtu.edu.cn(IPv6 only) 38 | 39 | 清华大学: 40 | 41 | http://mirrors.tuna.tsinghua.edu.cn/(IPv4+IPv6) 42 | 43 | http://mirrors.6.tuna.tsinghua.edu.cn/(IPv6 only) 44 | 45 | http://mirrors.4.tuna.tsinghua.edu.cn/(IPv4 only) 46 | 47 | 天津大学:http://jx.tju.zyrj.org/ 48 | 49 | 中国科学技术大学: 50 | 51 | http://mirrors.ustc.edu.cn/(IPv4+IPv6) 52 | 53 | http://mirrors4.ustc.edu.cn/ 54 | 55 | http://mirrors6.ustc.edu.cn/ 56 | 57 | 西南大学:http://linux.swu.edu.cn/(正在建设中) 58 | 59 | 东北大学: 60 | 61 | http://mirror.neu.edu.cn/(IPv4 only) 62 | 63 | http://mirror.neu6.edu.cn/(IPv6 only) 64 | 65 | 电子科技大学:http://ubuntu.uestc.edu.cn/ 66 | 67 | 
青岛大学:http://mirror.qdu.edu.cn/ 68 | 69 | 重庆大学:http://mirrors.cqu.edu.cn 70 | 71 | 浙江大学:http://mirrors.zju.edu.cn 72 | 73 | 华中科技大学:http://mirrors.hust.edu.cn/ 74 | 75 | 中山大学:http://mirror.sysu.edu.cn/ 76 | 77 | 大连理工大学:http://mirror.dlut.edu.cn/ 78 | 79 | 中国科学技术大学:http://mirrors.ustc.edu.cn/ -------------------------------------------------------------------------------- /01-troubleshooting/c7添加中文字体.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/01-troubleshooting/c7添加中文字体.md -------------------------------------------------------------------------------- /01-troubleshooting/dns.md: -------------------------------------------------------------------------------- 1 | # 2 | A记录:将域名指向一个IPv4地址(例如:10.10.10.10),需要增加A记录 3 | CNAME记录:如果将域名指向一个域名,实现与被指向域名相同的访问效果,需要增加CNAME记录 4 | MX记录:建立电子邮箱服务,将指向邮件服务器地址,需要设置MX记录 5 | NS记录:域名解析服务器记录,如果要将子域名指定某个域名服务器来解析,需要设置NS记录 6 | TXT记录:可任意填写(可为空),通常用做SPF记录(反垃圾邮件)使用 7 | AAAA记录:将主机名(或域名)指向一个IPv6地址(例如:ff03:0:0:0:0:0:0:c1),需要添加AAAA记录 8 | SRV记录:记录了哪台计算机提供了哪个服务。格式为:服务的名字.协议的类型(例如:_example-server._tcp) 9 | 显性URL:将域名指向一个http(s)协议地址,访问域名时,自动跳转至目标地址(例如:将www.net.cn显性转发到www.hichina.com后,访问www.net.cn时,地址栏显示的地址为:www.hichina.com) 10 | 隐性URL:与显性URL类似,但隐性转发会隐藏真实的目标地址(例如:将www.net.cn隐性转发到www.hichina.com后,访问www.net.cn时,地址栏显示的地址仍然为:www.net.cn) -------------------------------------------------------------------------------- /01-troubleshooting/initial-c7-network-errors.md: -------------------------------------------------------------------------------- 1 | # 初始化centos7 时,使用nmcli d 查看网络设备报错如下: 2 | 3 | ``` 4 | [root@localhost ~]# nmcli d 5 | Error: Could not create NMClient object: GDBus.Error:org.freedesktop.DBus.Error.UnknownMethod: Method "GetManagedObjects" with signature "" on interface "org.freedesktop.DBus.ObjectManager" doesn't exist 6 | 7 | ``` 8 | 9 | - 解决:重启NetworkManager服务 10 | 11 | ``` 12 | systemctl restart 
NetworkManager 13 | 14 | [root@localhost ~]# nmcli d 15 | DEVICE TYPE STATE CONNECTION 16 | em1 ethernet connected em1 17 | em2 ethernet unavailable -- 18 | lo loopback unmanaged -- 19 | ``` -------------------------------------------------------------------------------- /01-troubleshooting/k8s/change_master_ip.md: -------------------------------------------------------------------------------- 1 | # kubeadm reset 2 | 3 | [root@master ~]# kubeadm reset 4 | [reset] Reading configuration from the cluster... 5 | [reset] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml' 6 | W0701 15:02:42.182657 22228 reset.go:98] [reset] Unable to fetch the kubeadm-config ConfigMap from cluster: failed to get config map: Get https://192.168.1.103:6443/api/v1/namespaces/kube-system/configmaps/kubeadm-config: dial tcp 192.168.1.103:6443: i/o timeout 7 | [reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted. 8 | [reset] Are you sure you want to proceed? [y/N]: y 9 | [preflight] Running pre-flight checks 10 | [reset] WARNING: Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted. 11 | [reset] Are you sure you want to proceed? [y/N]: y 12 | [preflight] Running pre-flight checks 13 | [reset] Stopping the kubelet service 14 | [reset] Unmounting mounted directories in "/var/lib/kubelet" 15 | [reset] Deleting contents of config directories: [/etc/kubernetes/manifests /etc/kubernetes/pki] 16 | [reset] Deleting files: [/etc/kubernetes/admin.conf /etc/kubernetes/kubelet.conf /etc/kubernetes/bootstrap-kubelet.conf /etc/kubernetes/controller-manager.conf /etc/kubernetes/scheduler.conf] 17 | [reset] Deleting contents of stateful directories: [/var/lib/kubelet /etc/cni/net.d /var/lib/dockershim /var/run/kubernetes] 18 | 19 | The reset process does not reset or clean up iptables rules or IPVS tables. 20 | If you wish to reset iptables, you must do so manually. 
21 | For example: 22 | iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X 23 | 24 | If your cluster was setup to utilize IPVS, run ipvsadm --clear (or similar) 25 | to reset your system's IPVS tables. 26 | 27 | The reset process does not clean your kubeconfig files and you must remove them manually. 28 | Please, check the contents of the $HOME/.kube/config file. 29 | 30 | # kubeadm init 31 | 32 | -------------------------------------------------------------------------------- /01-troubleshooting/k8s/command.md: -------------------------------------------------------------------------------- 1 | # 2 | 3 | kubectl create deployment first-deployment --image=katacoda/docker-http-server 4 | 5 | kubectl expose deployment first-deployment --port=80 --type=NodePort 6 | 7 | export PORT=$(kubectl get svc first-deployment -o go-template='{{range.spec.ports}}{{if .nodePort}}{{.nodePort}}{{"\n"}}{{end}}{{end}}') 8 | echo "Accessing host01:$PORT" -------------------------------------------------------------------------------- /01-troubleshooting/k8s/k8s_errors.md: -------------------------------------------------------------------------------- 1 | # k8s 常见报错处理 2 | 3 | - [WARNING IsDockerSystemdCheck]: detected "cgroupfs" as the Docker cgroup driver. The recommended driver is "systemd". 
Please follow the guide at https://kubernetes.io/docs/setup/cri/ 4 | error execution phase preflight: [preflight] Some fatal errors occurred: 5 | 6 | ``` 7 | cat > /etc/docker/daemon.json < /proc/sys/net/ipv4/ip_forward 31 | 32 | - [ERROR DirAvailable--var-lib-etcd]: /var/lib/etcd is not empty 33 | 34 | rm -rf /var/lib/etcd 35 | 36 | - failed to load Kubelet config file /var/lib/kubelet/config.yaml 37 | - unable to load client CA file /etc/kubernetes/pki/ca.crt 38 | - failed to run Kubelet: unable to load bootstrap kubeconfig: stat /etc/kubernetes/bootstrap-kubelet.conf: no such file or directory 39 | 40 | 背景:node 节点 kubeadm reset 后 重新kubeadm join失败 41 | 42 | - Failed to execute iptables-restore: exit status 1(invalid option -- '5') 43 | 44 | 解决:降低iptables版本,当前iptables-1.4.21-28.el7.x86_64,回滚为iptables-1.4.21-24.el7.x86_64 (centos7.5) 45 | 46 | - Error from server (Forbidden): secrets is forbidden: User "system:node:master" cannot create resource "secrets" in API group "" in the namespace "kube-system": can only read resources of this type 47 | 48 | export KUBECONFIG=/etc/kubernetes/admin.conf 49 | 50 | - Error from server (AlreadyExists): secrets "kubernetes-dashboard-certs" already exists 51 | 52 | kubectl delete -f kubernetes-dashboard.yaml 重新apply 53 | 54 | - The Service "kubernetes-dashboard" is invalid: spec.ports[0].nodePort: Forbidden: may not be used when `type` is 'ClusterIP' 55 | 56 | kubectl -n kube-system describe $(kubectl -n kube-system get secret -n kube-system -o name | grep namespace) | grep token 57 | 58 | 更改ClusterIP 为NodePort 59 | 60 | 61 | - had taints that the pod didn't tolerate 62 | 63 | kubectl taint nodes --all node-role.kubernetes.io/master- 64 | 65 | 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /01-troubleshooting/log/c7-install-log.io.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装log.io服务 2 | 3 | > 官网: http://logio.org/ 4 | 5 | 
- 添加epel源 6 | 7 | ``` 8 | yum install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 9 | `` 10 | 11 | ## 安裝Log.io: 12 | 13 | ``` 14 | # 安装依赖 15 | 16 | ``` 17 | # 安装开发工具包或者 yum install gcc-c++ 18 | yum install npm nodejs 19 | 20 | # npm是jabascript的软件包管理器,允许管理应用程序的依赖关系,也允许用户从npm注册表安装node.js应用程序。输入一个用户名来安装,筆者使用了“yunwei”用户 21 | npm install -g log.io --user "root" 22 | ``` 23 | 24 | ## 配置 Log.io 25 | 26 | > Log.io的Installed目录是〜/ .log.io,它是用户主目录下的一个隐藏目录,在prevoius步骤中用于安装,它有三个配置文件来控制它的工作性质。 27 | 28 | - harvester.conf 29 | 30 | > 这是收割机的配置文件,它不过是一个日志转发器,它不断监视日志文件的变化,向服务器发送新日志。 31 | 32 | ``` 33 | # vi ~/.log.io/harvester.conf 34 | # cat harvester.conf 35 | exports.config = { 36 | nodeName: "log.io", 37 | logStreams: { 38 | systeminfo: [ 39 | "/var/log/messages", 40 | "/var/log/secure" 41 | ], 42 | logioaccess: [ 43 | "/var/log/nginx/logio.aniu.so.access.log" 44 | ], 45 | logiolog: [ 46 | "/root/.log.io/log.io-server.log" 47 | ] 48 | }, 49 | server: { 50 | host: '192.168.0.24', 51 | port: 28777 52 | } 53 | } 54 | 55 | # 编辑log_server.conf 56 | [root@ecs-01 .log.io]# cat log_server.conf 57 | exports.config = { 58 | host: '192.168.0.24', 59 | port: 28777 60 | } 61 | 62 | # 编辑web_server.conf 63 | [root@nkmapi-1 .log.io]# cat web_server.conf 64 | exports.config = { 65 | host: '0.0.0.0', 66 | port: 28778, 67 | 68 | /* 69 | // Enable HTTP Basic Authentication 70 | auth: { 71 | user: "admin", 72 | pass: "1234" 73 | }, 74 | */ 75 | 76 | /* 77 | // Enable HTTPS/SSL 78 | ssl: { 79 | key: '/path/to/privatekey.pem', 80 | cert: '/path/to/certificate.pem' 81 | }, 82 | */ 83 | 84 | /* 85 | // Restrict access to websocket (socket.io) 86 | // Uses socket.io 'origins' syntax 87 | restrictSocket: '*:*', 88 | */ 89 | 90 | /* 91 | // Restrict access to http server (express) 92 | restrictHTTP: [ 93 | "192.168.29.*", # 笔者只改了这里 其他没改 94 | "192.168.0.*" 95 | ] 96 | */ 97 | 98 | } 99 | 100 | ``` 101 | 102 | - 配置启动脚本 103 | 104 | ``` 105 | # cat /etc/init.d/log.io 106 | #!/bin/bash 
107 | 108 | start() { 109 | echo "Starting log.io process..." 110 | /usr/bin/nohup /usr/bin/log.io-server >> /root/.log.io/log.io-server.log 2>&1 & 111 | /usr/bin/nohup /usr/bin/log.io-harvester >> /root/.log.io/log.io-harvester.log 2>&1 & 112 | } 113 | 114 | stop() { 115 | echo "Stopping io-log process..." 116 | pkill node 117 | } 118 | 119 | status() { 120 | echo "Status io-log process..." 121 | netstat -tlp | grep node 122 | } 123 | 124 | case "$1" in 125 | start) 126 | start 127 | ;; 128 | stop) 129 | stop 130 | ;; 131 | status) 132 | status 133 | ;; 134 | restart) 135 | echo "Restart log.io process..." 136 | $0 stop 137 | $0 start 138 | ;; 139 | *) 140 | echo "Usage: start|stop|restart|status" 141 | ;; 142 | esac 143 | ``` 144 | 145 | - 启动logio 146 | 147 | ``` 148 | # 正常安装配置完成,启动logio 149 | /etc/init.d/log.io start 150 | ``` 151 | 152 | - 浏览器查看 153 | 154 | ``` 155 | 156 | ``` 157 | 158 | ## 收集tomcat实时日志 159 | 160 | - centos6/7 上java api客户端执行: 161 | 162 | ``` 163 | # 配置epel源 164 | yum install npm nodejs gcc-c++ 165 | npm config set strict-ssl false 166 | npm install -g log.io --user "root" 167 | ``` 168 | 169 | - 配置log.io配置 170 | 171 | ``` 172 | # 注意client端只需要修改harvester配置文件即可 173 | # cat harvester.conf 174 | exports.config = { 175 | nodeName: "liquidation-master", 176 | logStreams: { 177 | tomcat_8082: [ 178 | "/data/tomcats/tomcat-8082/logs/catalina.out" 179 | ] 180 | }, 181 | server: { 182 | host: '192.168.0.24', 183 | port: 28777 184 | } 185 | } 186 | ``` 187 | 188 | - 收集日志的服务器,也要安装log.io,正常笔者认为启动harvester即可,但是没成功,笔者还是每个客户端都启动了两个服务 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | 197 | 198 | -------------------------------------------------------------------------------- /01-troubleshooting/remote-ssh-errors.md: -------------------------------------------------------------------------------- 1 | # 短时间内重复更改hostname导致远程ssh登录被拒绝 2 | 3 | 报错页面: 4 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 5 | @ WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED! 
@ 6 | @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ 7 | IT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY! 8 | Someone could be eavesdropping on you right now (man-in-the-middle attack)! 9 | It is also possible that a host key has just been changed. 10 | The fingerprint for the ECDSA key sent by the remote host is 11 | SHA256:URdLU4Jx00fGJ/yBdu8DZwGj3JysNoK3U2DaSfErQQA. 12 | Please contact your system administrator. 13 | Add correct host key in /root/.ssh/known_hosts to get rid of this message. 14 | Offending ECDSA key in /root/.ssh/known_hosts:1 15 | ECDSA host key for 192.168.1.115 has changed and you have requested strict checking. 16 | Host key verification failed. 17 | 18 | 19 | - 解决: -------------------------------------------------------------------------------- /01-troubleshooting/tomcat-accesslog.md: -------------------------------------------------------------------------------- 1 | # 按时间查看tomcat慢访问日志 2 | 3 | - 查看早上九点到十点的访问日志: 4 | 5 | sed -n '/03\/Jul\/2018:09/,/03\/Jul\/2018:10/p' /data/tomcats/tomcat-7081/logs/localhost_access_log.2018-07-03.txt | less 6 | 7 | # 接口故障处理 8 | 9 | 10 | web访问响应慢 - 查看接口日志 接口日志有报错,acces慢日志特别多 接口取数据慢 数据源有问题 缓存问题 缓存所在服务器redis做持久化导致进程堵塞 关闭持久化 重启接口 页面恢复 加载速度变快 11 | 12 | -------------------------------------------------------------------------------- /01-troubleshooting/vim-use.md: -------------------------------------------------------------------------------- 1 | # 2 | 最方便的方法就是在.vimrc中加一句: 3 | 4 | set pastetoggle= -------------------------------------------------------------------------------- /CICD/jenkins/docker-install-jenkins.md: -------------------------------------------------------------------------------- 1 | # docker运行jenkins 2 | 3 | ## 安装 4 | 5 | [root@vm-06 ~]# docker verison 6 | docker: 'verison' is not a docker command. 
7 | See 'docker --help' 8 | [root@vm-06 ~]# docker version 9 | Client: 10 | Version: 18.03.1-ce 11 | API version: 1.37 12 | Go version: go1.9.5 13 | Git commit: 9ee9f40 14 | Built: Thu Apr 26 07:20:16 2018 15 | OS/Arch: linux/amd64 16 | Experimental: false 17 | Orchestrator: swarm 18 | 19 | Server: 20 | Engine: 21 | Version: 18.03.1-ce 22 | API version: 1.37 (minimum version 1.12) 23 | Go version: go1.9.5 24 | Git commit: 9ee9f40 25 | Built: Thu Apr 26 07:23:58 2018 26 | OS/Arch: linux/amd64 27 | Experimental: false 28 | 29 | 30 | ## 使用docker直接运行jenkins 31 | 32 | - 参考:https://docs.docker.com/samples/library/jenkins/ 33 | - https://github.com/jenkinsci/docker 34 | 35 | 36 | 37 | 38 | docker run -d -p 8080:8080 -p 50000:50000 --env JAVA_OPTS="-Djava.util.logging.config.file=/var/jenkins_home/log.properties" -v /data/jenkins_home:/var/jenkins_home -v /usr/share/apache-maven:/var/maven_home -u 0 jenkins 39 | 40 | 41 | docker run --name aniu-jenkins -d -p 8080:8080 -p 50000:50000 -v /data/jenkins_home:/var/jenkins_home -v /usr/share/apache-maven:/var/maven_home -u 0 jenkins 42 | 43 | - 日志配置 44 | 45 | cat > /data/jenkins_home/log.properties < 入学测试题 2 | 3 | 4 | -------------------------------------------------------------------------------- /cloud/aliyun/云计算专业/入学测试题.md: -------------------------------------------------------------------------------- 1 | # 阿里云大学互联网学院 <云计算专业> 入学测试题 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /config manage/ansible/ansible-errors.md: -------------------------------------------------------------------------------- 1 | # 使用普通用户进行ansible分发 2 | 3 | [yunwei@wanghui ~]$ ansible web -m command -a "ls -l /home/yunwei" 4 | [DEPRECATION WARNING]: DEFAULT_SUDO_USER option, In favor of Ansible Become, which is a generic framework. See become_user. , use become instead. This feature will be removed in version 2.8. 
5 | Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg. 6 | 7 | ## 编辑sudo vim /etc/ansible/ansible.cfg,注释#sudo_user = yunwei,执行ansible时添加--become-user=yunwei参数 8 | 9 | [yunwei@wanghui ~]$ ansible web -m command -a "ls -l /home/yunwei" --become-user=yunwei 10 | 192.168.0.51 | SUCCESS | rc=0 >> 11 | total 0 12 | -rw------- 1 yunwei yunwei 0 Apr 28 15:06 test.conf 13 | 14 | 192.168.0.50 | SUCCESS | rc=0 >> 15 | total 0 16 | -rw------- 1 yunwei yunwei 0 Apr 28 15:06 test.conf 17 | 18 | 19 | - [WARNING]: Consider using 'become', 'become_method', and 'become_user' rather than running sudo 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /config manage/ansible/ansible-plugins.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/config manage/ansible/ansible-plugins.md -------------------------------------------------------------------------------- /config manage/ansible/ansible-tower.md: -------------------------------------------------------------------------------- 1 | # 三步搞定ansible-tower 2 | 3 | ## 环境准备 4 | 5 | - centos7 + 2c/G (笔者测试) 6 | 7 | ## step1 下载ansible-tower最新版 8 | 9 | - wget https://releases.ansible.com/ansible-tower/setup/ansible-tower-setup-latest.tar.gz 10 | 11 | ## step2 解压tower到opt下,并配置初始密码 12 | 13 | ``` 14 | tar zxvf ansible-tower-setup-bundle-latest.el7.tar.gz -C /opt/ 15 | cd /opt/ansible-tower-setup-bundle-3.5.2-1.el7/ 16 | ``` 17 | 18 | - 更改配置如下: 19 | 20 | ``` 21 | # cat inventory 22 | [tower] 23 | localhost ansible_connection=local 24 | 25 | [database] 26 | 27 | [all:vars] 28 | admin_password='admin' #增加,默认无 29 | 30 | pg_host='' 31 | pg_port='' 32 | 33 | pg_database='awx' 34 | pg_username='awx' 35 | pg_password='awx' #增加,默认无 36 | 37 | rabbitmq_username=tower 38 | rabbitmq_password='tower' #增加,默认无 39 | 
rabbitmq_cookie=cookiemonster 40 | ``` 41 | 42 | ## step3 执行sh setup 43 | 44 | - sh setup.sh # 无报错,执行完成即可 45 | 46 | ## step4 访问并激活无限hosts 47 | 48 | - 访问:https://ip 49 | 50 | ``` 51 | # 执行下面命令,刷新tower页面即可 52 | echo codyguo > /var/lib/awx/i18n.db 53 | ``` 54 | 55 | - 友情链接:https://blog.csdn.net/CodyGuo/article/details/84136181 56 | -------------------------------------------------------------------------------- /config manage/ansible/c7-install-ansible-use.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装ansible并配置使用 2 | 3 | ## 实验环境 4 | 5 | - CentOS7, epel源 6 | 7 | ## 安装ansible 8 | 9 | yum -y install ansible openssh-clients 10 | 11 | - 配置ansible 12 | 13 | [yunwei@wanghui ~]$ egrep -v "^#|^$" /etc/ansible/ansible.cfg 14 | [defaults] 15 | inventory = /etc/ansible/hosts 16 | remote_port = 54077 # 更改ssh端口 17 | host_key_checking = False #取消此行注释 18 | [inventory] 19 | [privilege_escalation] 20 | [paramiko_connection] 21 | [ssh_connection] 22 | [persistent_connection] 23 | [accelerate] 24 | [selinux] 25 | [colors] 26 | [diff] 27 | 28 | 其他参数修改,直接编辑sudo vim /etc/ansible/ansible.cfg。 29 | 30 | - 配置hosts 31 | [yunwei@wanghui ~]$ egrep -v "^#|^$" /etc/ansible/hosts 32 | 192.168.0.111 33 | [web] 34 | 192.168.0.50 35 | 192.168.0.51 36 | 37 | - 使用yunwei执行ansible前,需要在不同的服务器上设置yunwei用户间的免密认证登录 38 | 39 | [yunwei@wanghui ~]$ ansible all --list-hosts 40 | hosts (3): 41 | 192.168.0.111 42 | 192.168.0.50 43 | 192.168.0.51 44 | [yunwei@wanghui ~]$ ansible web --list-hosts 45 | hosts (2): 46 | 192.168.0.50 47 | 192.168.0.51 48 | 49 | - ansible 同步文件 50 | 51 | ansible allapi -m copy -a "src=/tmp/tomcatall dest=/etc/init.d/tomcatall" 52 | 53 | 54 | ansible all -m copy -a "src=/etc/chrony.conf dest=/etc/chrony.conf" 55 | 56 | - 同步hosts文件 57 | 58 | ansible k8s -m copy -a "src=/etc/hosts dest=/etc/hosts" 59 | 60 | ansible ovirt -a "yum install chrony -y && systemctl restart chronyd && systemctl enable chronyd" 61 | 62 | ansible ovirt -m copy -a 
"src=/etc/chrony.conf dest=/etc/chrony.conf" 63 | 64 | ansible ovirt -a "/usr/bin/chronyc sourcestats" 65 | 66 | 67 | ansible wengu -m copy -a "src=/tmp/zabbix_agentd.conf dest=/opt/zabbix/etc/zabbix_agentd.conf" 68 | 69 | ansible wengu -a "/etc/init.d/zabbix_agentd restart" 70 | -------------------------------------------------------------------------------- /config manage/ansible/playbook/initial-vm-env-for-k8s.md: -------------------------------------------------------------------------------- 1 | # 初始化vm配置为了k8s集群安装 2 | 3 | - hosts: k8s 4 | become: yes 5 | become_method: sudo 6 | tasks: 7 | - name: General packages are installed 8 | yum: name={{ item }} state=installed 9 | with_items: 10 | - vim-enhanced 11 | - wget 12 | - net-tools 13 | tags: General_Packages 14 | 15 | -------------------------------------------------------------------------------- /config manage/config/default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | 5 | #charset koi8-r; 6 | access_log /var/log/nginx/host.access.log main; 7 | 8 | location / { 9 | root /usr/share/nginx/html; 10 | index index.html index.htm; 11 | } 12 | 13 | error_page 404 /404.html; 14 | 15 | # redirect server error pages to the static page /50x.html 16 | # 17 | error_page 500 502 503 504 /50x.html; 18 | location = /50x.html { 19 | root /usr/share/nginx/html; 20 | internal; 21 | 22 | } 23 | 24 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 25 | # 26 | #location ~ \.php$ { 27 | # proxy_pass http://127.0.0.1; 28 | #} 29 | 30 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 31 | # 32 | #location ~ \.php$ { 33 | # root html; 34 | # fastcgi_pass phpfpm-pool; 35 | # fastcgi_index index.php; 36 | # fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name; 37 | # include fastcgi_params; 38 | #} 39 | 40 | # deny access to .htaccess files, if Apache's document root 41 | # concurs with nginx's one 42 
| # 43 | #location ~ /\.ht { 44 | # deny all; 45 | #} 46 | } 47 | 48 | -------------------------------------------------------------------------------- /config manage/config/ftp.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name ftp.aniu.so; 4 | access_log /var/log/nginx/repo.aniu.so.access.log main; 5 | error_log /var/log/nginx/error/repo.aniu.so.error.log; 6 | 7 | location / { 8 | root /var/www/html/yunwei; 9 | index index.html index.htm index.php; 10 | } 11 | error_page 404 /404.html; 12 | # redirect server error pages to the static page /50x.html 13 | error_page 500 502 503 504 /50x.html; 14 | location = /50x.html { 15 | root /usr/share/nginx/html; 16 | } 17 | 18 | location ~ ^/(.*)$ { 19 | proxy_pass http://192.168.0.99:8888; 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /config manage/config/grafana.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name grafana.aniu.so; 4 | 5 | #charset koi8-r; 6 | access_log /var/log/nginx/grafana.aniu.so.access.log main; 7 | error_log /var/log/nginx/error/grafana.aniu.so.error.log; 8 | 9 | location / { 10 | root /var/www/html; 11 | index index.html index.htm index.php; 12 | } 13 | 14 | error_page 404 /404.html; 15 | 16 | # redirect server error pages to the static page /50x.html 17 | # 18 | error_page 500 502 503 504 /50x.html; 19 | location = /50x.html { 20 | root /usr/share/nginx/html; 21 | } 22 | 23 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 24 | # 25 | location ~ { 26 | proxy_pass http://0.0.0.0:3000; 27 | } 28 | 29 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 30 | # 31 | #location ~ \.php$ { 32 | # root /var/www/html; 33 | # fastcgi_pass phpfpm-pool; 34 | # fastcgi_index index.php; 35 | # fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 36 
| # include fastcgi_params; 37 | #} 38 | 39 | #location ~ [^/]\.php(/|$) { 40 | #fastcgi_split_path_info ^(.+?\.php)(/.*)$; 41 | #if (!-f $document_root$fastcgi_script_name) { 42 | # return 404; 43 | #} 44 | 45 | # Mitigate https://httpoxy.org/ vulnerabilities 46 | #fastcgi_param HTTP_PROXY ""; 47 | 48 | #fastcgi_pass phpfpm-pool; 49 | #fastcgi_index index.php; 50 | #include fastcgi_params; 51 | #} 52 | # deny access to .htaccess files, if Apache's document root 53 | # concurs with nginx's one 54 | # 55 | #location ~ /\.ht { 56 | # deny all; 57 | #} 58 | } 59 | 60 | -------------------------------------------------------------------------------- /config manage/config/jenkins.aniu.so.conf: -------------------------------------------------------------------------------- 1 | upstream jenkins { 2 | server 0.0.0.0:8080 fail_timeout=0; 3 | } 4 | 5 | server { 6 | listen 80; 7 | server_name jenkins.aniu.so; 8 | return 301 https://$host$request_uri; 9 | } 10 | 11 | server { 12 | listen 443 ssl; 13 | server_name jenkins.aniu.so; 14 | access_log /var/log/nginx/jenkins.aniu.so.access.log main; 15 | error_log /var/log/nginx/jenkins.aniu.so.error.log; 16 | 17 | ssl on; 18 | ssl_certificate /etc/pki/tls/certs/server.crt; 19 | ssl_certificate_key /etc/pki/tls/certs/server.key; 20 | 21 | location / { 22 | proxy_set_header Host $host:$server_port; 23 | proxy_set_header X-Real-IP $remote_addr; 24 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 25 | proxy_set_header X-Forwarded-Proto $scheme; 26 | proxy_redirect http:// https://; 27 | proxy_pass http://jenkins; 28 | # Required for new HTTP-based CLI 29 | proxy_http_version 1.1; 30 | proxy_request_buffering off; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /config manage/config/jira.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name jira.aniu.so; 4 | return 301 
https://$host$request_uri; 5 | } 6 | 7 | server { 8 | listen 443 ssl; 9 | server_name jira.aniu.so; 10 | access_log /var/log/nginx/jira.aniu.so.access.log main; 11 | error_log /var/log/nginx/jira.aniu.so.error.log; 12 | 13 | ssl on; 14 | ssl_certificate /etc/pki/tls/certs/jira.aniu.so.crt; 15 | ssl_certificate_key /etc/pki/tls/certs/jira.aniu.so.key; 16 | 17 | location /jira { 18 | proxy_set_header Host $host:$server_port; 19 | proxy_set_header X-Real-IP $remote_addr; 20 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 21 | proxy_set_header X-Forwarded-Proto $scheme; 22 | proxy_redirect http:// https://; 23 | proxy_pass http://sh-kvm-3-1:8080/jira; 24 | client_max_body_size 10M; 25 | proxy_set_header X-Forwarded-Host $host; 26 | proxy_set_header X-Forwarded-Server $host; 27 | # Required for new HTTP-based CLI 28 | proxy_http_version 1.1; 29 | proxy_request_buffering off; 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /config manage/config/jira.aniu.so.conf_bak: -------------------------------------------------------------------------------- 1 | server { 2 | listen jira.aniu.so:80; 3 | server_name jira.aniu.so; 4 | access_log /var/log/nginx/jira.aniu.so.access.log main; 5 | error_log /var/log/nginx/jira.aniu.so.error.log; 6 | location /jira { 7 | proxy_set_header X-Forwarded-Host $host; 8 | proxy_set_header X-Forwarded-Server $host; 9 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 10 | proxy_pass http://sh-kvm-3-1:8080/jira; 11 | client_max_body_size 10M; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /config manage/config/jumpserver.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name jumpserver.aniu.so; 4 | 5 | proxy_set_header X-Real-IP $remote_addr; 6 | proxy_set_header Host $host; 7 | proxy_set_header X-Forwarded-For 
$proxy_add_x_forwarded_for; 8 | 9 | location /luna/ { 10 | try_files $uri / /index.html; 11 | alias /opt/luna/; 12 | } 13 | 14 | location /media/ { 15 | add_header Content-Encoding gzip; 16 | root /opt/jumpserver/data/; 17 | } 18 | 19 | location /static/ { 20 | root /opt/jumpserver/data/; 21 | } 22 | 23 | location /socket.io/ { 24 | proxy_pass http://localhost:5000/socket.io/; # 如果coco安装在别的服务器,请填写它的ip 25 | proxy_buffering off; 26 | proxy_http_version 1.1; 27 | proxy_set_header Upgrade $http_upgrade; 28 | proxy_set_header Connection "upgrade"; 29 | } 30 | 31 | location /guacamole/ { 32 | proxy_pass http://localhost:8081/; # 如果guacamole安装在别的服务器,请填写它的ip 33 | proxy_buffering off; 34 | proxy_http_version 1.1; 35 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 36 | proxy_set_header Upgrade $http_upgrade; 37 | proxy_set_header Connection $http_connection; 38 | access_log off; 39 | } 40 | 41 | location / { 42 | proxy_pass http://localhost:8080; # 如果jumpserver安装在别的服务器,请填写它的ip 43 | } 44 | } 45 | 46 | server { 47 | listen 443; 48 | server_name jumpserver.aniu.so; 49 | 50 | proxy_set_header X-Real-IP $remote_addr; 51 | proxy_set_header Host $host; 52 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 53 | 54 | location /luna/ { 55 | try_files $uri / /index.html; 56 | alias /opt/luna/; 57 | } 58 | 59 | location /media/ { 60 | add_header Content-Encoding gzip; 61 | root /opt/jumpserver/data/; 62 | } 63 | 64 | location /static/ { 65 | root /opt/jumpserver/data/; 66 | } 67 | 68 | location /socket.io/ { 69 | proxy_pass http://localhost:5000/socket.io/; # 如果coco安装在别的服务器,请填写它的ip 70 | proxy_buffering off; 71 | proxy_http_version 1.1; 72 | proxy_set_header Upgrade $http_upgrade; 73 | proxy_set_header Connection "upgrade"; 74 | } 75 | 76 | # location /guacamole/ { 77 | # proxy_pass http://localhost:8081/; # 如果guacamole安装在别的服务器,请填写它的ip 78 | # proxy_buffering off; 79 | # proxy_http_version 1.1; 80 | # proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 
81 | # proxy_set_header Upgrade $http_upgrade; 82 | # proxy_set_header Connection $http_connection; 83 | # access_log off; 84 | # } 85 | 86 | location / { 87 | proxy_pass http://localhost:8080; # 如果jumpserver安装在别的服务器,请填写它的ip 88 | } 89 | 90 | 91 | } 92 | -------------------------------------------------------------------------------- /config manage/config/log.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name log.aniu.so; 4 | access_log /var/log/nginx/log.aniu.so.access.log main; 5 | error_log /var/log/nginx/log.aniu.so.error.log; 6 | 7 | location / { 8 | root /var/www/html/loganalyzer; 9 | index index.html index.htm index.php; 10 | } 11 | 12 | error_page 404 /404.html; 13 | 14 | # redirect server error pages to the static page /50x.html 15 | error_page 500 502 503 504 /50x.html; 16 | location = /50x.html { 17 | root /var/www/html/loganalyzer; 18 | } 19 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 20 | location ~ \.php$ { 21 | root /var/www/html/loganalyzer; 22 | fastcgi_pass phpfpm-pool; 23 | fastcgi_index index.php; 24 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 25 | include fastcgi_params; 26 | include proxy.conf; 27 | } 28 | } 29 | 30 | -------------------------------------------------------------------------------- /config manage/config/mattermost.aniu.so.conf: -------------------------------------------------------------------------------- 1 | upstream backend { 2 | server 127.0.0.1:8065; 3 | } 4 | 5 | proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=mattermost_cache:10m max_size=3g inactive=120m use_temp_path=off; 6 | 7 | server { 8 | listen 80; 9 | server_name mattermost.aniu.so; 10 | 11 | location ~ /api/v[0-9]+/(users/)?websocket$ { 12 | proxy_set_header Upgrade $http_upgrade; 13 | proxy_set_header Connection "upgrade"; 14 | client_max_body_size 50M; 15 | proxy_set_header Host $http_host; 16 | proxy_set_header 
X-Real-IP $remote_addr; 17 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 18 | proxy_set_header X-Forwarded-Proto $scheme; 19 | proxy_set_header X-Frame-Options SAMEORIGIN; 20 | proxy_buffers 256 16k; 21 | proxy_buffer_size 16k; 22 | proxy_read_timeout 600s; 23 | proxy_pass http://backend; 24 | } 25 | 26 | location / { 27 | client_max_body_size 50M; 28 | proxy_set_header Connection ""; 29 | proxy_set_header Host $http_host; 30 | proxy_set_header X-Real-IP $remote_addr; 31 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 32 | proxy_set_header X-Forwarded-Proto $scheme; 33 | proxy_set_header X-Frame-Options SAMEORIGIN; 34 | proxy_buffers 256 16k; 35 | proxy_buffer_size 16k; 36 | proxy_read_timeout 600s; 37 | proxy_cache mattermost_cache; 38 | proxy_cache_revalidate on; 39 | proxy_cache_min_uses 2; 40 | proxy_cache_use_stale timeout; 41 | proxy_cache_lock on; 42 | proxy_pass http://backend; 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /config manage/config/netdata.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name netdata.aniu.so; 4 | 5 | #charset koi8-r; 6 | access_log /var/log/nginx/netdata.aniu.so.access.log main; 7 | error_log /var/log/nginx/error/netdata.aniu.so.error.log; 8 | 9 | location / { 10 | root /var/www/html; 11 | index index.html index.htm index.php; 12 | } 13 | 14 | error_page 404 /404.html; 15 | 16 | # redirect server error pages to the static page /50x.html 17 | # 18 | error_page 500 502 503 504 /50x.html; 19 | location = /50x.html { 20 | root /usr/share/nginx/html; 21 | } 22 | 23 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 24 | # 25 | location ~ { 26 | proxy_pass http://0.0.0.0:19999; 27 | } 28 | 29 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 30 | # 31 | #location ~ \.php$ { 32 | # root /var/www/html; 33 | # fastcgi_pass phpfpm-pool; 34 | 
# fastcgi_index index.php; 35 | # fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 36 | # include fastcgi_params; 37 | #} 38 | 39 | #location ~ [^/]\.php(/|$) { 40 | #fastcgi_split_path_info ^(.+?\.php)(/.*)$; 41 | #if (!-f $document_root$fastcgi_script_name) { 42 | # return 404; 43 | #} 44 | 45 | # Mitigate https://httpoxy.org/ vulnerabilities 46 | #fastcgi_param HTTP_PROXY ""; 47 | 48 | #fastcgi_pass phpfpm-pool; 49 | #fastcgi_index index.php; 50 | #include fastcgi_params; 51 | #} 52 | # deny access to .htaccess files, if Apache's document root 53 | # concurs with nginx's one 54 | # 55 | #location ~ /\.ht { 56 | # deny all; 57 | #} 58 | } 59 | 60 | -------------------------------------------------------------------------------- /config manage/config/oa.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name oa.aniu.so; 4 | index index.php index.html index.htm; 5 | root /data/gitlab/aniu-oa; 6 | access_log /var/log/nginx/aniu-oa/oa.aniu.so.access.log main; 7 | error_log /var/log/nginx/aniu-oa/oa.aniu.so.error.log; 8 | 9 | error_page 500 502 503 504 403 /50x.html; 10 | location = /50x.html { 11 | root html; 12 | } 13 | 14 | #location ~ .*\.php { 15 | # root /data/gitlab/aniu-oa; 16 | # add_header Pragma public; 17 | # fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 18 | # fastcgi_pass 127.0.0.1:9000; 19 | # fastcgi_index index.php; 20 | # include fastcgi_params; 21 | #} 22 | 23 | location ~ [^/]\.php(/|$) { 24 | root /data/gitlab/aniu-oa; 25 | fastcgi_split_path_info ^(.+?\.php)(/.*)$; 26 | if (!-f $document_root$fastcgi_script_name) { 27 | return 404; 28 | } 29 | fastcgi_param HTTP_PROXY ""; 30 | fastcgi_pass 127.0.0.1:9000; 31 | fastcgi_index index.php; 32 | include fastcgi_params; 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /config manage/config/piwik.aniu.so.conf: 
-------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name piwik.aniu.so; 4 | #rewrite ^(.*)$ https://$server_name/$1 permanent; 5 | return 301 https://$server_name$request_uri; 6 | #location / { 7 | # rewrite ^(.*)$ https://$server_name/$1; 8 | #} 9 | } 10 | 11 | server { 12 | listen 443 ssl; 13 | server_name piwik.aniu.so; 14 | 15 | access_log /var/log/nginx/piwik.aniu.so.access.log main; 16 | error_log /var/log/nginx/piwik.aniu.so.error.log; 17 | 18 | ssl on; 19 | ssl_certificate /etc/pki/tls/certs/server.crt; 20 | ssl_certificate_key /etc/pki/tls/certs/server.key; 21 | 22 | fastcgi_param HTTPS on; 23 | 24 | location / { 25 | root /opt/piwik; 26 | index index.html index.htm index.php; 27 | } 28 | 29 | error_page 404 /404.html; 30 | 31 | # redirect server error pages to the static page /50x.html 32 | error_page 500 502 503 504 /50x.html; 33 | location = /50x.html { 34 | root /usr/share/nginx; 35 | } 36 | 37 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 38 | location ~ \.php$ { 39 | root /opt/piwik; 40 | fastcgi_pass phpfpm-pool; 41 | fastcgi_index index.php; 42 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 43 | include fastcgi_params; 44 | include proxy.conf; 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /config manage/config/repo.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name repo.aniu.so repo.aniu.co; 4 | access_log /var/log/nginx/repo.aniu.so.access.log main; 5 | error_log /var/log/nginx/error/repo.aniu.so.error.log; 6 | autoindex on; 7 | charset utf-8; 8 | 9 | location / { 10 | root /var/www/html; 11 | index index.html index.htm index.php; 12 | } 13 | 14 | error_page 404 /404.html; 15 | 16 | # redirect server error pages to the static page /50x.html 17 | error_page 500 502 503 504 /50x.html; 18 | location = 
/50x.html { 19 | root /usr/share/nginx/html; 20 | } 21 | 22 | location ~ ^/yunwei/ { 23 | proxy_pass http://192.168.0.99:8888/yunwei/$1; 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /config manage/config/sonar.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name sonar.aniu.so; 4 | access_log /var/log/nginx/sonar.aniu.so.access.log main; 5 | error_log /var/log/nginx/error/sonar.aniu.so.error.log; 6 | 7 | location / { 8 | root /opt/sonarqube; 9 | index index.html index.htm index.php; 10 | } 11 | 12 | error_page 404 /404.html; 13 | 14 | error_page 500 502 503 504 /50x.html; 15 | location = /50x.html { 16 | root /usr/share/nginx/html; 17 | } 18 | 19 | location ~ { 20 | proxy_pass http://0.0.0.0:9009; 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /config manage/config/ssl.conf: -------------------------------------------------------------------------------- 1 | # 2 | # HTTPS server configuration 3 | # 4 | 5 | #server { 6 | # listen 443 ssl http2 default_server; 7 | # listen [::]:443 ssl; 8 | # server_name _; 9 | # root /usr/share/nginx/html; 10 | # 11 | # ssl_certificate cert.pem; 12 | # ssl_certificate_key cert.key; 13 | # ssl_session_cache shared:SSL:1m; 14 | # ssl_session_timeout 10m; 15 | # ssl_ciphers HIGH:!aNULL:!MD5; 16 | # ssl_prefer_server_ciphers on; 17 | # 18 | # # Load configuration files for the default server block. 
19 | # include /etc/nginx/default.d/*.conf; 20 | # 21 | # location / { 22 | # } 23 | # 24 | # error_page 404 /404.html; 25 | # location = /40x.html { 26 | # } 27 | # 28 | # error_page 500 502 503 504 /50x.html; 29 | # location = /50x.html { 30 | # } 31 | #} 32 | 33 | -------------------------------------------------------------------------------- /config manage/config/test.aniu.co.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name test.aniu.co 192.168.0.33; 4 | 5 | #charset koi8-r; 6 | access_log /var/log/nginx/test.aniu.co.access.log main; 7 | error_log /var/log/nginx/error/test.aniu.co.error.log; 8 | 9 | location / { 10 | root /usr/share/nginx/html; 11 | index index.html index.htm index.php; 12 | } 13 | 14 | error_page 404 /404.html; 15 | 16 | # redirect server error pages to the static page /50x.html 17 | # 18 | error_page 500 502 503 504 /50x.html; 19 | location = /50x.html { 20 | root /usr/share/nginx/html; 21 | } 22 | 23 | # proxy the PHP scripts to Apache listening on 127.0.0.1:80 24 | # 25 | #location ~ \.php$ { 26 | # proxy_pass http://127.0.0.1; 27 | #} 28 | 29 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 30 | # 31 | location ~ \.php$ { 32 | root /usr/share/nginx/html; 33 | fastcgi_pass phpfpm-pool; 34 | fastcgi_index index.php; 35 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 36 | include fastcgi_params; 37 | } 38 | 39 | #location ~ [^/]\.php(/|$) { 40 | #fastcgi_split_path_info ^(.+?\.php)(/.*)$; 41 | #if (!-f $document_root$fastcgi_script_name) { 42 | # return 404; 43 | #} 44 | 45 | # Mitigate https://httpoxy.org/ vulnerabilities 46 | #fastcgi_param HTTP_PROXY ""; 47 | 48 | #fastcgi_pass phpfpm-pool; 49 | #fastcgi_index index.php; 50 | #include fastcgi_params; 51 | #} 52 | # deny access to .htaccess files, if Apache's document root 53 | # concurs with nginx's one 54 | # 55 | #location ~ /\.ht { 56 | # deny all; 57 | #} 58 | } 59 
| 60 | -------------------------------------------------------------------------------- /config manage/config/virtual.conf: -------------------------------------------------------------------------------- 1 | # 2 | # A virtual host using mix of IP-, name-, and port-based configuration 3 | # 4 | 5 | #server { 6 | # listen 8000; 7 | # listen somename:8080; 8 | # server_name somename alias another.alias; 9 | 10 | # location / { 11 | # root html; 12 | # index index.html index.htm; 13 | # } 14 | #} 15 | 16 | -------------------------------------------------------------------------------- /config manage/config/wiki.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen wiki.aniu.so:80; 3 | server_name wiki.aniu.so; 4 | access_log /var/log/nginx/wiki.aniu.so.access.log main; 5 | error_log /var/log/nginx/wiki.aniu.so.error.log; 6 | 7 | location /confluence { 8 | client_max_body_size 100m; 9 | proxy_set_header X-Forwarded-Host $host; 10 | proxy_set_header X-Forwarded-Server $host; 11 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 12 | proxy_pass http://localhost:8090/confluence; 13 | } 14 | location /synchrony { 15 | proxy_set_header X-Forwarded-Host $host; 16 | proxy_set_header X-Forwarded-Server $host; 17 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 18 | proxy_pass http://localhost:8091/synchrony; 19 | proxy_http_version 1.1; 20 | proxy_set_header Upgrade $http_upgrade; 21 | proxy_set_header Connection "Upgrade"; 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /config manage/config/zabbix.aniu.so.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name 192.168.0.99 zabbix.aniu.so 99.zabbix.com 99.aniu.so; 4 | access_log /var/log/nginx/zabbix.aniu.so.access.log main; 5 | error_log /var/log/nginx/zabbix.aniu.so.error.log; 6 | 7 | location / { 8 | root 
/usr/share/zabbix; 9 | index index.html index.htm index.php; 10 | } 11 | 12 | error_page 404 /404.html; 13 | 14 | # redirect server error pages to the static page /50x.html 15 | error_page 500 502 503 504 /50x.html; 16 | location = /50x.html { 17 | root /usr/share/zabbix; 18 | } 19 | # pass the PHP scripts to FastCGI server listening on phpfpm-pool 20 | location ~ \.php$ { 21 | root /usr/share/zabbix; 22 | fastcgi_pass phpfpm-pool; 23 | fastcgi_index index.php; 24 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 25 | include fastcgi_params; 26 | include proxy.conf; 27 | } 28 | } 29 | 30 | -------------------------------------------------------------------------------- /containers/docker/docker-ce/c7-docker-turorial.md: -------------------------------------------------------------------------------- 1 | # Docker 快速上手教程 2 | 3 | > Docker 是一个开源的应用容器引擎,基于 Go 语言 并遵从Apache2.0协议开源。Docker 可以让开发者打包他们的应用以及依赖包到一个轻量级、可移植的容器中,然后发布到任何流行的 Linux 机器上,也可以实现虚拟化。容器是完全使用沙箱机制,相互之间不会有任何接口(类似 iPhone 的 app),更重要的是容器性能开销极低。 4 | 5 | ## Docker的应用场景 6 | 7 | - Web 应用的自动化打包和发布。 8 | - 自动化测试和持续集成、发布。 9 | - 在服务型环境中部署和调整数据库或其他的后台应用。 10 | - 从头编译或者扩展现有的OpenShift或Cloud Foundry平台来搭建自己的PaaS环境。 11 | 12 | ## Docker优点 13 | 14 | - 1、简化程序: 15 | Docker 让开发者可以打包他们的应用以及依赖包到一个可移植的容器中,然后发布到任何流行的 Linux 机器上,便可以实现虚拟化。Docker改变了虚拟化的方式,使开发者可以直接将自己的成果放入Docker中进行管理。方便快捷已经是 Docker的最大优势,过去需要用数天乃至数周的 任务,在Docker容器的处理下,只需要数秒就能完成。 16 | - 2、避免选择恐惧症: 17 | 如果你有选择恐惧症,还是资深患者。Docker 帮你 打包你的纠结!比如 Docker 镜像;Docker 镜像中包含了运行环境和配置,所以 Docker 可以简化部署多种应用实例工作。比如 Web 应用、后台应用、数据库应用、大数据应用比如 Hadoop 集群、消息队列等等都可以打包成一个镜像部署。 18 | - 3、节省开支: 19 | 一方面,云计算时代到来,使开发者不必为了追求效果而配置高额的硬件,Docker 改变了高性能必然高价格的思维定势。Docker 与云的结合,让云空间得到更充分的利用。不仅解决了硬件管理的问题,也改变了虚拟化的方式。 20 | 21 | > 参考链接: 22 | 23 | - Docker 官网:http://www.docker.com 24 | - Github Docker 源码:https://github.com/docker/docker 25 | 26 | ## CentOS7 快速安装并使用docker 27 | 28 | # Docker CE for CentOS 29 | 30 | ## 特点和优点 31 | 32 | 轻松安装和设置优化的Docker环境,以便在裸机服务器和虚拟机上进行CentOS分发。 
最新的Docker平台版本,具有内置的业务流程(集群和调度),运行时安全性,容器网络和卷,Docker CE可免费下载,并提供社区支持的每月Edge或季度稳定版本。 Docker EE订阅包括季度版本,每个版本有一年的维护和SLA的企业级支持。 33 | 34 | ## CentOS上安装docker-ce 35 | 36 | - 卸载旧版本 37 | > Docker的旧版本被称为docker或docker引擎。如果这些已安装,请卸载它们以及关联的依赖关系。 38 | 39 | ``` 40 | sudo yum remove docker \ 41 | docker-common \ 42 | docker-selinux \ 43 | docker-engine 44 | ``` 45 | 46 | - CentOS 7.3 64位支持Docker CE 47 | 48 | > 在CentOS上设置Docker CE存储库 49 | 50 | ``` 51 | sudo yum install -y yum-utils 52 | 53 | sudo yum-config-manager \ 54 | --add-repo \ 55 | https://download.docker.com/linux/centos/docker-ce.repo 56 | 57 | sudo yum makecache fast 58 | ``` 59 | > 可选:启用边缘和测试存储库。这些存储库包含在上面的docker.repo文件中,但默认情况下是禁用的。您可以将它们与稳定的存储库一起启用 60 | 61 | ``` 62 | sudo yum-config-manager --enable docker-ce-edge 63 | 64 | sudo yum-config-manager --enable docker-ce-test 65 | # 禁用 66 | sudo yum-config-manager --disable docker-ce-edge 67 | ``` 68 | 69 | ## 在CentOS上安装最新版本的Docker CE 70 | 71 | ``` 72 | sudo yum -y install docker-ce 73 | # 启动docker 74 | sudo systemctl start docker 75 | ``` 76 | 77 | - 在生产系统上,我们可以安装特定版本的Docker CE,而不是始终使用最新版本。列出可用的版本。此示例使用sort -r命令按版本号排序结果,从最高到最低,并被截断。 78 | 79 | ``` 80 | [root@aniu-k8s yum.repos.d]# yum list docker-ce --showduplicates | sort -r 81 | * updates: mirrors.cn99.com 82 | Loading mirror speeds from cached hostfile 83 | Loaded plugins: fastestmirror 84 | Installed Packages 85 | * extras: mirrors.shuosc.org 86 | * epel: mirrors.tongji.edu.cn 87 | docker-ce.x86_64 17.09.0.ce-1.el7.centos docker-ce-stable 88 | docker-ce.x86_64 17.09.0.ce-1.el7.centos @docker-ce-stable 89 | docker-ce.x86_64 17.06.2.ce-1.el7.centos docker-ce-stable 90 | docker-ce.x86_64 17.06.1.ce-1.el7.centos docker-ce-stable 91 | docker-ce.x86_64 17.06.0.ce-1.el7.centos docker-ce-stable 92 | docker-ce.x86_64 17.03.2.ce-1.el7.centos docker-ce-stable 93 | docker-ce.x86_64 17.03.1.ce-1.el7.centos docker-ce-stable 94 | docker-ce.x86_64 17.03.0.ce-1.el7.centos docker-ce-stable 95 | # 如果需要安装指定版本,参考: 96 | sudo yum install 
(17.06.2.ce-1.el7.centos) 97 | ``` 98 | 99 | 100 | - 测试Docker CE的安装 101 | 102 | ``` 103 | sudo docker run hello-world 104 | ``` 105 | 106 | - 升级Docker CE,可以把最新版本的rpm下载下来,使用 yum localinstall rpm-name升级 107 | 108 | - 卸载Docker CE 109 | 110 | ``` 111 | sudo yum remove docker-ce 112 | sudo rm -rf /var/lib/docker 113 | ``` 114 | 115 | ## 参考教程 116 | 117 | - https://docs.docker.com/engine/installation/linux/docker-ce/centos 118 | 119 | -------------------------------------------------------------------------------- /containers/docker/docker-compose.md: -------------------------------------------------------------------------------- 1 | # install docker-compose 2 | 3 | sudo curl -L "https://github.com/docker/compose/releases/download/1.24.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose 4 | 5 | 6 | sudo chmod +x /usr/local/bin/docker-compose 7 | 8 | sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose 9 | 10 | docker-compose --version 11 | 12 | curl -L https://github.com/docker/compose/releases/download/1.24.1/docker-compose-Linux-x86_64 -o /usr/local/bin/docker-compose 13 | 14 | 15 | version: '1.0' 16 | 17 | services: 18 | prometheus: 19 | image: 'prom/prometheus' 20 | container_name: prometheus 21 | restart: unless-stopped 22 | ports: 23 | - '9090:9090' 24 | command: 25 | - '--config.file=/etc/prometheus/prometheus.yml' 26 | - '--storage.tsdb.path=/prometheus/data' 27 | - '--storage.tsdb.retention=90d' 28 | - '--web.enable-lifecycle' 29 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 30 | - '--web.console.templates=/usr/share/prometheus/consoles' 31 | volumes: 32 | - './etc/prometheus:/etc/prometheus:ro' 33 | - './data/prometheus:/prometheus/data' 34 | depends_on: 35 | - cadvisor 36 | networks: 37 | - samplenet 38 | cadvisor: 39 | image: google/cadvisor 40 | container_name: cadvisor 41 | ports: 42 | - '8081:8081' 43 | volumes: 44 | - '/:/rootfs:ro' 45 | - '/var/run:/var/run:rw' 46 | - '/sys:/sys:ro' 47 | - 
'/var/lib/docker/:/var/lib/docker:ro' 48 | networks: 49 | - samplenet 50 | alertmanager: 51 | image: prom/alertmanager 52 | ports: 53 | - '9093:9093' 54 | volumes: 55 | - './alertmanager/:/etc/alertmanager/' 56 | restart: always 57 | command: 58 | - '--config.file=/etc/alertmanager/config.yml' 59 | - '--storage.path=/alertmanager' 60 | networks: 61 | - samplenet 62 | node-exporter: 63 | image: prom/node-exporter 64 | volumes: 65 | - '/proc:/host/proc:ro' 66 | - '/sys:/host/sys:ro' 67 | - '/:/rootfs:ro' 68 | command: 69 | - '--path.procfs=/host/proc' 70 | - '--path.sysfs=/host/sys' 71 | - '--collector.filesystem.ignored-mount-points' 72 | - ^/(sys|proc|dev|host|etc|rootfs/var/lib/docker/containers|rootfs/var/lib/docker/overlay2|rootfs/run/docker/netns|rootfs/var/lib/docker/aufs)($$|/) 73 | ports: 74 | - '9100:9100' 75 | networks: 76 | - samplenet 77 | restart: always 78 | grafana: 79 | image: grafana/grafana 80 | depends_on: 81 | - prometheus 82 | ports: 83 | - '3000:3000' 84 | volumes: 85 | - './data/grafana:/var/lib/grafana' 86 | - './data/grafana/provisioning/:/etc/grafana/provisioning/' 87 | env_file: 88 | - ./grafana/config.monitoring 89 | networks: 90 | - samplenet 91 | restart: always 92 | volumes: 93 | grafana_data: {} 94 | prometheus_data: {} 95 | 96 | 97 | args = [ 98 | "--config.file=/etc/prometheus/prometheus.yml", 99 | "--storage.tsdb.path=/data", 100 | "--storage.tsdb.no-lockfile", 101 | "--storage.tsdb.min-block-duration=2h", 102 | "--storage.tsdb.max-block-duration=2h", 103 | "--storage.tsdb.retention.time=1d", 104 | "--web.enable-lifecycle", 105 | "--web.console.libraries=/etc/prometheus/console_libraries", 106 | "--web.console.templates=/etc/prometheus/consoles", 107 | ] -------------------------------------------------------------------------------- /containers/docker/docker-errors.md: -------------------------------------------------------------------------------- 1 | # docker挂载卷提示权限错误 2 | 3 | ``` 4 | touch: cannot touch 
'/var/jenkins_home/copy_reference_file.log': Permission denied 5 | Can not write to /var/jenkins_home/copy_reference_file.log. Wrong volume permissions? 6 | ``` 7 | 8 | 将jenkins数据存储在主机上的/your/home中。确保容器中的jenkins用户(jenkins user - uid 1000)可以访问/your/home,或者在docker run中使用-u some_other_user参数。 9 | 10 | 笔者以root用户为例: 11 | docker run -d -p 8080:8080 -p 50000:50000 --env JAVA_OPTS="-Djava.util.logging.config.file=/var/jenkins_home/log.properties" -v /data/jenkins_home:/var/jenkins_home -u 0 jenkins -------------------------------------------------------------------------------- /containers/docker/manager-devicemapper.md: -------------------------------------------------------------------------------- 1 | # 管理devicemapper 2 | 3 | 不要单独依靠LVM自动扩展。卷组自动扩展,但卷仍可填满,使用zabbix监控 4 | 5 | - 要查看LVM日志,使用journalctl: 6 | 7 | ``` 8 | journalctl -fu dm-event.service 9 | ``` 10 | 11 | 如果遇到精简池问题,可以将存储选项设置dm.min_free_space为一个值(表示百分比) /etc/docker.daemon.json。例如,将其设置为10在可用空间达到或接近10%时确保操作失败并发出警告。 12 | 13 | 14 | ## 调整直接LVM​​精简池的大小 15 | 16 | 要扩展direct-lvm精简池,您需要首先将新的块设备连接到Docker主机,并记下内核分配的名称。在这个例子中,新的块设备是/dev/xvdg。 17 | 18 | - 使用此pvdisplay命令查找精简池当前正在使用的物理块设备以及卷组的名称。 19 | 20 | ``` 21 | $ sudo pvdisplay |grep 'VG Name' 22 | PV Name /dev/xvdf 23 | VG Name docker 24 | ``` 25 | 26 | - 扩展vg 27 | 28 | ``` 29 | sudo vgextend docker /dev/xvdg 30 | ``` 31 | 32 | - 扩展docker/thinpool逻辑卷。该命令立即使用100%的体积,而不会自动扩展。要扩展元数据精简池,请使用docker/thinpool_tmeta 33 | 34 | ``` 35 | $ sudo lvextend -l+100%FREE -n docker/thinpool 36 | ``` 37 | 38 | - 4、使用Data Space Available输出中的字段验证新的精简池大小docker info。如果您扩展了docker/thinpool_tmeta逻辑卷,请查找Metadata Space Available。 39 | 40 | ``` 41 | Storage Driver: devicemapper 42 | Pool Name: docker-thinpool 43 | Pool Blocksize: 524.3 kB 44 | Base Device Size: 10.74 GB 45 | Backing Filesystem: xfs 46 | Data file: 47 | Metadata file: 48 | Data Space Used: 212.3 MB 49 | Data Space Total: 212.6 GB 50 | Data Space Available: 212.4 GB 51 | Metadata Space Used: 286.7 kB 52 | Metadata Space Total: 1.07 GB 53 | 
Metadata Space Available: 1.069 GB 54 | 55 | ``` 56 | 57 | ## 激活devicemapper后重新启动 58 | 59 | - 如果您重新启动主机并发现docker服务无法启动,请查找错误“非现有设备”。您需要使用此命令重新激活逻辑卷: 60 | 61 | ``` 62 | sudo lvchange -ay docker/thinpool 63 | ``` -------------------------------------------------------------------------------- /containers/docker/test: -------------------------------------------------------------------------------- 1 | influxdb: 2 | image: influxdb:latest 3 | container_name: influxdb 4 | ports: 5 | - "8083:8083" 6 | - "8086:8086" 7 | - "8090:8090" 8 | env_file: 9 | - 'env.influxdb' 10 | volumes: 11 | - /data/influxdb/data:/var/lib/influxdb 12 | 13 | telegraf: 14 | image: telegraf:latest 15 | container_name: telegraf 16 | links: 17 | - influxdb 18 | volumes: 19 | - /data/telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro 20 | 21 | 22 | docker run -d --name telegraf -v /var/run/docker.sock:/var/run/docker.sock -v /data/telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro telegraf -config /etc/telegraf/telegraf.conf -------------------------------------------------------------------------------- /containers/docker/volumes.md: -------------------------------------------------------------------------------- 1 | # docker change default root dir 2 | 3 | ## 停所有docker服务 4 | 5 | docker stop $(docker ps -aq) 6 | 7 | docker rm $(docker ps -aq) 8 | 9 | docker rmi $(docker images -q) 10 | 11 | # volume 12 | // 删除所有的数据卷 13 | docker volume rm $(docker volume ls -q) 14 | 15 | // 停止所有的容器 16 | docker stop $(docker ps -a -q) 17 | 18 | // 删除所有的容器 19 | docker rm $(docker ps -a -q) 20 | 21 | # change docker root dir 22 | 23 | { 24 | "graph": "/data/docker", 25 | "storage-driver": "overlay" 26 | } 27 | 28 | or 29 | 30 | [Service] 31 | ExecStart=/usr/bin/dockerd -H fd:// --data-root="/data/docker" 32 | 33 | docker volume inspect my-vol 34 | 35 | docker volume ls -f dangling=true 36 | 37 | docker volume ls -q 38 | 39 | docker volume prune 40 | 41 | 
-------------------------------------------------------------------------------- /containers/kubernetes/install_helm.md: -------------------------------------------------------------------------------- 1 | # helm 安装与使用 2 | 3 | - 参考:https://helm.sh/docs/using_helm/#installing-helm 4 | - https://blog.csdn.net/bbwangj/article/details/81087911 5 | - https://www.hi-linux.com/posts/21466.html 6 | 7 | ## 安装helm 8 | 9 | - 一键安装 10 | 11 | ``` 12 | curl -L https://git.io/get_helm.sh | bash 13 | ``` 14 | 15 | - 二进制安装 16 | 17 | ``` 18 | #从官网下载最新版本的二进制安装包到本地:https://github.com/kubernetes/helm/releases 19 | tar -zxvf helm-v2.14.1-linux-amd64.tgz # 解压压缩包 20 | # 把 helm 指令放到bin目录下 21 | mv linux-amd64/helm /usr/local/bin/helm 22 | helm help # 验证 23 | ``` 24 | 25 | - 源码安装 26 | 27 | ``` 28 | $ cd $GOPATH 29 | $ mkdir -p src/k8s.io 30 | $ cd src/k8s.io 31 | $ git clone https://github.com/helm/helm.git 32 | $ cd helm 33 | $ make bootstrap build 34 | ``` 35 | 36 | ## 安装TILLER 37 | 38 | - -------------------------------------------------------------------------------- /containers/kubernetes/k8s+prometheus-grafana.md: -------------------------------------------------------------------------------- 1 | # k8s + promtheus + grafana 2 | 3 | ## k8s install (1.15.0) 4 | 5 | - pre vm & update os 6 | 7 | kubeadm init --image-repository registry.aliyuncs.com/google_containers --kubernetes-version v1.15.0 --pod-network-cidr=192.168.0.0/16 8 | 9 | ## install prometheus 10 | 11 | ## install grafana 12 | 13 | ## install kubernetes dashboard 14 | 15 | ## install helm 16 | 17 | ## -------------------------------------------------------------------------------- /containers/kubernetes/k8s-proxy.md: -------------------------------------------------------------------------------- 1 | # 安装kubernetes集群时使用代理 2 | 3 | - 参考:https://www.centos.bz/2017/12/%E4%BD%BF%E7%94%A8kubeadm%E5%9C%A8centos-7%E4%B8%8A%E5%AE%89%E8%A3%85kubernetes-1-8/ 4 | 5 | ## 
-------------------------------------------------------------------------------- /containers/kubernetes/kubeadm-create-k8s-cluster.md: -------------------------------------------------------------------------------- 1 | # 使用kubeadm创建高度可用的kubernetes群集 2 | 3 | ## -------------------------------------------------------------------------------- /containers/kubernetes/kubernetes-Guide..md: -------------------------------------------------------------------------------- 1 | # kubernetes基本概念 2 | 3 | ## -------------------------------------------------------------------------------- /containers/kubernetes/kubernetes-errors.md: -------------------------------------------------------------------------------- 1 | # 启动kubelet报错 2 | 3 | failed to load Kubelet config file /var/lib/kubelet/config.yaml, error failed to read kubelet config file "/var/lib/kubelet/config.yaml", error: open /var/lib/kubelet/config.yaml: no such file or directory -------------------------------------------------------------------------------- /containers/kubernetes/quick-install-k8s.md: -------------------------------------------------------------------------------- 1 | # 快速安装k8s教程 2 | 3 | - 安装docker 4 | 5 | ``` 6 | yum install -y docker 7 | systemctl enable docker && systemctl start docker 8 | ``` 9 | 10 | - 安装kubeadm, kubelet and kubectl 11 | 12 | ``` 13 | cat <<EOF > /etc/yum.repos.d/kubernetes.repo 14 | [kubernetes] 15 | name=Kubernetes 16 | baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64 17 | enabled=1 18 | gpgcheck=1 19 | repo_gpgcheck=1 20 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 21 | EOF 22 | setenforce 0 23 | yum install -y kubelet kubeadm kubectl 24 | systemctl enable kubelet && systemctl start kubelet 25 | ``` 26 | 27 | - 系统优化 28 | 29 | ``` 30 | setenforce 0 31 | 32 | cat <<EOF > /etc/sysctl.d/k8s.conf 33 | net.bridge.bridge-nf-call-ip6tables = 1 34 | net.bridge.bridge-nf-call-iptables = 1 
EOF 36 | sysctl --system 37 | ``` 38 | 39 | - 创建集群 40 | 41 | 42 | 43 | -------------------------------------------------------------------------------- /containers/kubernetes/tasks/installing-kubeadm.md: -------------------------------------------------------------------------------- 1 | # 安装kubeadm 2 | 3 | - 参考:https://kubernetes.io/docs/tasks/tools/install-kubeadm 4 | 5 | ## 实验环境 6 | 7 | OS:CentOS7.5 Memory: 8GB Vcpu:2 ,禁用swap、节点之间网络互通,禁用selinux 8 | 9 | 10 | - 验证MAC地址和product_uuid对于每个节点都是唯一的 11 | 12 | ``` 13 | swapoff -a 14 | # 修改/etc/fstab,注释swap的挂载配置/重要 15 | ansible k8 -a "sed -i '/swap/d' /etc/fstab" 16 | 17 | # 使用 ip link or ifconfig -a 查看 MAC地址 18 | 19 | # 可以使用命令下面检查product_uuid: 20 | sudo cat /sys/class/dmi/id/product_uuid 21 | ``` 22 | 23 | ## 安装docker 24 | 25 | 使用操作系统的捆绑软件包安装Docker 26 | ``` 27 | yum install -y docker && systemctl enable docker && systemctl start docker 28 | ``` 29 | 30 | 31 | -------------------------------------------------------------------------------- /containers/openshift/learn-openshift.md: -------------------------------------------------------------------------------- 1 | # https://www.openshift.com/ -------------------------------------------------------------------------------- /containers/openshift/openshift-error.md: -------------------------------------------------------------------------------- 1 | # 2 | CHECK [memory_availability : master.aniu.so] ********************************************************************************************************************************************************** 3 | fatal: [master.aniu.so]: FAILED! 
=> {"changed": true, "checks": {"disk_availability": {"failed": true, "failures": [["OpenShiftCheckException", "Available disk space in \"/var\" (23.8 GB) is below minimum recommended (40.0 GB)"]], "msg": "Available disk space in \"/var\" (23.8 GB) is below minimum recommended (40.0 GB)"}, "docker_image_availability": {"changed": true}, "docker_storage": {"changed": true}, "memory_availability": {}, "package_availability": {"changed": false, "invocation": {"module_args": {"packages": ["PyYAML", "bash-completion", "bind", "ceph-common", "cockpit-bridge", "cockpit-docker", "cockpit-system", "cockpit-ws", "dnsmasq", "docker", "etcd", "firewalld", "flannel", "glusterfs-fuse", "httpd-tools", "iptables", "iptables-services", "iscsi-initiator-utils", "libselinux-python", "nfs-utils", "ntp", "openssl", "origin", "origin-clients", "origin-master", "origin-node", "origin-sdn-ovs", "pyparted", "python-httplib2", "yum-utils"]}}}, "package_version": {"changed": false, "invocation": {"module_args": {"package_list": [{"check_multi": false, "name": "openvswitch", "version": ["2.6", "2.7", "2.8", "2.9"]}, {"check_multi": false, "name": "docker", "version": ["1.12", "1.13"]}, {"check_multi": false, "name": "origin", "version": ""}, {"check_multi": false, "name": "origin-master", "version": ""}, {"check_multi": false, "name": "origin-node", "version": ""}], "package_mgr": "yum"}}}}, "msg": "One or more checks failed", "playbook_context": "install"} 4 | 5 | NO MORE HOSTS LEFT ************************************************************************************************************************************************************************************ 6 | [WARNING]: Could not create retry file '/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.retry'. 
[Errno 13] Permission denied: u'/usr/share/ansible/openshift- 7 | ansible/playbooks/deploy_cluster.retry' 8 | 9 | 10 | PLAY RECAP ******************************************************************************************************************************************************************************************** 11 | localhost : ok=11 changed=0 unreachable=0 failed=0 12 | master.aniu.so : ok=38 changed=2 unreachable=0 failed=1 13 | node1.aniu.so : ok=13 changed=0 unreachable=0 failed=1 14 | node2.aniu.so : ok=13 changed=0 unreachable=0 failed=1 15 | 16 | 17 | INSTALLER STATUS ************************************************************************************************************************************************************************************** 18 | Initialization : Complete (0:00:50) 19 | Health Check : In Progress (0:02:18) 20 | This phase can be restarted by running: playbooks/openshift-checks/pre-install.yml 21 | 22 | 23 | 24 | Failure summary: 25 | 26 | 27 | 1. Hosts: node1.aniu.so, node2.aniu.so 28 | Play: Initialize cluster facts 29 | Task: Gather Cluster facts 30 | Message: MODULE FAILURE 31 | 32 | 2. Hosts: master.aniu.so 33 | Play: OpenShift Health Checks 34 | Task: Run health checks (install) - EL 35 | Message: One or more checks failed 36 | Details: check "disk_availability": 37 | Available disk space in "/var" (23.8 GB) is below minimum recommended (40.0 GB) 38 | 39 | The execution of "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml" includes checks designed to fail early if the requirements of the playbook are not met. One or more of these checks failed. To disregard these results,explicitly disable checks by setting an Ansible variable: 40 | openshift_disable_check=disk_availability 41 | Failing check names are shown in the failure details above. Some checks may be configurable by variables if your requirements are different from the defaults; consult check documentation. 
42 | Variables can be set in the inventory or passed on the command line using the -e flag to ansible-playbook. -------------------------------------------------------------------------------- /containers/openshift/openshift-origin-latest/openshift-use.md: -------------------------------------------------------------------------------- 1 | # 在openshift部署应用程序 2 | 3 | - 在主节点上添加一个作为Openshift用户的用户登录。 4 | 5 | ``` 6 | [root@master ~]# oc login 7 | Authentication required for https://master.aniu.so:8443 (openshift) 8 | Username: aniu 9 | Password: 10 | Login successful. 11 | 12 | You don't have any projects. You can try to create a new project, by running 13 | 14 | oc new-project 15 | 16 | [root@master ~]# oc new-project aniu-project 17 | Now using project "aniu-project" on server "https://master.aniu.so:8443". 18 | 19 | You can add applications to this project with the 'new-app' command. For example, try: 20 | 21 | oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git 22 | 23 | to build a new example application in Ruby. 24 | [root@master ~]# oc new-app centos/ruby-22-centos7~https://github.com/openshift/ruby-ex.git 25 | --> Found Docker image 90e3807 (4 hours old) from Docker Hub for "centos/ruby-22-centos7" 26 | 27 | Ruby 2.2 28 | -------- 29 | Ruby 2.2 available as container is a base platform for building and running various Ruby 2.2 applications and frameworks. Ruby is the interpreted scripting language for quick and easy object-oriented programming. It has many features to process text files and to do system management tasks (as in Perl). It is simple, straight-forward, and extensible. 
30 | 31 | Tags: builder, ruby, ruby22 32 | 33 | * An image stream will be created as "ruby-22-centos7:latest" that will track the source image 34 | * A source build using source code from https://github.com/openshift/ruby-ex.git will be created 35 | * The resulting image will be pushed to image stream "ruby-ex:latest" 36 | * Every time "ruby-22-centos7:latest" changes a new build will be triggered 37 | * This image will be deployed in deployment config "ruby-ex" 38 | * Port 8080/tcp will be load balanced by service "ruby-ex" 39 | * Other containers can access this service through the hostname "ruby-ex" 40 | 41 | --> Creating resources ... 42 | imagestream "ruby-22-centos7" created 43 | imagestream "ruby-ex" created 44 | buildconfig "ruby-ex" created 45 | deploymentconfig "ruby-ex" created 46 | service "ruby-ex" created 47 | --> Success 48 | Build scheduled, use 'oc logs -f bc/ruby-ex' to track its progress. 49 | Application is not exposed. You can expose services to the outside world by executing one or more of the commands below: 50 | 'oc expose svc/ruby-ex' 51 | Run 'oc status' to view your app 52 | #查看状态 53 | [root@master ~]# oc status 54 | In project aniu-project on server https://master.aniu.so:8443 55 | 56 | svc/ruby-ex - 172.30.195.161:8080 57 | dc/ruby-ex deploys istag/ruby-ex:latest <- 58 | bc/ruby-ex source builds https://github.com/openshift/ruby-ex.git on istag/ruby-22-centos7:latest 59 | build #1 pending for 7 seconds 60 | deployment #1 waiting on image or update 61 | 62 | 63 | 2 infos identified, use 'oc status -v' to see details. 
64 | 65 | [root@master ~]# oc status -v 66 | In project aniu-project on server https://master.aniu.so:8443 67 | 68 | svc/ruby-ex - 172.30.195.161:8080 69 | dc/ruby-ex deploys istag/ruby-ex:latest <- 70 | bc/ruby-ex source builds https://github.com/openshift/ruby-ex.git on istag/ruby-22-centos7:latest 71 | build #1 running for about a minute - bbb6701: Merge pull request #18 from durandom/master (Ben Parees ) 72 | deployment #1 waiting on image or update 73 | 74 | Info: 75 | * dc/ruby-ex has no readiness probe to verify pods are ready to accept traffic or ensure deployment is successful. 76 | try: oc set probe dc/ruby-ex --readiness ... 77 | * dc/ruby-ex has no liveness probe to verify pods are still running. 78 | try: oc set probe dc/ruby-ex --liveness ... 79 | 80 | View details with 'oc describe /' or list everything with 'oc get all'. 81 | 82 | # 程序描述 83 | [root@master ~]# oc describe svc/ruby-ex 84 | Name: ruby-ex 85 | Namespace: aniu-project 86 | Labels: app=ruby-ex 87 | Annotations: openshift.io/generated-by=OpenShiftNewApp 88 | Selector: app=ruby-ex,deploymentconfig=ruby-ex 89 | Type: ClusterIP 90 | IP: 172.30.195.161 91 | Port: 8080-tcp 8080/TCP 92 | TargetPort: 8080/TCP 93 | Endpoints: 94 | Session Affinity: None 95 | Events: 96 | 97 | # 删除应用 98 | [root@master ~]# oc delete all -l app=ruby-ex 99 | deploymentconfig "ruby-ex" deleted 100 | buildconfig "ruby-ex" deleted 101 | imagestream "ruby-22-centos7" deleted 102 | imagestream "ruby-ex" deleted 103 | service "ruby-ex" deleted 104 | ``` 105 | 106 | -------------------------------------------------------------------------------- /containers/openshift/openshift-origin-latest/安装和配置/安装集群/安装OpenShift容器注册表的独立部署.md: -------------------------------------------------------------------------------- 1 | # 安装OpenShift容器注册表的独立部署 2 | 3 | ## 关于OpenShift容器注册表 4 | 5 | OpenShift Origin是一个全功能的企业解决方案,包括一个名为OpenShift Container Registry(OCR)的集成容器注册表 。或者,您可以将OCR作为独立的容器注册表安装在本地或云中运行,而不是将OpenShift Origin作为开发人员的完整PaaS环境部署。 6 | 7 | 
在安装OCR的独立部署时,仍会安装一个主节点和节点集群,与典型的OpenShift Origin安装类似。然后,将容器注册表部署为在集群上运行。此独立部署选项对于需要容器注册表的管理员非常有用,但不需要包含以开发人员为中心的Web控制台和应用程序构建和部署工具的完整OpenShift Origin环境。 8 | 9 | OCR提供以下功能: 10 | 11 | - 以用户为中心的注册表Web控制台。 12 | 13 | - 默认安全流量,通过TLS提供。 14 | 15 | - 全球身份提供商认证。 16 | 17 | - 一个 项目命名空间模型,使团队可以通过基于角色的访问控制(RBAC)授权进行协作 。 18 | 19 | - 一个基于Kubernetes集群管理服务。 20 | 21 | - 称为图像流的图像抽象以增强图像管理。 22 | 23 | 管理员可能希望部署独立的OCR来单独管理支持多个OpenShift Origin群集的注册表。独立的OCR还使管理员能够分离其注册表以满足其自己的安全或合规要求。 24 | 25 | ## 最低硬件要求 26 | 27 | 安装独立的OCR具有以下硬件要求: 28 | 29 | - 物理或虚拟系统,或运行在公共或私人IaaS上的实例。 30 | 31 | - 基本操作系统:Fedora 21,CentOS 7.4或RHEL 7.3,7.4或7.5,带有“最小”安装选项以及来自RHEL 7 Extras频道或RHEL Atomic Host 7.4.5或更高版本的最新软件包。 32 | 33 | - NetworkManager 1.0或更高版本 34 | 35 | - 2个vCPU。 36 | 37 | - 最小16 GB RAM。 38 | 39 | - 包含/ var /的文件系统的最小15 GB硬盘空间。 40 | 41 | - Docker存储后端使用额外的最小15 GB未分配空间; 详情请参阅配置Docker存储。 42 | 43 | - OpenShift Origin仅支持x86_64架构的服务器。 44 | 45 | 会议将在/ var /在RHEL原子主机文件系统的大小要求,需要更改默认配置。请参阅 在Red Hat Enterprise Linux Atomic Host中管理存储器,以获取有关在安装期间或安装后进行配置的说明。 46 | 47 | ## 支持的系统拓扑 48 | 49 | 独立OCR支持以下系统拓扑: 50 | 51 | - All-in-one 包含主节点,节点,etcd和注册表组件的单个主机。 52 | - Multiple Masters (Highly-Available) 每台主机上都包含三台主机(主节点,节点,etcd和注册表),主设备配置为实现高可用性。 53 | 54 | ## 主机准备 -------------------------------------------------------------------------------- /containers/openshift/openshift-origin-latest/安装和配置/安装集群/系统准备.md: -------------------------------------------------------------------------------- 1 | # 主机准备 2 | 3 | ## 设置PATH 4 | 5 | 每个主机上的root用户的PATH必须包含以下目录: 6 | /bin 7 | /sbin 8 | /usr/bin 9 | /usr/sbin 10 | 11 | 这些都应该默认包含在全新的RHEL 7.x安装中。 12 | 13 | ## 安装基础包 14 | 15 | -------------------------------------------------------------------------------- /containers/rancher/quick-start-rancher2.0.md: -------------------------------------------------------------------------------- 1 | # 快速安装rancher环境指南 2 | 3 | ## 准备linux主机 4 | 5 | - A cloud-host vm 6 | - 内部部署VM 7 | - 裸机(物理机) 8 | 9 | > 使用云托管虚拟机时,需要允许到端口80和443的入站TCP通信。 10 | 11 | 根据以下要求配置主机 12 | 13 | 
- Ubuntu 16.04 (64-bit) 14 | - Red Hat Enterprise Linux 7.5 (64-bit) 15 | - RancherOS 1.3.0 (64-bit) 16 | 17 | 要求:Memory:4GB ,disk:50GB Docker:1.12.6、1.13.1、17.03.2 ,docker版本henz 18 | 19 | ## 安装Rancher 20 | 21 | - docker安装 22 | 23 | ``` 24 | sudo docker run -d --restart=unless-stopped -p 80:80 -p 443:443 rancher/rancher:stable 25 | ``` 26 | 27 | - 登录,登录Rancher开始使用该应用程序。登录后,将进行一些一次性配置。 28 | 29 | 1、通过浏览器访问:https:// 30 | 2、初始为admin创建密码 31 | 3、设置Rancher Server URL。 URL可以是IP地址或主机名。但是,添加到群集的每个节点都必须能够连接到此URL 32 | ![这里写图片描述](https://img-blog.csdn.net/20180705175244606?watermark/2/text/aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3doMjExMjEy/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70) 33 | ![这里写图片描述](https://img-blog.csdn.net/20180705175253562?watermark/2/text/aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3doMjExMjEy/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70) 34 | 35 | ## 创建集群 36 | 37 | - 步骤,add cluster - 选择 custom- 输入名字:rancher-1(自定义)- 默认next,选择所有角色:etcd、control、worker 38 | ![这里写图片描述](https://img-blog.csdn.net/20180705175704145?watermark/2/text/aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3doMjExMjEy/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70) 39 | 40 | - 复制docker run命令。贴到vm上执行,然后点击done完成。成功结果如下: 41 | 42 | ![这里写图片描述](https://img-blog.csdn.net/20180705175822554?watermark/2/text/aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L3doMjExMjEy/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70) 43 | 44 | 45 | ## 注意事项 46 | 47 | - docker版本选择,根据官网提供的版本进行安装 48 | - 笔者关闭了防火墙 49 | - 创建cluster的时候先执行生成的docker run命令再点击done 50 | - 如果安装失败删除容器重新来一遍 51 | 52 | ## 部署工作负载 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 | 73 | 74 | 75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 | 93 | 94 | 95 | 96 | 97 | 98 | 99 | 100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | 108 | 109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 | 133 | 134 | 135 | 
136 | 137 | 138 | 139 | 140 | 141 | 142 | 143 | 144 | 145 | 146 | 147 | 148 | 149 | 150 | 151 | 152 | 153 | 154 | 155 | 156 | 157 | 158 | 159 | 160 | 161 | 162 | 163 | 164 | 165 | 166 | 167 | 168 | 169 | 170 | 171 | 172 | 173 | 174 | 175 | 176 | 177 | 178 | 179 | -------------------------------------------------------------------------------- /containers/rancher/rancher-learn.md: -------------------------------------------------------------------------------- 1 | # Rancher(2.0)是Kubernetes的企业管理 2 | 3 | ## Architecture 4 | 5 | 介绍Rancher如何与Rancher构建的两种基本技术进行交互:Docker和Kubernetes 6 | 7 | ### Docker 8 | 9 | Docker是容器包装和运行时标准。开发人员从Dockerfiles构建容器映像,并从Docker注册表中分发容器映像。Docker Hub是最受欢迎的公共注册中心。许多组织还设置了私有Docker注册表。 Docker主要用于管理各个节点上的容器。 10 | 11 | ### Kubernetes 12 | 13 | Kubernetes是容器集群管理标准。YAML文件指定形成应用程序的容器和其他资源。Kubernetes执行诸如调度,扩展,服务发现,运行状况检查,私钥管理和配置管理等功能。 14 | 15 | Kubernetes集群由多个节点组成 16 | 17 | - etcd database 18 | 19 | 虽然只能在一个节点上运行etcd,但通常需要3个,5个或更多节点来创建HA配置。 20 | 21 | - Master nodes 22 | 23 | 主节点是无状态的,用于运行API服务器,调度程序和控制器。 24 | 25 | - Worker nodes 26 | 27 | 应用程序工作负载在工作节点上运行 28 | 29 | ### Rancher -------------------------------------------------------------------------------- /database/mariadb/MHA.md: -------------------------------------------------------------------------------- 1 | # MHA (Master High Availability) 2 | 3 | -------------------------------------------------------------------------------- /database/mariadb/yum-install-mariadb.md: -------------------------------------------------------------------------------- 1 | # CentOS7 yum安装mariadb 2 | 3 | - 参考:https://mariadb.com/kb/en/library/yum/ 4 | - https://downloads.mariadb.org/mariadb/repositories/#mirror=neusoft 5 | 6 | ## 添加MariaDB YUM存储库 7 | 8 | echo '[mariadb] 9 | name = MariaDB 10 | baseurl = http://yum.mariadb.org/10.3/centos7-amd64 11 | gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB 12 | gpgcheck=1 13 | ' | sudo tee /etc/yum.repos.d/MariaDB.repo 14 | 15 | 16 | sudo yum install MariaDB-server 
MariaDB-client 17 | 18 | systemctl start mariadb 19 | systemctl enable mariadb 20 | 21 | - 初始化 22 | 23 | mysql_secure_installation 24 | -------------------------------------------------------------------------------- /database/mysql/mysql-input-txt.md: -------------------------------------------------------------------------------- 1 | # mysql表字段导入文本数据 2 | 3 | load data infile '/data/mysql/tysx_s/filter.txt' into table aniu_message_filter lines terminated by'\r\n' (name); -------------------------------------------------------------------------------- /database/postgresql/postgresql-grant.md: -------------------------------------------------------------------------------- 1 | # postgrqsql权限管理 2 | 3 | 4 | -------------------------------------------------------------------------------- /database/postgresql/postgresql-use.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装PostgreSQL 2 | 3 | ## 配置PostgreSQL源 4 | 5 | ```bash 6 | # 安装postgresql96源 7 | rpm -Uvh https://download.postgresql.org/pub/repos/yum/9.6/redhat/rhel-7-x86_64/pgdg-centos96-9.6-3.noarch.rpm 8 | 9 | yum install postgresql96 -y 10 | yum install postgresql96-server -y 11 | # 初始化postgresql 12 | /usr/pgsql-9.6/bin/postgresql96-setup initdb 13 | ``` 14 | 15 | - 初始化完成可以执行:启动 16 | 17 | ```bash 18 | /usr/pgsql-9.6/bin/pg_ctl -D /var/lib/pgsql/9.6/data/ -l logfile start 19 | 20 | systemctl enable postgresql-9.6 21 | systemctl start postgresql-9.6 22 | # 23 | systemctl restart postgresql-9.6 24 | ``` 25 | 26 | ## postgres用户初始配置 27 | 28 | # set password 29 | # su - postgres 30 | -bash-4.2$ psql -c "alter user postgres with password 'password'" 31 | ALTER ROLE 32 | 33 | ## 配置远程连接PostgreSQL 34 | 35 | > 需要修改data目录下的pg_hba.conf和postgresql.conf 36 | 37 | - 编辑/var/lib/pgsql/9.6/data/pg_hba.conf 38 | ```bash 39 | # 82行添加访问 40 | TYPE DATABASE USER ADDRESS METHOD 41 | host all all 192.168.103.0/24 trust 42 | # 43 | METHOD can be "trust", "reject", "md5", "password", "gss", 
"sspi", 44 | 46 # "ident", "peer", "pam", "ldap", "radius" or "cert". 45 | ``` 46 | 47 | - 编辑/var/lib/pgsql/9.6/data/postgresql.conf 48 | 49 | # 59行 localhost更改* 50 | ```bash 51 | listen_addresses = '*' 52 | ``` 53 | - 查看日志 54 | 55 | ```bash 56 | tailf /var/lib/pgsql/9.6/data/pg_log 57 | ``` 58 | -------------------------------------------------------------------------------- /images/ovirt-initial-dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/images/ovirt-initial-dashboard.png -------------------------------------------------------------------------------- /images/vnc-viewwr-centos7.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/images/vnc-viewwr-centos7.png -------------------------------------------------------------------------------- /initial/add_repo.md: -------------------------------------------------------------------------------- 1 | # centos7配置第三方yum源 2 | 3 | ## 安装一个插件为每个已安装的存储库添加优先级 4 | 5 | yum -y install yum-plugin-priorities 6 | 7 | - epel 8 | 9 | yum -y install epel-release 10 | 11 | - CentOS SCLo 12 | 13 | yum -y install centos-release-scl-rh centos-release-scl 14 | sed -i -e "s/enabled=1/enabled=0/g" /etc/yum.repos.d/CentOS-SCLo-scl-rh.repo 15 | sed -i -e "s/enabled=1/enabled=0/g" /etc/yum.repos.d/CentOS-SCLo-scl.repo 16 | 17 | yum --enablerepo=centos-sclo-rh install [Package] 18 | 19 | - remi 20 | 21 | 22 | -------------------------------------------------------------------------------- /initial/config-ssh-server.md: -------------------------------------------------------------------------------- 1 | # 配置SSH免密登录 2 | 3 | - 为yunwei创建密钥对, 4 | 5 | [yunwei@wanghui ~]$ ssh-keygen -t rsa 6 | Generating public/private rsa key pair. 
7 | Enter file in which to save the key (/home/yunwei/.ssh/id_rsa): 8 | Created directory '/home/yunwei/.ssh'. 9 | Enter passphrase (empty for no passphrase): 10 | Enter same passphrase again: 11 | Your identification has been saved in /home/yunwei/.ssh/id_rsa. 12 | Your public key has been saved in /home/yunwei/.ssh/id_rsa.pub. 13 | The key fingerprint is: 14 | SHA256:dd9J3hk98AgcpHE4XOy21jWMvkQ87xDL7dol0nGzSJ0 yunwei@wanghui.io 15 | The key's randomart image is: 16 | +---[RSA 2048]----+ 17 | | ..**.. | 18 | | ++o. + .| 19 | | .+ o.o=.| 20 | | . + B+=B| 21 | | S . *.XE=| 22 | | o.O.=o| 23 | | . o.B..| 24 | | o.+.| 25 | | ... | 26 | +----[SHA256]-----+ 27 | 28 | mv ~/.ssh/id_rsa.pub ~/.ssh/authorized_keys 29 | chmod 600 ~/.ssh/authorized_keys 30 | 31 | 上面操作是在统一服务器上设置ssh免密认证,第一次ssh登录时,需要密码 第二次就不需要啦 32 | 33 | 34 | ## 不同服务器上,为yunwei设置免密登录 35 | 36 | - 在server2上,yunwei用户下执行 37 | 38 | ssh-keygen -t rsa # 会自动生成.ssh目录 39 | 40 | 或者手动创建: 41 | 42 | mkdir ~/.ssh 43 | 44 | chmod 700 ~/.ssh 45 | 46 | - 拷贝server1上,yunwei用户的公钥 47 | 48 | [yunwei@ecs-07 ~]$ scp yunwei@192.168.0.111:/home/yunwei/.ssh/id_rsa.pub ~/.ssh/id_rsa.pub_key 49 | yunwei@192.168.0.111's password: 50 | id_rsa.pub 51 | 52 | 53 | cat ~/.ssh/id_rsa.pub_key >> ~/.ssh/authorized_keys 54 | chmod 600 ~/.ssh/authorized_keys 55 | chmod 600 ~/.ssh/authorized_keys 56 | 57 | - 到server1上,在yunwei上 ssh登录到server2 58 | 59 | [yunwei@wanghui ~]$ ssh yunwei@192.168.0.110 # server1 60 | The authenticity of host '192.168.0.110 (192.168.0.110)' can't be established. 61 | ECDSA key fingerprint is SHA256:PMh21nLllg43mGbQ4lGswPBv8pFkESckgTyLWxNzM8g. 62 | ECDSA key fingerprint is MD5:45:4a:3f:61:c6:57:d5:38:1d:03:cf:a9:3d:7c:8d:38. 63 | Are you sure you want to continue connecting (yes/no)? yes 64 | Warning: Permanently added '192.168.0.110' (ECDSA) to the list of known hosts. 
65 | Last login: Sat Apr 28 22:55:59 2018 from 192.168.0.51 66 | [yunwei@ecs-07 ~]$ # server2 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /initial/lvm/lvm-errors.md: -------------------------------------------------------------------------------- 1 | # 2 | 3 | [root@node2 ~]# vgextend cl /dev/sdb1 4 | WARNING: PV /dev/sdb1 is marked in use but no VG was found using it. 5 | WARNING: PV /dev/sdb1 might need repairing. 6 | PV /dev/sdb1 is used by a VG but its metadata is missing. 7 | Can't initialize PV '/dev/sdb1' without -ff. 8 | /dev/sdb1: physical volume not initialized. 9 | 10 | 11 | 恢复物理卷元数据 -------------------------------------------------------------------------------- /initial/lvm/lvm-use.md: -------------------------------------------------------------------------------- 1 | # lvm创建使用 2 | 3 | - 安装软件包 4 | 5 | yum install xfsprogs -y # 重新格式化 resiize分区的时候使用 6 | 7 | 8 | ``` 9 | umount /dev/mapper/centos-home && lvremove /dev/centos/home 10 | 11 | # 编辑fstab 删除挂载home的配置文件,笔者安装的时候让其自动创建lvm卷组并自动挂载,想在这里自定义 12 | ``` 13 | 14 | ## 创建pv 15 | 16 | pvcreate /dev/sdb1 17 | 18 | pvcreate --setphysicalvolumesize 50G /dev/sdb1 19 | 20 | - 查看 pv 21 | 22 | pvdisplay /dev/sdb1 23 | 24 | - 改变pv的大小 25 | 26 | pvresize --setphysicalvolumesize 50G /dev/sdb1 27 | 28 | - 显示物理卷的报告 29 | 30 | pvs /dev/sdb1 31 | 32 | - 扫描 pv 33 | 34 | pvscan 35 | 36 | - 删除pv 37 | 38 | pvremove /dev/sdb1 39 | 40 | ## 创建vg组,添加硬盘,新加硬盘前提 41 | 42 | - 创建volume group 43 | 44 | vgcreate centos /dev/sdb1 45 | 46 | vgcreate vg_name /dev/sdb1 /dev/sdc1 47 | 48 | - 展示卷组 49 | 50 | vgdisplay vg_dlp 51 | 52 | - 重命名卷组 53 | 54 | vgrename vg_name vg_newname 55 | 56 | vgdisplay vg_newname # 对比 vg_name 57 | 58 | - vg查看 扫描 59 | 60 | vgs 61 | 62 | vgscan 63 | 64 | ## 笔者建议熟练使用--help以及 man 65 | 66 | - 扩展卷组 67 | 68 | vgextend vg_data /dev/sdd1 69 | 70 | - 缩减卷组 71 | 72 | vgreduce vg_data /dev/sdd1 73 | 74 | - 删除卷组 75 | 76 | 先禁用目标卷组并将其删除 77 | 78 | vgchange -a n vg_data 79 | 80 
| vgremove vg_data 81 | 82 | ## 管理 物理卷 83 | 84 | lvcreate -L 50G -n lv_name vg_name 85 | 86 | lvcreate -l 100%FREE -n lv_name vg_name # 使用全部空闲区域 87 | 88 | lvcreate -l 100%FREE -n data centos 89 | 90 | mkfs.ext4 /dev/mapper/centos-data 91 | 92 | mkfs.xfs /dev/mapper/centos-data 93 | 94 | echo "/dev/mapper/centos-data /data xfs defaults 0 0" >> /etc/fstab 95 | 96 | - 显示lv 97 | 98 | lvdisplay /dev/vg_name/lv_name 99 | 100 | - 展示 扫描 lv 101 | 102 | lvs 103 | lvscan 104 | 105 | - 拍摄逻辑卷的快照 106 | 107 | lvcreate -s -L 50G -n snap-lv_name /dev/vg_name/lv_name 108 | 109 | lvdisplay /dev/vg_name/lv_name /dev/vg_name/snap-lv_name 110 | 111 | - 扩展卷组 112 | 113 | lvextend -L 50G /dev/vg_name/lv_name # 扩展到50G 114 | 115 | lvextend -L +50G /dev/vg_name/lv_name # 添加50G 116 | 117 | lvdisplay /dev/vg_name/lv_name 118 | 119 | - 对于扩展xfs文件系统的情况(指定挂载点) 120 | 121 | xfs_growfs /mnt 122 | 123 | # resize2fs /dev/vg_name/lv_name 124 | 125 | mkfs.xfs -f /dev/vg_name/lv_name 126 | 127 | xfs_growfs /dev/vg_name/lv_name 128 | 129 | ## 缩减lv前需要先卸载 130 | 131 | # 针对 132 | 133 | e2fsck -f /dev/vg_name/lv_name 50G 134 | resize2fs /dev/vg_name/lv_name 50G 135 | 136 | lvreduce -L 50G /dev/vg_name/lv_name 50G # 缩减到50G # 先卸载 137 | 138 | - 删除卷组 139 | lvchange -an /dev/vg_name/lv_name 140 | 141 | lvremove /dev/vg_name/lv_name 142 | 143 | 144 | ## 创建镜像卷 145 | 146 | vgcreate vg_mirror /dev/sdb1 /dev/sdc1 147 | 148 | lvcreate -L 50G -m1 -n lv_mirror vg_mirror 149 | 150 | lvdisplay /dev/vg_mirror/lv_mirror 151 | 152 | vgextend vg_name /dev/sdc1 153 | 154 | - 设置镜像卷 155 | lvconvert -m1 /dev/vg_name/lv_name /dev/sdc1 156 | 157 | - 指定-m0以取消设置 158 | 159 | lvconvert -m0 /dev/vg_name/lv_name 160 | 161 | lvs -a -o vg_name,name,devices,size 162 | 163 | ## 创建带区卷 164 | 165 | vgcreate vg_striped /dev/sdb1 /dev/sdc1 166 | 167 | lvcreate -L 50G -i2 -I 64 -n lv_striped vg_striped 168 | 169 | lvdisplay /dev/vg_striped/lv_striped 170 | 171 | lvs -a -o vg_name,name,devices,size 
-------------------------------------------------------------------------------- /initial/timing-contributions.md: -------------------------------------------------------------------------------- 1 | 路漫漫其修远兮 吾将上下而求索 2 | 路漫漫其修远兮 吾将上下而求索 3 | 路漫漫其修远兮 吾将上下而求索 4 | 路漫漫其修远兮 吾将上下而求索 5 | 路漫漫其修远兮 吾将上下而求索 6 | 路漫漫其修远兮 吾将上下而求索 7 | 路漫漫其修远兮 吾将上下而求索 8 | 路漫漫其修远兮 吾将上下而求索 9 | 路漫漫其修远兮 吾将上下而求索 10 | 路漫漫其修远兮 吾将上下而求索 11 | 路漫漫其修远兮 吾将上下而求索 12 | 路漫漫其修远兮 吾将上下而求索 13 | 路漫漫其修远兮 吾将上下而求索 14 | 路漫漫其修远兮 吾将上下而求索 15 | 路漫漫其修远兮 吾将上下而求索 16 | 路漫漫其修远兮 吾将上下而求索 17 | 路漫漫其修远兮 吾将上下而求索 18 | 路漫漫其修远兮 吾将上下而求索 19 | 路漫漫其修远兮 吾将上下而求索 20 | 路漫漫其修远兮 吾将上下而求索 21 | 路漫漫其修远兮 吾将上下而求索 22 | 路漫漫其修远兮 吾将上下而求索 23 | 路漫漫其修远兮 吾将上下而求索 24 | 路漫漫其修远兮 吾将上下而求索 25 | 路漫漫其修远兮 吾将上下而求索 26 | 路漫漫其修远兮 吾将上下而求索 27 | 路漫漫其修远兮 吾将上下而求索 28 | 路漫漫其修远兮 吾将上下而求索 29 | 路漫漫其修远兮 吾将上下而求索 30 | 路漫漫其修远兮 吾将上下而求索 31 | 路漫漫其修远兮 吾将上下而求索 32 | 路漫漫其修远兮 吾将上下而求索 33 | 路漫漫其修远兮 吾将上下而求索 34 | 路漫漫其修远兮 吾将上下而求索 35 | 路漫漫其修远兮 吾将上下而求索 36 | 路漫漫其修远兮 吾将上下而求索 37 | 路漫漫其修远兮 吾将上下而求索 38 | 路漫漫其修远兮 吾将上下而求索 39 | 路漫漫其修远兮 吾将上下而求索 40 | 路漫漫其修远兮 吾将上下而求索 41 | 路漫漫其修远兮 吾将上下而求索 42 | 路漫漫其修远兮 吾将上下而求索 43 | 路漫漫其修远兮 吾将上下而求索 44 | 路漫漫其修远兮 吾将上下而求索 45 | 路漫漫其修远兮 吾将上下而求索 46 | 路漫漫其修远兮 吾将上下而求索 47 | 路漫漫其修远兮 吾将上下而求索 48 | 路漫漫其修远兮 吾将上下而求索 49 | 路漫漫其修远兮 吾将上下而求索 50 | 路漫漫其修远兮 吾将上下而求索 51 | 路漫漫其修远兮 吾将上下而求索 52 | 路漫漫其修远兮 吾将上下而求索 53 | 路漫漫其修远兮 吾将上下而求索 54 | 路漫漫其修远兮 吾将上下而求索 55 | 路漫漫其修远兮 吾将上下而求索 56 | 路漫漫其修远兮 吾将上下而求索 57 | 路漫漫其修远兮 吾将上下而求索 58 | 路漫漫其修远兮 吾将上下而求索 59 | 路漫漫其修远兮 吾将上下而求索 60 | 路漫漫其修远兮 吾将上下而求索 61 | 路漫漫其修远兮 吾将上下而求索 62 | 路漫漫其修远兮 吾将上下而求索 63 | 路漫漫其修远兮 吾将上下而求索 64 | 路漫漫其修远兮 吾将上下而求索 65 | 路漫漫其修远兮 吾将上下而求索 66 | 路漫漫其修远兮 吾将上下而求索 67 | 路漫漫其修远兮 吾将上下而求索 68 | 路漫漫其修远兮 吾将上下而求索 69 | 路漫漫其修远兮 吾将上下而求索 70 | 路漫漫其修远兮 吾将上下而求索 71 | 路漫漫其修远兮 吾将上下而求索 72 | 路漫漫其修远兮 吾将上下而求索 73 | 路漫漫其修远兮 吾将上下而求索 74 | 路漫漫其修远兮 吾将上下而求索 75 | 路漫漫其修远兮 吾将上下而求索 76 | 路漫漫其修远兮 吾将上下而求索 77 | 路漫漫其修远兮 吾将上下而求索 78 | 路漫漫其修远兮 吾将上下而求索 79 | 路漫漫其修远兮 吾将上下而求索 80 | 路漫漫其修远兮 吾将上下而求索 81 | 路漫漫其修远兮 吾将上下而求索 82 | 路漫漫其修远兮 吾将上下而求索 83 | 路漫漫其修远兮 吾将上下而求索 84 | 路漫漫其修远兮 吾将上下而求索 85 | 路漫漫其修远兮 吾将上下而求索 86 | 路漫漫其修远兮 吾将上下而求索 87 | 
路漫漫其修远兮 吾将上下而求索 88 | 路漫漫其修远兮 吾将上下而求索 89 | 路漫漫其修远兮 吾将上下而求索 90 | 路漫漫其修远兮 吾将上下而求索 91 | 路漫漫其修远兮 吾将上下而求索 92 | 路漫漫其修远兮 吾将上下而求索 93 | 路漫漫其修远兮 吾将上下而求索 94 | 路漫漫其修远兮 吾将上下而求索 95 | 路漫漫其修远兮 吾将上下而求索 96 | 路漫漫其修远兮 吾将上下而求索 97 | 路漫漫其修远兮 吾将上下而求索 98 | 路漫漫其修远兮 吾将上下而求索 99 | 路漫漫其修远兮 吾将上下而求索 100 | 路漫漫其修远兮 吾将上下而求索 101 | 路漫漫其修远兮 吾将上下而求索 102 | 路漫漫其修远兮 吾将上下而求索 103 | 路漫漫其修远兮 吾将上下而求索 104 | 路漫漫其修远兮 吾将上下而求索 105 | 路漫漫其修远兮 吾将上下而求索 106 | 路漫漫其修远兮 吾将上下而求索 107 | 路漫漫其修远兮 吾将上下而求索 108 | 路漫漫其修远兮 吾将上下而求索 109 | 路漫漫其修远兮 吾将上下而求索 110 | 路漫漫其修远兮 吾将上下而求索 111 | 路漫漫其修远兮 吾将上下而求索 112 | 路漫漫其修远兮 吾将上下而求索 113 | 路漫漫其修远兮 吾将上下而求索 114 | 路漫漫其修远兮 吾将上下而求索 115 | -------------------------------------------------------------------------------- /initial/virtual-machine-config.md: -------------------------------------------------------------------------------- 1 | # ovirt虚拟机最小安装初始设置 2 | 3 | - 更新 4 | 5 | yum update -y 6 | 7 | - 更改主机名 8 | 9 | hostnamectl set-hostname ecs-119 10 | 11 | - 禁用ipv6,selinux 12 | 13 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config 14 | 15 | [root@localhost ~]# vi /etc/default/grub 16 | # line 6: add 17 | GRUB_CMDLINE_LINUX="ipv6.disable=1 rd.lvm.lv=fedora-server/root..... 
18 | # apply changing 19 | [root@localhost ~]# grub2-mkconfig -o /boot/grub2/grub.cfg 20 | 21 | - 关闭防火墙 22 | 23 | systemctl stop firewalld && systemctl disable firewalld 24 | 25 | - 添加epel源 26 | 27 | yum -y install epel-release 28 | -------------------------------------------------------------------------------- /initial/系统安装.md: -------------------------------------------------------------------------------- 1 | # 下载CentOS7 2 | 3 | - https://www.centos.org/download/ 4 | 5 | # 制作启动盘 6 | 7 | - 利用工具rufus制作CentOS7 U盘启动 8 | - 利用光驱刻录CentOS7系统到CD盘中 9 | 10 | ## 安装CentOS7 -------------------------------------------------------------------------------- /language/nodejs/通过包管理器安装Node.js.md: -------------------------------------------------------------------------------- 1 | # 通过包管理器安装Node.js 2 | 3 | 注意:此页面上的软件包由各自的软件包维护和支持,而不是 Node.js核心团队。请向维护人员报告您遇到的任何问题。如果事实证明你的问题是Node.js本身的错误,那么维护者会向上游报告问题。 -------------------------------------------------------------------------------- /language/python/python-errors.md: -------------------------------------------------------------------------------- 1 | # 日常python使用报错记录 2 | 3 | - pip install redis-trib 4 | 5 | src/hiredis.h:4:20: fatal error: Python.h: No such file or directory 6 | 7 | 解决:sudo yum install python-devel # for python2.x installs -------------------------------------------------------------------------------- /load balancer/f5/f5-use.md: -------------------------------------------------------------------------------- 1 | # 创建池 automap 2 | 3 | # 创建虚拟主机 4 | 5 | # 添加节点 -------------------------------------------------------------------------------- /load balancer/haproxy/install-haproxy.md: -------------------------------------------------------------------------------- 1 | # haproxy安装使用 2 | 3 | 安装HAProxy以配置负载平衡服务器 4 | 5 | - 实验环境: 6 | 7 | HAproxy:192.168.0.111 8 | Backend1:192.168.10.20 9 | Backend2:192.168.10.21 10 | 11 | ## 安装haproxy 12 | 13 | ``` 14 | [root@wanghui ~]# yum -y install haproxy 15 | ``` 16 | 17 | - 配置haproxy 18 
| 19 | ``` 20 | [root@wanghui ~]# mv /etc/haproxy/haproxy.cfg /etc/haproxy/haproxy.cfg.org 21 | [root@wanghui ~]# vi /etc/haproxy/haproxy.cfg 22 | # create new 23 | global 24 | # for logging section 25 | log 127.0.0.1 local2 info 26 | chroot /var/lib/haproxy 27 | pidfile /var/run/haproxy.pid 28 | # max per-process number of connections 29 | maxconn 256 30 | # process' user and group 31 | user haproxy 32 | group haproxy 33 | # makes the process fork into background 34 | daemon 35 | 36 | defaults 37 | # running mode 38 | mode http 39 | # use global settings 40 | log global 41 | # get HTTP request log 42 | option httplog 43 | # timeout if backends do not reply 44 | timeout connect 10s 45 | # timeout on client side 46 | timeout client 30s 47 | # timeout on server side 48 | timeout server 30s 49 | 50 | # define frontend ( set any name for "http-in" section ) 51 | frontend http-in 52 | # listen 80 53 | bind *:80 54 | # set default backend 55 | default_backend backend_servers 56 | # send X-Forwarded-For header 57 | option forwardfor 58 | 59 | # define backend 60 | backend backend_servers 61 | # balance with roundrobin 62 | balance roundrobin 63 | # define backend servers 64 | server www01 192.168.10.20:80 check 65 | server www02 192.168.10.21:80 check 66 | 67 | [root@wanghui ~]# systemctl start haproxy 68 | [root@wanghui ~]# systemctl enable haproxy 69 | ``` 70 | 71 | - 配置Rsyslog以获取HAProxy的日志 72 | 73 | ``` 74 | [root@wanghui ~]# vi /etc/rsyslog.conf 75 | # line 15,16: uncomment, lne 17: add 76 | $ModLoad imudp 77 | $UDPServerRun 514 78 | $AllowedSender UDP, 127.0.0.1 79 | # line 54: change like follows 80 | *.info;mail.none;authpriv.none;cron.none,local2.none /var/log/messages 81 | local2.* /var/log/haproxy.log 82 | 83 | [root@wanghui ~]# systemctl restart rsyslog 84 | ``` 85 | 86 | ## 配置HAProxy以在Web上查看HAProxy的统计信息 87 | 88 | - 配置HAProxy以在Web上查看HAProxy的统计信息 89 | 90 | ``` 91 | [root@wanghui ~]# vi /etc/haproxy/haproxy.cfg 92 | # add follows in the "frontend" section 93 | 
frontend http-in 94 | bind *:80 95 | # enable statistics reports 96 | stats enable 97 | # auth info for statistics site 98 | stats auth admin:adminpassword 99 | # hide version of HAProxy 100 | stats hide-version 101 | # display HAProxy hostname 102 | stats show-node 103 | # refresh time 104 | stats refresh 60s 105 | # statistics reports' URI 106 | stats uri /haproxy?stats 107 | 108 | [root@wanghui ~]# systemctl restart haproxy 109 | ``` 110 | 111 | - 查看状态 112 | 113 | -------------------------------------------------------------------------------- /load balancer/pound/c7-install-pound.md: -------------------------------------------------------------------------------- 1 | # pound安装使用 2 | 3 | 安装Pound是HTTP/HTTPS负载平衡软件 4 | 5 | 配置Pound以负载均衡到后端#1,后端#2,后端#3 Web服务器 6 | 7 | Frontend Pound:192.168.10.24 8 | Backend1:192.168.10.20 9 | Backend2:192.168.10.21 10 | Backend3:192.168.10.22 11 | 12 | ## 安装pound 13 | 14 | ``` 15 | # 安装epel源 16 | yum -y install Pound 17 | ``` 18 | 19 | - 配置pound 20 | 21 | ``` 22 | [root@wanghui ~]# mv /etc/pound.cfg /etc/pound.cfg.org 23 | [root@wanghui ~]# cat /etc/pound.cfg 24 | User "pound" 25 | Group "pound" 26 | # log level (max: 5) 27 | LogLevel 3 28 | # specify LogFacility 29 | LogFacility local1 30 | # interval of heartbeat - seconds 31 | Alive 30 32 | 33 | # define frontend 34 | ListenHTTP 35 | Address 0.0.0.0 36 | Port 80 37 | End 38 | 39 | # define backend 40 | Service 41 | BackEnd 42 | # backend server's IP address 43 | Address 192.168.10.20 44 | # backend server's port 45 | Port 80 46 | # set priority (value is 1-9, max 9) 47 | Priority 5 48 | End 49 | 50 | BackEnd 51 | Address 192.168.10.21 52 | Port 80 53 | Priority 5 54 | End 55 | 56 | BackEnd 57 | Address 192.168.10.22 58 | Port 80 59 | Priority 5 60 | End 61 | End 62 | 63 | [root@wanghui ~]# sed -i -e "s/^PIDFile/#PIDFile/" /usr/lib/systemd/system/pound.service 64 | [root@wanghui ~]# systemctl start pound 65 | [root@wanghui ~]# systemctl enable pound 66 | ``` 67 | 68 | - 
将Rsyslog设置更改为记录(record)来自Pound的日志 69 | 70 | ``` 71 | [root@wanghui ~]# vi /etc/rsyslog.conf 72 | # line 54: change like follows 73 | *.info;mail.none;authpriv.none;cron.none;local1.none /var/log/messages 74 | local1.* /var/log/pound.log 75 | 76 | [root@wanghui ~]# systemctl restart rsyslog 77 | ``` 78 | 79 | - 测试 80 | 81 | ``` 82 | [root@node5 ~]# curl http://192.168.10.24 83 |

node1.aniu.so

84 | [root@node5 ~]# curl http://192.168.10.24 85 |

node2.aniu.so

86 | [root@node5 ~]# curl http://192.168.10.24 87 |

node3.aniu.so

88 | ``` -------------------------------------------------------------------------------- /memory cache/redis/6379.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | protected-mode no 3 | port 6379 4 | tcp-backlog 511 5 | timeout 60 6 | tcp-keepalive 300 7 | daemonize yes 8 | supervised no 9 | pidfile /var/run/redis/redis-6379.pid 10 | loglevel notice 11 | logfile /var/log/redis/redis-6379.log 12 | databases 16 13 | save "" 14 | stop-writes-on-bgsave-error yes 15 | rdbcompression yes 16 | rdbchecksum yes 17 | dbfilename dump-6379.rdb 18 | dir /var/lib/redis 19 | slave-serve-stale-data yes 20 | slave-read-only yes 21 | repl-diskless-sync no 22 | repl-diskless-sync-delay 5 23 | repl-disable-tcp-nodelay no 24 | slave-priority 100 25 | requirepass Aniuredis123 26 | rename-command FLUSHALL "" 27 | maxclients 10000 28 | maxmemory 10gb 29 | maxmemory-policy volatile-lru 30 | maxmemory-samples 5 31 | appendonly yes 32 | appendfilename "appendonly-6379.aof" 33 | appendfsync everysec 34 | no-appendfsync-on-rewrite no 35 | auto-aof-rewrite-percentage 100 36 | auto-aof-rewrite-min-size 64mb 37 | aof-load-truncated yes 38 | lua-time-limit 5000 39 | slowlog-log-slower-than 10000 40 | slowlog-max-len 128 41 | latency-monitor-threshold 0 42 | notify-keyspace-events "" 43 | hash-max-ziplist-entries 512 44 | hash-max-ziplist-value 64 45 | list-max-ziplist-size -2 46 | list-compress-depth 0 47 | set-max-intset-entries 512 48 | zset-max-ziplist-entries 128 49 | zset-max-ziplist-value 64 50 | hll-sparse-max-bytes 3000 51 | activerehashing yes 52 | client-output-buffer-limit normal 0 0 0 53 | client-output-buffer-limit slave 256mb 64mb 60 54 | client-output-buffer-limit pubsub 32mb 8mb 60 55 | hz 10 56 | aof-rewrite-incremental-fsync yes -------------------------------------------------------------------------------- /memory cache/redis/Redis_Cluster.pdf: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/memory cache/redis/Redis_Cluster.pdf -------------------------------------------------------------------------------- /memory cache/redis/cluster-slots.md: -------------------------------------------------------------------------------- 1 | # 集群SETSLOT slot 2 | 3 | ## -------------------------------------------------------------------------------- /memory cache/redis/cluster/cluster-command.md: -------------------------------------------------------------------------------- 1 | # redis集群常用命令 2 | 3 | CLUSTER info:打印集群的信息。 4 | CLUSTER nodes:列出集群当前已知的所有节点(node)的相关信息。 5 | CLUSTER meet :将ip和port所指定的节点添加到集群当中。 6 | CLUSTER addslots [slot ...]:将一个或多个槽(slot)指派(assign)给当前节点。 7 | CLUSTER delslots [slot ...]:移除一个或多个槽对当前节点的指派。 8 | CLUSTER slots:列出槽位、节点信息。 9 | CLUSTER slaves :列出指定节点下面的从节点信息。 10 | CLUSTER replicate :将当前节点设置为指定节点的从节点。 11 | CLUSTER saveconfig:手动执行命令保存保存集群的配置文件,集群默认在配置修改的时候会自动保存配置文件。 12 | CLUSTER keyslot :列出key被放置在哪个槽上。 13 | CLUSTER flushslots:移除指派给当前节点的所有槽,让当前节点变成一个没有指派任何槽的节点。 14 | CLUSTER countkeysinslot :返回槽目前包含的键值对数量。 15 | CLUSTER getkeysinslot :返回count个槽中的键。 16 | 17 | CLUSTER setslot node 将槽指派给指定的节点,如果槽已经指派给另一个节点,那么先让另一个节点删除该槽,然后再进行指派。 18 | CLUSTER setslot migrating 将本节点的槽迁移到指定的节点中。 19 | CLUSTER setslot importing 从 node_id 指定的节点中导入槽 slot 到本节点。 20 | CLUSTER setslot stable 取消对槽 slot 的导入(import)或者迁移(migrate)。 21 | 22 | CLUSTER failover:手动进行故障转移。 23 | CLUSTER forget :从集群中移除指定的节点,这样就无法完成握手,过期时为60s,60s后两节点又会继续完成握手。 24 | CLUSTER reset [HARD|SOFT]:重置集群信息,soft是清空其他节点的信息,但不修改自己的id,hard还会修改自己的id,不传该参数则使用soft方式。 25 | 26 | CLUSTER count-failure-reports :列出某个节点的故障报告的长度。 27 | CLUSTER SET-CONFIG-EPOCH:设置节点epoch,只有在节点加入集群前才能设置。 28 | 29 | 30 | ## slots迁移 31 | 32 | 1,在目标节点上声明将从源节点上迁入Slot CLUSTER SETSLOT IMPORTING 33 | 2,在源节点上声明将往目标节点迁出Slot CLUSTER SETSLOT migrating 34 | 3,批量从源节点获取KEY CLUSTER 
GETKEYSINSLOT 35 | 4,将获取的Key迁移到目标节点 MIGRATE 0 36 | 重复步骤3,4直到所有数据迁移完毕,MIGRATE命令会将所有的指定的key通过RESTORE key ttl serialized-value REPLACE迁移给target 37 | 5,分别向双方节点发送 CLUSTER SETSLOT NODE ,该命令将会广播给集群其他节点,取消importing和migrating。 38 | 6,等待集群状态变为OK CLUSTER INFO 中的 cluster_state = ok -------------------------------------------------------------------------------- /memory cache/redis/create-cluster: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Settings 4 | PORT=30000 5 | TIMEOUT=2000 6 | NODES=6 7 | REPLICAS=1 8 | PASSWORD=Aniuredis123 9 | IP=`/usr/sbin/ifconfig | grep 'inet' | grep -v '127.0.0.1' | awk '{ print $2}'` 10 | 11 | # 注意查看下IP获取到的是不是唯一,这里要求是唯一的IP地址 12 | 13 | # You may want to put the above config parameters into config.sh in order to 14 | # override the defaults without modifying this script. 15 | 16 | if [ -a config.sh ] 17 | then 18 | source "config.sh" 19 | fi 20 | 21 | # Computed vars 22 | ENDPORT=$((PORT+NODES)) 23 | 24 | if [ "$1" == "start" ] 25 | then 26 | while [ $((PORT < ENDPORT)) != "0" ]; do 27 | PORT=$((PORT+1)) 28 | echo "Starting $PORT" 29 | # ../../src/redis-server --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes 30 | # 31 | ../../src/redis-server --bind 0.0.0.0 --requirepass $PASSWORD --masterauth $PASSWORD --protected-mode no --port $PORT --cluster-enabled yes --cluster-config-file nodes-${PORT}.conf --cluster-node-timeout $TIMEOUT --appendonly yes --appendfilename appendonly-${PORT}.aof --dbfilename dump-${PORT}.rdb --logfile ${PORT}.log --daemonize yes 32 | done 33 | exit 0 34 | fi 35 | 36 | if [ "$1" == "create" ] 37 | then 38 | HOSTS="" 39 | while [ $((PORT < ENDPORT)) != "0" ]; do 40 | PORT=$((PORT+1)) 41 | # HOSTS="$HOSTS 127.0.0.1:$PORT" 42 | HOSTS="$HOSTS 192.168.1.124:$PORT" 43 | done 44 | 
../../src/redis-trib.rb create --replicas $REPLICAS $HOSTS 45 | exit 0 46 | fi 47 | 48 | if [ "$1" == "stop" ] 49 | then 50 | while [ $((PORT < ENDPORT)) != "0" ]; do 51 | PORT=$((PORT+1)) 52 | echo "Stopping $PORT" 53 | ../../src/redis-cli -a $PASSWORD -h $IP -p $PORT shutdown nosave # fix: '-p' must be immediately followed by the port (was '-p -a $PASSWORD -h $IP $PORT') 54 | done 55 | exit 0 56 | fi 57 | 58 | if [ "$1" == "watch" ] 59 | then 60 | PORT=$((PORT+1)) 61 | while [ 1 ]; do 62 | clear 63 | date 64 | ../../src/redis-cli -a $PASSWORD -h $IP -p $PORT cluster nodes | head -30 # fix: same '-p' argument-order bug as in 'stop' 65 | sleep 1 66 | done 67 | exit 0 68 | fi 69 | 70 | if [ "$1" == "tail" ] 71 | then 72 | INSTANCE=$2 73 | PORT=$((PORT+INSTANCE)) 74 | tail -f ${PORT}.log 75 | exit 0 76 | fi 77 | 78 | if [ "$1" == "call" ] 79 | then 80 | while [ $((PORT < ENDPORT)) != "0" ]; do 81 | PORT=$((PORT+1)) 82 | ../../src/redis-cli -a $PASSWORD -p $PORT $2 $3 $4 $5 $6 $7 $8 $9 # fix: instances are started with --requirepass, so 'call' needs -a or every command returns NOAUTH 83 | done 84 | exit 0 85 | fi 86 | 87 | if [ "$1" == "clean" ] 88 | then 89 | rm -rf *.log 90 | rm -rf appendonly*.aof 91 | rm -rf dump*.rdb 92 | rm -rf nodes*.conf 93 | exit 0 94 | fi 95 | 96 | if [ "$1" == "clean-logs" ] 97 | then 98 | rm -rf *.log 99 | exit 0 100 | fi 101 | 102 | echo "Usage: $0 [start|create|stop|watch|tail|clean|clean-logs]" 103 | echo "start -- Launch Redis Cluster instances." 104 | echo "create -- Create a cluster using redis-trib create." 105 | echo "stop -- Stop Redis Cluster instances." 106 | echo "watch -- Show CLUSTER NODES output (first 30 lines) of first node." 107 | echo "tail -- Run tail -f of instance at base port + ID." 108 | echo "clean -- Remove all instances data, logs, configs." 109 | echo "clean-logs -- Remove just instances logs." 
110 | -------------------------------------------------------------------------------- /memory cache/redis/install.md: -------------------------------------------------------------------------------- 1 | # redis安装 2 | 3 | sudo yum -y install redis 4 | 5 | 6 | # 7 | -------------------------------------------------------------------------------- /memory cache/redis/onekey-install-redis-cluster.md: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # ---------------------------------------- 3 | # Functions: onekey install redis cluster 4 | # Auther: shaonbean@qq.com 5 | # Changelog: 6 | # 2018-06-15 wanghui initial 7 | # ---------------------------------------- 8 | # define some variables 9 | redis_cluster_ip=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}'` 10 | version=4.0.9 11 | 12 | yum update -y && yum groupinstall -y "Development Tools" && yum -y install tcl 13 | 14 | wget http://download.redis.io/releases/redis-$version.tar.gz -P /usr/local/src -------------------------------------------------------------------------------- /memory cache/redis/redis-errors.md: -------------------------------------------------------------------------------- 1 | # redis4.0.9 集群迁移solts错误 2 | 3 | - [ERR] Calling MIGRATE: ERR Syntax error, try CLIENT (LIST | KILL | GETNAME | SETNAME | PAUSE | REPLY) 4 | 5 | 集群状态: 6 | 7 | ``` 8 | [root@ecs-116 src]# /data/redis/bin/redis-trib.rb check 192.168.0.116:30001 9 | >>> Performing Cluster Check (using node 192.168.0.116:30001) 10 | M: 3e9e5c8043ff41d6ebf60a95a31a648d064f4448 192.168.0.116:30001 11 | slots:0-16383 (16384 slots) master 12 | 3 additional replica(s) 13 | M: 41da0f40c4fd966a6cdd6b00521b64888aa271a6 192.168.0.116:30002 14 | slots: (0 slots) master 15 | 0 additional replica(s) 16 | S: fe41b92d7b4abc5361115a42b38941c4282f10c7 192.168.0.116:30006 17 | slots: (0 slots) slave 18 | replicates 3e9e5c8043ff41d6ebf60a95a31a648d064f4448 19 | S: 
59b3cb11c42f57b54c5f6f69c2cc886c03ee4930 192.168.0.116:30005 20 | slots: (0 slots) slave 21 | replicates 3e9e5c8043ff41d6ebf60a95a31a648d064f4448 22 | S: 6f26676c6ab5fdb622e950eefa9b73f98e7804d7 192.168.0.116:30004 23 | slots: (0 slots) slave 24 | replicates 3e9e5c8043ff41d6ebf60a95a31a648d064f4448 25 | M: 9a51adb7912816b0e7d307df7312eb3d1e851f43 192.168.0.116:30003 26 | slots: (0 slots) master 27 | 0 additional replica(s) 28 | [OK] All nodes agree about slots configuration. 29 | >>> Check for open slots... 30 | [WARNING] Node 192.168.0.116:30001 has slots in migrating state (0). 31 | [WARNING] Node 192.168.0.116:30002 has slots in importing state (0). 32 | [WARNING] The following slots are open: 0 33 | >>> Check slots coverage... 34 | [OK] All 16384 slots covered. 35 | ``` 36 | 37 | ## 报错解决 38 | 39 | - 参考:https://github.com/antirez/redis/issues/4272 40 | 41 | ``` 42 | # 只需安装较早版本的redis.rb即可解决问题 43 | [root@ecs-116 src]# gem list |grep redis 44 | redis (4.0.1, 3.3.3) 45 | [root@ecs-116 src]# gem uninstall redis --version 4.0.1 46 | Successfully uninstalled redis-4.0.1 47 | 48 | gem install redis -v 3.3.3 # 下载完成,记得修改client.rb文件,添加redis集群密码 49 | ``` 50 | 51 | - [ERR] Calling MIGRATE: ERR Target instance replied with error: NOAUTH Authentication required. 
52 | 53 | https://github.com/antirez/redis/pull/4288 54 | 55 | 56 | 57 | - 连接报错Unexpected end of stream 58 | 59 | 60 | 61 | -------------------------------------------------------------------------------- /memory cache/redis/redis-master.conf: -------------------------------------------------------------------------------- 1 | bind 0.0.0.0 2 | protected-mode no 3 | port 6379 4 | tcp-backlog 511 5 | timeout 60 6 | tcp-keepalive 300 7 | daemonize yes 8 | supervised no 9 | pidfile /var/run/redis/redis-6379.pid 10 | loglevel verbose 11 | logfile /var/log/redis/redis-6379.log 12 | databases 16 13 | save "" 14 | #save 60 100000 15 | stop-writes-on-bgsave-error yes 16 | rdbcompression yes 17 | rdbchecksum yes 18 | dbfilename dump-6379.rdb 19 | dir /var/lib/redis 20 | slave-serve-stale-data yes 21 | slave-read-only yes 22 | repl-diskless-sync no 23 | repl-diskless-sync-delay 5 24 | repl-disable-tcp-nodelay no 25 | slave-priority 100 26 | requirepass Aniuredis123 27 | rename-command FLUSHALL "" 28 | maxclients 10000 29 | maxmemory 4gb 30 | maxmemory-policy volatile-lru 31 | maxmemory-samples 5 32 | appendonly no 33 | appendfilename "appendonly-6379.aof" 34 | appendfsync everysec 35 | no-appendfsync-on-rewrite no 36 | auto-aof-rewrite-percentage 100 37 | auto-aof-rewrite-min-size 64mb 38 | aof-load-truncated yes 39 | lua-time-limit 5000 40 | slowlog-log-slower-than 10000 41 | slowlog-max-len 128 42 | latency-monitor-threshold 0 43 | notify-keyspace-events "" 44 | hash-max-ziplist-entries 512 45 | hash-max-ziplist-value 64 46 | list-max-ziplist-size -2 47 | list-compress-depth 0 48 | set-max-intset-entries 512 49 | zset-max-ziplist-entries 128 50 | zset-max-ziplist-value 64 51 | hll-sparse-max-bytes 3000 52 | activerehashing yes 53 | client-output-buffer-limit normal 0 0 0 54 | client-output-buffer-limit slave 256mb 64mb 60 55 | client-output-buffer-limit pubsub 32mb 8mb 60 56 | hz 10 57 | aof-rewrite-incremental-fsync yes 
-------------------------------------------------------------------------------- /memory cache/redis/redis-optimization.md: -------------------------------------------------------------------------------- 1 | # redis server优化 2 | 3 | > 从server端,优化redis服务出现的一些问题 4 | 5 | ## redis延时,客户端timeout 6 | 7 | 耗时较长的命令造成阻塞 : 解决方案:redis读写分离 8 | 9 | smembers命令 10 | 11 | members命令用于获取集合全集,时间复杂度为O(N),N为集合中的数量,如果一个集合中保存了千万量级的数据,一次取回也会造成事件处理线程的长时间阻塞; 12 | 13 | 解决方案: 14 | 和sort,keys等命令不一样,smembers可能是线上实时应用场景中使用频率非常高的一个命令,这里分流一招并不适合,我们更多的需要从设计层面来考虑; 15 | 在设计时,我们可以控制集合的数量,将集合数一般保持在500个以内; 16 | 比如原来使用一个键来存储一年的记录,数据量大,我们可以使用12个键来分别保存12个月的记录,或者365个键来保存每一天的记录,将集合的规模控制在可接受的范围; 17 | 18 | 如果不容易将集合划分为多个子集合,而坚持用一个大集合来存储,那么在取集合的时候可以考虑使用SRANDMEMBER key [count];随机返回集合中的指定数量,当然,如果要遍历集合中的所有元素,这个命令就不适合了; 19 | 20 | save命令 21 | 22 | save命令使用事件处理线程进行数据的持久化;当数据量大的时候,会造成线程长时间阻塞,整个redis被block; 23 | save阻塞了事件处理的线程,我们甚至无法使用redis-cli查看当前的系统状态,造成“何时保存结束,目前保存了多少”这样的信息都无从得知; 24 | 25 | 26 | bgsave 27 | 28 | - fork产生的阻塞 29 | 30 | 在redis需要执行耗时的操作时,会新建一个进程来做,比如数据持久化bgsave: 31 | 开启RDB持久化后,当达到持久化的阈值,redis会fork一个新的进程来做持久化,采用了操作系统的copy-on-wirte写时复制策略,子进程与父进程共享Page。如果父进程的Page(每页4K)有修改,父进程自己创建那个Page的副本,不会影响到子进程; 32 | fork新进程时,虽然可共享的数据内容不需要复制,但会复制之前进程空间的内存页表,如果内存空间有40G(考虑每个页表条目消耗 8 个字节),那么页表大小就有80M,这个复制是需要时间的,如果使用虚拟机,特别是Xen虚拟服务器,耗时会更长; -------------------------------------------------------------------------------- /memory cache/redis/redis-sentinel.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/memory cache/redis/redis-sentinel.md -------------------------------------------------------------------------------- /memory cache/redis/redis.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Redis persistent key-value database 3 | After=network.target 4 | 5 | [Service] 6 | ExecStart=/data/redis/bin/redis-server 
/data/redis-cluster/7000/redis.conf 7 | ExecStop=/data/redis/bin/redis-cli -p 7000 -a Aniuredis123 shutdown 8 | User=redis 9 | Group=redis 10 | RuntimeDirectory=redis 11 | RuntimeDirectoryMode=0755 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | 16 | -------------------------------------------------------------------------------- /monitoring/netdata: -------------------------------------------------------------------------------- 1 | # netdata 2 | 3 | - netdata简介 4 | 5 | 构建最佳的实时健康监控和性能故障排除解决方案 6 | 7 | netdata官网:https://www.netdata.cloud/about 8 | netdata文档:https://docs.netdata.cloud/ 9 | 10 | ## netdata安装 11 | 12 | - 参考:https://docs.netdata.cloud/packaging/installer/#install-netdata 13 | 14 | ``` 15 | # 安装依赖包 16 | yum install autoconf automake curl gcc git libmnl-devel libuuid-devel openssl-devel libuv-devel lz4-devel Judy-devel make nc pkgconfig python zlib-devel 17 | # 下载源码 18 | git clone https://github.com/netdata/netdata.git --depth=100 19 | cd netdata 20 | # 执行安装,以root权限执行下面命令 21 | ./netdata-installer.sh (需在线,涉及下载github上依赖包) 22 | ``` 23 | 24 | - 安装成功如下: 25 | 26 | ``` 27 | etdata by default listens on all IPs on port 19999, 28 | so you can access it with: 29 | 30 | http://this.machine.ip:19999/ 31 | 32 | To stop netdata run: 33 | 34 | systemctl stop netdata 35 | 36 | To start netdata run: 37 | 38 | systemctl start netdata 39 | 40 | Uninstall script copied to: /usr/libexec/netdata/netdata-uninstaller.sh 41 | 42 | --- Install netdata updater tool --- 43 | Update script is located at /usr/libexec/netdata/netdata-updater.sh 44 | 45 | --- Check if we must enable/disable the netdata updater --- 46 | You chose *NOT* to enable auto-update, removing any links to the updater from cron (it may have happened if you are reinstalling) 47 | 48 | Did not find any cron entries to remove 49 | --- Wrap up environment set up --- 50 | Preparing .environment file 51 | Setting netdata.tarball.checksum to 'new_installation' 52 | 53 | --- We are done! --- 54 | 55 | ^ 56 | |.-. 
.-. .-. .-. .-. . netdata .-. .- 57 | | '-' '-' '-' '-' '-' is installed and running now! -' '-' 58 | +----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+---> 59 | 60 | enjoy real-time performance and health monitoring... 61 | ``` 62 | 63 | ## 离线安装 64 | 65 | - 参考:https://blog.csdn.net/waplys/article/details/88187438 66 | 67 | ``` 68 | # 外网安装时自动从git拉取,内网可提前下载文件置于http上,或者直接修改脚本,跳过下载步骤,手动上传并读取本地文件即可。 69 | curl -sSL --connect-timeout 10 --retry 3 https://github.com/netdata/go.d.plugin/releases/download/v0.8.0/go.d.plugin-v0.8.0.linux-amd64.tar.gz 70 | curl -sSL --connect-timeout 10 --retry 3 https://github.com/netdata/go.d.plugin/releases/download/v0.8.0/config.tar.gz 71 | ``` 72 | 73 | - 配置本下载 74 | 75 | ``` 76 | # yum install -y httpd 77 | # mkdir -p /var/www/html/netdata/go.d.plugin/releases/download/v0.8.0 78 | # 上传文件至此目录 79 | # 重启Http:systemctl restart httpd 80 | # 测试:wget "http://127.0.0.1:80/netdata/go.d.plugin/releases/download/v0.8.0/go.d.plugin-v0.8.0.linux-amd64" 81 | ``` 82 | 83 | - 修改脚本 84 | 85 | ``` 86 | vim netdata-installer.sh 87 | 更改810和812行的github地址为本地: 88 | github.com换成127.0.0.1 89 | ``` 90 | 91 | 92 | 93 | -------------------------------------------------------------------------------- /monitoring/netdata+prometheus+grafana.md: -------------------------------------------------------------------------------- 1 | # centos7+docker+netdata+prometheus+grafana 2 | 3 | ## 默认docker已安装 4 | 5 | - 参考:https://docs.netdata.cloud/backends/walkthrough/ 6 | 7 | - 创建自定义网络 8 | 9 | docker network create --driver bridge netdata-tutorial 10 | 11 | - 启动容器制定自定义的网络 12 | 13 | docker run -it --name netdata --hostname netdata --network=netdata-tutorial -p 19999:19999 centos:latest '/bin/bash' 14 | 15 | -------------------------------------------------------------------------------- /monitoring/telegraf+influxdb+grafana.md: -------------------------------------------------------------------------------- 1 | # 使用InfluxDB,Grafana和Telegraf监控Docker环境 2 | 3 | ## 
安装Grafana 4 | 5 | - 创建持久存储卷,确保在销毁并重新创建grafana docker以进行升级时,将保留应用的配置 6 | 7 | mkdir /data/grafana-storage 8 | 9 | docker run -d --name=grafana -p 3000:3000 --name=grafana -v /data/grafana-storage:/var/lib/grafana grafana/grafana 10 | 11 | 12 | - 安装插件 13 | 14 | grafana-cli plugins ls | grep -v Restart | grep -v installed | awk '{print $1}' 15 | alexanderzobnin-zabbix-app 16 | grafana-clock-panel 17 | grafana-kubernetes-app 18 | grafana-piechart-panel 19 | grafana-simple-json-datasource 20 | grafana-worldmap-panel 21 | michaeldmoore-annunciator-panel 22 | 23 | 24 | docker run -d --name node-exporter \ 25 | -v "/proc:/host/proc" \ 26 | -v "/sys:/host/sys" \ 27 | -v "/:/rootfs" \ 28 | --net="host" \ 29 | prom/node-exporter:latest \ 30 | -collector.procfs /host/proc \ 31 | -collector.sysfs /host/sys \ 32 | -collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc)($|/)" -------------------------------------------------------------------------------- /monitoring/zabbix/c7-install-zabbix-agent.md: -------------------------------------------------------------------------------- 1 | # CentOS7安装zabbix 2 | 3 | ## 添加zabbix repo 4 | 5 | rpm -i http://repo.zabbix.com/zabbix/3.0/rhel/7/x86_64/zabbix-release-3.0-1.el7.noarch.rpm 6 | yum install zabbix-agent zabbix-get -y 7 | 8 | ansible redis -a "systemctl start zabbix-agent" 9 | ansible redis -a "systemctl enable zabbix-agent" 10 | 11 | 12 | 13 | -------------------------------------------------------------------------------- /mq/rabbitmq/backup-restore.md: -------------------------------------------------------------------------------- 1 | # 备份恢复 2 | 3 | http://www.rabbitmq.com/backup.html 4 | 5 | 每个RabbitMQ节点都有一个数据目录,用于存储驻留在该节点上的所有信息。数据目录包含两种类型的数据:定义(元数据,模式/拓扑)和消息存储数据。 -------------------------------------------------------------------------------- /mq/rabbitmq/c7-install-rabbitmq.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装RabbitMQ 2 | 3 | - 
RabbitMQ文档:http://www.rabbitmq.com/documentation.html 4 | - AMQP介绍:http://www.rabbitmq.com/protocol.html 5 | 6 | 安装RabbitMQ,它是实现AMQP(高级消息队列协议)的消息代理软件 7 | 8 | - 下载安装:https://www.rabbitmq.com/download.html 9 | 10 | ## 添加rabbitmq源 11 | 12 | https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.7.4/rabbitmq-server-3.7.4-1.el7.noarch.rpm # 下载到服务器上 13 | 14 | yum install rabbitmq-server-3.7.4-1.el7.noarch.rpm 15 | 16 | # rabbitmq hosts config 17 | 192.168.0.62 rabbitmq1 18 | 192.168.0.63 rabbitmq2 19 | 192.168.0.64 rabbitmq3 20 | 21 | - 启动rabbitmq-server 22 | 23 | [root@rabbitmq1 ~]# systemctl start rabbitmq-server 24 | [root@rabbitmq1 ~]# systemctl status rabbitmq-server 25 | ● rabbitmq-server.service - RabbitMQ broker 26 | Loaded: loaded (/usr/lib/systemd/system/rabbitmq-server.service; disabled; vendor preset: disabled) 27 | Active: active (running) since Wed 2018-05-09 18:27:54 CST; 9s ago 28 | Process: 2011 ExecStop=/usr/sbin/rabbitmqctl shutdown (code=exited, status=69) 29 | Main PID: 2149 (beam.smp) 30 | Status: "Initialized" 31 | CGroup: /system.slice/rabbitmq-server.service 32 | ├─2149 /usr/lib64/erlang/erts-9.3/bin/beam.smp -W w -A 64 -P 1048576 -t 5000000 -stbt db -zdbbl 1280000 -K true -- -root /usr/lib64/erlang -progname erl -- -home /var/lib/rabbitmq -- ... 33 | ├─2290 /usr/lib64/erlang/erts-9.3/bin/epmd -daemon 34 | ├─2433 erl_child_setup 1024 35 | ├─2462 inet_gethost 4 36 | └─2463 inet_gethost 4 37 | 38 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: ## ## 39 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: ## ## RabbitMQ 3.7.4. Copyright (C) 2007-2018 Pivotal Software, Inc. 40 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: ########## Licensed under the MPL. 
See http://www.rabbitmq.com/ 41 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: ###### ## 42 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: ########## Logs: /var/log/rabbitmq/rabbit@rabbitmq1.log 43 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: /var/log/rabbitmq/rabbit@rabbitmq1_upgrade.log 44 | May 09 18:27:49 rabbitmq1 rabbitmq-server[2149]: Starting broker... 45 | May 09 18:27:54 rabbitmq1 rabbitmq-server[2149]: systemd unit for activation check: "rabbitmq-server.service" 46 | May 09 18:27:54 rabbitmq1 systemd[1]: Started RabbitMQ broker. 47 | May 09 18:27:54 rabbitmq1 rabbitmq-server[2149]: completed with 0 plugins. 48 | 49 | [root@rabbitmq1 ~]# systemctl enable rabbitmq-server # 设置自启 50 | Created symlink from /etc/systemd/system/multi-user.target.wants/rabbitmq-server.service to /usr/lib/systemd/system/rabbitmq-server.service. 51 | 52 | 53 | ## 命令行工具,管理命令行工具 54 | 55 | man rabbitmqctl # 56 | rabbitmq-plugins 57 | rabbitmqadmin # 需单独安装 58 | 59 | rabbitmq-management插件提供了一个基于HTTP的API,用于管理和监控您的RabbitMQ服务器,以及基于浏览器的用户界面和命令行工具rabbitmqadmin。 60 | 61 | rabbitmq-plugins enable rabbitmq_management # 启用 62 | 63 | [root@rabbitmq1 ~]# rabbitmq-plugins enable rabbitmq_management 64 | The following plugins have been configured: 65 | rabbitmq_management 66 | rabbitmq_management_agent 67 | rabbitmq_web_dispatch 68 | Applying plugin configuration to rabbit@rabbitmq1... 69 | The following plugins have been enabled: 70 | rabbitmq_management 71 | rabbitmq_management_agent 72 | rabbitmq_web_dispatch 73 | 74 | started 3 plugins. 
75 | 76 | [root@rabbitmq1 ~]# systemctl restart rabbitmq-server 77 | 78 | - 权限 79 | 80 | management policymaker monitoring administrator 81 | 82 | http://www.rabbitmq.com/configure.html # 参数详解 83 | 84 | 关闭所有连接 85 | 86 | rabbitmqadmin -f tsv -q list connections name | while read conn ; do rabbitmqadmin -q close connection name="${conn}" ; done 87 | 88 | rabbitmq-top是一个插件,可帮助识别消耗大部分内存或调度程序(CPU)时间的运行时进程(“轻量级线程”) -------------------------------------------------------------------------------- /mq/rabbitmq/install-erlang.md: -------------------------------------------------------------------------------- 1 | # CentOS7安装erlang 2 | 3 | 构建大规模可扩展的软实时系统 4 | 5 | - 解决方案:https://packages.erlang-solutions.com/erlang/ 6 | 7 | ## 使用存储库安装 8 | 9 | - 添加存储库条目 10 | 11 | ``` 12 | wget https://packages.erlang-solutions.com/erlang-solutions-1.0-1.noarch.rpm 13 | rpm -Uvh erlang-solutions-1.0-1.noarch.rpm 14 | ``` 15 | 16 | - 添加Erlang解决方案密钥,请执行命令: 17 | 18 | ``` 19 | rpm --import https://packages.erlang-solutions.com/rpm/erlang_solutions.asc 20 | ``` 21 | 22 | - 手动创建源 23 | 24 | ``` 25 | [erlang-solutions] 26 | name=CentOS $releasever - $basearch - Erlang Solutions 27 | baseurl=https://packages.erlang-solutions.com/rpm/centos/$releasever/$basearch 28 | gpgcheck=0 29 | gpgkey=https://packages.erlang-solutions.com/rpm/erlang_solutions.asc 30 | enabled=1 31 | 32 | # 更改为清华大学源 33 | #将里面的baseurl 改为:baseurl=https://mirrors4.tuna.tsinghua.edu.cn/erlang-solutions/centos/7/ 34 | ``` 35 | 36 | - 使用依赖关系添加存储库 37 | 38 | sudo yum install erlang 39 | 40 | -------------------------------------------------------------------------------- /mq/rabbitmq/rabbitmq-erros.md: -------------------------------------------------------------------------------- 1 | # 重启rabbitmq-server服务器,再次登录之前创建的rabbitmq用户报错: 2 | 3 | [warning] <0.694.0> HTTP access denied: user 'rabbitmq' - invalid credentials 4 | 5 | ## 解决: 6 | 7 | vim /usr/lib/rabbitmq/lib/rabbitmq_server-3.7.4/ebin/rabbit.app 8 | 9 | 找到:loopback_users里的<<”guest”>>删除 10 
| 11 | 如下:{loopback_users, []} 12 | 13 | 重启:systemctl restart rabbitmq-server.service # 生产环境不建议这么做 14 | 15 | ## 创建一个管理员用户登录管理界面 16 | 17 | rabbitmqctl add_user rabbitmq Aniumq123. && rabbitmqctl set_user_tags rabbitmq administrator # 集群情况下,创建用户时,每个节点会自动同步 18 | 19 | 20 | -------------------------------------------------------------------------------- /mq/rabbitmq/rabbitmq-plugins.md: -------------------------------------------------------------------------------- 1 | - rabbitmq-top是一个插件,可帮助识别消耗大部分内存或调度程序(CPU)时间的运行时进程(“轻量级线程”) -------------------------------------------------------------------------------- /mq/rabbitmq/rabbitmq-use.md: -------------------------------------------------------------------------------- 1 | # rabbitmq常用操作 2 | 3 | ## 创建rabbitmq 4 | 5 | [root@aniu-saas-1 bin]# rabbitmqctl add_user rabbitmq password 6 | Creating user "rabbitmq" ... 7 | ...done. 8 | [root@aniu-saas-1 bin]# rabbitmqctl add_vhost /my_vhost 9 | Creating vhost "/my_vhost" ... 10 | ...done. 11 | [root@aniu-saas-1 bin]# rabbitmqctl add_vhost /nkm_vhost 12 | Creating vhost "/nkm_vhost" ... 13 | ...done. 14 | [root@aniu-saas-1 bin]# rabbitmqctl set_permissions -p /nkm_vhost rabbitmq ".*" ".*" ".*" 15 | Setting permissions for user "rabbitmq" in vhost "/nkm_vhost" ... 16 | ...done. 17 | 18 | [root@aniu-saas-1 ~]# rabbitmqctl set_permissions -p /nkm_vhost_task rabbitmq ".*" ".*" ".*" 19 | 20 | [root@aniu-saas-1 bin]# rabbitmqctl set_user_tags rabbitmq administrator 21 | Setting tags for user "rabbitmq" to [administrator] ... 22 | ...done. 23 | 24 | ## 创建生产者和消费者账号 25 | 26 | nkm_producer & nkm_consumer 27 | 28 | rabbitmqctl add_user nkm_producer producer_passwd 29 | 30 | 改变密码 31 | 32 | 33 | rabbitmqctl add_user nkm_consumer consumer_passwd 34 | 35 | rabbitmqctl change_password nkm_producer producer_passwd 36 | 37 | [root@aniu-saas-1 ~]# rabbitmqctl list_vhosts 38 | Listing vhosts ...
39 | / 40 | /nkm_vhost 41 | /nkm_vhost_task 42 | 43 | rabbitmqctl set_permissions -p /nkm_vhost nkm_producer ".*" ".*" ".*" 44 | rabbitmqctl set_permissions -p /nkm_vhost_task nkm_producer ".*" ".*" ".*" 45 | 46 | rabbitmqctl set_permissions -p /nkm_vhost nkm_consumer ".*" ".*" ".*" 47 | rabbitmqctl set_permissions -p /nkm_vhost_task nkm_consumer ".*" ".*" ".*" 48 | 49 | -------------------------------------------------------------------------------- /network/bonding-config.md: -------------------------------------------------------------------------------- 1 | # C7配置双网卡 2 | 3 | ## 创建网卡bond0 4 | 5 | ``` 6 | [root@localhost ~]# cd /etc/sysconfig/network-scripts/ 7 | [root@localhost network-scripts]# cat ifcfg-bond0 8 | DEVICE=bond0 9 | NAME=bond0 10 | TYPE=Bond 11 | BONDING_MASTER=yes 12 | IPADDR=192.168.0.123 13 | NETMASK=255.255.255.0 14 | GATEWAY=192.168.0.1 15 | DNS1=114.114.114.114 16 | #PREFIX=24 17 | ONBOOT=yes 18 | BOOTPROTO=none 19 | BONDING_OPTS="mode=5 miimon=100" 20 | ``` 21 | 22 | - 参考:https://www.linuxtechi.com/configure-nic-bonding-in-centos-7-rhel-7/ 23 | 24 | 25 | - 修改网卡em1 26 | ``` 27 | [root@localhost network-scripts]# cat ifcfg-em1 28 | TYPE=Ethernet 29 | BOOTPROTO=none 30 | DEFROUTE=yes 31 | PEERDNS=yes 32 | PEERROUTES=yes 33 | IPV4_FAILURE_FATAL=no 34 | IPV6INIT=yes 35 | IPV6_AUTOCONF=yes 36 | IPV6_DEFROUTE=yes 37 | IPV6_PEERDNS=yes 38 | IPV6_PEERROUTES=yes 39 | IPV6_FAILURE_FATAL=no 40 | NAME=em1 41 | UUID=2314586b-95a0-4d69-a466-1bf2e9da2e17 42 | DEVICE=em1 43 | ONBOOT=yes 44 | 45 | MASTER=bond0 46 | SLAVE=yes 47 | ``` 48 | 49 | 50 | - 修改网卡em2 51 | ``` 52 | [root@localhost network-scripts]# cat ifcfg-em2 53 | TYPE=Ethernet 54 | BOOTPROTO=none 55 | DEFROUTE=yes 56 | PEERDNS=yes 57 | PEERROUTES=yes 58 | IPV4_FAILURE_FATAL=no 59 | IPV6INIT=yes 60 | IPV6_AUTOCONF=yes 61 | IPV6_DEFROUTE=yes 62 | IPV6_PEERDNS=yes 63 | IPV6_PEERROUTES=yes 64 | IPV6_FAILURE_FATAL=no 65 | NAME=em2 66 | UUID=2314586b-95a0-4d69-a466-1bf2e9da2e17 67 | DEVICE=em2 68 | 
ONBOOT=yes 69 | 70 | MASTER=bond0 71 | SLAVE=yes 72 | 73 | ``` 74 | 75 | ## 修改完网卡直接重启机器 76 | 77 | - 查看bonding状态 78 | 79 | 80 | cat /proc/net/bonding/bond0 -------------------------------------------------------------------------------- /network/disbale-ipv6.md: -------------------------------------------------------------------------------- 1 | # 在CentOS/RHEL7上禁用IPv6 2 | 3 | - 在内核模块中禁用IPv6(需要重启) 4 | - 使用sysctl设置禁用IPv6(无需重新启动) 5 | 6 | ## 在内核模块中禁用IPv6 7 | 8 | - 编辑/etc/default/grub并在行GRUB_CMDLINE_LINUX中添加ipv6.disable = 1,如下: 9 | 10 | ``` 11 | # cat /etc/default/grub 12 | GRUB_TIMEOUT=5 13 | GRUB_DEFAULT=saved 14 | GRUB_DISABLE_SUBMENU=true 15 | GRUB_TERMINAL_OUTPUT="console" 16 | GRUB_CMDLINE_LINUX="ipv6.disable=1 crashkernel=auto rhgb quiet" 17 | GRUB_DISABLE_RECOVERY="true" 18 | ``` 19 | 20 | - 重新生成GRUB配置文件并覆盖现有文件 21 | 22 | ``` 23 | grub2-mkconfig -o /boot/grub2/grub.cfg 24 | shutdown -r now 25 | 26 | # 重启之后查看ipv6模块 27 | ip addr show | grep net6 28 | ``` 29 | 30 | ## 使用sysctl设置禁用IPv6 31 | 32 | - 在/etc/sysctl.conf中添加以下行 33 | 34 | ``` 35 | net.ipv6.conf.all.disable_ipv6 = 1 36 | net.ipv6.conf.default.disable_ipv6 = 1 37 | 38 | # 或者执行 39 | sed -i '$ a\net.ipv6.conf.all.disable_ipv6 = 1\nnet.ipv6.conf.default.disable_ipv6 = 1' /etc/sysctl.conf 40 | ``` 41 | 42 | - 要使设置生效,请执行 43 | 44 | ``` 45 | sysctl -p 46 | ``` 47 | 48 | > 确保文件/etc/ssh/sshd_config包含AddressFamily inet行,以避免在使用sysctl方法时破坏SSH Xforwarding 49 | 50 | - 将AddressFamily行添加到sshd_config 51 | 52 | ``` 53 | sed -i '$ a\AddressFamily inet' /etc/ssh/sshd_config 54 | systemctl restart sshd 55 | ``` 56 | 57 | - 参考:https://www.thegeekdiary.com/centos-rhel-7-how-to-disable-ipv6/ -------------------------------------------------------------------------------- /network/ntp/chrony-use.md: -------------------------------------------------------------------------------- 1 | # CentOS7使用Chrony设置时间同步 2 | 3 | yum install chrony -y 4 | 5 | - chrony配置 6 | 7 | [root@aniu-saas ~]# egrep -v "^#|^$" /etc/chrony.conf 8 | server 
0.pool.ntp.org 9 | server 1.pool.ntp.org 10 | server 2.pool.ntp.org 11 | server 3.pool.ntp.org 12 | server times.aliyun.com iburst 13 | server time1.aliyun.com iburst 14 | server time2.aliyun.com iburst 15 | driftfile /var/lib/chrony/drift 16 | makestep 1.0 3 17 | rtcsync 18 | allow 192.168.0.1/24 19 | allow 172.16.1.1/24 20 | allow 192.168.31.1/24 21 | logdir /var/log/chrony 22 | 23 | 24 | # systemctl enable chronyd.service 25 | # systemctl restart chronyd.service 26 | # systemctl status chronyd.service 27 | 28 | - 查看时间同步源状态 29 | 30 | ``` 31 | [root@aniu-saas ~]# chronyc sourcestats -v 32 | 210 Number of sources = 6 33 | .- Number of sample points in measurement set. 34 | / .- Number of residual runs with same sign. 35 | | / .- Length of measurement set (time). 36 | | | / .- Est. clock freq error (ppm). 37 | | | | / .- Est. error in freq. 38 | | | | | / .- Est. offset. 39 | | | | | | | On the -. 40 | | | | | | | samples. \ 41 | | | | | | | | 42 | Name/IP Address NP NR Span Frequency Freq Skew Offset Std Dev 43 | ============================================================================== 44 | time5.aliyun.com 4 3 194 +1.061 103.339 +188us 385us 45 | 85.199.214.100 4 3 195 +3.504 82.387 +31ms 325us 46 | 118.122.35.10 3 3 130 +3.885 2096.379 +4650us 228us 47 | 85.199.214.101 4 3 195 -1.286 124.915 +26ms 412us 48 | 120.25.115.19 7 3 200 +0.859 17.221 +1983us 572us 49 | 203.107.6.88 7 4 200 -2.721 53.192 -1270us 1299us 50 | ``` 51 | 52 | 53 | ## 客户端设置 54 | 55 | ``` 56 | # yum install chrony -y 57 | 编辑 /etc/chrony.conf 文件,修改server NTP_SERVER iburst,更改更准备的NTP_SERVER,第26行 取消注释更改自己网段 allow 192.168.1.1/24 58 | [root@ovirt ~]# systemctl enable chronyd.service 59 | [root@ovirt ~]# systemctl start chronyd.service 60 | [root@ovirt ~]# systemctl status chronyd. 
61 | ``` 62 | 63 | - ops ntp server 64 | 65 | -------------------------------------------------------------------------------- /network/packetfence/PacketFence_Installation_Guide.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/network/packetfence/PacketFence_Installation_Guide.pdf -------------------------------------------------------------------------------- /network/packetfence/learn-packetfence.md: -------------------------------------------------------------------------------- 1 | # PacketFence是一个完全支持,可信任的自由和开源网络访问控制(NAC)解决方案。 2 | 3 | https://packetfence.org/ 4 | 5 | https://packetfence.org/support.html#/documentation -------------------------------------------------------------------------------- /network/route/wireless-route-config.md: -------------------------------------------------------------------------------- 1 | # 无线路由器设置 2 | 3 | - SSID:无线网络的标识符 4 | - 信道:以无线信号作为传输媒体的数据信号传送通道 5 | 6 | -------------------------------------------------------------------------------- /network/switch/Catalyst-3750-X.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/network/switch/Catalyst-3750-X.pdf -------------------------------------------------------------------------------- /network/switch/cisco-3750x.md: -------------------------------------------------------------------------------- 1 | # -------------------------------------------------------------------------------- /network/switch/config-vlan.md: -------------------------------------------------------------------------------- 1 | # 交换机上配置vlan 2 | 3 | Switch> enable //输入账号密码进入特权模式 4 | Switch# configure terminal 5 | Switch(config)#vlan 10 //创建vlan 10 6 | Switch(config-vlan)#exit 7 | 8 | Switch(config)#interface f0/1 //进入f0/1端口 9 | 
Switch(config)#int range f0/3-4 //对端口f0/3.f0/4同时做操作 10 | Switch(config-if)#switchport mode access //端口模式 11 | Switch(config-if)#switchport access vlan 10 //将端口f0/1划分到vlan 10中 12 | 13 | Switch# show vlan //显示vlan信息 14 | 15 | ## 16 | -------------------------------------------------------------------------------- /network/vpn/free-vpn.md: -------------------------------------------------------------------------------- 1 | # https://www.geckoandfly.com/5710/free-vpn-for-windows-mac-os-x-linux-iphone-ubuntu/ -------------------------------------------------------------------------------- /network/vpn/openvpn/c7-install-openvpn.md: -------------------------------------------------------------------------------- 1 | # https://openvpn.net/ -------------------------------------------------------------------------------- /network/vpn/openvpn/learn-openvpn.md: -------------------------------------------------------------------------------- 1 | # https://openvpn.net/ -------------------------------------------------------------------------------- /network/vpn/shadowsocks/c7-install-ss.md: -------------------------------------------------------------------------------- 1 | # 搬瓦工VPS安装ShadowSocks服务 2 | 3 | ## CentOS7 Install ShadowSocks 4 | 5 | - 推荐:wget --no-check-certificate -O shadowsocks-libev-debian.sh https://raw.githubusercontent.com/teddysun/shadowsocks_install/master/shadowsocks-libev-debian.sh 6 | 7 | - 添加epel源 8 | 9 | ``` 10 | yum update -y && yum install python-pip -y 11 | 12 | pip install shadowsocks 13 | 14 | # 编辑配置文件 15 | cat /etc/shadowsocks-libev/config.json 16 | { 17 | "server":"0.0.0.0", 18 | "server_port":8888, 19 | "local_port":1080, 20 | "password":"nrmzRbYCHJvXEPLN", 21 | "timeout":60, 22 | "method":"aes-256-cfb" 23 | } 24 | ``` 25 | 26 | - 启动shadowsocks服务 27 | 28 | ``` 29 | systemctl start shadowsocks-libev 30 | systemctl status shadowsocks-libev 31 | systemctl enable shadowsocks-libev 32 | ``` 33 | 34 | - 防火墙设置 35 | 36 | ``` 37 | firewall-cmd --permanent 
--add-port=8888/tcp 38 | firewall-cmd --permanent --add-port=8888/udp 39 | firewall-cmd --reload 40 | 41 | # ssh端口 42 | firewall-cmd --permanent --add-port=27043/tcp 43 | ``` 44 | 45 | 46 | 47 | ## 报错解决 48 | 49 | - /usr/bin/ss-server: error while loading shared libraries: libmbedcrypto.so.0: cannot open shared object file: No such file or directory 50 | 51 | ``` 52 | cd /usr/lib64 53 | ln -s libmbedcrypto.so.1 libmbedcrypto.so.0 54 | ``` 55 | 56 | - 参考:http://radzhang.iteye.com/blog/2414919 57 | 58 | 59 | 60 | ## 使用repo安装shadsocks 61 | 62 | - https://copr.fedorainfracloud.org/coprs/librehat/shadowsocks/ 63 | - https://github.com/shadowsocks/shadowsocks-libev 64 | 65 | - 配置 66 | 67 | 68 | wget https://copr.fedorainfracloud.org/coprs/librehat/shadowsocks/repo/epel-7/librehat-shadowsocks-epel-7.repo 69 | 70 | yum install gcc gettext autoconf libtool automake make pcre-devel asciidoc xmlto c-ares-devel libev-devel libsodium-devel mbedtls-devel -y 71 | 72 | yum install shadowsocks-libev 73 | 74 | # Edit the configuration file 75 | sudo vim /etc/shadowsocks-libev/config.json 76 | 77 | # Edit the default configuration for debian 78 | sudo vim /etc/default/shadowsocks-libev 79 | 80 | # Start the service 81 | sudo /etc/init.d/shadowsocks-libev start # for sysvinit, or 82 | sudo systemctl start shadowsocks-libev # for systemd 83 | 84 | 85 | cd /usr/lib/node_modules/shadowsocks-manager/ 86 | npm install sqlite3 --save 87 | 88 | = 89 | 90 | -------------------------------------------------------------------------------- /network/vpn/shadowsocks/c7-yum-install-ss.md: -------------------------------------------------------------------------------- 1 | # centos7 repo源安装ss服务并设置代理 2 | 3 | ## 安装shadowsocks-libev 4 | 5 | ``` 6 | wget https://copr.fedorainfracloud.org/coprs/librehat/shadowsocks/repo/epel-7/librehat-shadowsocks-epel-7.repo -P /etc/yum.repos.d/ 7 | yum install -y shadowsocks-libev 8 | ``` 9 | 10 | ## 配置 11 | 12 | ``` 13 | { 14 | "server":"x.x.x.x", # Shadowsocks服务器地址 15 
| "server_port":1035, # Shadowsocks服务器端口 16 | "local_address": "127.0.0.1", # 本地IP 17 | "local_port":1080, # 本地端口 18 | "password":"password", # Shadowsocks连接密码 19 | "timeout":300, # 等待超时时间 20 | "method":"aes-256-cfb", # 加密方式 21 | "fast_open": false, # true或false。开启fast_open以降低延迟,但要求Linux内核在3.7+ 22 | "workers": 1 #工作线程数 23 | } 24 | 25 | # 启动 26 | [root@wanghui shadowsocks-libev]# systemctl start shadowsocks-libev 27 | [root@wanghui shadowsocks-libev]# systemctl enable shadowsocks-libev 28 | Created symlink from /etc/systemd/system/multi-user.target.wants/shadowsocks-libev.service to /usr/lib/systemd/system/shadowsocks-libev.service. 29 | ``` 30 | 31 | - errors 32 | 33 | /usr/bin/ss-server: error while loading shared libraries: libmbedcrypto.so.2: cannot open shared object file: No such file or directory 34 | 35 | ``` 36 | $ yum install mbedtls-devel 37 | $ cd /usr/lib64 38 | $ ls |grep mbed 39 | $ ln -sf libmbedcrypto.so.1 libmbedcrypto.so.0 40 | ``` -------------------------------------------------------------------------------- /network/vpn/shadowsocks/errors.md: -------------------------------------------------------------------------------- 1 | # 启用机器人报错 2 | debconf: unable to initialize frontend: Dialog 3 | debconf: (TERM is not set, so the dialog frontend is not usable.) 4 | debconf: falling back to frontend: Readline 5 | debconf: unable to initialize frontend: Readline 6 | debconf: (Can't locate Term/ReadLine.pm in @INC (you may need to install the Term::ReadLine module) (@INC contains: /etc/perl /usr/local/lib/x86_64-linux-gnu/perl/5.22.1 /usr/local/share/perl/5.22.1 /usr/lib/x86_64-linux-gnu/perl5/5.22 /usr/share/perl5 /usr/lib/x86_64-linux-gnu/perl/5.22 /usr/share/perl/5.22 /usr/local/lib/site_perl /usr/lib/x86_64-linux-gnu/perl-base .) at /usr/share/perl5/Debconf/FrontEnd/Readline.pm line 7, <> line 1.) 
7 | debconf: falling back to frontend: Teletype 8 | dpkg-preconfigure: unable to re-open stdin: 9 | 10 | 11 | ss-manager: error while loading shared libraries: libmbedcrypto.so.2: cannot open shared object file: No such file or directory -------------------------------------------------------------------------------- /network/vpn/shadowsocks/shadowsocks-manager.md: -------------------------------------------------------------------------------- 1 | # shadowsocks manager配置 (源码安装) 2 | 3 | ## 安装依赖 4 | 5 | - Node.js 6.* 6 | 7 | curl --silent --location https://rpm.nodesource.com/setup_6.x | sudo bash - 8 | sudo yum install -y nodejs 9 | 10 | ## To install the Yarn package manager, run: 11 | curl -sL https://dl.yarnpkg.com/rpm/yarn.repo | sudo tee /etc/yum.repos.d/yarn.repo 12 | sudo yum install yarn 13 | 14 | -------------------------------------------------------------------------------- /reading notes/Enterprise Security/opensource-network-security.md: -------------------------------------------------------------------------------- 1 | # 开软软件与网络安全 2 | 3 | ## 安全相关名词记录学习 4 | 5 | CNCERT:国家互联网应急中心 6 | 7 | PDR:主动防御的安全模型,protection保护、detection检测、response响应 8 | 9 | protection:加密、认证、访问控制、防火墙及防病毒 10 | detection:入侵检测、漏洞扫描 11 | response:应急响应机制建立 12 | 13 | P2DR:策略policy是模型的核心,PDR依据安全策略实施 14 | 15 | 互联网企业安全基础组件: 16 | 17 | 运营SOC 18 | 检测:扫描器、HIDS、DLP、NIDS 19 | 防护:WAF、杀毒、准入、网络隔离 20 | 预测:威胁情报 21 | 响应:EDR、NDR 22 | 23 | 商业软件 24 | 25 | 商业:sourcefire 26 | 开源:snort、calmAV、razorback 27 | 28 | ## 业务网纵深防御体系建设 29 | 30 | 抗DDOS、保障业务持续性取 31 | 放后门、防止黑客非法获取服务器权限 32 | 33 | 边界防护:UTM、WAF 34 | 纵深防御:社工 35 | 36 | 安全产品 37 | 38 | 数据库:数据库审计、数据库防火墙 39 | 服务器端:主机IDS、服务器杀毒、内核加固、主机WAF 40 | 网路层:IDS、Web威胁感知、Web审计 41 | 网络边界:防火墙、UTM、WAF、IPS、本地流量清洗设备 42 | 43 | 河防体系:控 44 | 塔防体系: 45 | 46 | 新一代纵深防御:基于预测、检测、协同、防御、响应、溯源 47 | 48 | WAF:Web Application Firewall 是通过执行一系列针对http、https的安全策略为web应用提供保护的一种产品,可防御:SQL注入、XSS、远程命令执行、目录遍历 49 | 50 | 自建WAF系统 51 | 52 | OpenResty是一个基于Nginx与Lua的高性能Web平台 
-------------------------------------------------------------------------------- /reading notes/read-book-list.md: -------------------------------------------------------------------------------- 1 | # 读书笔记 2 | 3 | 1、企业安全建设 -------------------------------------------------------------------------------- /security/jumpserver/C7-install-jumpserver.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装堡垒机jumpserver 2 | 3 | - 官网:http://www.jumpserver.org/ 4 | - Github:https://github.com/jumpserver/ 5 | - 文档:http://docs.jumpserver.org/zh/docs/ 6 | 7 | 8 | 9 | 10 | -------------------------------------------------------------------------------- /security/jumpserver/restart-coco.md: -------------------------------------------------------------------------------- 1 | # 重启coco解决ssh session不释放问题 2 | 3 | ``` 4 | [root@ecs-110 ~]# cd /opt/ 5 | [root@ecs-110 opt]# source /opt/py3/bin/activate 6 | (py3) [root@ecs-110 opt]# cd coco/ 7 | (py3) [root@ecs-110 coco]# ./cocod stop 8 | Stop coco process 9 | (py3) [root@ecs-110 coco]# ./cocod start -d 10 | Start coco process 11 | (py3) [root@ecs-110 coco]# ./cocod status 12 | Coco is running: 10555 13 | ``` -------------------------------------------------------------------------------- /security/jumpserver/update.md: -------------------------------------------------------------------------------- 1 | # jumpserver更新升级 2 | 3 | - 参考文档:http://docs.jumpserver.org/zh/docs/upgrade.html 4 | 5 | ``` 6 | [root@ecs-110 ~]# cd /opt/jumpserver/ 7 | [root@ecs-110 jumpserver]# source .env 8 | (py3) [root@ecs-110 jumpserver]# cat .env 9 | source /opt/py3/bin/activate 10 | (py3) [root@ecs-110 jumpserver]# 11 | 12 | # 新版本更新了自动升级脚本,升级只需要到 utils 目录下执行 sh upgrade.sh 即可 13 | $ git pull 14 | $ pip install -r requirements/requirements.txt # 如果使用其他源下载失败可以使用 -i 参数指定源 15 | $ cd utils && sh make_migrations.sh 16 | 17 | # 1.0.x 升级 1.2.0 需要执行迁移脚本(新版本授权管理更新) 18 | $ sh 2018_04_11_migrate_permissions.sh 19 | 20 | # 
注意笔者git pull的时候遇到了冲突,因为笔者pull本地修改过几个文件,笔者先暂存 21 | git stash 22 | 23 | git stash pop [–index] [stash_id] 24 | git stash pop 恢复最新的进度到工作区。git默认会把工作区和暂存区的改动都恢复到工作区。 25 | git stash pop --index 恢复最新的进度到工作区和暂存区。(尝试将原来暂存区的改动还恢复到暂存区) 26 | git stash pop stash@{1}恢复指定的进度到工作区。stash_id是通过git stash list命令得到的 27 | 28 | # 通过git stash pop命令恢复进度后,会删除当前进度。 29 | ``` -------------------------------------------------------------------------------- /security/kali/Kali渗透测试技术实战.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/security/kali/Kali渗透测试技术实战.pdf -------------------------------------------------------------------------------- /security/kali/kali-learn.md: -------------------------------------------------------------------------------- 1 | # kali文档 2 | 3 | - https://www.kali.org/kali-linux-documentation/ -------------------------------------------------------------------------------- /security/kali/vm-install-kali.md: -------------------------------------------------------------------------------- 1 | # 虚拟机安装kali系统 2 | 3 | - 下载:https://www.kali.org/downloads/ 4 | 5 | ## -------------------------------------------------------------------------------- /security/ssl/free_ssl_create.md: -------------------------------------------------------------------------------- 1 | # 免费ssl证书创建 2 | 3 | ## letsencrypt免费证书创建 4 | 5 | - https://letsencrypt.org/getting-started/ 6 | 7 | - https://certbot.eff.org/ 8 | 9 | - https://certbot.eff.org/lets-encrypt/centosrhel7-apache 10 | 11 | sudo yum install certbot-apache -------------------------------------------------------------------------------- /security/ssl/free_ssl_website.md: -------------------------------------------------------------------------------- 1 | # 免费的ssl证书申请 2 | 3 | https://letsencrypt.org/ 4 | 5 | https://www.comodo.com/ 6 | 7 | https://www.cloudflare.com/ 8 | 9 | https://www.startcomca.com/ 10 | 
 11 | https://www.sslforfree.com/ 12 | 13 | https://sg.godaddy.com/zh/ 14 | 15 | https://www.gogetssl.com/ -------------------------------------------------------------------------------- /shell/cmd_track.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #functions:install cmd_track scripts 4 | #date:2016-04-05 5 | #auther:shaonbean 6 | #set -x 7 | # Check if user is root 8 | if [ $(id -u) -ne "0" ]; then 9 |     echo "Error: You must be root to run this script, please use root to install " 10 |     exit 1 11 | fi 12 | # 13 | cmd_path=/etc/profile.d 14 | log_path=/etc/rsyslog.d 15 | # 16 | cat > $cmd_path/cmd.sh << 'EOF' 17 | ################################################ 18 | # cmd track 19 | ################################################ 20 | # CHANGELOG 21 | #May 22, 2014 JY: * Initial Create 22 | ################################################ 23 | 24 | 25 | declare -x REAL_LOGNAME=`/usr/bin/who am i | cut -d" " -f1` 26 | declare -x REAL_IP=`/usr/bin/who -u am i | awk '{print $NF}'|sed -e 's/[()]//g'` 27 | if [ $USER == root ]; then 28 |     declare -x PROMT="#" 29 | else 30 |     declare -x PROMT="$" 31 | fi 32 | 33 | 34 | #if [ x"$SSH_USER" == x ]; then 35 | #    declare -x REMOTE_USER=UNKNOW 36 | #else 37 | #    declare -x REMOTE_USER=$SSH_USER 38 | #fi 39 | 40 | 41 | LAST_HISTORY="$(history 1)" 42 | __LAST_COMMAND="${LAST_HISTORY/*:[0-9][0-9] /}" 43 | 44 | 45 | declare -x h2l=' 46 |     THIS_HISTORY="$(history 1)" 47 |     __THIS_COMMAND="${THIS_HISTORY/*:[0-9][0-9] /}" 48 |     if [ "$LAST_HISTORY" != "$THIS_HISTORY" ];then 49 |         __LAST_COMMAND="$__THIS_COMMAND" 50 |         LAST_HISTORY="$THIS_HISTORY" 51 |         logger -p local4.notice -i -t $REAL_LOGNAME $REAL_IP "[$USER@$HOSTNAME $PWD]$PROMT $__LAST_COMMAND" 52 |     fi' 53 | trap "$h2l" DEBUG 54 | EOF 55 | ##### 56 | if [ -d $log_path ];then 57 |     echo "$log_path does exist" 58 | else 59 | 
 mkdir -p $log_path 60 | fi 61 | # 62 | cat > $log_path/10-cmd_track.conf << 'EOF' 63 | # Log nc_profile generated CMD log messages to file 64 | local4.notice /var/log/cmd_track.log 65 | #:msg, contains, "REM" /var/log/cmd_track.log 66 | 67 | 68 | # Uncomment the following to stop logging anything that matches the last rule. 69 | # Doing this will stop logging kernel generated UFW log messages to the file 70 | # normally containing kern.* messages (eg, /var/log/kern.log) 71 | & ~ 72 | EOF 73 | # 74 | systemctl restart rsyslog && source /etc/profile 75 | -------------------------------------------------------------------------------- /shell/shell-learn-guide.md: -------------------------------------------------------------------------------- 1 | # shell脚本学习指南 2 | 3 | - 基本I/O重定向 4 | 5 | 标准输入、标准输出、标准错误输出 6 | 7 | 8 | - awk 9 | 10 | awk -F: -v 'OFS=**' '{print $1, $5}' /etc/passwd 11 | 12 | sort -t: -k1,1 /etc/passwd 13 | 14 | sort -t: -k3nr /etc/passwd 15 | 16 | sort -t: -k4n -k3n /etc/passwd 17 | 18 | 19 | uniq -c : 计数唯一的、排序后的记录 20 | 21 | 22 | - 查看文件前n条记录 23 | 24 | head -n n files 25 | head -n files 26 | awk 'FNR <= 3' files 27 | sed -e nq files 28 | sed nq 29 | 30 | - 去空格 31 | 32 | [root@ops-223 ~]# cat /proc/2063/status | grep VmRSS | sed s/[[:space:]]//g 33 | VmRSS:9088kB 34 | 35 | [root@ops-223 ~]# sed -r '/VmRSS/s#\s*##g' /proc/2063/status | grep VmRSS 36 | VmRSS:9088kB 37 | 38 | - 以:分割,截取打印显示第五个字段 39 | 40 | awk -F: '{print $5}' 41 | cut -d: -f5 42 | 43 | $$传递进程ID 44 | 45 | 46 | -------------------------------------------------------------------------------- /shell/tools: -------------------------------------------------------------------------------- 1 | # 最有用的Linux命令行技巧 2 | 3 | ## 将输出显示为表格 4 | 5 | mount | column –t 6 | 7 | 使用-s参数指定分隔符,如下 8 | 9 | cat /etc/passwd | column -t -s: 10 | 11 | ## 重复执行命令,直到命令成功运行 12 | 13 | 使用while true循环,>/dev/null 2>&1 将程序的输出重定向到/dev/null,同时包括标准错误和标准输出。 14 | 15 | ## 按内存使用情况对进程进行排序 16 | ps aux | sort -rnk 4 | head -10 17 | 18 | ## 
按CPU使用率对进程进行排序 19 | ps aux | sort -rnk 3 | head -10 20 | 21 | ## 同时观看多个日志文件 22 | yum install multitail -y 23 | 24 | ## 自动对任何命令回答是或否 25 | 26 | yes | yum update 27 | 28 | ## 记录您的命令行会话 29 | 30 | 如果要记录在shell屏幕上键入的内容,可以使用script命令将所有键入的内容保存到名为typescript的文件中 31 | 32 | ## 用制表符替换空格 33 | cat geeks.txt | tr ' ' '\t' > out.txt 34 | 35 | ## 将文件转换为大写或小写 36 | cat myfile | tr a-z A-Z > output.txt 37 | 38 | ## 强大的Xargs命令 39 | find . -name '*.png' -type f -print | xargs tar -cvzf images.tar.gz 40 | cat urls.txt | xargs wget 41 | ls /etc/*.conf | xargs -i cp {} /home/likegeeks/Desktop/out 42 | 43 | ## 常用的20个linux命令 44 | ls 45 | cp 46 | cd 47 | mv 48 | rm 49 | mkdir 50 | rmdir 51 | chown 52 | chmod 53 | locate 54 | updatedb 55 | date 56 | tar 57 | cat 58 | less 59 | grep 60 | awk 61 | sed 62 | passwd 63 | du 64 | df 65 | 66 | 67 | -------------------------------------------------------------------------------- /storage/glusterfs/c7-install-gluster.md: -------------------------------------------------------------------------------- 1 | # C7安装glusterfs服务 2 | 3 | ## 参考链接 4 | 5 | - https://wiki.centos.org/SpecialInterestGroup/Storage/gluster-Quickstart 6 | - https://docs.gluster.org/en/latest/Quick-Start-Guide/Quickstart/ 7 | - https://blog.csdn.net/wh211212/article/details/79412081 8 | - http://mirror.centos.org/centos/7/storage/x86_64/ 9 | 10 | 11 | ## 实验环境 12 | 13 | | Hostname | Role | IP | 14 | | ------------- |:----------------:| -------------:| 15 | | ovirt1 | glusterfs-server | 192.168.1.131 | 16 | | ovirt2 | glusterfs-server | 192.168.1.132 | 17 | 18 | # 在ovirt1,ovirt2节点上安装GlusterFS服务 19 | 20 | ``` 21 | # ovirt1,ovirt2执行下面操作 22 | [root@ovirt1 ~]# yum -y install centos-release-gluster40 # 当前最新版 23 | [root@ovirt1 ~]# sed -i -e "s/enabled=1/enabled=0/g" /etc/yum.repos.d/CentOS-Gluster-4.0.repo 24 | [root@ovirt1 ~]# yum --enablerepo=centos-gluster40 -y install glusterfs-server 25 | [root@ovirt1 ~]# systemctl start glusterd 26 | [root@ovirt1 ~]# systemctl enable glusterd 27 | ``` 28
| 29 | - 笔者安装glusterfs,提供ovirt的存储,安装建议版本即可 30 | 31 | ``` 32 | [root@ovirt1 yum.repos.d]# yum install centos-release-gluster -y # 使用centos wiki提供的安装源 默认3.12版本 33 | [root@ovirt1 ~]# yum install glusterfs-server -y 34 | [root@ovirt1 ~]# systemctl start glusterd 35 | [root@ovirt1 ~]# systemctl enable glusterd 36 | ``` 37 | 38 | ## Gluster 4.0:分布式配置(Distributed) 39 | 40 | - 在所有节点上为GlusterFS卷创建一个目录 41 | 42 | ``` 43 | [root@node01 ~]# mkdir /glusterfs/distributed 44 | ``` 45 | 46 | - 在节点上执行下面操作,配置群集 47 | 48 | ``` 49 | # probe the node 50 | [root@node01 ~]# gluster peer probe node02 51 | peer probe: success. 52 | # show status 53 | [root@node01 ~]# gluster peer status 54 | Number of Peers: 1 55 | 56 | Hostname: node02 57 | Uuid: 3d0b5871-6dc3-42d3-9818-6a43ef035b9f 58 | State: Peer in Cluster (Connected) 59 | 60 | # create volume 61 | [root@node01 ~]# gluster volume create vol_distributed transport tcp \ 62 | node01:/glusterfs/distributed \ 63 | node02:/glusterfs/distributed 64 | volume create: vol_distributed: success: please start the volume to access data 65 | # start volume 66 | [root@node01 ~]# gluster volume start vol_distributed 67 | volume start: vol_distributed: success 68 | # show volume info 69 | [root@node01 ~]# gluster volume info 70 | 71 | Volume Name: vol_distributed 72 | Type: Distribute 73 | Volume ID: 67b86c76-6505-4fec-be35-3e79ef7c2dc1 74 | Status: Started 75 | Snapshot Count: 0 76 | Number of Bricks: 2 77 | Transport-type: tcp 78 | Bricks: 79 | Brick1: node01:/glusterfs/distributed 80 | Brick2: node02:/glusterfs/distributed 81 | Options Reconfigured: 82 | transport.address-family: inet 83 | nfs.disable: on 84 | ``` 85 | 86 | ## Gluster 4.0:复制配置 87 | 88 | - 在所有节点上为GlusterFS卷创建一个目录 89 | 90 | ``` 91 | [root@node01 ~]# mkdir /glusterfs/replica 92 | ``` 93 | 94 | - 在节点上执行下面操作,配置群集 95 | 96 | ``` 97 | # probe the node 98 | [root@node01 ~]# gluster peer probe node02 99 | peer probe: success. 
100 | # show status 101 | [root@node01 ~]# gluster peer status 102 | Number of Peers: 1 103 | 104 | Hostname: node02 105 | Uuid: 0e1917c8-7cd0-4578-af65-46a1489b10b1 106 | State: Peer in Cluster (Connected) 107 | 108 | # create volume 109 | [root@node01 ~]# gluster volume create vol_replica replica 2 transport tcp \ 110 | node01:/glusterfs/replica \ 111 | node02:/glusterfs/replica 112 | volume create: vol_replica: success: please start the volume to access data 113 | # start volume 114 | [root@node01 ~]# gluster volume start vol_replica 115 | volume start: vol_replica: success 116 | # show volume info 117 | [root@node01 ~]# gluster volume info 118 | 119 | Volume Name: vol_replica 120 | Type: Replicate 121 | Volume ID: f496e38f-9238-4721-92bf-78fbaace7758 122 | Status: Started 123 | Snapshot Count: 0 124 | Number of Bricks: 1 x 2 = 2 125 | Transport-type: tcp 126 | Bricks: 127 | Brick1: node01:/glusterfs/replica 128 | Brick2: node02:/glusterfs/replica 129 | Options Reconfigured: 130 | transport.address-family: inet 131 | nfs.disable: on 132 | performance.client-io-threads: off 133 | ``` 134 | 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /storage/glusterfs/glusterfs-command-use.md: -------------------------------------------------------------------------------- 1 | # glusterfs常用命令 2 | 3 | 一、创建卷 4 | 5 | gluster volume create 6 | 7 | 例子:gluster volume create gv0 replica 2 server1:/data/brick1/gv0 server2:/data/brick1/gv0 8 | 9 | 报错 10 | 11 | 发现报错了,这是因为我们创建的brick在系统盘,这个在gluster的默认情况下是不允许的,生产环境下也尽可能的与系统盘分开,如果必须这样请使用force 12 | 13 | gluster volume create gv0 replica 2 server1:/data/brick1/gv0 server2:/data/brick1/gv0 force 14 | 15 | 二、启动卷 16 | 17 | gluster volume start 18 | 19 | 例子:gluster volume start gv0 20 | 21 | 三、停止卷 22 | 23 | gluster volume stop 24 | 25 | 例子:gluster volume stop gv0 26 | 27 | 四、 删除卷 28 | 29 | gluster volume delete 30 | 31 | 例子:gluster volume delete gv0 32 | 33 | 五、添加节点 34 | 35 | gluster peer 
probe 36 | 37 | 例子:gluster peer probe server3 38 | 39 | 六、删除节点 40 | 41 | gluster peer detach (移除节点,需要提前将该节点上的brick移除) 42 | 43 | 例子:gluster peer detach server3 44 | 45 | 七、查看卷 46 | 47 | gluster volume list /*列出集群中的所有卷*/ 48 | 49 | gluster volume info [all] /*查看集群中的卷信息*/ 50 | gluster volume status [all] /*查看集群中的卷状态*/ 51 | 52 | 八、 更改卷类型 53 | 54 | 1.需要先卸载挂载的目录 55 | 56 | umount /mnt 57 | 58 | 2.停止卷 59 | 60 | 61 | 62 | 3.更改卷的类型 63 | 64 | 语法:gluster volume set test-volume config.transport tcp,rdma OR tcp OR rdma 65 | 66 | 例子: 67 | 68 | 69 | 70 | 九、重新均衡卷 71 | 72 | 语法:gluster volume rebalance fix-layout start 73 | 74 | 例子:gluster volume rebalance test-volume fix-layout start 75 | 76 | 77 | 78 | 十、收缩卷 79 | 80 | 1.开始收缩 81 | 82 | gluster volume remove-brick gv0 server3:/data/brick1/gv0 server4:/data/brick1/gv0 start 83 | 84 | 85 | 86 | 2.查看迁移状态 87 | 88 | gluster volume remove-brick gv0 server3:/data/brick1/gv0 server4:/data/brick1/gv0 status 89 | 90 | 91 | 92 | 3.迁移完成提交 93 | 94 | gluster volume remove-brick gv0 server3:/data/brick1/gv0 server4:/data/brick1/gv0 commit 95 | 96 | 97 | 98 | 九.GlusterFS的配额 99 | 100 | GlusterFS目录限额,允许你根据目录或卷配置限制磁盘空间的使用量 101 | 102 | 1.开启限额 103 | 104 | gluster volume quota VolumeName enable 105 | 106 | 2.关闭限额 107 | 108 | gluster volume quota VolumeName disable 109 | 110 | 3.设置或替换磁盘限制 111 | 112 | 3.1.根据卷限制 113 | 114 | 115 | 116 | gluster volume quota VolumeName limit-usage / size 117 | 例子:gluster volume quota gv0 limit-usage / 10GB 118 | 3.2.根据目录限制 119 | gluster volume quota VolumeName limit-usage DirectoryPath LimitSize -------------------------------------------------------------------------------- /storage/glusterfs/glusterfs-for-ovirt.md: -------------------------------------------------------------------------------- 1 | # 创建ovirt使用的glusterfs存储 2 | 3 | - 参考:https://blog.csdn.net/wh211212/article/details/79412081 4 | 5 | ## 节点准备C7 6 | 7 | - server1 server2 8 | 9 | yum -y install centos-release-gluster 10 | 11 | yum -y install glusterfs-server 12 | 
13 | # server1,server2 执行 14 | lvcreate -n glusterfs -L 50G centos 15 | 16 | 17 | lvcreate -n glusterfs1 -L 500G centos 18 | 19 | mkfs.xfs -i size=512 /dev/mapper/centos-glusterfs 20 | mkdir -p /data/brick1 21 | echo '/dev/mapper/centos-glusterfs /data/brick1 xfs defaults 1 2' >> /etc/fstab 22 | 23 | echo '/dev/mapper/centos-glusterfs1 /data/brick xfs defaults 1 2' >> /etc/fstab 24 | 25 | mount -a && mount 26 | 27 | yum install glusterfs-server -y && systemctl start glusterd && systemctl enable glusterd && systemctl status glusterd 28 | 29 | ## 配置可信池 30 | 31 | - server1上执行 32 | 33 | gluster peer probe server2 34 | 35 | gluster peer status # 检查server1,server2上的对等状态 36 | 37 | ## 设置GlusterFS卷 38 | 39 | - 在server1和server2上执行 40 | 41 | mkdir -p /data/brick1/gv0 # data 42 | mkdir -p /data/brick1/gv1 # iso 43 | mkdir -p /data/brick1/gv2 # export 44 | 45 | 46 | mkdir -p /data/brick1/{gv0,gv1,gv2} 47 | 48 | chown vdsm:kvm /data/brick1 -R # 为了ovirt挂载使用,不然添加glusterfs的时候报没有权限 49 | 50 | - 任意节点上执行:ovirt1 ovirt2 51 | 52 | gluster volume create gv0 replica 2 ovirt1:/data/brick1/gv0 ovirt2:/data/brick1/gv0 53 | gluster volume create gv1 replica 2 ovirt1:/data/brick1/gv1 ovirt2:/data/brick1/gv1 54 | gluster volume create gv2 replica 2 ovirt1:/data/brick1/gv2 ovirt2:/data/brick1/gv2 55 | 56 | 57 | gluster volume create gv0 replica 2 ovirt3:/data/brick1/gv0 ovirt4:/data/brick1/gv0 58 | 59 | gluster volume create gv0 replica 2 ovirt9:/data/brick1/gv0 ovirt10:/data/brick1/gv0 force 60 | 61 | 62 | - force强制创建gvo 63 | 64 | gluster volume create gv0 replica 2 server1:/data/brick1/gv0 server2:/data/brick1/gv0 65 | gluster volume create gv1 replica 2 server1:/data/brick1/gv1 server2:/data/brick1/gv1 66 | gluster volume create gv2 replica 2 server1:/data/brick1/gv2 server2:/data/brick1/gv2 67 | 68 | gluster volume start gv0 69 | gluster volume start gv1 70 | gluster volume start gv2 71 | 72 | gluster volume create gv0 replica 2 ovirt6:/data/brick1/gv0 ovirt7:/data/brick1/gv0 73 | 74 | - 
确认volume“已启动” 75 | 76 | gluster volume info 77 | 78 | ## ovirt engine所在机器执行 79 | 80 | yum -y install glusterfs glusterfs-fuse 81 | 82 | 然后到控制台添加存储域 -------------------------------------------------------------------------------- /storage/nfs/c7-install-nfs.md: -------------------------------------------------------------------------------- 1 | # C7配置NFS服务 2 | 3 | ## 实验环境 4 | 5 | > 笔者借用安装ovirt的实验环境 6 | 7 | | Hostname | Role | IP | 8 | | ------------- |:----------------:| -------------:| 9 | | ovirt.aniu.so | NFS server | 192.168.1.115 | 10 | | aniu-ops | NFS client | 192.168.1.134 | 11 | 12 | 13 | ## 配置NFS 服务 14 | 15 | ``` 16 | [root@ovirt ~]# yum -y install nfs-utils 17 | [root@ovirt ~]# vi /etc/idmapd.conf 18 | # 第五行 5: 取消注释更改域名后缀 19 | Domain = aniu.so 20 | [root@ovirt ~]# vi /etc/exports 21 | # 配置nfs挂载目录详情 22 | /home 192.168.1.1/24(rw,no_root_squash) 23 | 24 | [root@ovirt ~]# systemctl start rpcbind nfs-server 25 | [root@ovirt ~]# systemctl enable rpcbind nfs-server 26 | ``` 27 | 28 | - 防火墙开启情况: 29 | ``` 30 | [root@ovirt ~]# firewall-cmd --add-service=nfs --permanent 31 | success 32 | [root@ovirt ~]# firewall-cmd --reload 33 | success 34 | ``` 35 | 36 | - 针对ovirt,配置nfs并创建挂载目录如下: 37 | 38 | 39 | mkdir -p /ovirt/{data,iso,export} 40 | 41 | # 设置nfs挂载的目录及权限,编辑/etc/exports文件,添加下面内容: 42 | 43 | /ovirt/data *(rw) 44 | /ovirt/iso *(rw) 45 | /ovirt/export *(rw) 46 | 47 | chown vdsm:kvm /ovirt/{data,iso,export} 48 | 49 | systemctl restart rpcbind nfs-server 50 | 51 | [root@ovirt ~]# showmount -e localhost 52 | Export list for localhost: 53 | /ovirt/export * 54 | /ovirt/iso * 55 | /ovirt/data * -------------------------------------------------------------------------------- /storage/nfs/nfs-for-ovirt.md: -------------------------------------------------------------------------------- 1 | # 配置nfs作为ovirt镜像存储 2 | 3 | - 针对ovirt,配置nfs并创建挂载目录如下: 4 | 5 | yum -y install nfs-utils && systemctl start rpcbind nfs-server && systemctl enable rpcbind nfs-server 6 | 7 | mkdir -p 
/ovirt/{data,iso,export} 8 | 9 | # 设置nfs挂载的目录及权限,编辑/etc/exports文件,添加下面内容: 10 | 11 | /ovirt/data *(rw) 12 | /ovirt/iso *(rw) 13 | /ovirt/export *(rw) 14 | 15 | chown vdsm:kvm /ovirt/{data,iso,export} 16 | 17 | systemctl restart rpcbind nfs-server 18 | 19 | [root@ovirt ~]# showmount -e localhost 20 | Export list for localhost: 21 | /ovirt/export * 22 | /ovirt/iso * 23 | /ovirt/data * -------------------------------------------------------------------------------- /tooling/accet/opendevops.md: -------------------------------------------------------------------------------- 1 | # install opendevops 2 | 3 | mkdir -p /opt/codo/ && cd /opt/codo/ -------------------------------------------------------------------------------- /tooling/apm/free-apm.md: -------------------------------------------------------------------------------- 1 | # Application Performance Management 2 | 3 | APM = Application Performance Management,应用性能管理,对企业系统即时监控以实现对应用程序性能管理和故障管理的系统化的解决方案。 4 | 5 | google的Dapper 6 | twitter的Zipkin 7 | 淘宝的鹰眼(EgleEye) 8 | 大众点评的CAT -------------------------------------------------------------------------------- /tooling/apm/pinpoint/pinpoint-docker.md: -------------------------------------------------------------------------------- 1 | # https://github.com/naver/pinpoint-docker -------------------------------------------------------------------------------- /tooling/apm/pinpoint/pinpoint-overview.md: -------------------------------------------------------------------------------- 1 | 2 | 现在的服务通常由许多不同的组件组成,彼此之间进行通信以及对外部服务进行API调用。每个事务如何执行通常都是作为一个黑匣子。精确追踪这些组件之间的事务流,并提供清晰的视图来识别问题区域和潜在的瓶颈。 3 | 4 | - ServerMap - 通过查看组件的互连方式,了解任何分布式系统的拓扑结构。点击一个节点会显示组件的详细信息,例如其当前状态和事务计数。 5 | - Realtime Active Thread Chart 实时活动线程图 - 实时监控应用程序内的活动线程。 6 | - Request/Response Scatter Chart 请求/响应散点图 - 随时间推移可视化请求计数和响应模式,以发现潜在问题。通过拖动图表可以选择事务处理以获取更多详细信息。 7 | 8 | - CallStack - 获得分布式环境中每个事务的代码级可见性,在单个视图中识别瓶颈和故障点。 9 | 10 | - Inspector - 查看有关应用程序的其他详细信息,如CPU使用情况,内存/垃圾收集,TPS和JVM参数。 11 | 12 | 13 | ## 架构 14 | 15 
| http://naver.github.io/pinpoint/images/pinpoint-architecture.png 16 | 17 | ## 技术实现 18 | 19 | - http://naver.github.io/pinpoint/techdetail.html 20 | 21 | 分布式事务跟踪,模仿Google的Dapper 22 | 23 | ## 插件 24 | 25 | - http://naver.github.io/pinpoint/additionalplugins.html -------------------------------------------------------------------------------- /tooling/apm/pinpoint/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Checking network configuration -------------------------------------------------------------------------------- /tooling/elk/c7-install-elasticsearch.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装Elastic Stack 6 2 | 3 | - 实验环境: 4 | 5 | | Hostname | Role | IP | 6 | | ------------- |:----------------:| -------------:| 7 | | elastic1 | elasticsearch | 192.168.0.116 | 8 | | elastic2 | elasticsearch | 192.168.0.117 | 9 | | elastic3 | elasticsearch | 192.168.0.118 | 10 | 11 | ## 安装Elasticsearch 12 | 13 | - 安装Java 14 | 15 | ```bash 16 | yum -y install java-1.8.0-openjdk java-1.8.0-openjdk-devel 17 | # 18 | [root@localhost ~]# cat > /etc/profile.d/java8.sh < export JAVA_HOME=$(dirname $(dirname $(readlink $(readlink $(which javac))))) 20 | > export PATH=\$PATH:\$JAVA_HOME/bin 21 | > export CLASSPATH=.:\$JAVA_HOME/jre/lib:\$JAVA_HOME/lib:\$JAVA_HOME/lib/tools.jar 22 | > EOF 23 | [root@localhost ~]# source /etc/profile.d/java8.sh 24 | [root@localhost ~]# alternatives --config java 25 | 26 | There is 1 program that provides 'java'. 
27 | 28 | Selection Command 29 | ----------------------------------------------- 30 | *+ 1 java-1.8.0-openjdk.x86_64 (/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.161-0.b14.el7_4.x86_64/jre/bin/java) 31 | 32 | Enter to keep the current selection[+], or type selection number: 1 33 | ``` 34 | 35 | - 安装运行Elasticsearch 36 | 37 | ```bash 38 | [root@localhost ~]# vi /etc/yum.repos.d/elasticsearch.repo 39 | # create new 40 | [elasticsearch-6.x] 41 | name=Elasticsearch repository for 6.x packages 42 | baseurl=https://artifacts.elastic.co/packages/6.x/yum 43 | gpgcheck=1 44 | gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch 45 | enabled=1 46 | autorefresh=1 47 | type=rpm-md 48 | # 49 | [root@localhost ~]# yum -y install elasticsearch 50 | [root@localhost ~]# systemctl start elasticsearch 51 | [root@localhost ~]# systemctl enable elasticsearch 52 | #确认启动 53 | [root@localhost ~]# curl http://127.0.0.1:9200 54 | { 55 | "name" : "HRs6cxK", 56 | "cluster_name" : "elasticsearch", 57 | "cluster_uuid" : "QEdJdSZ6QBay9lUV3FEH5A", 58 | "version" : { 59 | "number" : "6.2.2", 60 | "build_hash" : "10b1edd", 61 | "build_date" : "2018-02-16T19:01:30.685723Z", 62 | "build_snapshot" : false, 63 | "lucene_version" : "7.2.1", 64 | "minimum_wire_compatibility_version" : "5.6.0", 65 | "minimum_index_compatibility_version" : "5.0.0" 66 | }, 67 | "tagline" : "You Know, for Search" 68 | } 69 | ``` 70 | 71 | -------------------------------------------------------------------------------- /tooling/itil/itop.md: -------------------------------------------------------------------------------- 1 | # 开源ITIL管理软件iTop 2.5-2.6安装 2 | 3 | - 参考:https://www.cnblogs.com/vcdx/p/9477911.html 4 | 5 | ## 环境准备 6 | 7 | 环境说明 : 8 | 9 | 操作系统centos 7、itop版本 iTop-2.5.0-3935、数据库:mariadb 10 | 11 | iTop 2.5只支持PHP5.6以上版本,本例安装的是php72w版本 12 | 13 | 1、下载链接: 14 | 15 | 2.5.1版本:https://jaist.dl.sourceforge.net/project/itop/itop/2.5.1/iTop-2.5.1-4123.zip 这里找到一个汉化比较全的包:https://pan.baidu.com/s/1u-UEJC84Xm2svKdNcSf0iQ 
安装完成后替换掉/env-production/dictionaries/zh-cn.dict.php 16 | 17 | 2.6.0版本:https://nchc.dl.sourceforge.net/project/itop/itop/2.6.0/iTop-2.6.0-4294.zip 中文比较全,不需要换字典 18 | 19 | 2.6.1版本:https://jaist.dl.sourceforge.net/project/itop/itop/2.6.1/iTop-2.6.1-4463.zip 中文比较全,不需要换字典 20 | 21 | 22 | ## 安装lamp及相关软件 23 | 24 | -------------------------------------------------------------------------------- /tooling/jvm/jvm-config.md: -------------------------------------------------------------------------------- 1 | # jvm optimization 2 | 3 | ## 4 | 5 | -Xms=4G -Xmx=4G 6 | 7 | xms 和 xmx设置一样大,避免伸缩区对内存的频繁操作 8 | 9 | xss 线程栈 默认1m 10 | 11 | 12 | 动态对象数组: 队列/栈/list 13 | 14 | heap 堆内存:线程共享 15 | 16 | 年轻代:eden区/存活区 17 | -XX:+PrintPrintGCDetails 18 | -XX:SurrvivorRatio=8 19 | 20 | minor GC: 年轻代GC算法:copying 21 | 22 | BTP bump-the-pointer 不适合多线程 23 | TLAB thread-thread-local-thread-local-alloction-buffers: 多线程 会产生内存碎片 24 | 25 | 26 | eden 伊甸园区:新创建的对象,最大空间 27 | S0:from space 28 | S1:to space 29 | 30 | 负责对象回收,对象晋级(老年代/永久代) 31 | 32 | 8:1:1 33 | 34 | 老年代: 35 | 36 | 37 | -------------------------------------------------------------------------------- /version control/gitlab/Admin-tools.md: -------------------------------------------------------------------------------- 1 | # Raketasks:备份,维护,自动Webhook设置和项目导入。 2 | 3 | https://docs.gitlab.com/ce/raketasks/README.html 4 | 5 | # 备份和还原:备份和还原您的GitLab实例 6 | 7 | https://docs.gitlab.com/ce/raketasks/backup_restore.html 8 | 9 | # 通过电子邮件回复:允许用户通过回复通知电子邮件来评论问题并合并请求。 10 | 11 | https://docs.gitlab.com/ce/administration/reply_by_email.html 12 | 13 | # 定期Git存储库检查 14 | 15 | https://docs.gitlab.com/ce/administration/repository_checks.html 16 | 17 | # 管理用于存储存储库的路径 18 | 19 | https://docs.gitlab.com/ce/administration/repository_storage_paths.html 20 | 21 | # 可以做些什么来进一步保护您的GitLab实例。 22 | 23 | https://docs.gitlab.com/ce/security/README.html 24 | 25 | # 用户,项目和密钥更改时的通知 26 | 27 | https://docs.gitlab.com/ce/system_hooks/system_hooks.html 28 | 
-------------------------------------------------------------------------------- /version control/gitlab/Administrator-documentation.md: -------------------------------------------------------------------------------- 1 | # Gitlab 管理员设置 2 | 3 | > 了解如何管理您的GitLab实例。普通用户无权访问GitLab管理工具和设置。 4 | 5 | ## 安装,更新,升级,迁移 6 | 7 | - 安装(http://docs.gitlab.com/ce/install/README.html):来源的要求,目录结构和安装。 8 | - Mattermost: 安装Gitlab集成Mattermost(https://about.mattermost.com/) 9 | - 将GitLab CI迁移到CE/EE:如果您有一个旧的GitLab安装(早于8.0),请按照本指南将现有的GitLab CI数据迁移到GitLab CE/EE(http://docs.gitlab.com/ce/migrate_ci_to_ce/README.html)。 10 | - 重新启动GitLab(http://docs.gitlab.com/ce/administration/restart_gitlab.html):了解如何重新启动GitLab及其组件。 11 | - 更新:升级安装的更新指南 12 | 13 | ## 用户权限 14 | 15 | - 访问限制(http://docs.gitlab.com/ce/user/admin_area/settings/visibility_and_access_controls.html#enabled-git-access-protocols):定义可以使用哪个Git访问协议来与GitLab通信 16 | - 身份验证/授权(http://docs.gitlab.com/ce/topics/authentication/index.html#gitlab-administrators):强制执行2FA,使用LDAP,SAML,CAS和其他Omniauth提供程序配置外部身份验证 17 | 18 | ## GitLab管理员超级权限 19 | 20 | - 容器注册表:使用GitLab配置Docker注册表 21 | - 自定义的Git挂钩:基于文件系统自定义Git钩子 22 | - Git LFS配置:了解如何在GitLab下使用LFS 23 | - GitLab页面配置:配置GitLab页面。 24 | - 高可用性:配置多个服务器进行缩放或高可用性。 25 | - 用户队列随着时间的推移查看用户活动 26 | - Web终端:提供终端访问GitLab内的环境 27 | - GitLab CI 28 | - CI管理设置:定义最大工件大小和到期时间 29 | 30 | ## 集成 31 | 32 | - 集成(http://docs.gitlab.com/ce/integration/README.html):如何与JIRA,Redmine,Twitter等系统集成。 33 | - Koding:设置Koding与GitLab配合使用 34 | - Mattermost(http://docs.gitlab.com/ce/user/project/integrations/mattermost.html):设置GitLab与Mattermost 35 | 36 | ## 监控 37 | 38 | - 使用InfluxDB进行GitLab性能监控:配置GitLab和InfluxDB以测量性能指标。 39 | - 使用Prometheus进行GitLab性能监控:配置GitLab和Prometheus来衡量性能指标。 40 | - 监视正常运行时间:使用运行状况检查端点检查服务器状态 41 | 42 | ## 性能 43 | 44 | - 管家:保持您的Git仓库整齐,快速 45 | - 运维:保持GitLab的运行 46 | - 轮询:配置GitLab UI轮询更新的频率 47 | - 请求分析:获取关于缓慢请求的详细配置文件 48 | 49 | ## 定制 50 | 51 | - 调整实例的时区:自定义GitLab的默认时区 52 | - 环境变量:支持的环境变量,可用于覆盖其默认值以配置GitLab 53 | - 
标题标识:更改整个页面和电子邮件标题上的徽标 54 | - 问题关闭模式:自定义如何从提交消息中关闭问题 55 | - 欢迎消息:将自定义欢迎消息添加到登录页面 56 | 57 | ## 管理工具 58 | 59 | - Raketasks:备份,维护,自动Webhook设置和项目导入 60 | - 备份和还原:备份和还原您的GitLab实例 61 | - 通过电子邮件回复:允许用户通过回复通知电子邮件来评论问题并合并请求 62 | - 存储库检查:定期Git存储库检查 63 | - 安全性:了解如何进一步保护您的GitLab实例 64 | - 系统挂钩:用户,项目和密钥更改时的通知 65 | 66 | ## 故障排除 67 | 68 | - 调试提示:出现问题时调试问题的提示 69 | - 日志系统:从查找日志分析问题 70 | - Sidekiq疑难解答:当Sidekiq出现挂起并且未处理作业时调试 71 | 72 | ## 贡献者文档 73 | 74 | - 开发者:所有风格的指导和解释如何贡献 75 | - 法律:供应商许可协议 76 | - 写文档:贡献GitLab文档 77 | -------------------------------------------------------------------------------- /version control/gitlab/GitLab Prometheus.md: -------------------------------------------------------------------------------- 1 | # 2 | 3 | https://prometheus.io/ 4 | 5 | 6 | # 7 | -------------------------------------------------------------------------------- /version control/gitlab/Initial-OmniAuth.md: -------------------------------------------------------------------------------- 1 | # Omnibus GitLab文档 2 | 3 | - https://docs.gitlab.com/omnibus/ 4 | 5 | ## 安装 6 | 7 | > GitLab可以通过各种方式进行安装。检查安装方法的概述。 8 | 9 | ## 安装要求 10 | 11 | > 安装GitLab之前,请确保检查需求文档,其中包含有关受支持操作系统的有用信息以及硬件要求(https://docs.gitlab.com/ce/install/requirements.html)。 12 | 13 | ## 安装方式 14 | 15 | - 推荐使用Omnibus软件包(https://about.gitlab.com/downloads/)进行安装 - 使用我们的官方deb/rpm存储库安装GitLab。 16 | - 源码包安装(https://docs.gitlab.com/ce/install/installation.html) 17 | - Docker - 使用Docker安装GitLab 18 | - 在Kubernetes中安装(https://docs.gitlab.com/ce/install/kubernetes/index.html) - 使用我们的官方Helm Chart Repository将GitLab安装到Kubernetes群集中。 19 | 20 | ## 数据库要求 21 | 22 | > 虽然推荐的数据库是PostgreSQL,但是我们提供了使用MySQL安装GitLab的信息。检查MySQL文档以获取更多信息(https://docs.gitlab.com/ce/install/database_mysql.html)。 23 | 24 | 25 | ## 维护 26 | 27 | - 获取服务状态 28 | - 开始和停止 29 | - 调用耙子任务 30 | - 启动Rails控制台会话 31 | 32 | ## 33 | 34 | ## SMTP设置 35 | 36 | > 如果您希望通过SMTP服务器而不是通过Sendmail发送应用程序电子邮件,请将以下配置信息添加到/etc/gitlab/gitlab.rb并运行gitlab-ctl reconfigure。 37 | 38 | 
https://docs.gitlab.com/omnibus/settings/smtp.html 39 | 40 | gitlab_rails['smtp_enable'] = true 41 | gitlab_rails['smtp_address'] = "smtp-mail.outlook.com" 42 | gitlab_rails['smtp_port'] = 587 43 | gitlab_rails['smtp_user_name'] = "username@outlook.com" 44 | gitlab_rails['smtp_password'] = "password" 45 | gitlab_rails['smtp_domain'] = "smtp-mail.outlook.com" 46 | gitlab_rails['smtp_authentication'] = "login" 47 | gitlab_rails['smtp_enable_starttls_auto'] = true 48 | gitlab_rails['smtp_openssl_verify_mode'] = 'peer' 49 | 50 | ## 通过电子邮件回复 51 | 52 | GitLab可以设置为允许用户通过回复通知电子邮件来评论问题并合并请求。 53 | 54 | 55 | gitlab_rails['incoming_email_enabled'] = true 56 | gitlab_rails['incoming_email_address'] = "incoming+%{key}@gitlab.example.com" 57 | gitlab_rails['incoming_email_email'] = "incoming" 58 | gitlab_rails['incoming_email_password'] = "[REDACTED]" 59 | gitlab_rails['incoming_email_host'] = "gitlab.example.com" 60 | gitlab_rails['incoming_email_port'] = 143 61 | gitlab_rails['incoming_email_ssl'] = false 62 | gitlab_rails['incoming_email_start_tls'] = false 63 | gitlab_rails['incoming_email_mailbox_name'] = "inbox" 64 | gitlab_rails['incoming_email_idle_timeout'] = 60 65 | 66 | 67 | ## 设置Postfix通过电子邮件回复 68 | -------------------------------------------------------------------------------- /version control/gitlab/centos-install-gitlab.md: -------------------------------------------------------------------------------- 1 | # 安装gitlab 2 | https://about.gitlab.com/installation/#centos-6 3 | 4 | sudo yum install -y curl openssh-server openssh-clients cronie 5 | sudo lokkit -s http -s ssh 6 | 7 | sudo yum install postfix 8 | sudo service postfix start 9 | sudo chkconfig postfix on 10 | 11 | curl -sS https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.rpm.sh | sudo bash 12 | sudo yum install -y gitlab-ce 13 | 14 | sudo gitlab-ctl reconfigure 15 | 16 | # 访问 17 | 18 | http://192.168.0.222/users/password/edit?reset_password_token=ehgPfKd56C92NgYHaNPi 19 | 20 | 设置新密码 
fangbuxia..0 21 | 22 | - 重置密码 23 | 24 | https://docs.gitlab.com/ce/security/reset_root_password.html 25 | 26 | ## 编辑gitlab.rb 27 | 28 | - 更改域名 29 | vim /etc/gitlab/gitlab.rb 30 | 31 | 编辑:external_url '你的网址' 32 | 例如:external_url 'http://gitlab.aniu.so' 33 | 编辑完成后,再sudo gitlab-ctl reconfigure一下,使配置生效 34 | 35 | GitLab设置IP或者域名有两个配置文件: 36 | 1、GitLab的:/home/git/gitlab/config/gitlab.yml 37 | 2、GitLab-Shell的:/home/git/gitlab-shell/config.yml 38 | 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /version control/gitlab/gitlab-bakcup-restore.md: -------------------------------------------------------------------------------- 1 | 1、gitlab备份与恢复 2 | > 参考:https://docs.gitlab.com/ce/raketasks/backup_restore.html 3 | 创建系统备份 4 | sudo gitlab-rake gitlab:backup:create 5 | 备份文件存在/var/opt/gitlab/backups,可编辑/etc/gitlab/gitlab.rb修改 6 | 源码安装使用下面命令备份 7 | sudo -u git -H bundle exec rake gitlab:backup:create RAILS_ENV=production 8 | docker安装使用下面命令备份 9 | docker exec -t gitlab-rake gitlab:backup:create 10 | 将备份上传到远程存储(暂无) 11 | 将备份文件存储到本地 12 | gitlab_rails['backup_upload_connection'] = { 13 | :provider => 'Local', 14 | :local_root => '/mnt/backups' 15 | } 16 | # The directory inside the mounted folder to copy backups to 17 | # Use '.' 
to store them in the root directory 18 | gitlab_rails['backup_upload_remote_directory'] = 'gitlab_backups' 19 | 备份档案权限 20 | # In /etc/gitlab/gitlab.rb, for omnibus packages 21 | gitlab_rails['backup_archive_permissions'] = 0644 # Makes the backup archives world-readable 22 | 备份配置文件 23 | 针对(Omnibus)备份:/etc/gitlab/gitlab.rb 、/etc/gitlab/gitlab-secrets.json 24 | 添加定时备份 25 | 每天凌晨两点备份 26 | 0 2 * * * /opt/gitlab/bin/gitlab-rake gitlab:backup:create CRON=1 27 | 保留备份30天,单位秒 28 | gitlab_rails['backup_keep_time'] = 2592000 29 | 30 | 2、Omnibus安装恢复 31 | 确认备份档案放到gitlab.rb定义的备份目录(默认/var/opt/gitlab/backups) 32 | sudo cp 1504793137_2017_09_07_9.5.3_gitlab_backup.tar /var/opt/gitlab/backups/ 33 | 停止连接数据库的进程 34 | sudo gitlab-ctl stop unicorn 35 | sudo gitlab-ctl stop sidekiq 36 | # Verify 37 | sudo gitlab-ctl status 38 | 恢复备份档案,指定时间戳 39 | sudo gitlab-rake gitlab:backup:restore BACKUP=1504793137_2017_09_07_9.5.3 40 | > Unpacking backup ... tar: 1504796591_2017_09_07_9.5.3_gitlab_backup.tar: Cannot open: Permission denied 41 | chown git:git 1504796591_2017_09_07_9.5.3_gitlab_backup.tar 42 | 重启并检查恢复情况 43 | sudo gitlab-ctl restart 44 | sudo gitlab-rake gitlab:check SANITIZE=true 45 | 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /version control/gitlab/gitlab-ce.md: -------------------------------------------------------------------------------- 1 | # GitLab社区版 2 | 3 | > GitLab社区版(CE)是一个开源产品,自主托管,可免费使用。 4 | 5 | - GitLab访问量最大的文档的捷径: 6 | 7 | https://docs.gitlab.com/ce/ci/quick_start/README.html 8 | https://docs.gitlab.com/ce/ci/yaml/README.html 9 | https://docs.gitlab.com/ce/ci/docker/using_docker_images.html 10 | https://docs.gitlab.com/ce/api/README.html 11 | https://docs.gitlab.com/ce/ssh/README.html 12 | https://docs.gitlab.com/ce/user/project/pages/index.html 13 | 14 | ## 开始使用GitLab 15 | 16 | - GitLab基础知识:开始在您的命令行和GitLab上工作。 17 | - GitLab工作流程:使用最好的GitLab工作流程来增强您的工作流程 18 | - 参见GitLab工作流程 - 概述 19 | - GitLab 
Markdown:GitLab的高级格式化系统(GitLab Flavored Markdown) 20 | - GitLab斜杠命令:通常通过单击GitLab UI中的按钮或下拉列表来完成的问题或合并请求的常见操作的文本快捷方式。 21 | 22 | ## 23 | -------------------------------------------------------------------------------- /version control/gitlab/gitlab-ci-multi-runner: -------------------------------------------------------------------------------- 1 | # 官方文档 2 | https://gitlab.com/gitlab-org/gitlab-ci-multi-runner 3 | 4 | ## 安装 gitlab-ci-multi-runner 5 | curl -L https://packages.gitlab.com/install/repositories/runner/gitlab-ci-multi-runner/script.rpm.sh | sudo bash 6 | yum install gitlab-ci-multi-runner 7 | 8 | # 9 | gitlab-ci-multi-runner list 10 | 11 | # error 12 | 13 | FATAL: Please specify user that will run gitlab-runner service 14 | -------------------------------------------------------------------------------- /version control/gitlab/gitlab-errors.md: -------------------------------------------------------------------------------- 1 | # gitlab升级之后502 2 | 3 | 升级之前备份gitlab.rb,然后重新生成配置文件 sudo gitlab-ctl reconfigure 4 | 5 | 再 sudo gitlab-ctl restart 6 | 7 | 8 | -------------------------------------------------------------------------------- /version control/gitlab/gitlab-grant-set.md: -------------------------------------------------------------------------------- 1 | # Gitlab上如何给指定人员在指定项目里设置指定权限,给项目设置保护 2 | 3 | ## 权限设置 4 | 5 | project -> settings --> add member --> select user --> choose permission 6 | 7 | ## 8 | -------------------------------------------------------------------------------- /version control/gitlab/gitlab-intergration-jira.md: -------------------------------------------------------------------------------- 1 | # 私有gitlab集成私有jira 2 | 3 | ## 配置jira 4 | 5 | > 为了在gitlab启用jira服务,需要在jira配置一个用户,然后在gitlab配置正确的值,需要在JIRA中创建一个用户,该用户可以访问需要与GitLab集成的所有项目。以管理员身份登录到您的JIRA实例,然后在管理下登录到用户管理并创建新的用户。 6 | 7 | - 创建名为gitlab的jira用户并加到JIRA-developers组中 8 | -------------------------------------------------------------------------------- /version 
control/gitlab/gitlab-nginx-ssl.md: -------------------------------------------------------------------------------- 1 | # 配置私有gitlab https 2 | 3 | > 参考:http://docs.gitlab.com/omnibus/settings/nginx.html 4 | 5 | ## 在gitlab服务器创建证书对 6 | 7 | sudo mkdir -p /etc/gitlab/ssl 8 | sudo chmod 700 /etc/gitlab/ssl 9 | cd /etc/gitlab/ssl # 使用在线生成的公钥和私钥颁证书 10 | 11 | openssl x509 -in gitlab.aniu.so.csr -out gitlab.aniu.so.crt -req -signkey gitlab.aniu.so.key -days 3650 12 | 13 | sudo cp gitlab.aniu.so.key gitlab.aniu.so.crt /etc/gitlab/ssl/ 14 | 15 | openssl x509 -in server.csr -out server.crt -req -signkey server.key -days 3650 16 | 17 | # 使用生成的证书对gitlab进行配置,同时重定向http到https 18 | 19 | ``` 20 | external_url "https://gitlab.aniu.so" 21 | nginx['redirect_http_to_https'] = true 22 | nginx['ssl_certificate'] = "/etc/gitlab/ssl/gitlab.example.crt" 23 | nginx['ssl_certificate_key'] = "/etc/gitlab/ssl/gitlab.example.com.key" 24 | ``` 25 | 26 | 27 | # csr 28 | 29 | -----BEGIN CERTIFICATE REQUEST----- 30 | MIICsTCCAZkCAQAwbDEXMBUGA1UEAwwOZ2l0bGFiLmFuaXUuc28xDTALBgNVBAoM 31 | BGFuaXUxDzANBgNVBAsMBnl1bndlaTERMA8GA1UECAwIU2hhbmdoYWkxETAPBgNV 32 | BAcMCFNoYW5naGFpMQswCQYDVQQGEwJDTjCCASIwDQYJKoZIhvcNAQEBBQADggEP 33 | ADCCAQoCggEBAOip1aYXmaB7dSgd7QNpcC9y+50jk0vo6D/QQzJXnab0O48+t9Qz 34 | TqtQXQxi848PCV+Co3+4InT/Z0sybWSkEd7AF6TpsNjQ8CA/fFfq5Sioz8rPFGRW 35 | xAxcfQtDXdk6sLG7MNfyYUSsyzQJ/2J4vHSopppu9KR0Fq8H/3OUHG4g7GfG7saE 36 | 1XN7xt/JDNkl07H/6+pwdMi3ch+GcNFr9yJgUvpVKpSPL0/xacSCig7fQP28PLAM 37 | 5KmGy1yxD0KvHHVQRyjjhAhawRIunGjoFWmgh6sbjMXj0AXcY6T/Vam0Mm4VsEBo 38 | NqBeZFwtOvv3vLR29JNJwtGGxq3HMuof9VMCAwEAAaAAMA0GCSqGSIb3DQEBCwUA 39 | A4IBAQApGHEec+c6soj5wAaLPYOq8i0nDe1ka4vEBZ76t94DWl70w4DIHRLXFLUn 40 | UZMA3jv+awwVhrOEQl5DonqVq/09KMDYWqENVPO5Ayt+3ou0cp9oQQiFUk1ypR3u 41 | TjFPcuqeHlhnPrPwfMi4QzV5BYOFP2/lttw2gzOfW6XgZmJXA47eb7o10tNSNzlF 42 | Czf1onO+DHeraeYvMbaWrPWhGgHoh8rG3r/kN7QcfStHRE0H+iRLU/5frgA6HyVZ 43 | pVreKvls1fgEisHgZlI0jFjpHueDMKdO4pKcmtTvf46qxbANtMbsqgFXVnrh3gpN 44 | 
Q9PCwY1NE3ai5mkarBn2NOWDhfQc 45 | -----END CERTIFICATE REQUEST----- 46 | 47 | 48 | # 证书私钥: 49 | 50 | -----BEGIN PRIVATE KEY----- 51 | MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDoqdWmF5mge3Uo 52 | He0DaXAvcvudI5NL6Og/0EMyV52m9DuPPrfUM06rUF0MYvOPDwlfgqN/uCJ0/2dL 53 | Mm1kpBHewBek6bDY0PAgP3xX6uUoqM/KzxRkVsQMXH0LQ13ZOrCxuzDX8mFErMs0 54 | Cf9ieLx0qKaabvSkdBavB/9zlBxuIOxnxu7GhNVze8bfyQzZJdOx/+vqcHTIt3If 55 | hnDRa/ciYFL6VSqUjy9P8WnEgooO30D9vDywDOSphstcsQ9Crxx1UEco44QIWsES 56 | Lpxo6BVpoIerG4zF49AF3GOk/1WptDJuFbBAaDagXmRcLTr797y0dvSTScLRhsat 57 | xzLqH/VTAgMBAAECggEAbP7TNgIsWEA9/FM4q7aDddcaBN2brZ4o32xDbpwZIROd 58 | q1eoauK2Yg896AWbMFPNAk+DJNwwWDsmOtYe5VzvejjnWryXupr3Q0Q6jj1eqZOb 59 | 9NjaJr93DItvkQ04NAIIsNqO4TuNUczZTUG5wHnrX4N6uTJtrXUtA0Zt58llIDmr 60 | kiSLyxAe//oyMUk4Dvy5S7h9hJS/FioKACfYCU4enwfixZbYkz2lxCKD/zD6gcRG 61 | obBc7WKR27yQs9gb6Y+3pYLpq8LXhR7xb9quXR5EogP+/d4vciGJ1x6JOQ0vXo9c 62 | r+FvHskKfMU5tEwIHJScrltgiYqULhieRmhh35clQQKBgQD4kNUUb26m0oRrv4+E 63 | ktmwnMZnu3Q7mdLE5iSEusbBM3BUIOOaNDMIK01qT9BTQX6AYwatVToRyc1E/sYi 64 | Su+HlqUxLPsR0vnuXS+BS2sJ2apIC+N0Uixvxjndc/CfVWZ97JBT5cUyAxRc7+5d 65 | Qs7R8w0qQafvoXiJh2JsJ5dzMQKBgQDvnz6U61/O3zFWqTF3ZXWK0sVWg7dNa7gz 66 | ADvQoSRRsKWR+SpWaIvW0IW2XY7Lu7pNL98lWEJyCALuTsTJ5mQq+xjS2xFcAzHo 67 | WKy9OoAMizy7PtpUYm5qsv/TdP5uuQowQF12XhIfXW//Ti4emy+tWESPoHgRyRJr 68 | 6zhdXXLnwwKBgCR2qqt6xVK3ozFjQpyCJmkgNoLVHvH3WNIFqOnHtIx3DU1qHblr 69 | Wukh4RNtbfQosXQIEtyumfwuDGzIqywwrf7H/KfAH3y35G4xQVzIQYWKZs524AEa 70 | ZOZov+har7vP/V8PqwSDum/hv8T6dY8807Y833uJcidXGqWiNLAFBtShAoGANxIs 71 | OeGWlV7qYfZkrgIdb5hdTjLbb+mv3djR5nMCe9vTUacoOc+xQ9+Mu4rpBJ3ShWbZ 72 | LCYKr1Z0Bf0IcTaIEvC+lcdPSAxb7gBjQuItB7UAcjBR0U8F/XPCJAEhcKUKWGiS 73 | kl+wXpEE6nI3W0VjQb2llDTXI49IspDO1XZisC0CgYBLstxc+hvgEUxeMPR5o4ji 74 | sbff+JkndFhR3k777o5jKeUGdLdHh9yxAeyV8WLeJ0xY6N3BCcgPeQybofNXfWd5 75 | x+59c6E40wbTZTkUe5bU1YnnW58nfWeCyQyH2tPJxOjKtlHXp6fAglNo9HwI8YLc 76 | XARW0URUQOyywIqnWogfgQ== 77 | -----END PRIVATE KEY----- 78 | 
-------------------------------------------------------------------------------- /version control/gitlab/gitlab-use-turorial.md: -------------------------------------------------------------------------------- 1 | # 使用Gitlab创建git项目 2 | 3 | - 登录gitlab系统,访问自己的gitlab.example.com,然后使用gitlab用户,登录 4 | 5 | ![这里写图片描述](http://img.blog.csdn.net/20170710181939663?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 6 | 7 | - 第一次登录需要重新修改默认登录密码 8 | 9 | ![这里写图片描述](http://img.blog.csdn.net/20170710182130674?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 10 | 11 | - 登录成功,看到下面界面,新建一个项目test,描述为test for git,然后点击创建如下图: 12 | 13 | ![这里写图片描述](http://img.blog.csdn.net/20170710182222482?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 14 | 15 | - 如上图可以看到项目已经创建成功 16 | 17 | ![这里写图片描述](http://img.blog.csdn.net/20170710182248092?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 18 | 19 | - 根据提示设置用户信息 20 | 21 | ``` 22 | git config --global user.name "test" 23 | git config --global user.email "test@aniu.tv" 24 | ``` 25 | 26 | - 然后把本地的项目(test)上传到gitlab上 27 | 28 | ![这里写图片描述](http://img.blog.csdn.net/20170710182326576?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 29 | ![这里写图片描述](http://img.blog.csdn.net/20170710182342407?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 30 | 31 | - 在命令行界面依次执行下面界面 32 | 33 | ``` 34 | git init 35 | git remote add origin https://test:testpassword@gitlab.example.com/test/test.git 36 | # test:testpassword 换成自己的用户名和密码 37 | git add . 
38 | git commit -m "Initial commit" 39 | git push -u origin master 40 | ``` 41 | ![这里写图片描述](http://img.blog.csdn.net/20170710182553614?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 42 | 43 | - 上面的报错通过下面设置避免 44 | 45 | > C:\Program Files (x86)\Git\etc\gitconfig # 编辑gitconfig,路径可以不一致,可以直接搜索到gitconfig文件,然后编辑,添加下面三行 46 | 47 | ``` 48 | [http] 49 | sslVerify = false 50 | sslCAinfo = /bin/curl-ca-bundle.crt 51 | ``` 52 | 53 | - 然后重新git push,可以看到已经成功啦。 54 | 55 | ![这里写图片描述](http://img.blog.csdn.net/20170710182730136?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 56 | 57 | - 返回到gitlab UI界面,可以看到本地的test项目已经添加到gitlab仓库。 58 | 59 | ![这里写图片描述](http://img.blog.csdn.net/20170710182833318?watermark/2/text/aHR0cDovL2Jsb2cuY3Nkbi5uZXQvd2gyMTEyMTI=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70/gravity/SouthEast) 60 | 61 | > 到这里通过使用gitlab,上传本地的项目到gitlab系统。 62 | -------------------------------------------------------------------------------- /version control/gitlab/open-remote-connect-postgresql-with-omnibus-gitlab.md: -------------------------------------------------------------------------------- 1 | # Omnibus-GitLab 配置 PostgreSQL 开启远程访问 2 | 3 | - https://gitlab.com/gitlab-org/omnibus-gitlab/blob/master/doc/settings/database.md 4 | 5 | # 如何访问GitLab默认安装的PostgreSQL数据库 6 | 7 | > http://www.huangzhongzhang.cn/ru-he-fang-wen-gitlab-mo-ren-an-zhuang-de-postgresql.html 8 | 9 | # 新建用户并赋权 10 | 11 | sudo psql -h /var/opt/gitlab/postgresql -d gitlabhq_production; 12 | -------------------------------------------------------------------------------- /version control/svn/c7-install-svn.md: -------------------------------------------------------------------------------- 1 | # CentOS7安装使用svn 2 | 3 | ## 安装版本控制工具Subversion 4 | 5 | - 安装Subversion 6 | 7 | ``` 8 | [root@vm-06 ~]# yum -y install subversion 9 | ``` 10 | 11 | - 
创建一个存储库。例如,笔者这里创建“/var/svn/repos/project” 12 | 13 | ``` 14 | [root@vm-06 ~]# mkdir -p /var/svn/repos/project 15 | [root@vm-06 ~]# svnadmin create /var/svn/repos/project 16 | [root@vm-06 ~]# svn mkdir file:///var/svn/repos/project/trunk -m "create" 17 | 18 | Committed revision 1. 19 | [root@vm-06 ~]# svn mkdir file:///var/svn/repos/project/branches -m "create" 20 | 21 | Committed revision 2. 22 | [root@vm-06 ~]# svn mkdir file:///var/svn/repos/project/tags -m "create" 23 | 24 | Committed revision 3. 25 | ``` 26 | 27 | - 如果存在一些开发文件,可以将其导入到存储库。例如,将“/home/project”下的文件导入上面的存储库。 28 | 29 | ``` 30 | [root@vm-06 ~]# svn import /home/project file:///var/svn/repos/project/trunk -m "initial import" 31 | Adding /home/project/index.html 32 | Adding /home/project/readme.md 33 | Adding /home/project/index.php 34 | 35 | Committed revision 4. 36 | [root@vm-06 ~]# svn list file:///var/svn/repos/project/trunk 37 | index.html 38 | index.php 39 | readme.md 40 | ``` 41 | 42 | - 可以从其他客户端访问以运行svnserve。 svnserve监听3690 / TCP, 43 | 44 | ``` 45 | # vm-05上测试 46 | # yum -y install subversion 47 | [root@vm-05 ~]# svn list svn://192.168.1.124/repos/project 48 | branches/ 49 | tags/ 50 | trunk/ 51 | [root@vm-05 ~]# svn checkout svn://192.168.1.124/repos/project 52 | A project/tags 53 | A project/trunk 54 | A project/trunk/index.html 55 | A project/trunk/readme.md 56 | A project/trunk/index.php 57 | A project/branches 58 | Checked out revision 4. 
59 | ``` 60 | 61 | - 可以在没有svnserve的情况下使用SSH进行访问 62 | 63 | ``` 64 | # vm-06上停止systemctl stop svnserve 65 | [root@vm-05 ~]# svn list svn+ssh://root@192.168.1.124/var/svn/repos/project 66 | root@192.168.1.124's password: 67 | branches/ 68 | tags/ 69 | trunk/ 70 | ``` 71 | 72 | ## 权限控制 73 | 74 | - 在“/var/svn/repos/project”上设置访问控制 75 | 76 | ``` 77 | [root@dlp ~]# vi /var/svn/repos/project/conf/svnserve.conf 78 | # 第9行添加: 79 | [general] 80 | anon-access = none 81 | # 28行取消注释 82 | password-db = passwd 83 | # 35行取消注释 84 | authz-db = authz 85 | 86 | [root@dlp ~]# vi /var/svn/repos/project/conf/passwd 87 | # 针对这个项目定义用户名和密码 88 | [users] 89 | redhat = password 90 | cent = password 91 | fedora = password 92 | [root@dlp ~]# vi /var/svn/repos/project/conf/authz 93 | # 定义用户组和用户 94 | [groups] 95 | developer = redhat,cent 96 | # 允许developer组对项目进行读写 97 | [/] 98 | @developer = rw 99 | # 允许fedora用户对trunk文件夹只读 100 | [/trunk] 101 | fedora = r 102 | ``` 103 | 104 | - 确保从客户端访问 105 | 106 | ``` 107 | # vm-05上操作 108 | [root@vm-05 ~]# cd /root/project/trunk 109 | [root@vm-05 trunk]# svn --username yunwei list svn://192.168.1.124/repos/project/trunk 110 | Authentication realm: 7f142303-72cf-45df-9261-ae3f50870db9 111 | Password for 'yunwei': 112 | 113 | ----------------------------------------------------------------------- 114 | ATTENTION! Your password for authentication realm: 115 | 116 | 7f142303-72cf-45df-9261-ae3f50870db9 117 | 118 | can only be stored to disk unencrypted! You are advised to configure 119 | your system so that Subversion can store passwords encrypted, if 120 | possible. See the documentation for details. 121 | 122 | You can avoid future appearances of this warning by setting the value 123 | of the 'store-plaintext-passwords' option to either 'yes' or 'no' in 124 | '/root/.subversion/servers'. 125 | ----------------------------------------------------------------------- 126 | Store password unencrypted (yes/no)? 
yes 127 | index.html 128 | index.php 129 | readme.md 130 | ``` 131 | -------------------------------------------------------------------------------- /virtualization/cloudstack/c7-install-cloudstack.md: -------------------------------------------------------------------------------- 1 | # CloudStack学习使用 2 | 3 | ## CentOS 7安装CloudStack 4 | 5 | - 官方文档:http://docs.cloudstack.apache.org/en/latest/ 6 | 7 | 8 | ## 安装概述 9 | 10 | ### 安装步骤 11 | 12 | Choosing a Deployment Architecture 13 | Choosing a Hypervisor: Supported Features 14 | Network Setup 15 | Storage Setup 16 | Best Practices -------------------------------------------------------------------------------- /virtualization/kvm/kvm-guests-migrate-ovirt.md: -------------------------------------------------------------------------------- 1 | # Centos7下通过virt-v2v将libvirt管理下的vm迁移至ovirt 2 | 3 | ## 实验环境 4 | 5 | > CentOS7、ovirt4.2 6 | 7 | - 实验背景: 8 | 9 | > 生产环境一台kvm宿主机:192.168.0.201,上面有三台vm,需要将一台vm迁移到ovirt虚拟化平台 10 | 11 | - ovirt环境 12 | 13 | ``` 14 | ovirt-engine: 192.168.0.210 15 | ovirt-node: 192.168.0.123 16 | ovirt-storage: 192.168.0.124(nfs) :export路径为:192.168.0.124:/ovirt/export 17 | ``` 18 | 19 | ## 实验步骤 20 | 21 | - 1、在ovirt管理界面配置export存储域,如下: 22 | 23 | - 2、查看kvm宿主机中的vm,选择一台vm进行迁移,笔者以kvm-2为例: 24 | 25 | ``` 26 | [root@sh-kvm-2 ~]# virsh list --all 27 | Id Name State 28 | ---------------------------------------------------- 29 | 1 kvm-1 running 30 | 2 kvm-3 running 31 | 6 kvm-2 running 32 | # 迁移虚拟机前需要先shutdown 33 | [root@sh-kvm-2 ~]# virsh shutdown kvm-2 34 | ``` 35 | 36 | - 3、迁移报错 37 | 38 | ``` 39 | # This could be because the volume doesn't exist, or because the volume exists but is not contained in a storage pool 40 | ``` 41 | -------------------------------------------------------------------------------- /virtualization/ovirt/Red_Hat_Enterprise_Virtualization-3.4-Administration_Guide-zh-CN.pdf: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wh211212/centos7-tutorial/c422d63fff1f96af3160db205df940ade3cca863/virtualization/ovirt/Red_Hat_Enterprise_Virtualization-3.4-Administration_Guide-zh-CN.pdf -------------------------------------------------------------------------------- /virtualization/ovirt/create-vm.md: -------------------------------------------------------------------------------- 1 | # 使用ovirt创建vm注意事项 2 | ## 创建windows 10 3 | 4 | - 笔者实验环境win10,安装virt-viewer:https://www.spice-space.org/download.html 5 | 6 | - https://docs.fedoraproject.org/quick-docs/en-US/creating-windows-virtual-machines-using-virtio-drivers.html 7 | 8 | ## 需要安装virtio 驱动,不然安装系统的时候会提示找不到硬盘 9 | 10 | 11 | - 参考:https://www.youtube.com/watch?v=ljhpX446o0Q 12 | 13 | - https://www.ovirt.org/documentation/vmm-guide/chap-Installing_Windows_Virtual_Machines/ 14 | 15 | - https://www.ovirt.org/develop/release-management/features/integration/windows-guest-tools/ 16 | 17 | 18 | yum -y install http://resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm & yum install ovirt-guest-tools-iso 19 | 20 | 21 | - https://www.ovirt.org/documentation/vmm-guide/chap-Installing_Linux_Virtual_Machines/ 22 | 23 | 24 | 25 | ## C6虚拟机 安装ovirt agent 26 | 27 | 28 | yum install ovirt-guest-agent -y 29 | 30 | service ovirt-guest-agent start 31 | chkconfig ovirt-guest-agent on 32 | 33 | [root@localhost ~]# service ovirt-guest-agent status 34 | ovirt-guest-agent (pid 3389) is running... 
35 | 36 | # 37 | service qemu-ga start 38 | chkconfig qemu-ga on -------------------------------------------------------------------------------- /virtualization/ovirt/install-ovirt-command.md: -------------------------------------------------------------------------------- 1 | # C7安装ovirt 4.2 步骤 2 | 3 | yum -y install http://resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm 4 | 5 | yum -y install ovirt-engine 6 | 7 | engine-setup 8 | 9 | 10 | ## ovirt node 节点添加 11 | 12 | 配置hosts 13 | 14 | yum -y install qemu-kvm libvirt virt-install bridge-utils 15 | 16 | systemctl start libvirtd && systemctl enable libvirtd 17 | 18 | yum -y install http://resources.ovirt.org/pub/yum-repo/ovirt-release42.rpm && yum -y install vdsm 19 | 20 | 21 | ## vm添加硬盘 挂载到data目录 22 | 23 | 24 | 25 | 26 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-clone-vm-use.md: -------------------------------------------------------------------------------- 1 | # ovirt使用克隆vm 2 | 3 | ## 更改网络配置 4 | 5 | [root@localhost ~]# cat /etc/sysconfig/network-scripts/ifcfg-eth0 6 | TYPE=Ethernet 7 | BOOTPROTO=static 8 | DEFROUTE=yes 9 | PEERDNS=yes 10 | PEERROUTES=yes 11 | IPV4_FAILURE_FATAL=no 12 | IPV6INIT=yes 13 | IPV6_AUTOCONF=yes 14 | IPV6_DEFROUTE=yes 15 | IPV6_PEERDNS=yes 16 | IPV6_PEERROUTES=yes 17 | IPV6_FAILURE_FATAL=no 18 | IPV6_ADDR_GEN_MODE=stable-privacy 19 | NAME=eth0 20 | DEVICE=eth0 21 | ONBOOT=yes 22 | IPADDR=192.168.0.21 23 | NETMASK=255.255.255.0 24 | GATEWAY=192.168.0.1 25 | DNS1=114.114.114.114 26 | 27 | - 克隆虚拟机前删除UUID,如果克隆前没删除,需要删除克隆vm上网卡配置中的UUID,重启启动网络 28 | 29 | 30 | ## ovirt克隆centos6 网络配置 31 | 32 | 配置完ip,rm -rf /etc/udev/rules.d/70-persistent-net.rules # 然后reboot重启即可 -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-guset-agent.md: -------------------------------------------------------------------------------- 1 | # ovirt guest agent 安装 2 | 3 | ## CentOS 4 | 5 | - 
如何在CentOS中安装guest代理(CentOS7) 6 | 7 | ``` 8 | # 通过终端使用YUM来安装oVirt Guest Tools 9 | sudo yum install centos-release-ovirt42 -y && sudo yum install ovirt-guest-agent-common -y 10 | systemctl start qemu-guest-agent.service && systemctl enable qemu-guest-agent.service && systemctl start ovirt-guest-agent && systemctl enable ovirt-guest-agent 11 | ``` 12 | 13 | - CentOS6 14 | 15 | ``` 16 | sudo yum install centos-release-ovirt36 17 | yum install ovirt-guest-agent -y 18 | /etc/init.d/ovirt-guest-agent start 19 | chkconfig ovirt-guest-agent on 20 | ``` 21 | 22 | - 开始服务 23 | 24 | ``` 25 | sudo systemctl enable --now ovirt-guest-agent.service 26 | ``` 27 | 28 | - 查看状态排错 29 | 30 | ``` 31 | sudo systemctl status ovirt-guest-agent.service 32 | ``` 33 | 34 | ## Debian 35 | 36 | - 如何在Debian中安装guest代理 37 | 38 | ``` 39 | # 使用apt-get通过终端安装oVirt Guest Tools 40 | # echo "deb http://download.opensuse.org/repositories/home:/evilissimo:/deb/Debian_7.0/ ./" >> /etc/apt/sources.list 41 | # gpg -v -a --keyserver http://download.opensuse.org/repositories/home:/evilissimo:/deb/Debian_7.0/Release.key --recv-keys D5C7F7C373A1A299 42 | # gpg --export --armor 73A1A299 | apt-key add - 43 | # apt-get update 44 | # apt-get install ovirt-guest-agent 45 | 46 | # 使用gnome中的添加/删除软件安装oVirt Guest Tools 47 | ``` 48 | 49 | - 开始服务 50 | 51 | ``` 52 | #su - 53 | #service ovirt-guest-agent enable && service ovirt-guest-agent start 54 | ``` 55 | 56 | - 查看状态排错 57 | 58 | ``` 59 | # su - 60 | # service ovirt-guest-agent start 61 | ``` 62 | 63 | ## Ubuntu 64 | 65 | - 如何在Ubuntu中安装guest代理(Ubuntu 16.04) 66 | 67 | ``` 68 | # 使用apt-get通过终端安装oVirt Guest Tools 69 | # sudo nano -w /etc/apt/sources.list.d/ovirt-guest-agent.list 70 | deb http://download.opensuse.org/repositories/home:/evilissimo:/ubuntu:/16.04/xUbuntu_16.04/ / 71 | # wget http://download.opensuse.org/repositories/home:/evilissimo:/ubuntu:/16.04/xUbuntu_16.04//Release.key 72 | # sudo apt-key add - < Release.key 73 | # sudo apt-get update 74 | # sudo apt-get 
install ovirt-guest-agent 75 | ``` 76 | 77 | - 开始服务 78 | 79 | 安装程序将自动启动ovirt-guest-agent并将其设置为在启动时自动启动。 80 | 81 | - 参考链接:https://www.ovirt.org/documentation/how-to/guest-agent/ 82 | 83 | 84 | 85 | 86 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-iso.md: -------------------------------------------------------------------------------- 1 | # ovirt镜像管理 2 | 3 | ## 镜像上传 4 | 5 | # 使用命令 6 | 先把镜像文件上传到服务器上,执行上传命令 7 | engine-iso-uploader --nfs-server=nfs.aniu.so:/export/iso upload /usr/local/src/CentOS-7-x86_64-Minimal-1611.iso 8 | 9 | # 或者通过filezilla上传到服务的 data存储域目录下。然后到移动到正确的位置 10 | 11 | 12 | engine-iso-uploader --nfs-server=nfs1.aniu.so:/ovirt/iso upload /usr/local/src/CentOS-7-x86_64-Minimal-1611.iso 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-ldap-roles.md: -------------------------------------------------------------------------------- 1 | # Ovirt用户管理 2 | 3 | - https://www.ovirt.org/documentation/admin-guide/chap-Users_and_Roles/ 4 | 5 | ## 通过命令行管理用户任务 6 | 7 | 8 | ovirt-aaa-jdbc-tool user add yunwei --attribute=firstName=yunwei 9 | 10 | ovirt-aaa-jdbc-tool user edit yunwei --attribute=email=yunwei@aniu.tv 11 | 12 | ovirt-aaa-jdbc-tool user password-reset yunwei --password-valid-to="2025-08-01 12:00:00-0800" 13 | 14 | 15 | ## 添加普通用户 16 | 17 | ovirt-aaa-jdbc-tool user add test --attribute=firstName=test 18 | ovirt-aaa-jdbc-tool user password-reset test --password-valid-to="2025-08-01 12:00:00-0800" 19 | 20 | 21 | ovirt-aaa-jdbc-tool user add wanghui --attribute=firstName=wang 22 | ovirt-aaa-jdbc-tool user edit wanghui --attribute=email=hwang@aniu.tv 23 | ovirt-aaa-jdbc-tool user password-reset wanghui --password-valid-to="2025-08-01 12:00:00-0800" 24 | 25 | 26 | --attribute=firstName=aniu 27 | ovirt-aaa-jdbc-tool user edit test --attribute=email=hwang@aniu.tv 28 | ovirt-aaa-jdbc-tool user password-reset test 
--password-valid-to="2025-08-01 12:00:00-0800" 29 | 30 | 31 | - 更改管理员密码 32 | 33 | ovirt-aaa-jdbc-tool user password-reset admin --password-valid-to="2025-08-01 12:00:00Z" 34 | 35 | - 解锁管理员 36 | ovirt-aaa-jdbc-tool user unlock admin 37 | 38 | - 禁用内部管理用户 39 | 40 | 41 | ## 管理组 42 | 43 | ovirt-aaa-jdbc-tool group add ops 44 | 45 | ovirt-aaa-jdbc-tool group add dev 46 | 47 | ovirt-aaa-jdbc-tool group-manage useradd ops --user=yunwei 48 | 49 | ovirt-aaa-jdbc-tool group show ops 50 | 51 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-notifier-email.md: -------------------------------------------------------------------------------- 1 | # ovirt告警邮件设置 2 | 3 | ## 启用ovirt-engine-notifier服务来根据指定的事件发出邮件通知 4 | 5 | - 配置ovirt-engine-notifier 6 | 7 | ``` 8 | # 从174行开始修改 9 | # vi /usr/share/ovirt-engine/services/ovirt-engine-notifier/ovirt-engine-notifier.conf 10 | # The SMTP mail server address. Required. 11 | MAIL_SERVER=smtp.163.com 12 | 13 | # The SMTP port (usually 25 for plain SMTP, 465 for SMTP with SSL, 587 for SMTP with TLS) 14 | MAIL_PORT=465 15 | 16 | # Required if SSL or TLS enabled to authenticate the user. Used also to specify 'from' user address if mail server 17 | # supports, when MAIL_FROM is not set. Address is in RFC822 format 18 | MAIL_USER=xxxxxx@163.com 19 | 20 | # Required to authenticate the user if mail server requires authentication or if SSL or TLS is enabled 21 | SENSITIVE_KEYS="${SENSITIVE_KEYS},MAIL_PASSWORD" 22 | MAIL_PASSWORD=xxxxxx 23 | 24 | # Indicates type of encryption (none, ssl or tls) should be used to communicate with mail server. 25 | MAIL_SMTP_ENCRYPTION=ssl # 更改使用ssl,默认none收不到邮件 26 | 27 | # If set to true, sends a message in HTML format. 28 | HTML_MESSAGE_FORMAT=true 29 | 30 | # Specifies 'from' address on sent mail in RFC822 format, if supported by mail server. 
31 | MAIL_FROM=xxxxxx@163.com 32 | ``` 33 | 34 | - 管理界面配置 35 | 36 | > 在 ovirt-engine “管理”-“用户” ,选定用户(admin),在下方的菜单中选择:“事件通知器”-“管理事件”,选定需要告警的事件,配置邮件接收者。 37 | 38 | 39 | - 配置完成,启动服务: 40 | 41 | ``` 42 | # systemctl start ovirt-engine-notifier 43 | # systemctl enable ovirt-engine-notifier 44 | ``` 45 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-remove.md: -------------------------------------------------------------------------------- 1 | # 清理ovirt环境 2 | 3 | - 清理ovirt相关服务(包括HA,engine),停用并卸载ovirt,vdsm,libvirt相关的服务: 4 | 5 | yum remove *ovirt* *vdsm* *libvirt* *rhev* *glusterfs* *postgresql* -y 6 | 7 | rm -rf /etc/ovirt* /etc/vdsm /etc/libvirt* /etc/pki/vdsm /etc/pki/libvirt /etc/pki/CA/cacert.pem* /var/run/vdsm /var/run/libvirt /var/lib/vdsm /var/lib/libvirt /var/lib/ovirt* /var/lib/pgsql /var/log/*ovirt* /var/log/vdsm /var/log/libvirt 8 | 9 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-storage.md: -------------------------------------------------------------------------------- 1 | # nfs 2 | 3 | # glusterfs 4 | 5 | - 参考 glusterfs 安装 6 | 7 | 新增存储域:存储 域 新建域 8 | 9 | 数据中心选择:aniudc 域功能选择:data 名称:glusterfs-data 使用主机:server1 路径:server1:/gv0 10 | 11 | 依次添加: iso域 导出域 路径:server1:/gv1 server1:/gv2 12 | 13 | 14 | -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-use-virsh.md: -------------------------------------------------------------------------------- 1 | # ovirt节点直接使用virsh操作vm需要用户名密码 2 | 3 | ``` 4 | [root@ovirt4 ~]# virsh list vdsm@ovirt 5 | error: unexpected data 'vdsm@ovirt' 6 | [root@ovirt4 ~]# virsh list --all 7 | Please enter your authentication name: vdsm@ovirt 8 | Please enter your password: 9 | Id Name State 10 | ---------------------------------------------------- 11 | 3 vm-03 running 12 | 5 vm-04 running 13 | ``` 14 | 15 | ## 用户(vdsm@ovirt)获取 16 | 17 | ``` 18 | [root@ovirt3 ~]# find 
/ -name libvirtconnection.py 19 | /usr/lib/python2.7/site-packages/vdsm/common/libvirtconnection.py 20 | [root@ovirt3 ~]# egrep vdsm@ovirt /usr/lib/python2.7/site-packages/vdsm/common/libvirtconnection.py 21 | SASL_USERNAME = "vdsm@ovirt" 22 | ``` 23 | 24 | ## 密码(shibboleth)获取 25 | 26 | ``` 27 | [root@ovirt3 ~]# find / -name libvirt_password 28 | /etc/pki/vdsm/keys/libvirt_password 29 | [root@ovirt3 ~]# cat /etc/pki/vdsm/keys/libvirt_password 30 | shibboleth #密码 31 | ``` 32 | 33 | > 备注:不建议使用:# saslpasswd2 -a libvirt admin 创建admin用户来管理vm,但可以使用 -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-vm-attach-usb-device.md: -------------------------------------------------------------------------------- 1 | # 如何在Ovirt下的VM内部使用USB设备 2 | 3 | ## -------------------------------------------------------------------------------- /virtualization/ovirt/ovirt-vm-types.md: -------------------------------------------------------------------------------- 1 | # 参考openstack设置ovirt的vm规格 2 | 3 | ## 查看openstack vm类型(packstack) 4 | 5 | [root@packstack ~(keystone_admin)]# openstack flavor list 6 | +----+-----------+-------+------+-----------+-------+-----------+ 7 | | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | 8 | +----+-----------+-------+------+-----------+-------+-----------+ 9 | | 1 | m1.tiny | 512 | 1 | 0 | 1 | True | 10 | | 2 | m1.small | 2048 | 20 | 0 | 1 | True | 11 | | 3 | m1.medium | 4096 | 40 | 0 | 2 | True | 12 | | 4 | m1.large | 8192 | 80 | 0 | 4 | True | 13 | | 5 | m1.xlarge | 16384 | 160 | 0 | 8 | True | 14 | +----+-----------+-------+------+-----------+-------+-----------+ 15 | 16 | 17 | ## ovirt vm类型 18 | 19 | +----+-----------+-------+------+-----------+-------+-----------+ 20 | | ID | Name | RAM | Disk | Ephemeral | VCPUs | Is Public | 21 | +----+-----------+-------+------+-----------+-------+-----------+ 22 | | 1 | m1.tiny | 1024 | 5 | 0 | 1 | True | 23 | | 2 | m1.small | 2048 | 20 | 0 | 1 | True | 24 
| | 3 | m1.medium | 4096 | 20 | 0 | 2 | True | 25 | | 4 | m1.large | 8192 | 30 | 0 | 4 | True | 26 | | 5 | m1.xlarge | 16384 | 30 | 0 | 8 | True | 27 | +----+-----------+-------+------+-----------+-------+-----------+ -------------------------------------------------------------------------------- /virtualization/ovirt/sealing-linux-vm.md: -------------------------------------------------------------------------------- 1 | # Sealing Linux VM 2 | 3 | ## 密封Linux VM 步骤 4 | 5 | SSH to the VM as root 6 | flag the system for reconfiguring 7 | touch /.unconfigured 8 | Remove ssh host keys: 9 | rm -i /etc/ssh/ssh_host_* 10 | Optionally, for environments where a host cannot determine its own name via DNS based lookups: 11 | hostnamectl set-hostname hostname 12 | Remove UDEV rules: 13 | rm -i /etc/udev/rules.d/70-persistent* 14 | Remove UUID 15 | [optionally] Delete the logs from /var/log 16 | [Optionally] Delete the build logs from /root. 17 | Shut down the virtual machine. 18 | 19 | ## -------------------------------------------------------------------------------- /virtualization/ovirt/vm-attach-disk.md: -------------------------------------------------------------------------------- 1 | # ovirt虚拟机添加硬盘 2 | 3 | ## 格式化硬盘 4 | 5 | fdisk /dev/sdb 6 | 7 | - 创建vg 8 | 9 | vgcreate centos /dev/sdb1 10 | 11 | - 创建lv 12 | 13 | lvcreate -l 100%FREE -n data centos 14 | 15 | - 挂载并添加到fstab 16 | 17 | yum install xfsprogs -y 18 | 19 | mkfs.xfs /dev/mapper/centos-data 20 | 21 | mount /dev/mapper/centos-data /data/ 22 | 23 | echo "/dev/mapper/centos-data /data xfs defaults 0 0" >> /etc/fstab 24 | 25 | ## 扩展data目录 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /virtualization/ovirt/vm-netowrk-performance-test.md: -------------------------------------------------------------------------------- 1 | # 虚拟机网络性能测试 2 | 3 | 1、准备环境 4 | 5 | ecs-111: 192.168.0.111 6 | ecs-112: 192.168.0.112 7 | 8 | 
-------------------------------------------------------------------------------- /virtualization/xen/xen-migrate-ovirt.md: -------------------------------------------------------------------------------- 1 | # xen 上虚拟机迁移到 ovirt 2 | 3 | ## -------------------------------------------------------------------------------- /web/jboss/jboss-learn.md: -------------------------------------------------------------------------------- 1 | # https://developer.jboss.org/welcome -------------------------------------------------------------------------------- /web/nginx/c7-install-nginx.md: -------------------------------------------------------------------------------- 1 | # CentOS7 安装Nginx 2 | 3 | ## 配置nginx源 4 | 5 | # cat nginx.repo 6 | 7 | echo '[nginx] 8 | name=nginx repo 9 | baseurl=http://nginx.org/packages/centos/$releasever/$basearch/ 10 | gpgcheck=0 11 | enabled=1 12 | ' | sudo tee /etc/yum.repos.d/nginx.repo 13 | ## 安装nginx 14 | 15 | yum install nginx -y 16 | 17 | - 启动设置自启 18 | 19 | systemctl start nginx 20 | systemctl enable nginx 21 | 22 | firewall-cmd --add-service=http --permanent 23 | firewall-cmd --reload 24 | -------------------------------------------------------------------------------- /web/nginx/docker-install-nginx.md: -------------------------------------------------------------------------------- 1 | # docker 安装nginx 2 | 3 | ## https://docs.docker.com/samples/library/nginx 4 | 5 | docker run --name aniu-nginx -d -p 80:80 -p 443:443 -v /data/nginx_home/html:/usr/share/nginx/html -v /data/nginx_home/nginx.conf:/etc/nginx/nginx.conf -v /data/nginx_home/logs:/var/log/nginx -v /data/nginx_home/vhost:/etc/nginx/conf.d -v /data/nginx_home/ssl:/etc/nginx/ssl -u 0 nginx 6 | 7 | 8 | # 自颁证书 9 | openssl x509 -in dev.jenkins.aniu.so.csr -out dev.jenkins.aniu.so.crt -req -signkey dev.jenkins.aniu.so.key -days 3650 10 | -------------------------------------------------------------------------------- /web/php/C7-install-php7.md: 
-------------------------------------------------------------------------------- 1 | # php7运行环境安装 2 | 3 | - 添加remi源 4 | 5 | ``` 6 | # yum -y install http://rpms.famillecollet.com/enterprise/remi-release-7.rpm 7 | # sed -i -e "s/enabled=1/enabled=0/g" /etc/yum.repos.d/remi-safe.repo 8 | # if [enabled=0], input a command to use the repository 9 | # yum --enablerepo=remi-safe install [Package] 10 | ``` 11 | 12 | - 安装开发环境,依赖包 13 | 14 | ``` 15 | yum groupinstall "Development tools" -y 16 | yum --enablerepo=remi-safe -y install php72-php-fpm php72-php-pear php72-php-mbstring php72-php-devel php72-php-json php72-php-xml php72-php-mbstring php72-php-pecl-redis php72-php-common php72-php-pecl-igbinary php72-php-pdo php72-php-pecl-mysql php72-php-devel php72-php-cli php72-php-pear php72-php-mysqlnd php72-php-gd 17 | ``` 18 | 19 | - 使用pecl安装功能模块 20 | 21 | ``` 22 | pecl install channel://pecl.php.net/mcrypt-1.0.1 23 | ``` 24 | 25 | 26 | 27 | https://pecl.php.net/get/event-2.3.0.tgz 28 | 29 | 30 | pecl install channel://pecl.php.net/libevent-0.1.0 31 | 32 | pecl install channel://pecl.php.net/event-2.3.0 -------------------------------------------------------------------------------- /web/php/event.md: -------------------------------------------------------------------------------- 1 | # 2 | wget -c https://github.com/libevent/libevent/releases/download/release-2.1.8-stable/libevent-2.1.8-stable.tar.gz -P /usr/local/src 3 | cd /usr/local/src 4 | tar -zxvf libevent-2.1.8-stable.tar.gz && cd libevent-2.1.8-stable 5 | ./configure --prefix=/usr/local/libevent-2.1.8 6 | make && make install 7 | 8 | wget -c http://pecl.php.net/get/event-2.3.0.tgz -P /usr/local/src 9 | cd /usr/local/src 10 | tar -zxvf event-2.3.0.tgz && cd event-2.3.0 11 | /usr/local/php/bin/phpize 12 | ./configure --with-php-config=/usr/local/php/bin/php-config --with-event-libevent-dir=/usr/local/libevent-2.1.8/ 13 | make && make install 14 | 15 | extension=event.so 
-------------------------------------------------------------------------------- /web/resin/resin-learn.md: -------------------------------------------------------------------------------- 1 | # http://www.caucho.com/ 2 | 3 | 从单个服务器扩展到数百万个没有外部依赖关系的用户 -------------------------------------------------------------------------------- /web/tomcat/c7-install-tomcat.md: -------------------------------------------------------------------------------- 1 | # 在CentOS 7上安装Apache Tomcat 8 2 | 3 | ## 安装Java 4 | 5 | sudo yum install java-1.8.0-openjdk-devel -y 6 | 7 | ## 创建tomcat用户 8 | 9 | sudo groupadd tomcat 10 | sudo useradd -M -s /bin/nologin -g tomcat -d /data/tomcats tomcat 11 | 12 | ## 安装Tomcat 13 | 14 | - 下载:https://tomcat.apache.org/download-80.cgi 15 | 16 | wget http://mirror.bit.edu.cn/apache/tomcat/tomcat-8/v8.5.30/bin/apache-tomcat-8.5.30.tar.gz # 建议使用时下载当前稳定版本 17 | 18 | sudo mkdir /data/tomcats 19 | sudo tar xvf apache-tomcat-8*tar.gz -C /data/tomcats/ 20 | 21 | cd /data/tomcats/ && mv apache-tomcat-8* tomcat8-8081 22 | 23 | sudo chgrp -R tomcat /data/tomcats/ 24 | 25 | ## 设置自启 26 | 27 | - 笔者在用 28 | 29 | [root@ecs-09 ~]# cat /etc/systemd/system/tomcat-7081.service 30 | [Unit] 31 | Description=Tomcat 7.0.81 servlet container 32 | After=syslog.target network.target 33 | 34 | [Service] 35 | Type=forking 36 | 37 | #User=tomcat 38 | #Group=tomcat 39 | 40 | #Environment="JAVA_HOME=/usr/lib/jvm/jre" 41 | #Environment="JAVA_OPTS=-Djava.security.egd=file:///dev/urandom" 42 | 43 | Environment="CATALINA_BASE=/data/tomcats/tomcat-7081" 44 | Environment="CATALINA_HOME=/data/tomcats/tomcat-7081" 45 | Environment="CATALINA_PID=/data/tomcats/tomcat-7081/temp/tomcat.pid" 46 | #Environment="CATALINA_OPTS=-Xms512M -Xmx1024M -server -XX:+UseParallelGC" 47 | 48 | ExecStart=/data/tomcats/tomcat-7081/bin/startup.sh 49 | ExecStop=/data/tomcats/tomcat-7081/bin/shutdown.sh 50 | 51 | [Install] 52 | WantedBy=multi-user.target 53 | 54 | - 笔者tomcat配置 55 | 56 | ``` 57 | [root@ecs-09 conf]# cat 
server.xml 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 73 | 74 | 75 | 76 | 77 | 84 | 85 | 108 | 109 | 110 | 111 | 113 | 114 | 115 | 117 | 120 | 121 | 122 | 123 | 124 | ``` 125 | 126 | - 环境变量设置 127 | 128 | -------------------------------------------------------------------------------- /web/wordpress/install-wordpress.md: -------------------------------------------------------------------------------- 1 | # 要求 2 | 3 | PHP7.2+ MySQL5.6+ 4 | HTTPS 5 | 6 | 7 | - mariadb 8 | 9 | [root@wanghui ~]# mysql -u root -p 10 | Enter password: 11 | Welcome to the MariaDB monitor. Commands end with ; or \g. 12 | Your MariaDB connection id is 16 13 | Server version: 10.3.7-MariaDB MariaDB Server 14 | 15 | Copyright (c) 2000, 2018, Oracle, MariaDB Corporation Ab and others. 16 | 17 | Type 'help;' or '\h' for help. Type '\c' to clear the current input statement. 18 | 19 | MariaDB [(none)]> create database wordpress; 20 | Query OK, 1 row affected (0.001 sec) 21 | 22 | MariaDB [(none)]> grant all privileges on wordpress.* to wordpress@'localhost' identified by 'Wordpress123.'; 23 | Query OK, 0 rows affected (0.002 sec) 24 | 25 | MariaDB [(none)]> flush privileges; 26 | Query OK, 0 rows affected (0.002 sec) 27 | 28 | MariaDB [(none)]> exit 29 | Bye 30 | 31 | grant all privileges on ss_manager.* to ss_manager@'localhost' identified by 'SSmanager123.'; --------------------------------------------------------------------------------