├── README.md
├── ansible
└── README.md
├── ant
└── README.md
├── apache
├── README.md
├── basic_auth.conf
├── proxy.conf
├── reverse-proxy.conf
├── ssl.conf
└── vhosts.conf
├── aufs
└── README.md
├── bind
├── README.mkd
├── bind.keys
├── china-edu-acl.cfg
├── china-edu
│ └── jizhihuwai.com.zone
├── china-mobile-acl.cfg
├── china-mobile
│ └── jizhihuwai.com.zone
├── china-telecom-acl.cfg
├── china-telecom
│ └── jizhihuwai.com.zone
├── china-unicom-acl.cfg
├── china-unicom
│ └── jizhihuwai.com.zone
├── db.0
├── db.127
├── db.255
├── db.empty
├── db.local
├── db.root
├── jizhihuwai.com.zone
├── named.conf
├── named.conf.default-zones
├── rndc.key
└── zones.rfc1918
├── c
├── README.md
└── warning-error-solution.md
├── centos
└── README.md
├── ceph
├── README.md
├── deployment-nginx.yaml
└── storage-class-ceph-rbd.yaml
├── chef
├── README.mkd
├── chef-local-mode.md
├── client.rb
└── knife.rb
├── chrome
├── README.md
└── img
│ ├── chrome1.png
│ ├── chrome2.png
│ └── chrome3.png
├── confd
└── README.md
├── coredns
├── README.md
└── coredns
│ ├── Corefile
│ ├── hosts.d
│ └── file.hosts
│ └── youya.org.zone
├── db2
└── README.md
├── debian
├── README.md
├── binary
│ └── DEBIAN
│ │ ├── changelog
│ │ └── control
├── how_to_build_deb_repo.md
├── install-conffiles.png
├── install.png
├── purge.png
├── remove-purge.png
├── remove.png
├── source
│ └── debian
│ │ ├── changelog
│ │ ├── compat
│ │ ├── control
│ │ ├── copyright
│ │ ├── rules
│ │ └── rules.standard
└── upgrade.png
├── deepin
└── README.md
├── dns
└── README.md
├── dnsmasq
└── README.md
├── docker
├── README.md
└── daemon.json
├── drbd
├── README.mkd
└── drbd.conf
├── etcd
├── README.md
└── etcd.sh
├── fdisk
└── README.md
├── ffmpeg
└── README.md
├── fio
└── README.md
├── gcc
└── README.md
├── gdb
├── README.md
├── dot_gdbinit
└── libstdcxx
│ ├── __init__.py
│ └── v6
│ ├── __init__.py
│ ├── printers.py
│ └── xmethods.py
├── gerrit
└── README.md
├── git
├── README.md
├── build-git-server.mkd
└── how-to-contribute-to-github-project.md
├── gitlab-kubernetes
├── README.md
└── gitlab-ci.yml
├── go
├── README.md
└── golang-install.sh
├── gpg
├── README.mkd
└── zhiwei.public.gpg
├── gtest
└── README.md
├── haproxy
└── README.mkd
├── heartbeat
├── README.config
├── README.mkd
├── authkeys
├── ha.cf
├── haresources
└── resource.d
│ ├── IProute
│ ├── SendArp
│ └── arp_check
├── http
└── README.md
├── iperf3
└── README.md
├── iptables
└── README.mkd
├── keepalived
├── 113.keepalived.conf
├── 163.keepalived.conf
├── 225.keepalived.conf
├── 226.keepalived.conf
├── 96-lvs-keepalived.conf
├── README.mkd
├── keepalived_notify.sh
└── openstack.md
├── kind
├── README.md
├── cluster-csi-controller
│ ├── rbac-snapshot-controller.yaml
│ ├── setup-snapshot-controller.yaml
│ ├── snapshot.storage.k8s.io_volumesnapshotclasses.yaml
│ ├── snapshot.storage.k8s.io_volumesnapshotcontents.yaml
│ └── snapshot.storage.k8s.io_volumesnapshots.yaml
├── containerd
│ └── certs.d
│ │ └── _default
│ │ └── hosts.toml
├── kind.yaml
├── nfs-csi-driver
│ ├── nfs-csi-controller.yaml
│ ├── nfs-csi-driverinfo.yaml
│ ├── nfs-csi-node.yaml
│ ├── nfs-csi-rbac.yaml
│ └── nfs-csi-storageclass.yaml
├── nfs-provisioning
│ ├── dynamic-pvc.yaml
│ ├── dynamic-statefulset.yaml
│ ├── static-deployment.yaml
│ ├── static-pv.yaml
│ └── static-pvc.yaml
└── nfs-server
│ └── nfs-server.yaml
├── kubernetes
├── README.md
├── etc
│ ├── etcd
│ │ └── etcd.conf
│ └── kubernetes
│ │ ├── apiserver
│ │ ├── config
│ │ ├── controller-manager
│ │ ├── kubelet
│ │ ├── proxy
│ │ └── scheduler
├── files
│ ├── ifcfg-kbr0
│ └── route-eth0
├── nsenter.yaml
└── usr
│ └── lib
│ └── systemd
│ └── system
│ ├── etcd.service
│ ├── kube-apiserver.service
│ ├── kube-controller-manager.service
│ ├── kube-proxy.service
│ ├── kube-scheduler.service
│ └── kubelet.service
├── kvm-qemu-libvirt-virtualization
├── README.mkd
├── addbr.sh
├── ifcfg-br0
├── ifcfg-eth0
├── instance-name.xml
├── new
│ ├── README.md
│ ├── domain.xml
│ └── virbr1.xml
├── virbr0.xml
├── vm.instance-name.xml
├── vm.manager.sh
├── vms.ini
└── windows.mkd
├── laravel
└── README.md
├── letsencrypt
└── README.md
├── library
├── README.mkd
├── id.sh
├── initial-rc.sh
├── php_lib.php
├── python_lib.py
└── shell_lib.sh
├── logrotate
└── README.md
├── lvm
└── README.mkd
├── lvs-keepalived
└── README.mkd
├── lvs
├── README.mkd
└── lvs.conf
├── mesos
└── README.md
├── minio
├── Dockerfile.minio
├── README.md
├── k8s-deployment.yaml
└── nginx
│ ├── certs
│ ├── server.crt
│ └── server.key
│ └── conf.d
│ └── minio.conf
├── mount
└── README.md
├── mysql
└── README.md
├── nfs
├── README.md
└── exports
├── nftables
├── README.md
├── ip.md
└── nftable.conf
├── nginx
├── README.md
├── conf.d
│ ├── basic-auth.conf
│ ├── file-autoindex.conf
│ ├── geo-redirect.conf
│ ├── jizhihuwai.com.conf
│ ├── php.conf
│ ├── proxy.conf
│ ├── reverse-proxy.conf
│ ├── single-file.conf
│ ├── ssl.conf
│ └── vhosts.conf
├── nginx.conf
└── raw.d
│ ├── tcp-backends.conf
│ ├── tcp.conf
│ └── udp.conf
├── nodejs
└── README.md
├── opensource-solution
└── README.md
├── openstack
├── PACKAGING.md
├── README.mkd
├── TESTING.md
├── cinder
│ ├── api-paste.ini
│ └── cinder.conf
├── define.sh
├── easystack
│ └── README.md
├── glance.sh
├── glance
│ ├── glance-api-paste.ini
│ ├── glance-api.conf
│ ├── glance-registry-paste.ini
│ └── glance-registry.conf
├── heat.md
├── keystone.sh
├── keystone
│ └── keystone.conf
├── nova.sh
├── nova
│ ├── api-paste.ini
│ ├── nova-compute.conf
│ └── nova.conf
├── openstackrc
└── quantum
│ ├── api-paste.ini
│ ├── dhcp_agent.ini
│ ├── l3_agent.ini
│ ├── ovs_quantum_plugin.ini
│ └── quantum.conf
├── pacemaker-corosync
└── README.mkd
├── php
├── README.md
├── lang.php
└── lang
│ └── zh_CN
│ └── LC_MESSAGES
│ ├── zh.mo
│ └── zh.po
├── podman
├── .config
│ └── containers
│ │ └── storage.conf
├── README.md
├── build-podman.sh
├── metadata
│ ├── etc
│ │ └── containers
│ │ │ ├── containers.conf
│ │ │ ├── manifest
│ │ │ └── k8s-v2ray.yaml
│ │ │ ├── mounts.conf
│ │ │ ├── policy.json
│ │ │ ├── registries.conf
│ │ │ ├── registries.conf.d
│ │ │ ├── 00-shortnames.conf
│ │ │ ├── docker.io.conf
│ │ │ └── ghcr.io.conf
│ │ │ ├── registries.d
│ │ │ └── default.yaml
│ │ │ ├── seccomp.json
│ │ │ ├── storage.conf
│ │ │ └── systemd
│ │ │ ├── clash.container
│ │ │ └── v2ray.kube
│ └── usr
│ │ ├── lib
│ │ ├── systemd
│ │ │ └── system
│ │ │ │ ├── podman-auto-update.service
│ │ │ │ ├── podman-auto-update.timer
│ │ │ │ ├── podman-clean-transient.service
│ │ │ │ ├── podman-kube@.service
│ │ │ │ ├── podman-restart.service
│ │ │ │ ├── podman.service
│ │ │ │ └── podman.socket
│ │ └── tmpfiles.d
│ │ │ ├── containers-common.conf
│ │ │ └── podman.conf
│ │ └── share
│ │ └── containers
│ │ ├── containers.conf
│ │ ├── seccomp.json
│ │ └── storage.conf
└── src
│ └── wasm
│ ├── Dockerfile
│ └── main.go
├── pptp
└── README.md
├── prometheus
└── pod.yaml
├── protobuf
└── README.md
├── puppet
└── README.mkd
├── pxe-install-os
├── README.mkd
├── boot.msg
├── default
├── dhcpd.conf
├── rhel.cfg
└── tftp
├── pypi
└── README.md
├── python-pip
├── README.md
└── zenith
│ ├── setup.cfg
│ ├── setup.py
│ └── zenith
│ ├── __init__.py
│ ├── app.py
│ ├── common.py
│ └── tests
│ └── __init__.py
├── python-virtualenv
└── README.md
├── python
├── README.md
├── calculate.c
├── calculate_module.c
└── python-c-api.md
├── redis
├── README.md
└── redis
│ ├── 7001
│ └── redis.conf
│ ├── 7002
│ └── redis.conf
│ ├── 7003
│ └── redis.conf
│ ├── 7004
│ └── redis.conf
│ ├── 7005
│ └── redis.conf
│ └── 7006
│ └── redis.conf
├── registry
└── docker-registry-setup.sh
├── repository
├── qr_alipay_pay.png
└── qr_wechat_pay.png
├── resources
├── README.md
├── adblock.txt
├── chrome.md
├── linux.md
├── switchy.txt
├── wiznote.css
└── wiznote.md
├── rest-api
└── README.md
├── review-board
└── README.md
├── rime
└── README.md
├── router
└── asus.md
├── rpm-package-management
├── README.md
├── keepalived.spec
├── php-apc.spec
└── template.spec
├── rsync
├── README.md
├── rsyncd.conf
└── rsyncd.secrets
├── ruby
├── Gemfile
└── README.md
├── sar
└── README.md
├── sasl
└── README.md
├── screen
└── README.md
├── shell
├── README.md
├── io-redirection.md
├── test.sh
└── text.md
├── skydns
└── README.md
├── snmp
└── README.md
├── ssh
└── README.md
├── systemd
└── README.md
├── terraform
├── README.md
├── backend.tf
├── input.tfvars
├── main.tf
├── output.tf
├── variables.tf
└── versions.tf
├── timezone
├── README.md
└── timezone.jpg
├── tips
└── README.md
├── tls-cert
└── README.md
├── travis-ci
├── .travis.yml
├── .yamllint
└── README.md
├── ubuntu
├── .fonts.conf
├── README.md
├── gtk.css
└── how-to-make-debain-package.mkd
├── ucarp
├── README.md
├── vip-down.sh
└── vip-up.sh
├── vagrant
├── README.md
├── Vagrantfile
└── Vagrantfile.tmpl
├── vegeta
└── README.md
├── yarn
├── README.md
└── single-node-cluster.md
└── zookeeper
├── README.md
├── zookeeper-client.jaas.conf
├── zookeeper-client.properties
├── zookeeper.jaas.conf
└── zookeeper.properties
/README.md:
--------------------------------------------------------------------------------
1 | # Linux Related Notes
2 |
3 | I like to use Git and Markdown to write notes.
4 |
5 | Git can give me all the history of the notes.
6 |
7 | I can use Markdown to easily edit and view the notes.
8 |
--------------------------------------------------------------------------------
/ansible/README.md:
--------------------------------------------------------------------------------
1 | # Ansible Usage
2 |
3 | 在使用过 cfengine/puppet/chef 之后,我终于准备向 ansible 下手了,前些天简单的阅读了一下 ansible 的文档,感觉这个东西非常符合我的期望。
4 |
5 | 同学们有时间的话可以看看 ansible ,感觉这货不赖。
6 |
--------------------------------------------------------------------------------
/apache/README.md:
--------------------------------------------------------------------------------
1 | 具体怎么生成 SSL 证书和让自己签的证书受到系统信任,可以参考 [ssl-cert](../ssl-cert).
2 |
--------------------------------------------------------------------------------
/apache/basic_auth.conf:
--------------------------------------------------------------------------------
1 |
2 | AuthType basic
3 | AuthName "Basic Login"
4 | AuthUserFile /etc/httpd/conf.d/dbmpasswd
5 | Require valid-user
6 |
7 |
8 | # htpasswd -c /etc/httpd/conf.d/dbmpasswd username
9 | # then put the password, then you can use basic auth
10 | # apt install apache2-utils
11 |
--------------------------------------------------------------------------------
/apache/proxy.conf:
--------------------------------------------------------------------------------
1 | Listen 3128
2 |
3 | ProxyRequests On
4 | ProxyVia On
5 |
6 |
7 |
8 |
9 | # a2enmod proxy
10 | # a2enmod proxy_http
11 | # LoadModule proxy_module modules/mod_proxy.so
12 | # LoadModule proxy_http_module modules/mod_proxy_http.so
13 |
--------------------------------------------------------------------------------
/apache/reverse-proxy.conf:
--------------------------------------------------------------------------------
1 | Listen 3000
2 |
3 | ProxyPreserveHost On
4 |
5 | ProxyPass / http://localhost/
6 | ProxyPassReverse / http://localhost/
7 |
8 |
9 | # a2enmod headers
10 | # a2enmod proxy
11 | # a2enmod proxy_balancer
12 | # a2enmod proxy_http
13 | # LoadModule proxy_module modules/mod_proxy.so
14 | # LoadModule proxy_http_module modules/mod_proxy_http.so
15 |
--------------------------------------------------------------------------------
/apache/ssl.conf:
--------------------------------------------------------------------------------
1 |
2 | ServerAdmin ssl@ssl.dev
3 | DocumentRoot /var/www/htdocs/ssl.dev
4 | ServerName ssl.dev
5 | SSLEngine on
6 | SSLCertificateFile "/path/to/ssl.dev.crt"
7 | SSLCertificateKeyFile "/path/to/ssl.dev.key"
8 | ErrorLog logs/ssl.dev_error_log
9 | CustomLog logs/ssl.dev_access_log common
10 |
11 |
--------------------------------------------------------------------------------
/apache/vhosts.conf:
--------------------------------------------------------------------------------
1 |
2 | ServerAdmin admin@admin.com
3 | DocumentRoot /var/www/htdocs/admin.com
4 | ServerName admin.com
5 | ErrorLog logs/admin.com_error_log
6 | CustomLog logs/admin.com_access_log common
7 |
8 |
9 | ServerAdmin admin@admin2.com
10 | DocumentRoot /var/www/htdocs/admin2.com
11 | ServerName admin2.com
12 | ErrorLog logs/admin2.com_error_log
13 | CustomLog logs/admin2.com_access_log common
14 |
15 |
--------------------------------------------------------------------------------
/bind/README.mkd:
--------------------------------------------------------------------------------
1 | # BIND相关介绍
2 |
3 | ## BIND是什么
4 |
5 | BIND是Berkeley Internet Name Domain的缩写,是现今互联网上最常见的DNS服务器软件。
6 |
7 | ISC的主席为BIND写了个[RRL][rrl](Response Rate Limiting)补丁,可以有效的防护DDoS攻击,将来有可能加入到BIND里面。
8 |
9 | [rrl]: http://www.redbarn.org/dns/ratelimits
10 |
11 | ## BIND的配置
12 |
13 | 请先看配置文件,稍后整理成文档。
14 |
--------------------------------------------------------------------------------
/bind/china-edu-acl.cfg:
--------------------------------------------------------------------------------
1 | acl china-edu-beijing {
2 | 162.105.0.0/16;
3 | 166.111.0.0/16;
4 | };
5 |
--------------------------------------------------------------------------------
/bind/china-edu/jizhihuwai.com.zone:
--------------------------------------------------------------------------------
1 | $TTL 600
2 | $ORIGIN jizhihuwai.com.
3 | @ IN SOA jizhihuwai.com. mail.jizhihuwai.com. (
4 | 2013010401 ; serial
5 | 10800 ; refresh (3 hours)
6 | 900 ; retry (15 minutes)
7 | 604800 ; expire (1 week)
8 | 86400 ; minimum (1 day)
9 | )
10 |
11 |
12 | IN NS ns1.jizhihuwai.com.
13 | IN NS ns2.jizhihuwai.com.
14 |
15 | ns1 A 10.210.214.113
16 | ns2 A 10.210.214.163
17 |
18 | @ A 10.210.214.163
19 | blog A 10.210.214.113
20 | wiki CNAME jizhihuwai.com.
21 | * A 10.210.214.163
22 |
--------------------------------------------------------------------------------
/bind/china-mobile-acl.cfg:
--------------------------------------------------------------------------------
1 | acl china-mobile-beijing {
2 | 211.136.17.97/32;
3 | 211.136.17.98/32;
4 | };
5 |
--------------------------------------------------------------------------------
/bind/china-mobile/jizhihuwai.com.zone:
--------------------------------------------------------------------------------
1 | $TTL 600
2 | $ORIGIN jizhihuwai.com.
3 | @ IN SOA jizhihuwai.com. mail.jizhihuwai.com. (
4 | 2013010401 ; serial
5 | 10800 ; refresh (3 hours)
6 | 900 ; retry (15 minutes)
7 | 604800 ; expire (1 week)
8 | 86400 ; minimum (1 day)
9 | )
10 |
11 |
12 | IN NS ns1.jizhihuwai.com.
13 | IN NS ns2.jizhihuwai.com.
14 |
15 | ns1 A 10.210.214.113
16 | ns2 A 10.210.214.163
17 |
18 | @ A 10.210.214.163
19 | blog A 10.210.214.113
20 | wiki CNAME jizhihuwai.com.
21 | * A 10.210.214.163
22 |
--------------------------------------------------------------------------------
/bind/china-telecom-acl.cfg:
--------------------------------------------------------------------------------
1 | acl china-telecom-anhui {
2 | 60.166.0.0/15;
3 | 60.168.0.0/13;
4 | };
5 |
6 | acl china-telecom-beijing {
7 | 58.83.128.0/18;
8 | 60.194.0.0/15;
9 | };
10 |
11 | # much more other ip address
12 | # this is just an example
13 |
--------------------------------------------------------------------------------
/bind/china-telecom/jizhihuwai.com.zone:
--------------------------------------------------------------------------------
1 | $TTL 600
2 | $ORIGIN jizhihuwai.com.
3 | @ IN SOA jizhihuwai.com. mail.jizhihuwai.com. (
4 | 2013010401 ; serial
5 | 10800 ; refresh (3 hours)
6 | 900 ; retry (15 minutes)
7 | 604800 ; expire (1 week)
8 | 86400 ; minimum (1 day)
9 | )
10 |
11 |
12 | IN NS ns1.jizhihuwai.com.
13 | IN NS ns2.jizhihuwai.com.
14 |
15 | ns1 A 10.210.214.113
16 | ns2 A 10.210.214.163
17 |
18 | @ A 10.210.214.163
19 | blog A 10.210.214.113
20 | wiki CNAME jizhihuwai.com.
21 | * A 10.210.214.163
22 |
--------------------------------------------------------------------------------
/bind/china-unicom-acl.cfg:
--------------------------------------------------------------------------------
1 | acl china-unicom-beijing {
2 | 59.193.0.0/20;
3 | 61.48.0.0/14;
4 | };
5 |
--------------------------------------------------------------------------------
/bind/china-unicom/jizhihuwai.com.zone:
--------------------------------------------------------------------------------
1 | $TTL 600
2 | $ORIGIN jizhihuwai.com.
3 | @ IN SOA jizhihuwai.com. mail.jizhihuwai.com. (
4 | 2013010401 ; serial
5 | 10800 ; refresh (3 hours)
6 | 900 ; retry (15 minutes)
7 | 604800 ; expire (1 week)
8 | 86400 ; minimum (1 day)
9 | )
10 |
11 |
12 | IN NS ns1.jizhihuwai.com.
13 | IN NS ns2.jizhihuwai.com.
14 |
15 | ns1 A 10.210.214.113
16 | ns2 A 10.210.214.163
17 |
18 | @ A 10.210.214.163
19 | blog A 10.210.214.113
20 | wiki CNAME jizhihuwai.com.
21 | * A 10.210.214.163
22 |
--------------------------------------------------------------------------------
/bind/db.0:
--------------------------------------------------------------------------------
1 | ;
2 | ; BIND reverse data file for broadcast zone
3 | ;
4 | $TTL 604800
5 | @ IN SOA localhost. root.localhost. (
6 | 1 ; Serial
7 | 604800 ; Refresh
8 | 86400 ; Retry
9 | 2419200 ; Expire
10 | 604800 ) ; Negative Cache TTL
11 | ;
12 | @ IN NS localhost.
13 |
--------------------------------------------------------------------------------
/bind/db.127:
--------------------------------------------------------------------------------
1 | ;
2 | ; BIND reverse data file for local loopback interface
3 | ;
4 | $TTL 604800
5 | @ IN SOA localhost. root.localhost. (
6 | 1 ; Serial
7 | 604800 ; Refresh
8 | 86400 ; Retry
9 | 2419200 ; Expire
10 | 604800 ) ; Negative Cache TTL
11 | ;
12 | @ IN NS localhost.
13 | 1.0.0 IN PTR localhost.
14 |
--------------------------------------------------------------------------------
/bind/db.255:
--------------------------------------------------------------------------------
1 | ;
2 | ; BIND reverse data file for broadcast zone
3 | ;
4 | $TTL 604800
5 | @ IN SOA localhost. root.localhost. (
6 | 1 ; Serial
7 | 604800 ; Refresh
8 | 86400 ; Retry
9 | 2419200 ; Expire
10 | 604800 ) ; Negative Cache TTL
11 | ;
12 | @ IN NS localhost.
13 |
--------------------------------------------------------------------------------
/bind/db.empty:
--------------------------------------------------------------------------------
1 | ; BIND reverse data file for empty rfc1918 zone
2 | ;
3 | ; DO NOT EDIT THIS FILE - it is used for multiple zones.
4 | ; Instead, copy it, edit named.conf, and use that copy.
5 | ;
6 | $TTL 86400
7 | @ IN SOA localhost. root.localhost. (
8 | 1 ; Serial
9 | 604800 ; Refresh
10 | 86400 ; Retry
11 | 2419200 ; Expire
12 | 86400 ) ; Negative Cache TTL
13 | ;
14 | @ IN NS localhost.
15 |
--------------------------------------------------------------------------------
/bind/db.local:
--------------------------------------------------------------------------------
1 | ;
2 | ; BIND data file for local loopback interface
3 | ;
4 | $TTL 604800
5 | @ IN SOA localhost. root.localhost. (
6 | 2 ; Serial
7 | 604800 ; Refresh
8 | 86400 ; Retry
9 | 2419200 ; Expire
10 | 604800 ) ; Negative Cache TTL
11 | ;
12 | @ IN NS localhost.
13 | @ IN A 127.0.0.1
14 | @ IN AAAA ::1
15 |
--------------------------------------------------------------------------------
/bind/jizhihuwai.com.zone:
--------------------------------------------------------------------------------
1 | $TTL 600
2 | $ORIGIN jizhihuwai.com.
3 | @ IN SOA jizhihuwai.com. mail.jizhihuwai.com. (
4 | 2013010401 ; serial
5 | 10800 ; refresh (3 hours)
6 | 900 ; retry (15 minutes)
7 | 604800 ; expire (1 week)
8 | 86400 ; minimum (1 day)
9 | )
10 |
11 |
12 | IN NS ns1.jizhihuwai.com.
13 | IN NS ns2.jizhihuwai.com.
14 |
15 | ns1 A 10.210.214.113
16 | ns2 A 10.210.214.163
17 |
18 | @ A 10.210.214.163
19 | blog A 10.210.214.113
20 | wiki CNAME jizhihuwai.com.
21 | * A 10.210.214.163
22 |
--------------------------------------------------------------------------------
/bind/named.conf:
--------------------------------------------------------------------------------
1 | options {
2 | directory "/usr/local/bind";
3 | dump-file "/usr/local/bind/data/cache_dump.db";
4 | statistics-file "/usr/local/bind/data/named_stats.txt";
5 | allow-transfer { none; };
6 | };
7 |
8 | include "etc/rndc.key";
9 | include "etc/china-edu-acl.cfg";
10 | include "etc/china-mobile-acl.cfg";
11 | include "etc/china-telecom-acl.cfg";
12 | include "etc/china-unicom-acl.cfg";
13 |
14 | view "china-edu" {
15 | match-clients {
16 | china-edu-beijing;
17 | };
18 |
19 | include "etc/named.conf.default-zones";
20 |
21 | zone "jizhihuwai.com" IN {
22 | type master;
23 | file "etc/china-edu/jizhihuwai.com.zone";
24 | };
25 | };
26 |
27 | view "china-mobile" {
28 | match-clients {
29 | china-mobile-beijing;
30 | };
31 |
32 | include "etc/named.conf.default-zones";
33 |
34 | zone "jizhihuwai.com" IN {
35 | type master;
36 | file "etc/china-mobile/jizhihuwai.com.zone";
37 | };
38 | };
39 |
40 | view "china-telecom" {
41 | match-clients {
42 | china-telecom-anhui;
43 | china-telecom-beijing;
44 | };
45 |
46 | include "etc/named.conf.default-zones";
47 |
48 | zone "jizhihuwai.com" IN {
49 | type master;
50 | file "etc/china-telecom/jizhihuwai.com.zone";
51 | };
52 | };
53 |
54 | view "china-unicom" {
55 | match-clients {
56 | china-unicom-beijing;
57 | };
58 |
59 | include "etc/named.conf.default-zones";
60 |
61 | zone "jizhihuwai.com" IN {
62 | type master;
63 | file "etc/china-unicom/jizhihuwai.com.zone";
64 | };
65 | };
66 |
67 | view "other" {
68 | match-clients { "any"; };
69 |
70 | include "etc/named.conf.default-zones";
71 |
72 | zone "jizhihuwai.com" IN {
73 | type master;
74 | file "etc/jizhihuwai.com.zone";
75 | };
76 | };
77 |
--------------------------------------------------------------------------------
/bind/named.conf.default-zones:
--------------------------------------------------------------------------------
1 | // prime the server with knowledge of the root servers
2 | zone "." {
3 | type hint;
4 | file "etc/db.root";
5 | };
6 |
7 | // be authoritative for the localhost forward and reverse zones, and for
8 | // broadcast zones as per RFC 1912
9 |
10 | zone "localhost" {
11 | type master;
12 | file "etc/db.local";
13 | };
14 |
15 | zone "127.in-addr.arpa" {
16 | type master;
17 | file "etc/db.127";
18 | };
19 |
20 | zone "0.in-addr.arpa" {
21 | type master;
22 | file "etc/db.0";
23 | };
24 |
25 | zone "255.in-addr.arpa" {
26 | type master;
27 | file "etc/db.255";
28 | };
29 |
30 | // Consider adding the 1918 zones here, if they are not used in your
31 | // organization
32 | //include "etc/zones.rfc1918";
33 |
--------------------------------------------------------------------------------
/bind/rndc.key:
--------------------------------------------------------------------------------
1 | key "rndc-key" {
2 | algorithm hmac-md5;
3 | secret "1c1VGeSK4bWW9NuFXKZ7gQ==";
4 | };
5 |
--------------------------------------------------------------------------------
/bind/zones.rfc1918:
--------------------------------------------------------------------------------
1 | zone "10.in-addr.arpa" { type master; file "etc/db.empty"; };
2 |
3 | zone "16.172.in-addr.arpa" { type master; file "etc/db.empty"; };
4 | zone "17.172.in-addr.arpa" { type master; file "etc/db.empty"; };
5 | zone "18.172.in-addr.arpa" { type master; file "etc/db.empty"; };
6 | zone "19.172.in-addr.arpa" { type master; file "etc/db.empty"; };
7 | zone "20.172.in-addr.arpa" { type master; file "etc/db.empty"; };
8 | zone "21.172.in-addr.arpa" { type master; file "etc/db.empty"; };
9 | zone "22.172.in-addr.arpa" { type master; file "etc/db.empty"; };
10 | zone "23.172.in-addr.arpa" { type master; file "etc/db.empty"; };
11 | zone "24.172.in-addr.arpa" { type master; file "etc/db.empty"; };
12 | zone "25.172.in-addr.arpa" { type master; file "etc/db.empty"; };
13 | zone "26.172.in-addr.arpa" { type master; file "etc/db.empty"; };
14 | zone "27.172.in-addr.arpa" { type master; file "etc/db.empty"; };
15 | zone "28.172.in-addr.arpa" { type master; file "etc/db.empty"; };
16 | zone "29.172.in-addr.arpa" { type master; file "etc/db.empty"; };
17 | zone "30.172.in-addr.arpa" { type master; file "etc/db.empty"; };
18 | zone "31.172.in-addr.arpa" { type master; file "etc/db.empty"; };
19 |
20 | zone "168.192.in-addr.arpa" { type master; file "etc/db.empty"; };
21 |
--------------------------------------------------------------------------------
/c/warning-error-solution.md:
--------------------------------------------------------------------------------
1 | # Warnings and Errors
2 |
3 | ### warning: function declaration isn't a prototype
4 |
5 | You may need to change `int foo()` to `int foo(void)`.
6 |
7 | In C int foo() and int foo(void) are different functions. int foo() accepts an arbitrary number of arguments, while int foo(void) accepts 0 arguments. In C++ they mean the same thing. I suggest that you use void consistently when you mean no arguments.
8 |
9 | If you have a variable a, extern int a; is a way to tell the compiler that a is a symbol that might be present in a different translation unit (C compiler speak for source file), don't resolve it until link time. On the other hand, symbols which are function names are anyway resolved at link time. The meaning of a storage class specifier on a function (extern, static) only affects its visibility and extern is the default, so extern is actually unnecessary.
10 |
11 | Link:
12 |
13 | ### warning: implicit declaration of function func_name
14 |
15 | Add the function declaration at the top of the file.
16 |
17 | ```
18 | #include <stdio.h>
19 |
20 | // function declaration
21 | int addNumbers(int, int);
22 |
23 | int main()
24 | {
25 | addNumbers(a, b);
26 | }
27 |
28 | int addNumbers(int a, int b)
29 | {
30 | // definition
31 | }
32 | ```
33 |
34 | Link:
35 |
--------------------------------------------------------------------------------
/centos/README.md:
--------------------------------------------------------------------------------
1 | # CentOS Tips
2 |
3 | ## Install essential packages
4 |
5 | ```
6 | # yum -y install vim autoconf automake make cmake gcc gcc-c++ gdb telnet nmap nfs-utils rsync wget createrepo rpm-build rpm-sign cpio tcpdump sysstat subversion git strace python-setuptools ppp pptp gnupg fuse ntp ntpdate tree net-tools screen
7 | ```
8 |
9 | ## Disable SELinux
10 |
11 | ```
12 | # sed -i 's/^SELINUX=.*$/SELINUX=disabled/g' /etc/selinux/config
13 | ```
14 |
15 | ## Disable Firewall
16 |
17 | ```
18 | # systemctl stop firewalld.service iptables.service
19 | # systemctl disable firewalld.service iptables.service
20 | ```
21 |
22 | ## Set DNS Server
23 |
--------------------------------------------------------------------------------
/ceph/deployment-nginx.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Service
4 | metadata:
5 | name: nginx
6 | spec:
7 | selector:
8 | app: nginx
9 | type: NodePort
10 | ports:
11 | - protocol: TCP
12 | port: 80
13 | targetPort: 80
14 |
15 | ---
16 | apiVersion: extensions/v1beta1
17 | kind: Deployment
18 | metadata:
19 | name: nginx
20 | spec:
21 | replicas: 1
22 | template:
23 | metadata:
24 | labels:
25 | app: nginx
26 | spec:
27 | containers:
28 | - name: nginx
29 | image: siji/nginx:1.12.2
30 | ports:
31 | - containerPort: 80
32 | volumeMounts:
33 | - name: nginx
34 | mountPath: /var/www/html
35 | volumes:
36 | - name: nginx
37 | persistentVolumeClaim:
38 | claimName: nginx
39 |
40 | ---
41 | kind: PersistentVolumeClaim
42 | apiVersion: v1
43 | metadata:
44 | name: nginx
45 | spec:
46 | volumeMode: Filesystem
47 | storageClassName: ceph-rbd
48 | accessModes:
49 | - ReadWriteOnce
50 | resources:
51 | requests:
52 | storage: 1Gi
53 |
--------------------------------------------------------------------------------
/ceph/storage-class-ceph-rbd.yaml:
--------------------------------------------------------------------------------
1 | kind: StorageClass
2 | apiVersion: storage.k8s.io/v1
3 | metadata:
4 | name: ceph-rbd
5 | provisioner: kubernetes.io/rbd
6 | parameters:
7 | monitors: 9.30.255.51:6789
8 | adminId: admin
9 | adminSecretName: ceph-admin-secret
10 | adminSecretNamespace: kube-system
11 | pool: kube
12 | userId: admin
13 | userSecretName: ceph-admin-secret
14 | userSecretNamespace: kube-system
15 | fsType: ext4
16 | imageFormat: "2"
17 | imageFeatures: "layering"
18 |
--------------------------------------------------------------------------------
/chef/client.rb:
--------------------------------------------------------------------------------
1 | log_level :auto
2 | log_location STDOUT
3 | chef_server_url "https://server.chef.com"
4 | validation_client_name "chef-validator"
5 | # Using default node name (fqdn)
6 |
--------------------------------------------------------------------------------
/chef/knife.rb:
--------------------------------------------------------------------------------
1 | log_level :info
2 | log_location STDOUT
3 | node_name 'admin'
4 | client_key '/var/chef/chef-repo/.chef/admin.pem'
5 | validation_client_name 'chef-validator'
6 | validation_key '/var/chef/chef-repo/.chef/chef-validator.pem'
7 | chef_server_url 'https://server.chef.com'
8 | cache_type 'BasicFile'
9 | syntax_check_cache_path '/var/chef/chef-repo/.chef/syntax_check_cache'
10 | cookbook_path [ '/var/chef/chef-repo/cookbooks' ]
11 |
--------------------------------------------------------------------------------
/chrome/README.md:
--------------------------------------------------------------------------------
1 | # Debug JS
2 |
3 | Debug JS code with chrome dev tool.
4 |
5 |
6 | ## Open the URL and Console
7 |
8 | https://googlechrome.github.io/devtools-samples/debug-js/get-started
9 |
10 | 1. Click source tab
11 | 2. Select the source file in left panel tree
12 | 3. Create a break point in the line of source code editor
13 | 4. Show the console drawer in the right corner
14 |
15 | 
16 |
17 |
18 | ## Start debugging
19 |
20 | Click the button on the web, manually apply code fix in console.
21 |
22 | 1. Resume code execution
23 | 2. Edit source code in code editor and ctrl+s
24 | 3. Deactivate the break point
25 |
26 | 
27 |
28 |
29 | ## Click and run on web page
30 |
31 | Start verification on web page.
32 |
33 | 
34 |
--------------------------------------------------------------------------------
/chrome/img/chrome1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/chrome/img/chrome1.png
--------------------------------------------------------------------------------
/chrome/img/chrome2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/chrome/img/chrome2.png
--------------------------------------------------------------------------------
/chrome/img/chrome3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/chrome/img/chrome3.png
--------------------------------------------------------------------------------
/confd/README.md:
--------------------------------------------------------------------------------
1 | # Confd 是个好东西
2 |
3 | 可以用 Confd 去监控 Etcd 里某个 directory 或 keys 的改动,然后生成相应的配置文件并重新 reload 一下服务。
4 |
5 | 举个例子:将 nginx 的监听端口作为一个 key 放在 Etcd 里,当这个 key 改变时就自动 reload nginx 。
6 |
7 | ## 安装部署步骤
8 |
9 | ### 下载 Confd
10 |
11 | ```
12 | # wget https://github.com/kelseyhightower/confd/releases/download/v0.11.0/confd-0.11.0-linux-amd64
13 | # chmod +x confd-0.11.0-linux-amd64
14 | ```
15 |
16 | ### 生成 Confd 配置文件`/etc/confd/conf.d/nginx.toml`
17 |
18 | ```
19 | [template]
20 |
21 | # The name of the template that will be used to render the application's configuration file
22 | # Confd will look in `/etc/conf.d/templates` for these files by default
23 | src = "nginx.tmpl"
24 |
25 | # The location to place the rendered configuration file
26 | dest = "/etc/nginx/conf.d/app.conf"
27 |
28 | # The etcd keys or directory to watch. This is where the information to fill in
29 | # the template will come from.
30 | keys = [ "/nginx" ]
31 |
32 | # File ownership and mode information
33 | owner = "root"
34 | mode = "0644"
35 |
36 | # These are the commands that will be used to check whether the rendered config is
37 | # valid and to reload the actual service once the new config is in place
38 | check_cmd = "/usr/sbin/nginx -t"
39 | reload_cmd = "/usr/sbin/service nginx reload"
40 | ```
41 |
42 | ### 生成 Confd 模板文件`/etc/confd/templates/nginx.tmpl`
43 |
44 | ```
45 | server {
46 | listen {{ getv "/nginx/port" }};
47 | location / {
48 | index index.html;
49 | root /usr/share/nginx/html/;
50 | }
51 | }
52 | ```
53 |
54 | ### 给 `/nginx/port` 赋值
55 |
56 | ```
57 | # etcdctl set /nginx/port 8080
58 | ```
59 |
60 | ### 启动 Confd
61 |
62 | ```
63 | # ./confd-0.11.0-linux-amd64 -watch -backend etcd -node http://127.0.0.1:4001
64 | ```
65 |
66 |
67 | ## 使用 Confd
68 |
69 | From the first command you will see that Nginx listens on 8080.
70 |
71 | The second command is to set the key `/nginx/port` to 8000.
72 |
73 | From the third command you will see that Nginx listens on 8000.
74 |
75 | ```
76 | # netstat -tnlp | grep 8080 # will show nginx listen 8080 port
77 | # etcdctl set /nginx/port 8000
78 | # netstat -tnlp | grep 8000 # will show nginx listen 8000 port
79 | ```
80 |
--------------------------------------------------------------------------------
/coredns/README.md:
--------------------------------------------------------------------------------
1 | # CoreDNS
2 |
3 | CoreDNS is a flexible DNS server.
4 |
5 | ## Binary
6 |
7 | ```
8 | coredns -conf /etc/coredns/Corefile
9 | ```
10 |
11 | ## Docker image
12 |
13 | ```
14 | docker run -d --net host --name coredns -v /etc/coredns:/etc/coredns coredns/coredns:1.2.2 -conf /etc/coredns/Corefile
15 | ```
16 |
--------------------------------------------------------------------------------
/coredns/coredns/Corefile:
--------------------------------------------------------------------------------
1 | # Snippet, can be used by `import`
2 | (common-snip) {
3 | log
4 | errors
5 |
6 | # reload every 24 hours
7 | reload 24h
8 |
9 | # bind address
10 | bind 0.0.0.0
11 |
12 | # forward . 1.1.1.1 8.8.8.8 9.9.9.9
13 | forward . /etc/resolv.conf
14 | }
15 |
16 | # DNS Server for a specific domain
17 | youya.org:53 {
18 | file /etc/coredns/zones/youya.org.zone
19 |
20 | import common-snip
21 | }
22 |
23 | # DNS Server
24 | .:53 {
25 | # hosts plugin
26 | # use both /etc/hosts and following inline hosts
27 | hosts {
28 | 192.168.122.11 node1.internal
29 | 192.168.122.12 node2.internal
30 | 192.168.122.22 node2.internal
31 | 192.168.122.32 node2.internal
32 | ttl 3600
33 | fallthrough
34 | }
35 |
36 | # template plugin
37 | # 1.2.3.4.noip -> 1.2.3.4
38 | # ip-1-2-3-4.noip -> 1.2.3.4
39 | template ANY ANY {
40 | match "^(?P<a>[0-9]*)\.(?P<b>[0-9]*)\.(?P<c>[0-9]*)\.(?P<d>[0-9]*)\.noip[.]$"
41 | match "^ip-(?P<a>[0-9]*)-(?P<b>[0-9]*)-(?P<c>[0-9]*)-(?P<d>[0-9]*)\.noip[.]$"
42 | answer "{{ .Name }} 60 IN A {{ .Group.a }}.{{ .Group.b }}.{{ .Group.c }}.{{ .Group.d }}"
43 | fallthrough
44 | }
45 |
46 | # template plugin
47 | # prefix.xxx.xx -> 10.1.2.3
48 | template ANY ANY {
49 | match "^prefix\."
50 | answer "{{ .Name }} 60 IN A 10.1.2.3"
51 | fallthrough
52 | }
53 |
54 | import common-snip
55 | }
56 |
57 | # DOH DNS Server
58 | https://. {
59 | tls /etc/coredns/pki/tls.crt /etc/coredns/pki/tls.key
60 |
61 | import common-snip
62 | }
63 |
--------------------------------------------------------------------------------
/coredns/coredns/hosts.d/file.hosts:
--------------------------------------------------------------------------------
1 | 127.0.0.1 node1.file
2 | 127.0.0.2 node2.file
3 | 127.0.0.3 node3.file
4 |
--------------------------------------------------------------------------------
/coredns/coredns/youya.org.zone:
--------------------------------------------------------------------------------
1 | $TTL 600
2 | $ORIGIN youya.org.
3 | @ IN SOA youya.org. mail.youya.org. (
4 | 2013010401 ; serial
5 | 10800 ; refresh (3 hours)
6 | 900 ; retry (15 minutes)
7 | 604800 ; expire (1 week)
8 | 86400 ; minimum (1 day)
9 | )
10 |
11 |
12 | @ A 10.210.214.163
13 | blog A 10.210.214.113
14 | blog A 10.210.214.114
15 | wiki CNAME youya.github.io.
16 | * A 10.210.214.163
17 |
--------------------------------------------------------------------------------
/debian/binary/DEBIAN/changelog:
--------------------------------------------------------------------------------
1 | mesos (0.25.0-1) unstable; urgency=low
2 |
3 | * Initial build of Mesos debian package
4 |
5 | -- Chen Zhiwei <zhiweik@gmail.com>  Fri, 11 Sep 2015 16:44:44 +0800
6 |
--------------------------------------------------------------------------------
/debian/binary/DEBIAN/control:
--------------------------------------------------------------------------------
1 | Package: mesos
2 | Version: 0.25.0-1
3 | Architecture: amd64
4 | Maintainer: Chen Zhiwei <zhiweik@gmail.com>
5 | Depends: java-runtime-headless, libcurl3, libsvn1, libsasl2-modules
6 | Section: misc
7 | Priority: extra
8 | Description: Cluster resource manager with efficient resource isolation
9 | Apache Mesos is a cluster manager that offers efficient resource isolation
10 | and sharing across distributed applications, or frameworks. It can run
11 | Hadoop, MPI, Hypertable, Spark (a new framework for low-latency interactive
12 | and iterative jobs), and other applications.
13 |
--------------------------------------------------------------------------------
/debian/how_to_build_deb_repo.md:
--------------------------------------------------------------------------------
1 | # 怎样创建 deb 仓库
2 |
3 | ## 安装 reprepro
4 |
5 | ```
6 | # apt-get install reprepro
7 | ```
8 |
9 | ## 创建配置文件
10 |
11 | ```
12 | $ mkdir conf
13 | $ vim conf/distributions
14 | Origin: Apache Mesos
15 | Label: Apache Mesos
16 | Codename: trusty
17 | Architectures: i386 amd64
18 | Components: main
19 | Description: Apache Mesos debian repository
20 | # SignWith: yes
21 | ```
22 |
23 | ## 创建仓库
24 |
25 | ```
26 | $ reprepro --outdir=/var/www/html/mesos_repo includedeb trusty /path/to/mesos_0.25.0-1_amd64.deb
27 | ```
28 |
29 | 然后将 HTTP Server 的 htdocs 目录指向`/var/www/html/mesos_repo`就可以了。
30 |
31 | ## 给 deb 包签名
32 |
33 | 这个以后再说吧。
34 |
--------------------------------------------------------------------------------
/debian/install-conffiles.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/debian/install-conffiles.png
--------------------------------------------------------------------------------
/debian/install.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/debian/install.png
--------------------------------------------------------------------------------
/debian/purge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/debian/purge.png
--------------------------------------------------------------------------------
/debian/remove-purge.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/debian/remove-purge.png
--------------------------------------------------------------------------------
/debian/remove.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/debian/remove.png
--------------------------------------------------------------------------------
/debian/source/debian/changelog:
--------------------------------------------------------------------------------
1 | mesos (0.25.0-1) unstable; urgency=low
2 |
3 | * Initial build of Mesos debian package
4 |
5 | -- Chen Zhiwei <zhiweik@gmail.com>  Fri, 11 Sep 2015 16:44:44 +0800
6 |
--------------------------------------------------------------------------------
/debian/source/debian/compat:
--------------------------------------------------------------------------------
1 | 9
2 |
--------------------------------------------------------------------------------
/debian/source/debian/control:
--------------------------------------------------------------------------------
1 | Source: mesos
2 | Section: misc
3 | Priority: extra
4 | Maintainer: Chen Zhiwei <zhiweik@gmail.com>
5 | Build-Depends: debhelper (>= 9)
6 | Standards-Version: 0.25.0
7 | Homepage: http://mesos.apache.org
8 |
9 | Package: mesos
10 | Architecture: amd64
11 | Depends: ${shlibs:Depends}, ${misc:Depends}, java-runtime-headless, libcurl3, libsvn1, libsasl2-modules
12 | Description: Cluster resource manager with efficient resource isolation
13 | Apache Mesos is a cluster manager that offers efficient resource isolation
14 | and sharing across distributed applications, or frameworks. It can run
15 | Hadoop, MPI, Hypertable, Spark (a new framework for low-latency interactive
16 | and iterative jobs), and other applications.
17 |
--------------------------------------------------------------------------------
/debian/source/debian/copyright:
--------------------------------------------------------------------------------
1 | Apache License v2.0
2 |
--------------------------------------------------------------------------------
/debian/source/debian/rules:
--------------------------------------------------------------------------------
1 | #!/usr/bin/make -f
2 | # -*- makefile -*-
3 | # Sample debian/rules that uses debhelper.
4 | # This file was originally written by Joey Hess and Craig Small.
5 | # As a special exception, when this file is copied by dh-make into a
6 | # dh-make output file, you may use that output file without restriction.
7 | # This special exception was added by Craig Small in version 0.37 of dh-make.
8 |
9 | # Uncomment this to turn on verbose mode.
10 | # export DH_VERBOSE=1
11 |
12 | override_dh_auto_configure:
13 | ./bootstrap
14 | mkdir -p build
15 | cd build && ../configure --prefix=/usr/apache/mesos
16 |
17 | override_dh_auto_build:
18 | cd build && $(MAKE)
19 |
20 | # Do not make check
21 | override_dh_auto_test:
22 | echo Skip make check
23 |
24 | override_dh_auto_install:
25 | cd build && $(MAKE) install DESTDIR=$(CURDIR)/debian/mesos
26 |
27 | %:
28 | dh $@
29 |
--------------------------------------------------------------------------------
/debian/source/debian/rules.standard:
--------------------------------------------------------------------------------
1 | #!/usr/bin/make -f
2 | # -*- makefile -*-
3 | # Sample debian/rules that uses debhelper.
4 | # This file was originally written by Joey Hess and Craig Small.
5 | # As a special exception, when this file is copied by dh-make into a
6 | # dh-make output file, you may use that output file without restriction.
7 | # This special exception was added by Craig Small in version 0.37 of dh-make.
8 |
9 | # Uncomment this to turn on verbose mode.
10 | export DH_VERBOSE=1
11 |
12 | override_dh_auto_configure:
13 | ./bootstrap
14 | dh_auto_configure -- --prefix=/usr/apache/mesos
15 |
16 | override_dh_auto_build:
17 | dh_auto_build -- -j4 V=0
18 |
19 | # Do not make check
20 | override_dh_auto_test:
21 | echo Skip make check
22 |
23 | override_dh_auto_install:
24 | dh_auto_install
25 |
26 | %:
27 | dh $@
28 |
--------------------------------------------------------------------------------
/debian/upgrade.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/debian/upgrade.png
--------------------------------------------------------------------------------
/dns/README.md:
--------------------------------------------------------------------------------
1 | # DNS
2 |
3 | ## Anycast
4 |
5 | ## 注意事项
6 |
7 | 根据[RFC 1034][rfc-1034] 文档说明,当DNS查询一个域名非CNAME记录外的其他记录时,如果本地DNS缓存里存在了该域名的CNAME记录,那么就直接用该域名CNAME记录去继续查找。所以最好不要给一个域名同时设置CNAME和MX记录。
8 |
9 | 比如`chenzhiwei.cn`的CNAME指向了`jizhihuwai.com`,MX记录指向了`mx.jizhihuwai.com`:
10 |
11 | ```
12 | chenzhiwei.cn CNAME jizhihuwai.com
13 | chenzhiwei.cn MX mx.jizhihuwai.com
14 | ```
15 |
16 | 当你先查询`chenzhiwei.cn`的CNAME记录时,DNS会缓存下来`jizhihuwai.com`。再次查询MX记录时,DNS会直接用缓存下来的`jizhihuwai.com`来继续查找MX记录,这样一来`chenzhiwei.cn`的MX记录就变成`jizhihuwai.com`的MX记录了。。。
17 |
18 | [rfc-1034]: https://tools.ietf.org/html/rfc1034
19 |
--------------------------------------------------------------------------------
/docker/daemon.json:
--------------------------------------------------------------------------------
1 | {
2 | "registry-mirrors": [
3 | "https://docker.mirrors.ustc.edu.cn",
4 | "https://registry.cn-hangzhou.aliyuncs.com"
5 | ]
6 | }
7 |
--------------------------------------------------------------------------------
/drbd/drbd.conf:
--------------------------------------------------------------------------------
1 | global {
2 | usage-count yes;
3 | }
4 |
5 | common {
6 | protocol C;
7 | syncer { rate 10M; }
8 | }
9 |
10 | resource r0 {
11 | net {
12 | cram-hmac-alg sha1;
13 | shared-secret "FooBar";
14 | }
15 | on host1 {
16 | device /dev/drbd0;
17 | disk /dev/vdb;
18 | address 192.168.100.1:7898;
19 | meta-disk internal;
20 | }
21 | on host2{
22 | device /dev/drbd0;
23 | disk /dev/vdb;
24 | address 192.168.100.2:7898;
25 | meta-disk internal;
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/fdisk/README.md:
--------------------------------------------------------------------------------
1 | # Format Disk
2 |
3 | Use `fdisk` to increase the partition size.
4 |
5 | ```
6 | fdisk /dev/sda
7 |
8 | > d (delete a partition)
9 | > 1 (input a partition number)
10 | > n (create a new partition)
11 | > 1 (input a partition number)
12 | > (enter)
13 | > (enter)
14 | > (enter)
15 | > w (write to partition table)
16 |
17 | resize2fs /dev/sda1
18 | ```
19 |
--------------------------------------------------------------------------------
/ffmpeg/README.md:
--------------------------------------------------------------------------------
1 | # FFMPEG
2 |
3 | ## Convert Format
4 |
5 | ```
6 | ffmpeg -i input.m4a -acodec mp3 -ac 2 -ab 192k output.mp3
7 |
8 | for file in *.wma; do ffmpeg -i "${file}" -acodec libmp3lame -ab 192k "${file/.wma/.mp3}"; done
9 | ```
10 |
11 | ## Turn up the file volume
12 |
13 | Add 10dB volume to the input.mp3
14 |
15 | ```
16 | ffmpeg -i input.mp3 -af volume=10dB output.mp3
17 | ```
18 |
--------------------------------------------------------------------------------
/gcc/README.md:
--------------------------------------------------------------------------------
1 | # GCC
2 |
3 | ## Typical C++ Makefile
4 |
5 | ```
6 | CXX = g++
7 | CXXFLAGS = -g -O0 -pthread -std=c++0x
8 | LDFLAGS += -lmesos -lpthread -lprotobuf
9 | CXXCOMPILE = $(CXX) $(INCLUDES) $(CXXFLAGS) -c -o $@
10 | ME_INCLUDES = -I/usr/local/mesos/include/ -L/usr/local/mesos/build/src/.libs/
11 | CXXLINK = $(CXX) $(INCLUDES) $(ME_INCLUDES) $(CXXFLAGS) -o $@
12 |
13 | default: all
14 | all: rendler crawl_executor render_executor
15 |
16 | HEADERS = rendler_helper.hpp
17 |
18 |
19 | crawl_executor: crawl_executor.cpp $(HEADERS)
20 | $(CXXLINK) $< $(LDFLAGS) -lboost_regex -lcurl
21 |
22 | %: %.cpp $(HEADERS)
23 | $(CXXLINK) $< $(LDFLAGS)
24 |
25 | clean:
26 | (rm -f core crawl_executor render_executor rendler)
27 | ```
28 |
29 | * -O0 means no compiler Optimization, usually used in debug mode.
30 | * -lmesos means dynamic libs, i.e. `libmesos.so`
31 | * -I/dir the /dir contains header files, i.e. `*.h`
32 | * -L/dir the /dir contains `*.so` or `*.a` file, i.e. `libmesos.so`
33 |
--------------------------------------------------------------------------------
/gdb/dot_gdbinit:
--------------------------------------------------------------------------------
1 | # GDB .gdbinit file
2 |
3 | # disable clumsy paging (use terminal scrollback buffer instead)
4 | set height 0
5 |
6 | # log gdb output (defaults to gdb.txt in current directory)
7 | # set logging on
8 | # set logging file ~/.gdb/gdb_logging
9 |
10 | # gdb history
11 | set history save
12 | set history filename ~/.gdb/gdb_history
13 | set history size 10000
14 |
15 | # C++ related beautifiers (optional)
16 | set print pretty on
17 | set print object on
18 | set print static-members on
19 | set print vtbl on
20 | set print demangle on
21 | set demangle-style gnu-v3
22 | set print sevenbit-strings off
23 |
24 | set follow-fork-mode child
25 | set detach-on-fork off
26 |
27 | # Custom commands
28 | def pw
29 | if $argc == 0
30 | help pw
31 | else
32 | set print elements 0
33 | p $arg0
34 | set print elements 200
35 | end
36 | end
37 |
38 | document pw
39 | Print wide string
40 | end
41 |
42 | # Enable Pretty Printer
43 | python
44 | import os
45 | import sys
46 | home = os.path.expanduser("~")
47 | sys.path.insert(0, home + '/.gdb/libstdcxx')
48 | from v6.printers import register_libstdcxx_printers
49 | register_libstdcxx_printers(None)
50 | end
51 |
--------------------------------------------------------------------------------
/gdb/libstdcxx/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/gdb/libstdcxx/__init__.py
--------------------------------------------------------------------------------
/gdb/libstdcxx/v6/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2014-2015 Free Software Foundation, Inc.
2 |
3 | # This program is free software; you can redistribute it and/or modify
4 | # it under the terms of the GNU General Public License as published by
5 | # the Free Software Foundation; either version 3 of the License, or
6 | # (at your option) any later version.
7 | #
8 | # This program is distributed in the hope that it will be useful,
9 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 | # GNU General Public License for more details.
12 | #
13 | # You should have received a copy of the GNU General Public License
14 | # along with this program.  If not, see <http://www.gnu.org/licenses/>.
15 |
16 | import gdb
17 |
18 | # Load the pretty-printers.
19 | from .printers import register_libstdcxx_printers
20 | register_libstdcxx_printers(gdb.current_objfile())
21 |
22 | # Load the xmethods if GDB supports them.
23 | def gdb_has_xmethods():
24 | try:
25 | import gdb.xmethod
26 | return True
27 | except ImportError:
28 | return False
29 |
30 | if gdb_has_xmethods():
31 | from .xmethods import register_libstdcxx_xmethods
32 | register_libstdcxx_xmethods(gdb.current_objfile())
33 |
--------------------------------------------------------------------------------
/gerrit/README.md:
--------------------------------------------------------------------------------
1 | # Gerrit
2 |
3 | ## 给一个Gerrit Project添加commentlink
4 |
5 | * Checkout code and edit project configure file
6 |
7 | ```
8 | # git clone ssh://username@gerrit.system.com:29418/group/project
9 | # git fetch origin refs/meta/config:refs/remotes/origin/meta/config
10 | # git checkout meta/config
11 | # vim project.config
12 | [commentlink "bugheader"]
13 | match = ([Cc]loses|[Pp]artial|[Rr]elated)-[Bb]ug:\\s*#?(\\d+)
14 | link = https://bugzilla.xxx.com/?id=$2
15 | ```
16 |
17 | * Push the changes to gerrit server
18 |
19 | ```
20 | # git push origin meta/config:meta/config # push directly
21 | # git push origin meta/config:refs/for/refs/meta/config # push via review
22 | ```
23 |
24 | * Flush the caches
25 |
26 | ```
27 | # ssh gerrit gerrit flush-caches --cache project_list
28 | # ssh gerrit gerrit flush-caches --cache projects
29 | ```
30 |
31 | * Reference
32 |
33 |
34 |
35 | ## Integration
36 |
37 | Gerrit + Bugzilla = Perfect!
38 |
--------------------------------------------------------------------------------
/git/how-to-contribute-to-github-project.md:
--------------------------------------------------------------------------------
1 | # How to contribute to project
2 |
3 | ## Set your SSH key on github
4 |
5 | Generate and upload your SSH public key to github.
6 |
7 | ## Set your git config
8 |
9 | See:
10 |
11 | ## Fork it on github.com
12 |
13 | source repo: `https://github.com/liu21st/thinkphp`
14 | your fork: `https://github.com/chenzhiwei/thinkphp`
15 |
16 | ## Sync your fork with source repo
17 |
18 | ```
19 | $ git clone git@github.com:chenzhiwei/thinkphp.git
20 | $ cd thinkphp
21 | $ git remote add upstream https://github.com/liu21st/thinkphp
22 | $ git fetch upstream
23 | $ git checkout master
24 | $ git merge upstream/master
25 | $ git push
26 | ```
27 |
28 | ## Contribute to source repo
29 |
30 | ```
31 | $ git clone git@github.com:chenzhiwei/thinkphp.git
32 | $ cd thinkphp
33 | $ git checkout master
34 | $ git checkout -b your_feature_or_bugfix_branch
35 | $ vim filename
36 | $ git add .
37 | $ git commit -m"commit msg"
38 | $ git push origin your_feature_or_bugfix_branch
39 | ```
40 |
41 | Then login to github.com, and send a pull request from your_feature_or_bugfix_branch to upstream master branch.
42 |
43 | After the pull request accept, sync your fork with source repo and/or delete the your_feature_or_bugfix_branch branch.
44 |
45 | ## Rebase your branch
46 |
47 | Please take a look at:
48 |
49 | ## Solve conflicts on your branch
50 |
51 | Please take a look at:
52 |
53 | ## Checkout GitHub Pull Request locally
54 |
55 | ```
56 | $ git clone git@github.com:chenzhiwei/thinkphp.git
57 | $ cd thinkphp
58 | $ git remote add upstream https://github.com/liu21st/thinkphp
59 | $ git fetch upstream
60 | $ git fetch origin pull/123/head:pr-123
61 | ```
62 |
63 | ## Sources
64 |
65 | 1.
66 | 2.
67 | 3.
68 | 4.
69 |
--------------------------------------------------------------------------------
/gitlab-kubernetes/gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | # requires settings in gitlab-runner
2 | # [[runners.kubernetes.volumes.empty_dir]]
3 | # name = "docker-certs"
4 | # mount_path = "/certs"
5 | # medium = "Memory"
6 | variables:
7 | DOCKER_HOST: tcp://localhost:2376/
8 | DOCKER_TLS_CERTDIR: "/certs"
9 | DOCKER_TLS_VERIFY: 1
10 | DOCKER_CERT_PATH: "$DOCKER_TLS_CERTDIR/client"
11 |
12 | # only triggers in "push" and "merge requests in master" events
13 | workflow:
14 | rules:
15 | - if: $CI_MERGE_REQUEST_IID
16 | - if: $CI_COMMIT_TAG
17 | - if: $CI_COMMIT_BRANCH == "master"
18 |
19 | stages:
20 | - check
21 | - test
22 | - build
23 | - deploy
24 |
25 | check-lint-job:
26 | stage: check
27 | image: docker.mixhub.cn/gitlab/alpine:latest
28 | before_script:
29 | - pwd
30 | - ls
31 | script:
32 | - echo "lint check job - script"
33 |
34 | check-syntax-job:
35 | stage: check
36 | image: docker.mixhub.cn/gitlab/alpine:latest
37 | before_script:
38 | - pwd
39 | - ls
40 | script:
41 | - echo "syntax check job - script"
42 |
43 | test-unit-job:
44 | stage: test
45 | image: docker.mixhub.cn/gitlab/alpine:latest
46 | before_script:
47 | - pwd
48 | - ls
49 | script:
50 | - echo "unit test job - script"
51 |
52 | test-integration-job:
53 | stage: test
54 | image: docker.mixhub.cn/gitlab/alpine:latest
55 | before_script:
56 | - pwd
57 | - ls
58 | script:
59 | - echo "integration test job - script"
60 |
61 | build-package-job:
62 | stage: build
63 | image: docker.mixhub.cn/gitlab/maven:3.6-jdk-11
64 | before_script:
65 | - pwd
66 | - ls
67 | script:
68 | - mkdir target
69 | - touch target/yamu-data-product-common-111-RELEASE.jar
70 | artifacts: # share files with follow up jobs
71 | paths:
72 | - ./target/yamu-data-product-common-*-RELEASE.jar
73 |
74 | deploy-job:
75 | stage: deploy
76 | image: docker.mixhub.cn/gitlab/docker:latest
77 | services:
78 | - docker.mixhub.cn/gitlab/docker:dind
79 | before_script:
80 | - env
81 | - echo "$CI_PIPELINE_SOURCE"
82 | - pwd
83 | - ls
84 | - docker info
85 | script:
86 | - find ./target
87 | rules:
88 | - if: '$CI_PIPELINE_SOURCE != "merge_request_event"'
89 |
--------------------------------------------------------------------------------
/go/README.md:
--------------------------------------------------------------------------------
1 | # GO
2 |
3 | 喜欢使用 GO 。
4 |
5 | ## Set goproxy
6 |
7 | ```
8 | go env -w GOPROXY=https://goproxy.cn,direct
9 | ```
10 |
11 | ## 静态链接 binary
12 |
13 | ```
14 | # git clone https://github.com/docker/swarm.git
15 | # cd swarm
16 | # godep restore -v
17 | # CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags '-w' .
18 | ```
19 |
20 | 另一种方法编译静态链接 binary
21 |
22 | ```
23 | # GOPATH=~/go CGO_ENABLED=0 GOOS=linux go get -a -tags netgo -ldflags '-w' github.com/docker/swarm
24 | ```
25 |
26 | 某些项目里还可以去掉`CGO_ENABLED=0 GOOS=linux`这个东西。
27 |
28 |
29 | ## 跨平台编译
30 |
31 | GO 的强大之处还有就是跨平台编译,你可以在`x86_64`上编译出来`ppc64le`的二进制文件。
32 |
33 | ```
34 | $ GOPATH=~/go GOARCH=ppc64le GOOS=linux go get -a -tags netgo -ldflags '-w' github.com/docker/swarm
35 | ```
36 |
37 | ## Go Modules
38 |
39 | Go 语言最奇葩的地方终于改进了,把源码放在 GOPATH 里导致的问题实在太多了,从 Go 1.11 开始终于改进了。
40 |
41 | 使用方法如下:
42 |
43 | ```
44 | mkdir project
45 | cd project
46 | go mod init github.com/chenzhiwei/project
47 |
48 | go mod tidy
49 | ```
50 |
51 | 然后开始写正常代码就行,接下来 go 就会把依赖放在 vendor 目录下,也不会再提 GOPATH 的事情了。
52 |
53 | ## 设置自己的 Go Import 地址
54 |
55 | 很多时候,很多平台说倒就倒了,或者自己想完全掌控自己的内容,所以就会把东西放在自己的域名下。
56 |
57 | 官方文档:https://golang.org/cmd/go/#hdr-Remote_import_paths
58 |
59 | 示例:
60 |
61 | * Go Import: `k8s.io/api`
62 | * Source Code: `https://github.com/kubernetes/api`
63 | * Https Server URL: `https://k8s.io/api?go-get=1`
64 |
65 | ```
66 |
67 |
68 |
71 |
76 |
77 |
78 | ```
79 |
--------------------------------------------------------------------------------
/go/golang-install.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | ARCH=$(uname -m|sed 's/x86_64/amd64/g')
4 | KERNAL=$(uname -s|tr '[:upper:]' '[:lower:]')
5 | VERSION=$(curl -sL https://go.dev/VERSION?m=text)
6 | URL=https://dl.google.com/go/${VERSION}.${KERNAL}-${ARCH}.tar.gz
7 |
8 | GOINSTALL=$HOME/.golang/versions/$VERSION
9 | mkdir -p $GOINSTALL
10 |
11 | if type curl &>/dev/null; then
12 | curl -kL $URL | tar -xz -C $GOINSTALL
13 | else
14 | wget -O - $URL | tar -xz -C $GOINSTALL
15 | fi
16 |
17 | \rm -f $HOME/.golang/go
18 | ln -sf $GOINSTALL/go $HOME/.golang/go
19 |
20 | cat <<'EOF' > $HOME/.golang/rc
21 | # GOLANG
22 | export GOROOT=$HOME/.golang/go
23 | export GOPATH=$HOME/dev/go
24 | [[ $PATH == *$GOROOT/bin* ]] || export PATH=$GOROOT/bin:$PATH
25 | [[ $PATH == *$GOPATH/bin* ]] || export PATH=$GOPATH/bin:$PATH
26 | EOF
27 |
28 | grep -wq '.golang/rc' $HOME/.bashrc || echo '. $HOME/.golang/rc' >> $HOME/.bashrc
29 |
--------------------------------------------------------------------------------
/gpg/README.mkd:
--------------------------------------------------------------------------------
1 | # GPG的使用
2 |
3 | GPG也称GnuPG,是Gnu Privacy Guard的简写。GPG是一个以GNU通用公共许可证释出的开放源码用于加密或签名的软件。目前很多人、很多领域都在使用GPG。
4 |
5 | ## 生成钥匙
6 |
7 | 根据提示选择适合自己的加密方式,一般默认的就行(RSA,2048位)。
8 |
9 | ```
10 | $ gpg --gen-key
11 | ```
12 |
13 | ## 上传公钥到钥匙服务器
14 |
15 | `gpg --list-keys`命令会出现如下所示的内容,而你的key id就在其中。
16 |
17 | >pub 2048R/98564809 2013-05-26
18 |
19 | ```
20 | $ gpg --send-keys 98564809
21 | ```
22 |
23 | ## 导出公钥
24 |
25 | 公钥是对外公开的,别人可以使用你的公钥来加密内容,然后你用私钥解密。
26 |
27 | ```
28 | $ gpg --export -a -o your-public-key.asc
29 | ```
30 |
31 | ## 导出私钥
32 |
33 | ```
34 | $ gpg --export-secret-keys -o your-secret-key.gpg
35 | ```
36 |
37 | ## 导入公钥和私钥
38 |
39 | ```
40 | $ gpg --import your-public-key/your-secret-key.gpg
41 | ```
42 |
43 | ## 用公钥加密文件
44 |
45 | ```
46 | $ gpg -e file.txt
47 | ```
48 |
49 | ## 用私钥加密文件
50 |
51 | ```
52 | $ gpg -s file.txt
53 | ```
54 |
55 | ## 解密文件
56 |
57 | ```
58 | $ gpg -d file.txt.gpg
59 | ```
60 |
61 | ## 参考文档
62 |
63 | 1.
64 |
--------------------------------------------------------------------------------
/gpg/zhiwei.public.gpg:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 | Version: GnuPG v1.4.12 (GNU/Linux)
3 |
4 | mQENBFGhbmoBCADohHwUPbaUsatdL0LKZU3t3eV9Vt5AGEFjBeV3bJyrtIG4aLxe
5 | 9URxIz0j07raUZRXvDAUcLL3vQMY381DMK4JSZbIS4mTcNpDcDmrsFeL+Rh/aw8y
6 | rsWUcv0B7tCDGRXCoGR2FHqg5WXy/LzRA11Ap91eVuPolrikYQEtZj6PeC/I4cNi
7 | dFoMfHi25xFP51esNcv3XmPtGr9/vhR2suzUXL2dKeV/HjzssE6Ng4W7oaprYHQA
8 | OUw/Kzi0JsX/p/QHrp+Nd3NCHS/ORX32QWruPNWHnJH7x80EUq8hFZeDQzWkJG/m
9 | 7reJdKCnEdCQh19I3TPqiZFKS+4kEQubWPw/ABEBAAG0LENoZW4gWmhpd2VpIChJ
10 | J20gWmhpd2VpKSA8emhpd2Vpa0BnbWFpbC5jb20+iQE4BBMBAgAiBQJRoW5qAhsD
11 | BgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRCMlXS8mFZICawdCADesbw2BA33
12 | nq3YVMvDNHweTtWdK9hgJb7aTIhDZNlah+RkabPnvhLcGVxx33eq//iDZEXegiVm
13 | tzAqntAgSVrf+zIC30d7MCQz6VvGNnFZYiGkmvd1lMFi3KH9vikXKfZM0JAfHXtx
14 | 5pqeSdH7uBx5ZBbNDktT+/uDoztDwwbhpiUCUvr7caFnjkilTQ5MyIP+2sHNvCYC
15 | 0f/pjaskAuxRNOpN6qZhxcguvEoggVxNDRWuYuV/GIS+yw5iqgJYi8/bbjjHNBFK
16 | 3ZNbOUQ8lWVHck136Zpk0w0b2ltURVTje2qfxgtYRo+2IMg/B1D3KpUhkrrJ/nO3
17 | 8TIqtvYEa4cRuQENBFGhbmoBCACyMI76senWLLMm5QtcVt26pGb9mzPzBrk4Jixr
18 | JRsCoBBWd32MfaJ6lwgvwKC56OFipMt/zdmu9NMUhrWDx1XOAkU9Rd95+onr3sen
19 | XrIwLL/I0FUJ3gRzn7+tngxXCOOblO/EZ4yO6jebDtjW91OzEion2mHENkKMUReC
20 | SXdHvMF6to+F6y53E9cFSQxTq8SYLR2582bpA7PxKphCys83jbL2/XKsZPO7FQEw
21 | FyYRmmOj2mHoMUFROql3EQ+RajAfp/yBj98TsaObYcj/DC+nYvuk2wQ1S7Cy8VgT
22 | C29zS77v+uC79v2SCX3uvOPvIuwm9sgQydFQEPWgsVgFpjqXABEBAAGJAR8EGAEC
23 | AAkFAlGhbmoCGwwACgkQjJV0vJhWSAmO4wf/URv7EiG08gsUaY/zUiltrNZIGIiw
24 | uVtsXzDz+dnAuxoq4QAavq5cEEc1LgJvXNKcXAkKJ5qcfy+atDd+t7AciFO+tyvc
25 | YQDcxNe3A7zti8sdqyob4lQQ7R3D6XMXrXyjJODEbGW6RymRrblTV1yZT0QEz3Oj
26 | NC1p6pBIGXQB2X5Bm9qiGNp66CXikgWzZKvxwQ+kqsBecb1tXFN6FsxMCv/DE6Om
27 | oFKiHzXYGTQvrP+xNPw3JByBVyyBsEm7vQjqnXzKGVhbnUNZf6W+S5SkHVr9Awfl
28 | Apd0P3QivHczGX6te8+wn9GOfiZB+MqB3GVF4Dzdc5q+6HzCSB3Hy0K93Q==
29 | =XxDj
30 | -----END PGP PUBLIC KEY BLOCK-----
31 |
--------------------------------------------------------------------------------
/gtest/README.md:
--------------------------------------------------------------------------------
1 | # Google Test
2 |
3 | 一个不错的 C++ 测试框架,Mesos 用的就是它,以下是我在修复 Mesos 相关的 bug 时用到的东东。
4 |
5 | ## 只测试单个 test case
6 |
7 | ```
8 | $ make check/test GTEST_FILTER="DecoderTest.Response"
9 | $ ./libprocess-tests --gtest_filter="HTTPTest.StreamingGetFailure"
10 | $ ./libprocess-tests --gtest_filter="HTTPTest.StreamingGetFailure" --verbose
11 | $ ./libprocess-tests --gtest_filter="HTTPTest.StreamingGetFailure" --gtest_repeat=100 --verbose
12 | $ make check/test GTEST_FILTER="DecoderTest.Response:Name.Case"
13 | $ make check/test GTEST_FILTER="-DecoderTest.Response:Name.Case"
14 | ```
15 |
16 | * 第一个和第二个命令一样。
17 | * 第三个命令输出更详细的信息。
18 | * 第四个命令是重复运行这个 test case,有线程之间抢占资源时,运行单个是不会报错的,这时就需要多运行几次了。
19 | * 第五个命令是只测试`DecoderTest.Response`和`Name.Case`两个 test case 。
20 | * 第六个命令是不测试`DecoderTest.Response`和`Name.Case`两个 test case 。(注意:最前面只有一个`-`)
21 |
22 |
23 | ## 调试
24 |
25 | ```
26 | $ gdb --args ./libprocess-tests --gtest_filter="HTTPTest.StreamingGetFailure" --gtest_repeat=100
27 | (gdb) b xxx.cpp:23
28 | ```
29 |
30 |
31 | ## Help
32 |
33 | ```
34 | $ ./libprocess-tests --help
35 | ```
36 |
--------------------------------------------------------------------------------
/heartbeat/README.config:
--------------------------------------------------------------------------------
1 | You need three configuration files to make heartbeat happy,
2 | and they all go in this directory.
3 |
4 | They are:
5 | ha.cf Main configuration file
6 | haresources Resource configuration file
7 | authkeys Authentication information
8 |
9 | These first two may be readable by everyone, but the authkeys file
10 | must not be.
11 |
12 | The good news is that sample versions of these files may be found in
13 | the documentation directory (providing you installed the documentation).
14 |
15 | If you installed heartbeat using rpm packages then
16 | this command will show you where they are on your system:
17 | rpm -q heartbeat -d
18 |
19 | If you installed heartbeat using Debian packages then
20 | the documentation should be located in /usr/share/doc/heartbeat
21 |
22 |
--------------------------------------------------------------------------------
/heartbeat/authkeys:
--------------------------------------------------------------------------------
1 | auth 2
2 | #1 crc
3 | 2 sha1 HA_somestring
4 | #3 md5 HA_somestring
5 |
--------------------------------------------------------------------------------
/heartbeat/ha.cf:
--------------------------------------------------------------------------------
1 | keepalive 2
2 | deadtime 30
3 | warntime 5
4 | initdead 120
5 | udpport 6942
6 | ucast eth1 172.16.12.42
7 | auto_failback off
8 | node host41 host42
9 | ping_group group1 172.16.12.1
10 | respawn hacluster /usr/lib64/heartbeat/ipfail
11 | respawn hacluster /usr/lib64/heartbeat/dopd
12 | apiauth dopd gid=haclient uid=hacluster
13 | use_logd yes
14 |
--------------------------------------------------------------------------------
/heartbeat/haresources:
--------------------------------------------------------------------------------
1 | host41 IPaddr2::202.102.154.41/24/eth0/202.102.154.255 IPaddr2::202.102.154.42/24/eth0/202.102.154.255 IProute::202.102.154.1 nginx arp_check
2 |
--------------------------------------------------------------------------------
/heartbeat/resource.d/IProute:
--------------------------------------------------------------------------------
#!/bin/sh
#
# Heartbeat resource script that manages the default route.
# usage: $0 default-gw {start|stop|status|...}
#
unset LANG; export LANG
LC_ALL=C
export LC_ALL

. /etc/ha.d/resource.d//hto-mapfuncs

usage() {
    echo "usage: $0 default-gw $LEGAL_ACTIONS"
}

if [ $# != 2 ]; then
    usage
    exit 1
fi

DEFAULT_GW=$1
ACT=$2

# Escape the dots in the gateway address so grep matches them literally
# instead of treating them as "any character" (e.g. 10.1.2.1 must not
# also match 10.1.242.1).
GW_RE=$(echo "$DEFAULT_GW" | sed 's/\./\\./g')

case $ACT in
start)
    # Add the default route only when no default route via this gateway exists.
    /sbin/ip r | grep "$GW_RE" | grep -q default ||
        /sbin/ip r add default via "$DEFAULT_GW"
    ;;
stop)
    # Deliberately left as a no-op: removing the default route on failover
    # could cut off the node entirely.
#    /sbin/ip r | grep "$GW_RE" | grep -q default &&
#       /sbin/ip r del default via "$DEFAULT_GW"
    ;;
status)
    # Exit status of grep reports whether the default route is present.
    /sbin/ip r | grep "$GW_RE" | grep -q default
    ;;
*)
    usage
    ;;
esac
exit 0
42 |
--------------------------------------------------------------------------------
/heartbeat/resource.d/SendArp:
--------------------------------------------------------------------------------
#!/bin/sh
#
#
# 2006, Huang Zhen
# convert it to calling OCF counterpart.
#
# Copyright (C) 2004 Horms
#
# Based on IPaddr2: Copyright (C) 2003 Tuomo Soini
#
# License: GNU General Public License (GPL)
# Support: linux-ha@lists.linux-ha.org
#
# This script send out gratuitous Arp for an IP address
#
# It can be used _instead_ of the IPaddr2 or IPaddr resource
# to send gratuitous arp for an IP address on a given interface,
# without adding the address to that interface. I.e. if for
# some reason you want to send gratuitous arp for addresses
# managed by IPaddr2 or IPaddr on an additional interface.
#
# usage: $0 ip-address[/netmaskbits[/interface[:label][/broadcast]]] \
#        {start|stop|status|monitor}
#
# The "start" arg adds an IP alias.
#
# Surprisingly, the "stop" arg removes one.  :-)
#
#

set -e

unset LANG; export LANG
LC_ALL=C
export LC_ALL

. /etc/ha.d/resource.d//hto-mapfuncs

# We need to split the argument into pieces that IPaddr OCF RA can
# recognize, sed is prefered over Bash specific builtin functions
# for portability.

usage() {
    echo "usage: $0 ip-address/interface $LEGAL_ACTIONS"
}

if [ $# != 2 ]; then
    usage
    exit 1
fi
# Split "ip/interface" into its two pieces; quote $1 so an empty or
# whitespace-containing argument cannot break the pipeline under `set -e`.
BASEIP=$(echo "$1" | sed "s%/.*%%")
INTERFACE=$(echo "$1" | sed "s%${BASEIP}/%%")

# Export the OCF environment expected by the SendArp OCF resource agent,
# then delegate the requested action to it.
OCF_TYPE=SendArp
OCF_RESKEY_ip=$BASEIP
OCF_RESKEY_nic=$INTERFACE
OCF_RESOURCE_INSTANCE=${OCF_TYPE}_$BASEIP
export OCF_TYPE OCF_RESOURCE_INSTANCE OCF_RESKEY_ip OCF_RESKEY_nic

ra_execocf "$2"

# EOF - end of file
63 |
--------------------------------------------------------------------------------
/heartbeat/resource.d/arp_check:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | #
3 | # This script is used to ensure correct mapping of vip<->mac
4 | # For we use eth network not serial line to probe heartbeat mostly,
5 | # when switch box becomes unstable temporaryly, heartbeat A will think
6 | # heartbeat B is dead, and acquire all resources.
7 | # After switch box becomes stable, heartbeat A and B will arbitrate, and
8 | # one of heartbeat servers will giveup all resources!
9 | # But another heartbeat will not send arp-broadcasting again! and may cause
10 | # problems, for ip resource is not the same as other resources!
11 | #
12 | #
13 |
14 | SEND_ARP=/usr/lib/heartbeat/send_arp
15 | [ -x $SEND_ARP ] || SEND_ARP=/usr/lib64/heartbeat/send_arp
16 | [ -x $SEND_ARP ] || SEND_ARP=/opt/lib/heartbeat/send_arp
17 |
18 | PIDFILE_DIR=/var/run/heartbeat/rsctmp/send_arp
19 | [ -d $PIDFILE_DIR ] || PIDFILE_DIR=/opt/var/run/heartbeat/rsctmp/send_arp
20 |
21 | HARESOURCES=/etc/ha.d/haresources
22 | [ -f $HARESOURCES ] || HARESOURCES=/opt/etc/ha.d/haresources
23 |
24 | RES=`awk '$0 !~ /^\s*#/ {
25 | for(i=2; i iperf3: error - socket buffer size not set correctly
42 |
43 | https://github.com/esnet/iperf/issues/757#issuecomment-401173762
44 |
45 | 在客户端与服务端运行如下命令:
46 |
47 | ```
48 | sysctl -w net.core.wmem_max=67108864
49 | sysctl -w net.core.rmem_max=67108864
50 | sysctl -w net.ipv4.tcp_rmem="4096 87380 33554432"
51 | sysctl -w net.ipv4.tcp_wmem="4096 65536 33554432"
52 | ```
53 |
--------------------------------------------------------------------------------
/keepalived/113.keepalived.conf:
--------------------------------------------------------------------------------
1 | ! Configuration File for keepalived
2 |
3 | global_defs {
4 | notification_email {
5 | sa1@abc.com
6 | sa2@abc.com
7 | }
8 | notification_email_from alert@abc.com
9 | smtp_server smtp.abc.com
10 | smtp_connect_timeout 30
11 | router_id host113
12 | }
13 |
14 | vrrp_script chk_http_port {
15 | script "> /mnt/outfile; sleep 5; done
40 | ports:
41 | - containerPort: 80
42 | name: web
43 | volumeMounts:
44 | - name: persist-data
45 | mountPath: /mnt
46 | volumeClaimTemplates:
47 | - metadata:
48 | name: persist-data
49 | spec:
50 | accessModes: ["ReadWriteOnce"]
51 | storageClassName: nfs-csi
52 | resources:
53 | requests:
54 | storage: 1Gi
55 |
--------------------------------------------------------------------------------
/kind/nfs-provisioning/static-deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: deployment-nfs
5 | labels:
6 | app: deployment-nfs
7 | spec:
8 | ports:
9 | - port: 80
10 | name: web
11 | selector:
12 | app: deployment-nfs
13 |
14 | ---
15 | apiVersion: apps/v1
16 | kind: Deployment
17 | metadata:
18 | name: deployment-nfs
19 | spec:
20 | replicas: 1
21 | selector:
22 | matchLabels:
23 | app: deployment-nfs
24 | template:
25 | metadata:
26 | labels:
27 | app: deployment-nfs
28 | spec:
29 | containers:
30 | - name: deployment-nfs
31 | image: docker.io/library/ubuntu:latest
32 | command:
33 | - bash
34 | - -c
35 | - set -euo pipefail; while true; do echo $(hostname) $(date) >> /mnt/outfile; sleep 5; done
36 | ports:
37 | - containerPort: 80
38 | name: web
39 | volumeMounts:
40 | - name: nfs
41 | mountPath: /mnt
42 | volumes:
43 | - name: nfs
44 | persistentVolumeClaim:
45 | claimName: static-nfs-pvc
46 |
--------------------------------------------------------------------------------
/kind/nfs-provisioning/static-pv.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolume
4 | metadata:
5 | name: static-nfs-pv
6 | spec:
7 | capacity:
8 | storage: 2Gi
9 | accessModes:
10 | - ReadWriteMany
11 | persistentVolumeReclaimPolicy: Retain
12 | storageClassName: nfs-csi
13 | mountOptions:
14 | - nfsvers=4.1
15 | csi:
16 | driver: nfs.csi.k8s.io
17 | readOnly: false
18 | volumeHandle: static-nfs-pv
19 | volumeAttributes:
20 | server: 10.88.111.111
21 | share: /
22 | subDir: static-nfs-pv
23 |
--------------------------------------------------------------------------------
/kind/nfs-provisioning/static-pvc.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: PersistentVolumeClaim
4 | metadata:
5 | name: static-nfs-pvc
6 | spec:
7 | accessModes:
8 | - ReadWriteMany
9 | resources:
10 | requests:
11 | storage: 2Gi
12 | volumeName: static-nfs-pv
13 | storageClassName: nfs-csi
14 |
--------------------------------------------------------------------------------
/kind/nfs-server/nfs-server.yaml:
--------------------------------------------------------------------------------
1 | kind: Namespace
2 | apiVersion: v1
3 | metadata:
4 | name: nfs-server
5 |
6 | ---
7 | kind: Service
8 | apiVersion: v1
9 | metadata:
10 | name: nfs-server
11 | namespace: nfs-server
12 | labels:
13 | app: nfs-server
14 | spec:
15 | type: ClusterIP
16 | selector:
17 | app: nfs-server
18 | ports:
19 | - name: tcp-2049
20 | port: 2049
21 | protocol: TCP
22 | - name: udp-111
23 | port: 111
24 | protocol: UDP
25 |
26 | ---
27 | kind: Deployment
28 | apiVersion: apps/v1
29 | metadata:
30 | name: nfs-server
31 | namespace: nfs-server
32 | spec:
33 | replicas: 1
34 | selector:
35 | matchLabels:
36 | app: nfs-server
37 | template:
38 | metadata:
39 | name: nfs-server
40 | labels:
41 | app: nfs-server
42 | spec:
43 | nodeSelector:
44 | kubernetes.io/hostname: k8s-control-plane
45 | tolerations:
46 | - key: node-role.kubernetes.io/control-plane
47 | operator: Exists
48 | effect: NoSchedule
49 | containers:
50 | - name: nfs-server
51 | image: quay.io/siji/nfs-server:latest
52 | volumeMounts:
53 | - mountPath: /var/nfs
54 | name: nfs-vol
55 | securityContext:
56 | capabilities:
57 | add: ["SYS_ADMIN", "SETPCAP"]
58 | ports:
59 | - name: tcp-2049
60 | containerPort: 2049
61 | protocol: TCP
62 | - name: udp-111
63 | containerPort: 111
64 | protocol: UDP
65 | volumes:
66 | - name: nfs-vol
67 | hostPath:
68 | path: /var/nfs
69 | type: DirectoryOrCreate
70 |
--------------------------------------------------------------------------------
/kubernetes/etc/etcd/etcd.conf:
--------------------------------------------------------------------------------
1 | # This configuration file is written in [TOML](https://github.com/mojombo/toml)
2 |
3 | # addr = "127.0.0.1:4001"
4 | # bind_addr = "127.0.0.1:4001"
5 | # ca_file = ""
6 | # cert_file = ""
7 | # cors = []
8 | # cpu_profile_file = ""
9 | # data_dir = "."
10 | # discovery = "http://etcd.local:4001/v2/keys/_etcd/registry/examplecluster"
11 | # http_read_timeout = 10
12 | # http_write_timeout = 10
13 | # key_file = ""
14 | # peers = []
15 | # peers_file = ""
16 | # max_cluster_size = 9
17 | # max_result_buffer = 1024
18 | # max_retry_attempts = 3
19 | # name = "default-name"
20 | # snapshot = false
21 | # verbose = false
22 | # very_verbose = false
23 |
24 | # [peer]
25 | # addr = "127.0.0.1:7001"
26 | # bind_addr = "127.0.0.1:7001"
27 | # ca_file = ""
28 | # cert_file = ""
29 | # key_file = ""
30 |
31 | # [cluster]
32 | # active_size = 9
33 | # remove_delay = 1800.0
34 | # sync_interval = 5.0
35 |
--------------------------------------------------------------------------------
/kubernetes/etc/kubernetes/apiserver:
--------------------------------------------------------------------------------
1 | ###
2 | # kubernetes system config
3 | #
4 | # The following values are used to configure the kube-apiserver
5 | #
6 |
7 | # The address on the local server to listen to.
8 | KUBE_API_ADDRESS="--address=127.0.0.1"
9 |
10 | # The port on the local server to listen on.
11 | KUBE_API_PORT="--port=8080"
12 |
13 | # How the replication controller and scheduler find the kube-apiserver
14 | KUBE_MASTER="--master=127.0.0.1:8080"
15 |
16 | # Port minions listen on
17 | KUBELET_PORT="--kubelet_port=10250"
18 |
19 | # Address range to use for services
20 | KUBE_SERVICE_ADDRESSES="--portal_net=10.254.0.0/16"
21 |
22 | # Add your own!
23 | KUBE_API_ARGS=""
24 |
--------------------------------------------------------------------------------
/kubernetes/etc/kubernetes/config:
--------------------------------------------------------------------------------
1 | ###
2 | # kubernetes system config
3 | #
4 | # The following values are used to configure various aspects of all
5 | # kubernetes services, including
6 | #
7 | # kube-apiserver.service
8 | # kube-controller-manager.service
9 | # kube-scheduler.service
10 | # kubelet.service
11 | # kube-proxy.service
12 |
13 | # Comma separated list of nodes in the etcd cluster
14 | KUBE_ETCD_SERVERS="--etcd_servers=http://127.0.0.1:4001"
15 |
16 | # logging to stderr means we get it in the systemd journal
17 | KUBE_LOGTOSTDERR="--logtostderr=true"
18 |
19 | # journal message level, 0 is debug
20 | KUBE_LOG_LEVEL="--v=0"
21 |
22 | # Should this cluster be allowed to run privileged docker containers
23 | KUBE_ALLOW_PRIV="--allow_privileged=false"
24 |
--------------------------------------------------------------------------------
/kubernetes/etc/kubernetes/controller-manager:
--------------------------------------------------------------------------------
1 | ###
2 | # The following values are used to configure the kubernetes controller-manager
3 |
4 | # defaults from config and apiserver should be adequate
5 |
6 | # Comma separated list of minions
7 | KUBELET_ADDRESSES="--machines=127.0.0.1"
8 |
9 | # Add your own!
10 | KUBE_CONTROLLER_MANAGER_ARGS=""
11 |
--------------------------------------------------------------------------------
/kubernetes/etc/kubernetes/kubelet:
--------------------------------------------------------------------------------
1 | ###
2 | # kubernetes kubelet (minion) config
3 |
4 | # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
5 | KUBELET_ADDRESS="--address=127.0.0.1"
6 |
7 | # The port for the info server to serve on
8 | KUBELET_PORT="--port=10250"
9 |
10 | # You may leave this blank to use the actual hostname
11 | KUBELET_HOSTNAME="--hostname_override=127.0.0.1"
12 |
13 | # Add your own!
14 | KUBELET_ARGS=""
15 |
--------------------------------------------------------------------------------
/kubernetes/etc/kubernetes/proxy:
--------------------------------------------------------------------------------
1 | ###
2 | # kubernetes proxy config
3 |
4 | # default config should be adequate
5 |
6 | # Add your own!
7 | KUBE_PROXY_ARGS=""
8 |
--------------------------------------------------------------------------------
/kubernetes/etc/kubernetes/scheduler:
--------------------------------------------------------------------------------
1 | ###
2 | # kubernetes scheduler config
3 |
4 | # default config should be adequate
5 |
6 | # Add your own!
7 | KUBE_SCHEDULER_ARGS=""
8 |
--------------------------------------------------------------------------------
/kubernetes/files/ifcfg-kbr0:
--------------------------------------------------------------------------------
1 | DEVICE=kbr0
2 | STP=yes
3 | TYPE=Bridge
4 | BOOTPROTO=static
5 | IPADDR=172.17.1.1
6 | PREFIX=24
7 | NAME=kbr0
8 | ONBOOT=yes
9 | NM_CONTROLLED=no
10 |
--------------------------------------------------------------------------------
/kubernetes/files/route-eth0:
--------------------------------------------------------------------------------
1 | 172.17.1.0/24 via 192.168.122.21
2 |
--------------------------------------------------------------------------------
/kubernetes/nsenter.yaml:
--------------------------------------------------------------------------------
1 | # Enter to host node
2 | # kubectl exec -it nsenter -- nsenter -a -t 1 -- bash
3 | #
4 | # Single command:
5 | # kubectl run nsenter --rm -it --privileged --image=debian:stable-slim --overrides '{"spec":{"nodeName":"kube-worker02","hostPID":true}}' -- nsenter -a -t 1 -- bash
6 |
7 | ---
8 | apiVersion: v1
9 | kind: Pod
10 | metadata:
11 | name: nsenter
12 | spec:
13 | hostPID: true
14 | nodeName: kube-worker02
15 | containers:
16 | - name: nsenter
17 | image: docker.io/library/debian:stable-slim
18 | imagePullPolicy: IfNotPresent
19 | securityContext:
20 | privileged: true
21 | command:
22 | - sleep
23 | - "36000000"
24 |
--------------------------------------------------------------------------------
/kubernetes/usr/lib/systemd/system/etcd.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Etcd Server
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | # etc logs to the journal directly, suppress double logging
8 | StandardOutput=null
9 | WorkingDirectory=/var/lib/etcd
10 | User=etcd
11 | ExecStart=/usr/bin/etcd
12 |
13 | [Install]
14 | WantedBy=multi-user.target
15 |
--------------------------------------------------------------------------------
/kubernetes/usr/lib/systemd/system/kube-apiserver.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes API Server
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 |
5 | [Service]
6 | EnvironmentFile=-/etc/kubernetes/config
7 | EnvironmentFile=-/etc/kubernetes/apiserver
8 | User=kube
9 | ExecStart=/usr/bin/kube-apiserver \
10 | ${KUBE_LOGTOSTDERR} \
11 | ${KUBE_LOG_LEVEL} \
12 | ${KUBE_ETCD_SERVERS} \
13 | ${KUBE_API_ADDRESS} \
14 | ${KUBE_API_PORT} \
15 | ${KUBELET_PORT} \
16 | ${KUBE_ALLOW_PRIV} \
17 | ${KUBE_SERVICE_ADDRESSES} \
18 | ${KUBE_API_ARGS}
19 | Restart=on-failure
20 |
21 | [Install]
22 | WantedBy=multi-user.target
23 |
--------------------------------------------------------------------------------
/kubernetes/usr/lib/systemd/system/kube-controller-manager.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Controller Manager
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 |
5 | [Service]
6 | EnvironmentFile=-/etc/kubernetes/config
7 | EnvironmentFile=-/etc/kubernetes/apiserver
8 | EnvironmentFile=-/etc/kubernetes/controller-manager
9 | User=kube
10 | ExecStart=/usr/bin/kube-controller-manager \
11 | ${KUBE_LOGTOSTDERR} \
12 | ${KUBE_LOG_LEVEL} \
13 | ${KUBELET_ADDRESSES} \
14 | ${KUBE_MASTER} \
15 | ${KUBE_CONTROLLER_MANAGER_ARGS}
16 | Restart=on-failure
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
--------------------------------------------------------------------------------
/kubernetes/usr/lib/systemd/system/kube-proxy.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Kube-Proxy Server
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 |
5 | [Service]
6 | EnvironmentFile=-/etc/kubernetes/config
7 | EnvironmentFile=-/etc/kubernetes/proxy
8 | ExecStart=/usr/bin/kube-proxy \
9 | ${KUBE_LOGTOSTDERR} \
10 | ${KUBE_LOG_LEVEL} \
11 | ${KUBE_ETCD_SERVERS} \
12 | ${KUBE_PROXY_ARGS}
13 | Restart=on-failure
14 |
15 | [Install]
16 | WantedBy=multi-user.target
17 |
--------------------------------------------------------------------------------
/kubernetes/usr/lib/systemd/system/kube-scheduler.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Scheduler Plugin
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 |
5 | [Service]
6 | EnvironmentFile=-/etc/kubernetes/config
7 | EnvironmentFile=-/etc/kubernetes/apiserver
8 | EnvironmentFile=-/etc/kubernetes/scheduler
9 | User=kube
10 | ExecStart=/usr/bin/kube-scheduler \
11 | ${KUBE_LOGTOSTDERR} \
12 | ${KUBE_LOG_LEVEL} \
13 | ${KUBE_MASTER} \
14 | ${KUBE_SCHEDULER_ARGS}
15 | Restart=on-failure
16 |
17 | [Install]
18 | WantedBy=multi-user.target
19 |
--------------------------------------------------------------------------------
/kubernetes/usr/lib/systemd/system/kubelet.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kubernetes Kubelet Server
3 | Documentation=https://github.com/GoogleCloudPlatform/kubernetes
4 | After=docker.socket cadvisor.service
5 | Requires=docker.socket
6 |
7 | [Service]
8 | EnvironmentFile=-/etc/kubernetes/config
9 | EnvironmentFile=-/etc/kubernetes/kubelet
10 | ExecStart=/usr/bin/kubelet \
11 | ${KUBE_LOGTOSTDERR} \
12 | ${KUBE_LOG_LEVEL} \
13 | ${KUBE_ETCD_SERVERS} \
14 | ${KUBELET_ADDRESS} \
15 | ${KUBELET_PORT} \
16 | ${KUBELET_HOSTNAME} \
17 | ${KUBE_ALLOW_PRIV} \
18 | ${KUBELET_ARGS}
19 | Restart=on-failure
20 |
21 | [Install]
22 | WantedBy=multi-user.target
23 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/addbr.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # After executing this script, you can use br0/br1 to create VMs.
4 | # And you should set the VMs' default gateway to 192.168.0.1
5 | #
6 |
7 | # brctl show br0 || brctl addbr br0
8 | # brctl show br1 || brctl addbr br1
9 |
10 | ip link show br0 || ip link add br0 type bridge
11 | ip link show br1 || ip link add br1 type bridge
12 |
13 | ip addr add 192.168.0.1/24 brd 192.168.0.255 dev br0
14 | ip addr add 10.0.0.1/24 brd 10.0.0.255 dev br1
15 |
16 | ip link set br0 up
17 | ip link set br1 up
18 |
19 | iptables -t nat -A POSTROUTING -s 192.168.0.0/24 ! -d 192.168.0.0/24 -p tcp -j MASQUERADE --to-ports 1024-65535
20 | iptables -t nat -A POSTROUTING -s 192.168.0.0/24 ! -d 192.168.0.0/24 -p udp -j MASQUERADE --to-ports 1024-65535
21 | iptables -t nat -A POSTROUTING -s 192.168.0.0/24 ! -d 192.168.0.0/24 -j MASQUERADE
22 | iptables -t mangle -A POSTROUTING -o br0 -p udp -m udp --dport 68 -j CHECKSUM --checksum-fill
23 | iptables -t filter -A INPUT -i br0 -p udp -m udp --dport 53 -j ACCEPT
24 | iptables -t filter -A INPUT -i br0 -p tcp -m tcp --dport 53 -j ACCEPT
25 | iptables -t filter -A INPUT -i br0 -p udp -m udp --dport 67 -j ACCEPT
26 | iptables -t filter -A INPUT -i br0 -p tcp -m tcp --dport 67 -j ACCEPT
27 | iptables -t filter -A FORWARD -d 192.168.0.0/24 -o br0 -m state --state RELATED,ESTABLISHED -j ACCEPT
28 | iptables -t filter -A FORWARD -s 192.168.0.0/24 -i br0 -j ACCEPT
29 | iptables -t filter -A FORWARD -i br0 -o br0 -j ACCEPT
30 | iptables -t filter -A FORWARD -o br0 -j REJECT --reject-with icmp-port-unreachable
31 | iptables -t filter -A FORWARD -i br0 -j REJECT --reject-with icmp-port-unreachable
32 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/ifcfg-br0:
--------------------------------------------------------------------------------
1 | DEVICE="br0"
2 | BOOTPROTO="static"
3 | ONBOOT="yes"
4 | TYPE="Bridge"
5 | IPADDR="192.168.1.170"
6 | NETMASK="255.255.255.0"
7 | GATEWAY="192.168.1.1"
8 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/ifcfg-eth0:
--------------------------------------------------------------------------------
1 | DEVICE="eth0"
2 | BRIDGE="br0"
3 | ONBOOT="yes"
4 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/instance-name.xml:
--------------------------------------------------------------------------------
1 |
2 | instance-hostname
3 | d9ef885b-634a-4437-adb6-e7abe1f792a5
4 |
5 | 2
6 | 2097152
7 | 2097152
8 |
9 | destroy
10 | restart
11 | restart
12 |
13 |
14 | hvm
15 | /var/instances/instance-hostname/kernel
16 | /var/instances/instance-hostname/ramdisk
17 | root=/dev/vda console=ttyS0
18 |
19 |
20 |
21 |
22 | /usr/libexec/qemu-kvm
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/new/README.md:
--------------------------------------------------------------------------------
1 | # KVM virtualization
2 |
3 | ## Edit QCow2 image
4 |
5 | ### Use guestmount
6 |
7 | ```
8 | guestmount -a rhel.qcow2 -i /mnt
9 | mount --bind /dev /mnt/dev
10 | mount --bind /proc /mnt/proc
11 | chroot /mnt
12 | ```
13 |
14 | Then do whatever you want to do, usually you want to disable cloud-init:
15 |
16 | ```
17 | systemctl disable cloud-config.service cloud-final.service cloud-init.service cloud-init-local.service NetworkManager.service postfix.service
18 | yum install vim git
19 | rm -f /etc/udev/rules.d/70-persistent-*
20 | ```
21 |
22 |
23 | ### or Use guestfish
24 |
25 | ```
26 | guestfish --rw -a rhel.qcow2
27 | > run
28 | > list-filesystems
29 | > mount /dev/vda1 /
30 | > vi /etc/shadow
31 | > ln-sf /dev/null /etc/systemd/system/cloud-init.service
32 | ```
33 |
34 |
35 | ## Resize QCow2 image
36 |
37 | ```
38 | qemu-img info rhel.qcow2
39 | cp rhel.qcow2 rhel.final.qcow2
40 | qemu-img resize rhel.final.qcow2 +100G
41 | virt-filesystems --long -h --all -a rhel.final.qcow2
42 | virt-resize --expand /dev/sda1 rhel.qcow2 rhel.final.qcow2
43 | virt-filesystems --long -h --all -a rhel.final.qcow2
44 | ```
45 |
46 | If the filesystem of `rhel.qcow2` is xfs, then you need to boot this image as a running VM, login to this VM and run `xfs_growfs /dev/sda1` to resize it.
47 |
48 |
49 | ## Shrink(Compress) QCow2 image
50 |
51 | ```
52 | virt-sparsify --compress --convert qcow2 --format qcow2 input.qcow2 output.qcow2
53 | ```
54 |
55 |
56 |
57 | ## KVM(Libvirt) create a network
58 |
59 | ```
60 | virsh net-define virbr1.xml
61 | virsh net-start second
62 | virsh net-autostart second
63 | ```
64 |
65 | ## KVM(Libvirt) start a vm from qcow2
66 |
67 | ```
68 | virt-install --name=rhel --memory=1024 --vcpu=4 --graphics=vnc,listen=0.0.0.0 --disk=rhel-server-7.4-x86_64-kvm.qcow2 --import
69 | ```
70 |
71 | Append `--print-xml` to above command to only print the xml file, later we can use the xml file to custom our VM.
72 |
73 |
74 | ## KVM(Libvirt) start a vm from xml file
75 |
76 | ```
77 | virsh create domain.xml
78 | virsh destroy name-xxx
79 | virsh help
80 | ```
81 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/new/virbr1.xml:
--------------------------------------------------------------------------------
1 |
2 | second
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/virbr0.xml:
--------------------------------------------------------------------------------
1 |
2 | default
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/vm.instance-name.xml:
--------------------------------------------------------------------------------
1 |
2 | hostname
3 | hostname-uuid
4 |
5 | cpunum
6 | memsize
7 | memsize
8 |
9 | destroy
10 | restart
11 | restart
12 |
13 |
14 | hvm
15 | /var/instances/instance-hostname/kernel
16 | /var/instances/instance-hostname/ramdisk
17 | root=/dev/vda console=ttyS0
18 |
19 |
20 |
21 |
22 | /usr/libexec/qemu-kvm
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
31 |
32 |
33 |
34 |
35 |
36 |
37 |
38 |
39 |
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 |
60 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/vms.ini:
--------------------------------------------------------------------------------
1 | ;type=releasever|hostname|ip[|cpu|mem|disk]
2 | xx=centos-5.4.x86_64|host100|10.1.2.100|2|2048|50
3 | m1=centos-5.4.x86_64|host101|10.1.2.101|2|2048|50
4 | x1=centos-6.3.x86_64|host102|10.1.2.102
5 |
--------------------------------------------------------------------------------
/kvm-qemu-libvirt-virtualization/windows.mkd:
--------------------------------------------------------------------------------
1 | # 制作 Windows 系统镜像模板
2 |
3 | 虽然我非常不喜欢 Windows 系统,但是这个系统的镜像制作方法还是需要写出来的。
4 |
5 | ## 创建 raw 文件
6 |
7 | ```
8 | # qemu-img create -f raw windows.img 10G
9 | ```
10 |
11 | ## 启动安装 Windows 系统
12 |
13 | ```
14 | # kvm -m 2048 -cdrom windowns_dvd.iso -drive file=windows.img,if=virtio \
15 | -drive file=virtio-win-0.1-59.iso,index=3,media=cdrom -net nic,model=virtio -net user -nographic -vnc :0
16 | ```
17 |
18 | 其中`virtio-win-0.1-59.iso`是从网上下载的 virtio 驱动镜像。
19 |
20 | 打开你的 VNC 客户端开始进行系统安装吧,安装完成之后,那个`windows.img`就是你所需要的镜像文件。
21 |
22 | ## 最后
23 |
24 | 我做是做好了,但是一直没有使用。
25 |
--------------------------------------------------------------------------------
/library/README.mkd:
--------------------------------------------------------------------------------
1 | # 一些内容
2 |
3 | ## 库
4 |
5 | HTTP GET/POST, MySQL CRUD.
6 |
7 | ## 初始化环境
8 |
9 | ```
10 | $ curl -sSL https://github.com/chenzhiwei/linux/raw/master/library/initial-rc.sh | bash
11 | ```
12 |
--------------------------------------------------------------------------------
/library/id.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Validate a Chinese resident ID number: the 18th character is a check
# character computed from the first 17 digits (weighted sum mod 11).

id=$1
if [[ -z $id ]]; then
    id=110000200010101019
fi

# Weights for the first 17 digits, encoded as position:weight pairs.
factor=(0:7 1:9 2:10 3:5 4:8 5:4 6:2 7:1 8:6 9:3 10:7 11:9 12:10 13:5 14:8 15:4 16:2)
# Mapping of (sum % 11) to the expected check character.
remainders=(0:1 1:0 2:x 3:9 4:8 5:7 6:6 7:5 8:4 9:3 10:2)

# Get the final (check) character of the ID. The check character "X" may
# legally be written in uppercase; normalize it to lowercase so it matches
# the lowercase "x" used in the remainders table.
final=${id:(-1)}
final=$(echo "$final" | tr 'X' 'x')

# Calculate the weighted sum of the first 17 digits
sum=0
for fact in "${factor[@]}"; do
    index=${fact%:*}
    value=${fact##*:}
    num=${id:$index:1}
    sum=$(( sum + num * value ))
done

# Calculate the remainder
remainder=$((sum % 11))

# Look up the expected check character for this remainder
expect_final=0
for rem in "${remainders[@]}"; do
    index=${rem%:*}
    value=${rem##*:}
    if [[ $index -eq $remainder ]]; then
        expect_final=$value
    fi
done

if [[ "$final" == "$expect_final" ]]; then
    echo
    echo "The ID number($id) is a valid number"
    echo
else
    echo
    echo "The expected final number is $expect_final"
    echo "The ID number($id) is invalid"
    echo
fi
46 |
--------------------------------------------------------------------------------
/library/initial-rc.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Bootstrap personal rc files: screen, git and vim configuration.

# Fetch the shared screen configuration into ~/.screenrc
screenrc() {
    curl -o ~/.screenrc -sSL https://github.com/chenzhiwei/linux/raw/master/screen/.screenrc
}

# Fetch the shared git configuration into ~/.gitconfig
gitconfig() {
    curl -o ~/.gitconfig -sSL https://github.com/chenzhiwei/linux/raw/master/git/.gitconfig
}

# Replace ~/.vim with a fresh clone and link ~/.vimrc into it
vimrc() {
    rm -rf ~/.vim
    git clone --recursive https://github.com/chenzhiwei/dot_vim.git ~/.vim
    ln -sf ~/.vim/dot_vimrc ~/.vimrc
}

screenrc
gitconfig
vimrc
23 |
--------------------------------------------------------------------------------
/library/shell_lib.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
# Perform an HTTP GET request with curl.
#   $1 - url
#   $2 - optional query string appended as "?$2"
#   $3 - optional timeout in seconds (default: 10)
# Results are returned via global variables:
#   SURL_HTTP_CODE    - HTTP status code (last line of curl output)
#   SURL_HTTP_CONTENT - response body, or curl's error message on failure
function url_get() {
    local url="$1"
    local param="$2"
    local timeout="${3:-10}"
    if [ -n "$param" ]; then
        url="$url?$param"
    fi
    local content=$(curl -sfS -m "$timeout" --write-out "\n%{http_code}" "$url" 2>&1)
    SURL_HTTP_CODE=$(echo "$content" | tail -1)
    SURL_HTTP_CONTENT=$(echo "$content" | sed '$d')
}
17 |
--------------------------------------------------------------------------------
/logrotate/README.md:
--------------------------------------------------------------------------------
1 | # logrotate
2 |
3 | logrotate是一个强大的日志备份工具,当某文件超过指定大小时就对其进行裁断备份。
4 |
5 | logrotate的功能是管理记录文件。使用logrotate指令可以轻松管理系统产生的记录文件。它提供自动替换、压缩、删除和邮寄记录文件,每个记录文件都可被设置成每日、每周或每月处理,也能在文件太大时立即处理。这需要我们自行编辑配置文件,指定配置文件,设置配置文件在/etc/logrotate.conf
6 |
7 | 默认配置文件:/etc/logrotate.conf
8 |
9 | 自定义配置文件(默认配置文件中有include字段):/etc/logrotate.d/*
10 |
11 | 调试某配置文件是否有语法错误:logrotate -d /etc/logrotate.d/logs
12 |
13 | 格式:
14 |
15 | ```
16 | "/var/log/define_*.log" {
17 | daily (monthly,weekly,表示每天执行一次)
    dateext (在备份的日志文件名后加日期后缀,如:define_a.log-20110606)
19 | rotate 10 (轮转10个,即保留10个备份)
20 | missingok (当日志文件不存在时也不报错)
    notifempty (如果日志是空文件则不转储;ifempty表示空文件也转储)
22 | copytruncate (是指进行轮转时先把日志内容复制到新文件再清空旧文件,保证日志记录连续性)
23 | compress (nocompress,对轮转的日志文件进行压缩)
24 | sharedscripts ()
25 | postrotate (插入脚本前间隔段,指转储后要执行的命令;prerotate指转储前要执行的命令)
26 | /bin/bash /home/zhiwei/script.sh > /dev/null 2>&1
27 | endscript (插入脚本后间隔段)
28 | create 0644 owner group (轮转文件时使用指定模式和用户及用户组来创建新的日志文件)
29 | size 1024M (当日志文件达到1024M时才转储)
30 | olddir /backup (指定将备份后的文件放置位置)
31 | }
32 | ```
33 |
--------------------------------------------------------------------------------
/lvm/README.mkd:
--------------------------------------------------------------------------------
1 | # LVM相关
2 |
3 | ## LVM是什么
4 |
LVM全称Logical Volume Manager,逻辑卷管理器,其作用是在不损坏磁盘数据的情况下对磁盘空间进行增加、删除。
6 |
7 | ## 与LVM相关的几个词语
8 |
9 | PV全称Physical Volume,物理卷。将物理分区使用pvcreate命令转换成LVM底层物理卷,然后才能将其利用。
10 |
VG全称Volume Group,卷组。许多PV(物理卷)组合成VG(卷组)。
12 |
13 | LV全称Logical Volume,逻辑卷。由VG来创建LV,创建完LV之后就可以格式化并投入使用了。
14 |
15 | PE全称Physical Extend,物理扩展块。
16 |
17 | LE全称Logical Extend,逻辑扩展块。
18 |
19 | ## LVM操作流程
20 |
21 | ### 1.创建PV
22 |
23 | ```
24 | # pvcreate /dev/sdb1
25 | ```
26 |
27 | ### 2.创建VG
28 |
29 | ```
30 | # vgcreate vgname /dev/sdb1
31 | ```
32 |
33 | 其中-s代表--physicalextentsize,稍后补充。
34 |
35 | ### 3.创建LV
36 |
37 | ```
38 | # lvcreate -l 512 -n lvname vgname
39 | ```
40 |
41 | ### 4.将LV挂载到某目录
42 |
43 | ```
44 | mkfs.ext4 /dev/vgname/lvname
45 | mount /dev/vgname/lvname /mnt
46 | ```
47 |
48 | ### 5.扩展LV: lvname
49 |
50 | ```
51 | # pvcreate /dev/sdb2
52 | # vgextend vgname /dev/sdb2
53 | # vgdisplay //列出Free PE的大小,假如为1024
54 | # lvextend -l +1024 /dev/vgname/lvname
55 | # resize2fs /dev/vgname/lvname
56 | # xfs_growfs /dev/vgname/lvname
57 | ```
58 |
59 | ### 6.注意事项
60 |
61 | 以后补充。
62 |
63 | ## 参考资料
64 |
65 | 1.
66 |
67 | 2.
68 |
69 | 3.Create thin provisioned LV:
70 |
--------------------------------------------------------------------------------
/lvs-keepalived/README.mkd:
--------------------------------------------------------------------------------
1 | # LVS+Keepalived
2 |
3 | LVS+Keepalived有两种常见的用法,一个是用NAT,另一个是用DR方式。
4 |
5 | ## LVS+NAT+Keepalived
6 |
7 | 请看keepalived的[配置页面][lvs_keepalived]。
8 |
9 | ## LVS+DR+Keepalived
10 |
11 | 也请查看keepalived的[配置页面][lvs_keepalived]。
12 |
13 | ## 说明
14 |
本来想把keepalived+lvs单独拿出来在这个页面写一下,但是发现放在keepalived页面效果会更好。
16 |
17 | [lvs_keepalived]: https://github.com/chenzhiwei/linux/tree/master/keepalived#lvskeepalived%E9%85%8D%E7%BD%AE
18 |
--------------------------------------------------------------------------------
/lvs/lvs.conf:
--------------------------------------------------------------------------------
1 | serial_no = 26
2 | primary = 10.1.2.11
3 | service = lvs
4 | backup = 10.1.2.12
5 | heartbeat = 1
6 | heartbeat_port = 539
7 | keepalive = 6
8 | deadtime = 18
9 | network = direct
10 | debug_level = NONE
11 | virtual dev {
12 | active = 1
13 | address = 10.1.2.15 eth0:1
14 | vip_nmask = 255.255.255.0
15 | port = 80
16 | send = "GET / HTTP/1.0\r\n\r\n"
17 | expect = "HTTP"
18 | use_regex = 0
19 | load_monitor = none
20 | scheduler = wlc
21 | protocol = tcp
22 | timeout = 6
23 | reentry = 15
24 | quiesce_server = 0
25 | server 21 {
26 | address = 10.1.2.21
27 | active = 1
28 | weight = 1
29 | }
30 | server 22 {
31 | address = 10.1.2.22
32 | active = 1
33 | weight = 1
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/mesos/README.md:
--------------------------------------------------------------------------------
1 | # Apache Mesos
2 |
3 | 让你管理你的数据中心就像管理一个资源池一样,你可以很容易的对里面的资源进行各种调度、管理等。是一个统一资源管理和调度平台。
4 |
5 | ## Mesos是什么
6 |
7 | Mesos是一个分布式集群系统的核心,它运行在集群的各个节点上,用来调度和管理整个数据中心和云环境上的各种资源。
8 |
--------------------------------------------------------------------------------
/minio/Dockerfile.minio:
--------------------------------------------------------------------------------
# podman build -t docker.io/siji/minio -f Dockerfile.minio .

# Multi-stage build: take the mc client and minio server binaries from
# their official images, then assemble a plain Ubuntu runtime image.
FROM docker.io/minio/mc:latest AS client

FROM docker.io/minio/minio:latest AS server

FROM docker.io/library/ubuntu:latest

COPY --from=client /usr/bin/mc /usr/bin/mc
COPY --from=server /opt/bin/minio /usr/bin/minio

# Use apt-get rather than apt: apt's CLI is not stable for scripting and
# warns when run non-interactively. Clean the lists to keep the layer small.
RUN apt-get update \
    && apt-get install -y --no-install-recommends curl ca-certificates \
    && rm -rf /var/lib/apt/lists/*
15 |
--------------------------------------------------------------------------------
/minio/k8s-deployment.yaml:
--------------------------------------------------------------------------------
---
# Single-replica minio Deployment fronted by an nginx sidecar.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: minio
  # YAML anchor so the selector and pod template reuse the same label set.
  labels: &labels
    app: minio
spec:
  replicas: 1
  selector:
    matchLabels: *labels
  template:
    metadata:
      labels: *labels
    spec:
      containers:
        # nginx sidecar; exposed on the node via hostPort 8000/8001.
        # NOTE(review): presumably it proxies to minio's 9000/9001 ports
        # below — confirm against the mounted conf.d config.
        - name: nginx
          image: docker.io/siji/nginx:alpine
          ports:
            - containerPort: 8000
              hostPort: 8000
              protocol: TCP
            - containerPort: 8001
              hostPort: 8001
              protocol: TCP
          resources: {}
          volumeMounts:
            - name: nginx-conf
              mountPath: /etc/nginx/conf.d
            - name: nginx-certs
              mountPath: /etc/nginx/certs
        # minio server: S3 API on 9000, web console on 9001.
        - name: minio
          image: docker.io/siji/minio:latest
          command:
            - minio
            - server
            - /data
            - --address=:9000
            - --console-address=:9001
          env:
            # NOTE(review): demo credentials in plain text — use a Secret
            # for real deployments.
            - name: MINIO_ROOT_USER
              value: minio
            - name: MINIO_ROOT_PASSWORD
              value: minio123
          resources: {}
          volumeMounts:
            - name: minio-data
              mountPath: /data
      # All state lives on the node via hostPath; data survives pod
      # restarts only while the pod stays on the same node.
      volumes:
        - name: nginx-conf
          hostPath:
            path: /etc/nginx/conf.d
            type: DirectoryOrCreate
        - name: nginx-certs
          hostPath:
            path: /etc/nginx/certs
            type: DirectoryOrCreate
        - name: minio-data
          hostPath:
            path: /var/lib/minio
            type: DirectoryOrCreate
62 |
--------------------------------------------------------------------------------
/minio/nginx/certs/server.crt:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIFWDCCA0CgAwIBAgIQay91zk+hR8uGULJ9/R7rvDANBgkqhkiG9w0BAQsFADAg
3 | MQ4wDAYDVQQKEwVNaW5pbzEOMAwGA1UEAxMFbWluaW8wIBcNMjEwOTI5MDMyMTE5
4 | WhgPMjEyMTA5MDUwMzIxMTlaMCAxDjAMBgNVBAoTBU1pbmlvMQ4wDAYDVQQDEwVt
5 | aW5pbzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAM/U6Hr3eterw6i4
6 | e7ypOAc5x+s8diSf1pnuTBXO1jx+SikauUazoCs2o94EUj/LPBvwn/tmzCpD0WvR
7 | dEyIT+JfRGUezbzNZfXYfBkHedqYSqiwolU21MRelqf3nRo7pjg5US60PwE0buLm
8 | h/W77Ou/g0rRuRaDEUUwNo+iFCRA16Q5twwVHpNmqlh76vkYL3l/pn7jVbNykZRh
9 | o+wp/lB981BRRqhE6g0o2fR32ujN7eN4AsmScBMgOvWkB6hkhBVUFYxa8mGKtJZO
10 | KU0eGXwgpzuorPylBU12g3N2h4LnsGepVErgvowBsBNDqXWbgXLq3Zk7PW8eq8S6
11 | Hhh8EjdZEwTXnootjnE6c2eGs/M3lF7DUm0TJ7nnEa4gdSZjmLQRz+665VcSY0gA
12 | GTecCnr6kLi7eOLlySSwGXdiJC44Y5xmSSqPahFTzIbZoA44wSJI1YF/oym6D+ak
13 | bCEcfhAnigTaUvmLmi2/sUt+G4xErFoxKOkb1kJXp7kxdMsX9/Jh4AJWaWcHBlSo
14 | XfGl2EgkWNNymxZ7D+mfijl9f21JOvhNCoifdcZvAGeDyXE+7xkckka2E+BX6ZTj
15 | nzEIIzqvEYU7wBl3Fson2mrht1QOQEXycQYQqT1k/GbUiK3CvwVt9uCzIdk5UzBK
16 | RSbPWPJIXqvOlcA1+cCeKurhHYY7AgMBAAGjgYswgYgwDgYDVR0PAQH/BAQDAgKk
17 | MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAPBgNVHRMBAf8EBTADAQH/
18 | MB0GA1UdDgQWBBQmcWZgb6pBjcB2Pd4lJ+KT1x17XTAnBgNVHREEIDAeggttaW5p
19 | by5sb2NhbIIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4ICAQBbVz7L
20 | p8JGMoz2hJhtxFUl24Pbd9NhzdJN1GzuNFkQqHGaawvro+bVZcw1GOQZznD1Uacl
21 | x2SsLd4exdmoLYAur4GiKyv0BQ+DtSQ4SDSWgiH7tqYd+J2aX/1/uNPquW+p0yFS
22 | Go0I51Ej0nhrooOFRotAjLdNfZaRsqhYZSOJlStP1BlNGbBdb8WiAG/ZokWbwpnG
23 | Hv22PB4L4rFxj+PIoZCXIovHN4YHnBtLn2EMkSFrFql3iKgiMR6bXxfVQ8clDhkR
24 | Z+IhyVGV8RMFlXls8t0ScFyY9CGePGgbJ7GUJKe2Mv40wEqUA1RzZFA+51y9TtLE
25 | iTcZ/S1KW4+bynb+wWD3kWgJKMjDHhoSlmk7o0rbOFcapq8C1h1nBaItADHXej15
26 | pAWpZsHFRbcZy7JtyIfgnHXCOyMfShf+IYOOZA/1IL4mXkwp+g/MbfqNAzbcSDLU
27 | WIkD3RF4T+8lK9w6rQ0bqIRRlTETuGxJgUjsr70p+ztLR1jlsZD60Tan7EbMYQmD
28 | kAXv65TGkCFbQC8OMC6DGOGTSkOxqKnsU1FbG4v1LLBQr4s8I/JQvvXNG6v4fJKK
29 | iIiQEdiTE/df08hj0co/K4FsaRg1w37BjDhjA7VD9P9dHGj7Ub54UQzU+iTzX42w
30 | IqVaFi0Ftn2g+SmKTYX0kuzxUh3gEAcuFbirBA==
31 | -----END CERTIFICATE-----
32 |
--------------------------------------------------------------------------------
/mount/README.md:
--------------------------------------------------------------------------------
1 | # Mount
2 |
3 | ## Bind mount
4 |
5 | Sometimes you may need bind mount, run:
6 |
7 | ```
8 | mount --bind /data/docker /var/lib/docker
9 | ```
10 |
11 | In the `/etc/fstab`:
12 |
13 | ```
14 | /data/docker /var/lib/docker none defaults,bind 0 0
15 | ```
16 |
--------------------------------------------------------------------------------
/nfs/README.md:
--------------------------------------------------------------------------------
1 | # NFS
2 |
3 | ## NFS(Network File System)
4 |
NFS能让使用者访问网络上其他主机的文件就像访问自己电脑上文件一样。NFS是基于UDP/IP协议的应用,其实现主要是采用RPC(Remote Procedure Call,远程过程调用)机制,RPC提供了一个与机器、操作系统以及低层传送协议无关的存取远程文件的操作。RPC采用了XDR的支持,XDR是一种与机器无关的数据描述编码协议,它以独立于任意机器体系结构的格式对网上传送的数据进行编码和解码,支持在异构系统之间数据的传送。
6 |
7 | NFS在`/etc/fstab`里的简单配置如下:
8 |
9 | ```
10 | nfs_server:/var/nfs /mnt nfs defaults 0 0
11 | ```
12 |
13 | NFS的配置文件在`/etc/exports`里面,当修改之后需要用`exportfs -ar`命令来重新加载一下。
14 |
15 | `no_root_squash` 一般在NFS文件系统上安装RPM包时会报`chown`之类的错误,添加这个参数之后就OK了。
16 |
17 | ## 当 umount 不掉时
18 |
19 | ```
20 | # fuser -m /mnt
21 | # fuser -k /mnt
22 | # fuser -mk /mnt
23 | ```
24 |
25 | 第一条命令是查看哪些进程在使用,第二条命令是 kill 掉这样进程,第三条命令是两者一起用。
26 |
--------------------------------------------------------------------------------
/nfs/exports:
--------------------------------------------------------------------------------
1 | # /etc/exports: the access control list for filesystems which may be exported
2 | # to NFS clients. See exports(5).
3 | #
4 | # Example for NFSv2 and NFSv3:
5 | # /srv/homes hostname(rw,sync,no_subtree_check) networkname(ro,sync,no_subtree_check)
6 | #
7 | # Example for NFSv4:
8 | # /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
9 | # /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check)
10 |
11 | /var/nfs/dir1 10.0.0.0/8(rw,sync,no_subtree_check,no_root_squash) 172.16.212.13(ro,no_subtree_check,no_root_squash)
12 | /var/nfs/dir2 *(rw,sync,no_subtree_check,no_root_squash)
13 |
--------------------------------------------------------------------------------
/nftables/ip.md:
--------------------------------------------------------------------------------
1 | # iproute
2 |
3 |
4 | ## Create tun
5 |
6 | ```
7 | ip tuntap add mode tun name tun1
8 | ip link set tun1 up
9 |
10 | # set owner to alice
11 | ip tuntap add mode tun user alice name tun1
12 |
13 | # route packets to tun
14 | ip route add 198.18.0.0/15 dev tun1
15 | ```
16 |
17 |
18 | ## route packets with fwmark policy
19 |
20 | ```
21 | # Set default gateway with a table
22 | # packets with table 115 will be handled by tun1
23 | ip route add default dev tun1 table 115
24 |
25 | # packets with fwmark 115 go to table 115 then handled by tun1
26 | ip rule add fwmark 115 lookup 115
27 |
28 | # test the rule policy
29 | ip route get 1.1.1.1 mark 115
30 |
31 | # nftables accept tun1 packets
32 | nft add rule ip test local iif tun1 accept
33 |
# nftables mark other packets ("set" is required to write the mark;
# a bare "meta mark 115" would only match packets already marked 115)
nft add rule ip test local meta mark set 115
36 | ```
37 |
38 | ## References
39 |
40 | https://comzyh.gitbook.io/clash/
41 |
42 | https://comzyh.gitbook.io/clash/real-ip-tun-example
43 |
--------------------------------------------------------------------------------
/nftables/nftable.conf:
--------------------------------------------------------------------------------
1 | define LOCAL_SUBNET = {10.0.0.0/8, 127.0.0.0/8, 169.254.0.0/16, 172.16.0.0/12, 192.168.0.0/16, 224.0.0.0/4}
2 |
3 | table clash
4 | flush table clash
5 |
6 | table clash {
7 | chain local {
8 | type route hook output priority mangle; policy accept;
9 |
10 | ip daddr $LOCAL_SUBNET accept
11 | ip protocol != { tcp, udp } accept
12 | tcp dport 8192-65535 accept
13 | udp dport 4096-65535 accept
14 | iif "utun" accept
15 |
16 | ip daddr ${DNS_SERVER} udp dport 53 meta mark set ${MARK_NUM}
17 | ip daddr ${DNS_SERVER} tcp dport 53 meta mark set ${MARK_NUM}
18 |
19 | tcp dport 53 accept
20 | udp dport 53 accept
21 |
22 | meta mark set ${MARK_NUM}
23 | }
24 |
25 | chain forward {
26 | type filter hook prerouting priority mangle; policy accept;
27 |
28 | ip daddr \$LOCAL_SUBNET accept
29 | ip protocol != { tcp, udp } accept
30 | tcp dport 8192-65535 accept
31 | udp dport 4096-65535 accept
32 | iif "utun" accept
33 |
34 | ip daddr ${DNS_SERVER} udp dport 53 meta mark set ${MARK_NUM}
35 | ip daddr ${DNS_SERVER} tcp dport 53 meta mark set ${MARK_NUM}
36 |
37 | tcp dport 53 accept
38 | udp dport 53 accept
39 |
40 | meta mark set ${MARK_NUM}
41 | }
42 | }
43 |
--------------------------------------------------------------------------------
/nginx/README.md:
--------------------------------------------------------------------------------
1 | # nginx配置相关
2 |
3 | 该目录下有几个简单的nginx配置文件,分别代表用Nginx完成不同的功能。
4 |
5 | 关于nginx的配置可以参考一下[官方文档][nginx_site],里面写的非常清楚,这里就不多提了。
6 |
7 | [nginx_site]: http://wiki.nginx.org/
8 |
9 | 生成 SSL 证书:
10 |
--------------------------------------------------------------------------------
/nginx/conf.d/basic-auth.conf:
--------------------------------------------------------------------------------
1 | # auth.file format
2 | # username:encrypted_password
3 | # encrypted_password can be generated by `openssl passwd`
4 | server {
5 | listen 80 default_server;
6 | server_name auth.com www.auth.com;
7 | access_log logs/auth.com.access.log main;
8 | location / {
9 | auth_basic "Restricted";
10 | auth_basic_user_file auth.file;
11 | index index.html;
12 | root /var/www/htdocs/domain1.com;
13 | }
14 | }
15 |
16 | server {
17 | listen 443 ssl;
18 | http2 on;
19 | server_name auth.com;
20 |
21 | ssl_protocols TLSv1.2 TLSv1.3;
22 | ssl_prefer_server_ciphers on;
23 | ssl_certificate /etc/nginx/pki/tls.crt;
24 | ssl_certificate_key /etc/nginx/pki/tls.key;
25 |
26 | satisfy any; # either IP rule or basic auth
27 |
28 | # IP rule
29 | allow 10.0.0.0/8;
30 | allow 127.0.0.0/8;
31 | allow 172.16.0.0/12;
32 | allow 192.168.0.0/16;
33 | deny all;
34 |
35 | # Basic Authentication
36 | auth_basic "Restricted";
37 | auth_basic_user_file /etc/nginx/secret/auth.file;
38 |
39 | location /public {
40 | auth_basic off;
41 | index index.html;
42 | root /var/www/htdocs/domain1.com;
43 | }
44 |
45 | location / {
46 | index index.html;
47 | root /var/www/htdocs/domain1.com;
48 | }
49 | }
50 |
--------------------------------------------------------------------------------
/nginx/conf.d/file-autoindex.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80 default_server;
3 | location /file/ {
4 | rewrite /file/(.*) /$1 break;
5 | autoindex on;
6 | root /directory-of-files;
7 | }
8 | }
9 |
--------------------------------------------------------------------------------
/nginx/conf.d/geo-redirect.conf:
--------------------------------------------------------------------------------
1 | geo $geo {
2 | default 0;
3 | 192.168.12.0/24 1;
4 | 192.168.22.0/24 2;
5 | 192.168.32.0/24 3;
6 | }
7 |
8 | server {
9 |
10 | listen 8888;
11 |
12 | location / {
13 | proxy_set_header Accept-Encoding "";
14 | proxy_set_header Host $http_host;
15 | proxy_set_header X-Forwarded-By $server_addr:$server_port;
16 | proxy_set_header X-Forwarded-For $remote_addr;
17 | proxy_set_header X-Forwarded-Proto $scheme;
18 | proxy_set_header X-Real-IP $remote_addr;
19 | proxy_pass http://127.0.0.1;
20 |
21 | if ( $geo = 1 ) {
22 | proxy_pass http://192.168.12.10;
23 | }
24 |
25 | if ( $geo = 2 ) {
26 | proxy_pass http://192.168.22.10;
27 | }
28 |
29 | if ( $geo = 3 ) {
30 | proxy_pass http://192.168.32.10;
31 | }
32 |
33 | proxy_next_upstream error timeout invalid_header http_500 http_502 http_503 http_504;
34 | }
35 | }
36 |
--------------------------------------------------------------------------------
/nginx/conf.d/jizhihuwai.com.conf:
--------------------------------------------------------------------------------
server {
    # "default_server" is a parameter of the listen directive; listed
    # under server_name it is treated as a literal host name and has no
    # default-server effect. Moved it to listen.
    listen 80 default_server;
    server_name jizhihuwai.com www.jizhihuwai.com;

    # Canonicalize every other host to the bare domain.
    if ($host != 'jizhihuwai.com' ) {
        rewrite ^/(.*)$ http://jizhihuwai.com/$1 permanent;
    }

    proxy_connect_timeout 4;
    proxy_read_timeout 300;
    proxy_send_timeout 300;

    index index.php index.html index.htm;

    location / {
        root /var/www/html/jizhihuwai.com;
        try_files $uri $uri/ /index.php?$query_string;
    }

    # Block direct access to the editor's PHP helpers.
    location ~ /editor/php/ {
        deny all;
    }

    # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
    # ./php-cgi -b 127.0.0.1:9000
    # usually use PHP FastCGI Process Manager(php-fpm) to manage php-cgi
    location ~ \.php$ {
        root /var/www/html/jizhihuwai.com;
        # fastcgi_pass 127.0.0.1:9000;
        fastcgi_pass unix:/var/run/php5-fpm.sock;
        fastcgi_index index.php;
        fastcgi_param SCRIPT_FILENAME /var/www/html/jizhihuwai.com$fastcgi_script_name;
        include fastcgi_params;
    }

    # error_page 500 502 503 504 /var/www/htdocs/default/50x.html;

    access_log /var/log/nginx/jizhihuwai.com.access.log;
    error_log /var/log/nginx/jizhihuwai.com.error.log;
}
42 |
--------------------------------------------------------------------------------
/nginx/conf.d/php.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name localhost;
4 | proxy_connect_timeout 4;
5 | proxy_read_timeout 300;
6 | proxy_send_timeout 300;
7 |
8 | location / {
9 | root /var/www/htdocs/default;
10 | index index.php index.html index.htm;
11 | }
12 |
13 | # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
14 | # ./php-cgi -b 127.0.0.1:9000
15 | # usually use PHP FastCGI Process Manager(php-fpm) to manage php-cgi
16 |
17 | location ~ \.php$ {
18 | root /var/www/htdocs/default;
19 | fastcgi_pass 127.0.0.1:9000;
20 | # fastcgi_pass unix:/var/run/php-fpm.sock
21 | fastcgi_index index.php;
22 | fastcgi_param SCRIPT_FILENAME /var/www/htdocs/default$fastcgi_script_name;
23 | include fastcgi_params;
24 | }
25 |
26 | error_page 500 502 503 504 /var/www/htdocs/default/50x.html;
27 |
28 | # access_log /data0/logs/default.access.log main;
29 | # error_log /data0/logs/defaul.error.log warn;
30 | }
31 |
--------------------------------------------------------------------------------
/nginx/conf.d/proxy.conf:
--------------------------------------------------------------------------------
# Minimal forward HTTP proxy listening on the squid-style port 3128.
server {
    listen 3128;

    # Needed because upstream host names come from the request itself.
    resolver 8.8.8.8;

    location / {
        # Forward the request to whatever host the client asked for.
        proxy_pass http://$http_host$request_uri;
        #allow 127.0.0.1;
        #deny all;
    }

    access_log /var/log/nginx/proxy.access.log;
    error_log /var/log/nginx/proxy.error.log;
}
16 |
--------------------------------------------------------------------------------
/nginx/conf.d/single-file.conf:
--------------------------------------------------------------------------------
# Serve one static file at a fixed URL.
server {
    listen 80 default_server;

    location /file.txt {
        # alias maps the location to this exact file on disk.
        # Fixed: the directive was missing its terminating semicolon,
        # which makes nginx fail to parse the config.
        alias /var/www/html/xxx.txt;
    }
}
7 |
--------------------------------------------------------------------------------
/nginx/conf.d/ssl.conf:
--------------------------------------------------------------------------------
server {
    listen 443 ssl;
    server_name *.chenzhiwei.cn;

    ssl_certificate /etc/nginx/chenzhiwei.cn.crt;
    ssl_certificate_key /etc/nginx/chenzhiwei.cn.key;

    ssl_session_timeout 5m;

    # SSLv2/SSLv3 are broken and removed from modern nginx/OpenSSL builds,
    # and TLSv1.0 is deprecated; allow only TLS 1.2+ (consistent with the
    # repo's basic-auth.conf).
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_ciphers HIGH:!aNULL:!MD5;
    ssl_prefer_server_ciphers on;

    location / {
        index index.html;
        root /var/www/htdocs/chenzhiwei.cn;
    }

    # access_log /var/logs/nginx/chenzhiwei.cn.ssl.access.log main;
    # error_log /var/logs/nginx/chenzhiwei.cn.ssl.error.log warn;
}
22 |
--------------------------------------------------------------------------------
/nginx/conf.d/vhosts.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | server_name domain1.com www.domain1.com;
4 | access_log /var/log/nginx/domain1.com.access.log;
5 | location / {
6 | index index.html;
7 | root /var/www/html/domain1.com;
8 | }
9 | }
10 |
11 | server {
12 | listen 80;
13 | server_name domain2.com www.domain2.com;
14 | access_log /var/log/nginx/domain2.com.access.log;
15 | location / {
16 | index index.html;
17 | root /var/www/html/domain2.com;
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | user nginx;
2 | worker_processes 1;
3 |
4 | error_log /var/log/nginx/error.log warn;
5 | pid /var/run/nginx.pid;
6 |
7 |
8 | events {
9 | worker_connections 1024;
10 | }
11 |
12 |
13 | http {
14 | include /etc/nginx/mime.types;
15 | default_type application/octet-stream;
16 |
17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
18 | '$status $body_bytes_sent "$http_referer" '
19 | '"$http_user_agent" "$http_x_forwarded_for"';
20 |
21 | access_log /var/log/nginx/access.log main;
22 |
23 | sendfile on;
24 | #tcp_nopush on;
25 |
26 | keepalive_timeout 65;
27 |
28 | #gzip on;
29 |
30 | include /etc/nginx/conf.d/*.conf;
31 | }
32 |
33 | stream {
34 | include /etc/nginx/raw.d/*.conf;
35 | }
36 |
--------------------------------------------------------------------------------
/nginx/raw.d/tcp-backends.conf:
--------------------------------------------------------------------------------
1 | upstream backend_443 {
2 | server 192.168.122.10:443;
3 | server 192.168.122.11:443;
4 | server 192.168.122.12:443;
5 | }
6 | server {
7 | listen 443;
8 | proxy_pass backend_443;
9 | }
10 |
--------------------------------------------------------------------------------
/nginx/raw.d/tcp.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 443;
3 | proxy_pass 192.168.122.10:443;
4 | }
5 |
--------------------------------------------------------------------------------
/nginx/raw.d/udp.conf:
--------------------------------------------------------------------------------
1 | upstream dns_servers {
2 | least_conn;
3 | server 192.168.122.10:53;
4 | server 192.168.122.11:53;
5 | }
6 |
7 | server {
8 | listen 53 udp;
9 | proxy_pass dns_servers;
10 | }
11 |
--------------------------------------------------------------------------------
/nodejs/README.md:
--------------------------------------------------------------------------------
1 | # nodejs
2 |
3 | nodejs development environment setup.
4 |
5 | ## Setup a mirror registry
6 |
7 | ```
8 | $ vim ~/.npmrc
9 | progress = true
10 | registry = https://registry.npm.taobao.org/
11 | ```
12 |
13 | ## Install nvm
14 |
15 | nvm is Node Version Manager.
16 |
17 | ```
18 | $ curl -o- https://raw.githubusercontent.com/creationix/nvm/master/install.sh | bash
19 | ```
20 |
21 | ## Install nodejs
22 |
23 | ```
24 | $ nvm install stable
25 | $ nvm install 8
26 | $ nvm list
27 | $ nvm use 8
28 | ```
29 |
30 | ## Reference
31 |
32 | nvm: https://github.com/creationix/nvm
33 |
--------------------------------------------------------------------------------
/opensource-solution/README.md:
--------------------------------------------------------------------------------
1 | # Opensource Solution
2 |
3 | Opensource Solution for companies and teams.
4 |
5 | ## Wiki & Design
6 |
7 | Gollum wiki system, which is built on top of Git and can store the history change.
8 |
9 | Gollum pages can be written in a variety of formats such as `markdown`. You can edit your wiki page through the built-in web interface or by using your favorite text editor or IDE.
10 |
11 | ## Code & Review
12 |
13 | Gerrit Code Review system, which is a web based code review system, facilitating online code reviews for projects using the Git version control system.
14 |
15 | ## Continuous integration
16 |
17 | Jenkins CI, which is an extensible open source continuous integration server.
18 |
19 | After you submit your patch, you need to do some basic check or automation test(unit test) before merge it to code repository. It is the Jenkins time!
20 |
21 | The github code can use Travis CI.
22 |
23 | ## Bug & Tracking
24 |
25 | Bugzilla, which is a "Defect Tracking System" or "Bug-Tracking System". Defect Tracking Systems allow individual or groups of developers to keep track of outstanding bugs in their product effectively.
26 |
27 | ## Git hosting
28 |
GitLab is a good open source solution for Git hosting; it is very similar to GitHub, and I like it a lot.
30 |
--------------------------------------------------------------------------------
/openstack/PACKAGING.md:
--------------------------------------------------------------------------------
1 | # Packaging OpenStack
2 |
## Create tar package that RPM spec file required
4 |
5 | # git clone https://github.com/openstack/nova
6 | # cd nova
7 | # PBR_VERSION=2013.2.3 python setup.py sdist
8 | # ls dist
9 |
10 | ## Create RPM package
11 |
12 | # git clone https://github.com/openstack/nova
13 | # cd nova
14 | # python setup.py bdist_rpm
15 |
16 | ## More info
17 |
18 |
19 |
20 |
21 |
22 |
23 |
--------------------------------------------------------------------------------
/openstack/TESTING.md:
--------------------------------------------------------------------------------
1 | # OpenStack Dev
2 |
3 | ## 搭建OpenStack Unit Test环境(nova)
4 |
5 | ### 安装虚拟环境所需软件包
6 |
7 | * Ubuntu
8 |
9 | ```
10 | $ sudo apt-get install python-dev libssl-dev python-pip git-core libxml2-dev libxslt-dev pkg-config libffi-dev libpq-dev libmysqlclient-dev libvirt-dev graphviz libsqlite3-dev python-tox
11 | ```
12 |
13 | * CentOS/RHEL/Fedora
14 |
15 | ```
16 | # yum install python-devel openssl-devel python-pip git gcc libxslt-devel mysql-devel postgresql-devel libffi-devel libvirt-devel graphviz sqlite-devel libxml2-devel mysql
17 | # pip-python install tox
18 | ```
19 |
20 | ### 创建虚拟环境(venv)
21 |
22 | ```
23 | $ git clone https://github.com/openstack/nova
24 | $ cd nova
25 | $ ./run_tests.sh --update
26 | $ ./run_tests.sh nova.tests.virt.libvirt.test_libvirt
27 | $ source .venv/bin/activate
28 | $ pip install flake8
29 | $ pip install hacking
30 | $ flake8 nova/scheduler/utils.py
31 | $ flake8 --config=tox.ini nova/scheduler/utils.py
32 | $ pip install nose
33 | $ nosetests nova/tests/
34 | $ nosetests nova/tests/test_file.py
35 | $ nosetests nova.tests.virt.libvirt.test_libvirt
36 | $ nosetests -s -v nova/tests/virt/libvirt/test_libvirt.py:LibvirtDriverTestCase.test_finish_revert_migration_power_on
37 | ```
38 |
39 | * flake8
40 |
41 | `flake8`是用来做代码风格检查的,可以简单的认为是`pep8`、`pyflake`等的集合。
42 |
43 | * hacking
44 |
45 | `hacking`是一系列`flake8`的插件,主要用来增强`OpenStack`项目代码检查。
46 |
47 | * nosetests
48 |
49 | `nosetests`是一个执行Python Unit Test的工具。
50 |
51 | ## Using tox
52 |
53 | * Run code-style Test
54 |
55 | ```
56 | $ tox -e pep8
57 | ```
58 |
59 | * Run Unit Test
60 |
61 | ```
62 | $ tox -e py27
63 | ```
64 |
65 | * Run Unit Test `under tests/api/v2`
66 |
67 | ```
68 | $ tox -e py27 -- api.v2
69 | ```
70 |
71 | ## Reference
72 |
73 |
74 |
75 |
76 |
--------------------------------------------------------------------------------
/openstack/cinder/api-paste.ini:
--------------------------------------------------------------------------------
1 | #############
2 | # Openstack #
3 | #############
4 |
5 | [composite:osapi_volume]
6 | use = call:cinder.api.openstack.urlmap:urlmap_factory
7 | /: osvolumeversions
8 | /v1: openstack_volume_api_v1
9 |
10 | [composite:openstack_volume_api_v1]
11 | use = call:cinder.api.auth:pipeline_factory
12 | noauth = faultwrap sizelimit noauth osapi_volume_app_v1
13 | keystone = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
14 | keystone_nolimit = faultwrap sizelimit authtoken keystonecontext osapi_volume_app_v1
15 |
16 | [filter:faultwrap]
17 | paste.filter_factory = cinder.api.openstack:FaultWrapper.factory
18 |
19 | [filter:noauth]
20 | paste.filter_factory = cinder.api.openstack.auth:NoAuthMiddleware.factory
21 |
22 | [filter:sizelimit]
23 | paste.filter_factory = cinder.api.sizelimit:RequestBodySizeLimiter.factory
24 |
25 | [app:osapi_volume_app_v1]
26 | paste.app_factory = cinder.api.openstack.volume:APIRouter.factory
27 |
28 | [pipeline:osvolumeversions]
29 | pipeline = faultwrap osvolumeversionapp
30 |
31 | [app:osvolumeversionapp]
32 | paste.app_factory = cinder.api.openstack.volume.versions:Versions.factory
33 |
34 | ##########
35 | # Shared #
36 | ##########
37 |
38 | [filter:keystonecontext]
39 | paste.filter_factory = cinder.api.auth:CinderKeystoneContext.factory
40 |
41 | [filter:authtoken]
42 | paste.filter_factory = keystone.middleware.auth_token:filter_factory
43 | service_protocol = http
44 | service_host = 127.0.0.1
45 | service_port = 5000
46 | auth_host = 127.0.0.1
47 | auth_port = 35357
48 | auth_protocol = http
49 | admin_tenant_name = DemoTenant
50 | admin_user = admin
51 | admin_password = 123456
52 |
--------------------------------------------------------------------------------
/openstack/cinder/cinder.conf:
--------------------------------------------------------------------------------
[DEFAULT]
rootwrap_config = /etc/cinder/rootwrap.conf
# Fixed typo: "api_paste_confg" is not a recognized option name and would
# be silently ignored; the real option is api_paste_config.
api_paste_config = /etc/cinder/api-paste.ini
iscsi_helper = tgtadm
volume_name_template = volume-%s
volume_group = cinder-volumes
verbose = True
auth_strategy = keystone
state_path = /var/lib/cinder
volumes_dir = /var/lib/cinder/volumes

sql_connection = mysql://cinder:123456@10.73.26.252/cinder
rabbit_password = 123456
14 |
--------------------------------------------------------------------------------
/openstack/define.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Shared settings for the OpenStack install scripts (meant to be sourced).
# NOTE(review): credentials are hardcoded demo values — do not use as-is.

# MySQL host and credentials.
HOST=10.73.26.252
MY_PASS=123456
MY_KS_PASS=123456
MY_CONF=/etc/mysql/my.cnf
# Keystone admin token, derived deterministically from the string "openstack".
TOKEN=$(echo -n openstack | md5sum | awk '{print $1}')
# Keystone admin endpoint (identity API v2.0).
ENDPOINT=http://$HOST:35357/v2.0

# Demo tenant/user/role to create in keystone.
TENANT_NAME=DemoTenant
TENANT_DESC="This is a DemoTenant"
USERNAME=DemoUser
PASSWORD=123456
EMAIL=zhiweik@gmail.com
ROLE=DemoRole

# Short names used when registering keystone services.
KS_NAME=keystone
NS_NAME=nova
VS_NAME=volume
IS_NAME=image
21 |
--------------------------------------------------------------------------------
/openstack/easystack/README.md:
--------------------------------------------------------------------------------
1 | # Memory of ES
2 |
3 | EasyStack is a simple project that let users easy to deploy and use OpenStack.
4 |
--------------------------------------------------------------------------------
/openstack/glance.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
# Install the glance server and client packages via apt.
function install_glance_packages() {
    glance_packages=("glance" "glance-api" "glance-common" "glance-registry" \
        "python-glance" "python-glanceclient")

    # Quote the array expansion so each element is passed as one word.
    apt-get -y install "${glance_packages[@]}"
}
9 |
10 | function create_glance_db() { # create the glance database and grant the 'glance' user full access from any host
11 | sql_create="CREATE DATABASE glance"
12 | sql_grant="GRANT ALL ON glance.* TO 'glance'@'%' IDENTIFIED BY '$MY_GL_PASS'" # NOTE(review): $MY_GL_PASS is presumably set by define.sh — confirm it is sourced before this runs
13 | mysql -u root -p$MY_PASS -e "$sql_create"
14 | mysql -u root -p$MY_PASS -e "$sql_grant"
15 | }
16 |
17 | function configure_glance() { # point glance-api/glance-registry at MySQL and keystone, then sync the DB schema
18 | sed -i 's#connection = sqlite.*$#connection = mysql://glance:'$MY_GL_PASS'@'$HOST'/glance#g' $GL_API_CONF # replace the default sqlite connection with MySQL
19 | sed -i 's/^#flavor=.*/flavor = keystone/g' $GL_API_CONF # enable the keystone auth pipeline flavor
20 | sed -i 's#connection = sqlite.*$#connection = mysql://glance:'$MY_GL_PASS'@'$HOST'/glance#g' $GL_REGISTRY_CONF
21 | sed -i 's/^#flavor=.*/flavor = keystone/g' $GL_REGISTRY_CONF
22 | glance-manage db_sync # create/upgrade the glance tables in the new database
23 | }
24 |
25 | function upload_glance_image() { # register a kernel (aki), ramdisk (ari) and machine image (ami) with glance
26 | glance image-create name="linux-image-kernel" disk_format=aki \
27 | container_format=aki < /data0/images/kernel
28 | glance image-create name="linux-image-ramdisk" disk_format=ari \
29 | container_format=ari < /data0/images/ramdisk
30 | glance image-create name="linux-image-img" disk_format=ami \
31 | container_format=ami --property kernel_id=xxx --property \
32 | ramdisk_id=xxx < /data0/images/ubuntu.img # NOTE(review): replace the xxx placeholders with the IDs returned by the two image-create calls above
33 | }
34 |
--------------------------------------------------------------------------------
/openstack/glance/glance-api-paste.ini:
--------------------------------------------------------------------------------
1 | # Use this pipeline for no auth or image caching - DEFAULT
2 | [pipeline:glance-api]
3 | pipeline = versionnegotiation unauthenticated-context rootapp
4 |
5 | # Use this pipeline for image caching and no auth
6 | [pipeline:glance-api-caching]
7 | pipeline = versionnegotiation unauthenticated-context cache rootapp
8 |
9 | # Use this pipeline for caching w/ management interface but no auth
10 | [pipeline:glance-api-cachemanagement]
11 | pipeline = versionnegotiation unauthenticated-context cache cachemanage rootapp
12 |
13 | # Use this pipeline for keystone auth
14 | [pipeline:glance-api-keystone]
15 | pipeline = versionnegotiation authtoken context rootapp
16 |
17 | # Use this pipeline for keystone auth with image caching
18 | [pipeline:glance-api-keystone+caching]
19 | pipeline = versionnegotiation authtoken context cache rootapp
20 |
21 | # Use this pipeline for keystone auth with caching and cache management
22 | [pipeline:glance-api-keystone+cachemanagement]
23 | pipeline = versionnegotiation authtoken context cache cachemanage rootapp
24 |
25 | [composite:rootapp]
26 | paste.composite_factory = glance.api:root_app_factory
27 | /: apiversions
28 | /v1: apiv1app
29 | /v2: apiv2app
30 |
31 | [app:apiversions]
32 | paste.app_factory = glance.api.versions:create_resource
33 |
34 | [app:apiv1app]
35 | paste.app_factory = glance.api.v1.router:API.factory
36 |
37 | [app:apiv2app]
38 | paste.app_factory = glance.api.v2.router:API.factory
39 |
40 | [filter:versionnegotiation]
41 | paste.filter_factory = glance.api.middleware.version_negotiation:VersionNegotiationFilter.factory
42 |
43 | [filter:cache]
44 | paste.filter_factory = glance.api.middleware.cache:CacheFilter.factory
45 |
46 | [filter:cachemanage]
47 | paste.filter_factory = glance.api.middleware.cache_manage:CacheManageFilter.factory
48 |
49 | [filter:context]
50 | paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
51 |
52 | [filter:unauthenticated-context]
53 | paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
54 |
55 | [filter:authtoken]
56 | paste.filter_factory = keystone.middleware.auth_token:filter_factory
57 | delay_auth_decision = true
58 |
--------------------------------------------------------------------------------
/openstack/glance/glance-registry-paste.ini:
--------------------------------------------------------------------------------
1 | # Use this pipeline for no auth - DEFAULT
2 | [pipeline:glance-registry]
3 | pipeline = unauthenticated-context registryapp
4 |
5 | # Use this pipeline for keystone auth
6 | [pipeline:glance-registry-keystone]
7 | pipeline = authtoken context registryapp
8 |
9 | [app:registryapp]
10 | paste.app_factory = glance.registry.api.v1:API.factory
11 |
12 | [filter:context]
13 | paste.filter_factory = glance.api.middleware.context:ContextMiddleware.factory
14 |
15 | [filter:unauthenticated-context]
16 | paste.filter_factory = glance.api.middleware.context:UnauthenticatedContextMiddleware.factory
17 |
18 | [filter:authtoken]
19 | paste.filter_factory = keystone.middleware.auth_token:filter_factory
20 |
--------------------------------------------------------------------------------
/openstack/heat.md:
--------------------------------------------------------------------------------
1 | # Heat
2 |
3 | Heat用来管理OpenStack的各种资源。你可以在heat template里面添加各种资源,然后由heat统一来创建、更新或删除。
4 |
5 | Heat可以用来创建OpenStack网络、镜像、虚拟机、用户等等东西,还可以创建一个搭载MySQL等service的虚拟机。
6 |
7 | ## Heat的组件
8 |
9 | [heat architecture](http://docs.openstack.org/developer/heat/architecture.html)
10 |
11 | ### heat
12 |
13 | heat客户端,是个命令行工具,和heat-api进行各种交互,终端用户也可以直接操作heat-api。
14 |
15 | ### heat-api
16 |
17 | heat-api提供了一个OpenStack原生API通过RPC将请求传递给heat-engine。
18 |
19 | ### heat-api-cfn
20 |
21 | 与heat-api类似,只不过这个API是用来兼容AWS CloudFormation API的。
22 |
23 | ### heat-api-cloudwatch
24 |
25 | 与heat-api类似,这个API是用来兼容AWS CloudWatch的。
26 |
27 | ### heat-engine
28 |
29 | 用来对template中的资源进行管理,并且将结果返回给API调用者。
30 |
31 | ### heat-manage
32 |
33 | 一个命令行工具,用来对heat数据库进行相关操作。
34 |
35 | ### heat-db-setup
36 |
37 | 一个命令行工具,用来配置heat本地数据库的。
38 |
39 | ### heat-keystone-setup
40 |
41 | 一个命令行工具,用来配置keystone让其和heat配合使用的。
42 |
43 | ## Heat Template
44 |
45 | [heat template guide](http://docs.openstack.org/developer/heat/template_guide/index.html)
46 |
47 | 在template里添加各种资源,然后heat会根据模板里的这些资源来调用OpenStack相关组件的API来进行操作。
48 |
49 | ## Environments
50 |
51 | Environment用来重写一个template运行时的属性或动作,比如template里设置一个用户的密码是`123456`,那么你可以在environment里override这个密码。
52 |
53 | ## Heat resource life cycle
54 |
55 | [heat资源生命周期](http://docs.openstack.org/developer/heat/pluginguide.html)
56 |
57 | * create
58 | * update
59 | * suspend
60 | * resume
61 | * delete
62 |
63 | ## Heat调用过程
64 |
65 | ```
66 | CLI ---> REST-API ---> AMQP ---> ENGINE
67 | ```
68 |
69 | Heat的各个组件都可以有多个。
70 |
71 | ## Heat源码索引
72 |
73 |
74 |
75 | ## Heat的用法
76 |
77 |
78 |
--------------------------------------------------------------------------------
/openstack/nova.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | function install_nova_packages() { # install the nova server and API packages via apt
4 | nova_packages=("nova" "nova-api")
5 | apt-get -y install ${nova_packages[@]} # -y: non-interactive
6 | }
7 |
8 | function create_nova_db() { # create the nova database and grant the 'nova' user full access from any host
9 | sql_create="CREATE DATABASE nova"
10 | sql_grant="GRANT ALL ON nova.* TO 'nova'@'%' IDENTIFIED BY '$MY_NV_PASS'" # NOTE(review): $MY_NV_PASS is presumably set by define.sh — confirm it is sourced before this runs
11 | mysql -u root -p$MY_PASS -e "$sql_create"
12 | mysql -u root -p$MY_PASS -e "$sql_grant"
13 | }
14 |
15 | function configure_nova() { : # ':' no-op placeholder — a completely empty function body is a bash syntax error; add nova configuration steps here
16 | }
17 |
18 | # http://wiki.openstack.org/LibvirtXMLCPUModel
19 |
--------------------------------------------------------------------------------
/openstack/nova/nova-compute.conf:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | libvirt_type=qemu
3 | libvirt_ovs_bridge=br-int
4 | libvirt_vif_type=ethernet
5 | libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
6 | libvirt_use_virtio_for_bridges=True
7 | libvirt_cpu_mode=none
8 |
--------------------------------------------------------------------------------
/openstack/nova/nova.conf:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 |
3 | # MySQL Connection #
4 | sql_connection=mysql://nova:123456@10.73.26.252/nova
5 |
6 | # nova-scheduler #
7 | rabbit_password=123456
8 | scheduler_driver=nova.scheduler.simple.SimpleScheduler
9 |
10 | # nova-api #
11 | cc_host=10.73.26.252
12 | auth_strategy=keystone
13 | s3_host=10.73.26.252
14 | ec2_host=10.73.26.252
15 | nova_url=http://10.73.26.252:8774/v1.1/
16 | ec2_url=http://10.73.26.252:8773/services/Cloud
17 | keystone_ec2_url=http://10.73.26.252:5000/v2.0/ec2tokens
18 | api_paste_config=/etc/nova/api-paste.ini
19 | allow_admin_api=true
20 | use_deprecated_auth=false
21 | ec2_private_dns_show_ip=True
22 | dmz_cidr=169.254.169.254/32
23 | ec2_dmz_host=10.73.26.252
24 | metadata_host=10.73.26.252
25 | metadata_listen=0.0.0.0
26 | enabled_apis=ec2,osapi_compute,metadata
27 |
28 | # Networking #
29 | network_api_class=nova.network.quantumv2.api.API
30 | quantum_url=http://10.73.26.252:9696
31 | quantum_auth_strategy=keystone
32 | quantum_admin_tenant_name=DemoTenant
33 | quantum_admin_username=admin
34 | quantum_admin_password=123456
35 | quantum_admin_auth_url=http://10.73.26.252:35357/v2.0
36 | libvirt_vif_driver=nova.virt.libvirt.vif.LibvirtHybridOVSBridgeDriver
37 | linuxnet_interface_driver=nova.network.linux_net.LinuxOVSInterfaceDriver
38 | firewall_driver=nova.virt.libvirt.firewall.IptablesFirewallDriver
39 |
40 | # Compute #
41 | compute_driver=libvirt.LibvirtDriver
42 | connection_type=libvirt
43 |
44 | # Cinder #
45 | volume_api_class=nova.volume.cinder.API
46 |
47 | # Glance #
48 | glance_api_servers=10.73.26.252:9292
49 | image_service=nova.image.glance.GlanceImageService
50 |
51 | # novnc #
52 | novnc_enable=true
53 | novncproxy_base_url=http://10.73.26.252:6080/vnc_auto.html
54 | vncserver_proxyclient_address=127.0.0.1
55 | vncserver_listen=0.0.0.0
56 |
57 | # Misc #
58 | logdir=/var/log/nova
59 | state_path=/var/lib/nova
60 | lock_path=/var/lock/nova
61 | root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
62 | verbose=true
63 |
--------------------------------------------------------------------------------
/openstack/openstackrc:
--------------------------------------------------------------------------------
1 | HOST=10.73.26.252
2 | TOKEN=$(echo -n openstack | md5sum | awk '{print $1}')
3 | export OS_NO_CACHE=1
4 | export OS_USERNAME=DemoUser
5 | export OS_PASSWORD=123456
6 | export OS_TENANT_NAME=DemoTenant
7 | export OS_TENANT_ID=
8 | export OS_REGION_NAME=China
9 |
10 | export SERVICE_TOKEN=$TOKEN
11 | export SERVICE_ENDPOINT=http://$HOST:35357/v2.0
12 |
--------------------------------------------------------------------------------
/openstack/quantum/api-paste.ini:
--------------------------------------------------------------------------------
1 | [composite:quantum]
2 | use = egg:Paste#urlmap
3 | /: quantumversions
4 | /v2.0: quantumapi_v2_0
5 |
6 | [composite:quantumapi_v2_0]
7 | use = call:quantum.auth:pipeline_factory
8 | noauth = extensions quantumapiapp_v2_0
9 | keystone = authtoken keystonecontext extensions quantumapiapp_v2_0
10 |
11 | [filter:keystonecontext]
12 | paste.filter_factory = quantum.auth:QuantumKeystoneContext.factory
13 |
14 | [filter:authtoken]
15 | paste.filter_factory = keystone.middleware.auth_token:filter_factory
16 | auth_host = 10.73.26.252
17 | auth_port = 35357
18 | auth_protocol = http
19 | admin_tenant_name = DemoTenant
20 | admin_user = admin
21 | admin_password = 123456
22 |
23 | [filter:extensions]
24 | paste.filter_factory = quantum.extensions.extensions:plugin_aware_extension_middleware_factory
25 |
26 | [app:quantumversions]
27 | paste.app_factory = quantum.api.versions:Versions.factory
28 |
29 | [app:quantumapiapp_v2_0]
30 | paste.app_factory = quantum.api.v2.router:APIRouter.factory
31 |
--------------------------------------------------------------------------------
/openstack/quantum/dhcp_agent.ini:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | # Show debugging output in log (sets DEBUG log level output)
3 | # debug = true
4 |
5 | # Where to store dnsmasq state files. This directory must be writable by the
6 | # user executing the agent. The value below is compatible with a default
7 | # devstack installation.
8 | state_path = /var/lib/quantum
9 |
10 | # The DHCP agent will resync its state with Quantum to recover from any
11 | # transient notification or rpc errors. The interval is number of
12 | # seconds between attempts.
13 | # resync_interval = 30
14 |
15 | # The DHCP agent requires that an interface driver be set. Choose the one that best
16 | # matches your plugin.
17 |
18 | # OVS
19 | interface_driver = quantum.agent.linux.interface.OVSInterfaceDriver
20 | # LinuxBridge
21 | #interface_driver = quantum.agent.linux.interface.BridgeInterfaceDriver
22 | # Ryu
23 | #interface_driver = quantum.agent.linux.interface.RyuInterfaceDriver
24 |
25 | # The agent can use other DHCP drivers. Dnsmasq is the simplest and requires
26 | # no additional setup of the DHCP server.
27 | dhcp_driver = quantum.agent.linux.dhcp.Dnsmasq
28 |
29 | # Allow overlapping IP (Must have kernel build with CONFIG_NET_NS=y and
30 | # iproute2 package that supports namespaces).
31 | use_namespaces = False
32 |
33 | # Use "sudo quantum-rootwrap /etc/quantum/rootwrap.conf" to use the real
34 | # root filter facility.
35 | # Change to "sudo" to skip the filtering and just run the command directly
36 | root_helper = sudo /usr/bin/quantum-rootwrap /etc/quantum/rootwrap.conf
37 |
--------------------------------------------------------------------------------
/php/README.md:
--------------------------------------------------------------------------------
1 | # PHP
2 |
3 | ## PHP添加多语言支持
4 |
5 | PHP的多语言支持是使用gettext扩展来实现的。
6 |
7 | 示例PHP代码如下:
8 |
9 | ```
10 | echo _('Hello, world!');
11 | echo gettext('Hello, PHP!');
12 | ```
13 |
14 | 其中`_()`为`gettext()`的简写。
15 |
16 | 由PHP文件生成po及mo文件的命令如下:
17 |
18 | ```
19 | # xgettext --output=zh.po --language=PHP test.php
20 | # msgfmt --output-file=zh.mo zh.po
21 | ```
22 |
23 | 生成po文件后,需要手动打开该文件进行编辑,然后再生成mo文件,最终程序只需要mo文件。
24 |
25 | 最终目录结构如下:
26 |
27 | ```
28 | ./lang.php
29 | ./lang/zh_CN/LC_MESSAGES/zh.mo
30 | ```
31 |
--------------------------------------------------------------------------------
/php/lang.php:
--------------------------------------------------------------------------------
1 | ";
12 | echo _("Who are you?") . "
";
13 | echo gettext("I do not tell you!") . "
";
14 | echo 'Swith to '.LANG_SWITCH.'';
15 | }
16 |
17 | function set_language() {
18 | $lang = @$_GET['lang'];
19 | if(!$lang) $lang = @$_SERVER['HTTP_ACCEPT_LANGUAGE']; // fall back to the browser's preferred language
20 | if(preg_match('/en.+us/i', $lang)) {
21 | $dom = 'en';
22 | $lang = 'en_US.utf-8'; // fixed typo: was 'uft-8', a locale name setlocale() would not recognize
23 | define('LANG_SWITCH', 'zh_CN');
24 | } else {
25 | $dom = 'zh';
26 | $lang = 'zh_CN.utf-8';
27 | define('LANG_SWITCH', 'en_US');
28 | }
29 | setlocale(LC_ALL, $lang);
30 | bindtextdomain($dom, __dir__ . '/lang'); // .mo files live under ./lang/<locale>/LC_MESSAGES/
31 | bind_textdomain_codeset($dom, 'UTF-8');
32 | textdomain($dom);
33 | }
34 | ?>
35 |
--------------------------------------------------------------------------------
/php/lang/zh_CN/LC_MESSAGES/zh.mo:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/php/lang/zh_CN/LC_MESSAGES/zh.mo
--------------------------------------------------------------------------------
/php/lang/zh_CN/LC_MESSAGES/zh.po:
--------------------------------------------------------------------------------
1 | # SOME DESCRIPTIVE TITLE.
2 | # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
3 | # This file is distributed under the same license as the PACKAGE package.
4 | # FIRST AUTHOR , YEAR.
5 | #
6 | #, fuzzy
7 | msgid ""
8 | msgstr ""
9 | "Project-Id-Version: PACKAGE VERSION\n"
10 | "Report-Msgid-Bugs-To: \n"
11 | "POT-Creation-Date: 2013-01-31 21:00+0800\n"
12 | "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
13 | "Last-Translator: FULL NAME \n"
14 | "Language-Team: LANGUAGE \n"
15 | "Language: \n"
16 | "MIME-Version: 1.0\n"
17 | "Content-Type: text/plain; charset=utf-8\n"
18 | "Content-Transfer-Encoding: 8bit\n"
19 |
20 | #: lang.php:11
21 | msgid "Hello, world!"
22 | msgstr "你好,世界!"
23 |
24 | #: lang.php:12
25 | msgid "Who are you?"
26 | msgstr "你是谁?"
27 |
28 | #: lang.php:13
29 | msgid "I do not tell you!"
30 | msgstr "我不告诉你!"
31 |
--------------------------------------------------------------------------------
/podman/.config/containers/storage.conf:
--------------------------------------------------------------------------------
1 | [storage]
2 | driver = "overlay"
3 |
4 | [storage.options]
5 | mount_program = "/usr/bin/fuse-overlayfs"
6 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/mounts.conf:
--------------------------------------------------------------------------------
1 | # Configuration file for default mounts in containers (see man 5
2 | # containers-mounts.conf for further information)
3 |
4 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/policy.json:
--------------------------------------------------------------------------------
1 | {
2 | "default": [
3 | {
4 | "type": "insecureAcceptAnything"
5 | }
6 | ],
7 | "transports":
8 | {
9 | "docker-daemon":
10 | {
11 | "": [{"type":"insecureAcceptAnything"}]
12 | }
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/registries.conf.d/docker.io.conf:
--------------------------------------------------------------------------------
1 | [[registry]]
2 | location = "docker.io"
3 |
4 | [[registry.mirror]]
5 | location = "docker.m.daocloud.io"
6 |
7 | [[registry.mirror]]
8 | location = "registry.dockermirror.com"
9 |
10 | [[registry.mirror]]
11 | location = "docker.zhai.cm"
12 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/registries.conf.d/ghcr.io.conf:
--------------------------------------------------------------------------------
1 | [[registry]]
2 | location = "ghcr.io"
3 |
4 | [[registry.mirror]]
5 | location = "ghcr.m.daocloud.io"
6 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/registries.d/default.yaml:
--------------------------------------------------------------------------------
1 | # This is a default registries.d configuration file. You may
2 | # add to this file or create additional files in registries.d/.
3 | #
4 | # lookaside: for reading/writing simple signing signatures
5 | # lookaside-staging: for writing simple signing signatures, preferred over lookaside
6 | #
7 | # lookaside and lookaside-staging take a value of the following:
8 | # lookaside: {schema}://location
9 | #
10 | # For reading signatures, schema may be http, https, or file.
11 | # For writing signatures, schema may only be file.
12 |
13 | # The default locations are built-in, for both reading and writing:
14 | # /var/lib/containers/sigstore for root, or
15 | # ~/.local/share/containers/sigstore for non-root users.
16 | default-docker:
17 | # lookaside: https://…
18 | # lookaside-staging: file:///…
19 |
20 | # The 'docker' indicator here is the start of the configuration
21 | # for docker registries.
22 | #
23 | # docker:
24 | #
25 | # privateregistry.com:
26 | # lookaside: https://privateregistry.com/sigstore/
27 | # lookaside-staging: /mnt/nfs/privateregistry/sigstore
28 |
29 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/systemd/clash.container:
--------------------------------------------------------------------------------
1 | # Location: /etc/containers/systemd/clash.container
2 | [Unit]
3 | Description=The Clash Service
4 | After=local-fs.target network.target
5 |
6 | [Container]
7 | ContainerName=clash
8 | Image=docker.io/siji/clash:latest
9 | Network=host
10 | Volume=/etc/clash:/etc/clash
11 | AddCapability=NET_ADMIN MKNOD
12 | AddDevice=/dev/net/tun
13 | AutoUpdate=registry
14 |
15 | [Service]
16 | TimeoutStartSec=900
17 |
18 | [Install]
19 | WantedBy=multi-user.target
20 |
--------------------------------------------------------------------------------
/podman/metadata/etc/containers/systemd/v2ray.kube:
--------------------------------------------------------------------------------
1 | # Location: /etc/containers/systemd/v2ray.kube
2 | [Unit]
3 | Description=The v2ray Server
4 | After=local-fs.target network.target
5 |
6 | [Kube]
7 | Yaml=/etc/containers/manifest/k8s-v2ray.yaml
8 |
9 | [Service]
10 | TimeoutStartSec=900
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman-auto-update.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Podman auto-update service
3 | Documentation=man:podman-auto-update(1)
4 | Wants=network-online.target
5 | After=network-online.target
6 |
7 | [Service]
8 | Type=oneshot
9 | ExecStart=/usr/bin/podman auto-update
10 | ExecStartPost=/usr/bin/podman image prune -f
11 |
12 | [Install]
13 | WantedBy=default.target
14 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman-auto-update.timer:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Podman auto-update timer
3 |
4 | [Timer]
5 | OnCalendar=daily
6 | RandomizedDelaySec=900
7 | Persistent=true
8 |
9 | [Install]
10 | WantedBy=timers.target
11 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman-clean-transient.service:
--------------------------------------------------------------------------------
1 | # This service runs once each boot to remove potential leftover
2 | # container state from previous boots.
3 |
4 | # This is needed when using transient storage mode in podman where the
5 | # database and other configs are stored in tmpfs, but some other files
6 | # are not. If we don't run this after an unclean boot then there may
7 | # be some leftover files that grow over time.
8 |
9 | [Unit]
10 | Description=Clean up podman transient data
11 | RequiresMountsFor=%t/containers
12 | Documentation=man:podman-system-prune(1)
13 | Requires=boot-complete.target
14 | After=local-fs.target boot-complete.target
15 |
16 | [Service]
17 | Type=oneshot
18 | ExecStart=/usr/bin/podman system prune --external
19 |
20 | [Install]
21 | WantedBy=default.target
22 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman-kube@.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=A template for running K8s workloads via podman-kube-play
3 | Documentation=man:podman-kube-play(1)
4 | Wants=network-online.target
5 | After=network-online.target
6 | RequiresMountsFor=%t/containers
7 |
8 | [Service]
9 | Environment=PODMAN_SYSTEMD_UNIT=%n
10 | TimeoutStopSec=70
11 | ExecStart=/usr/bin/podman kube play --replace --service-container=true %I
12 | ExecStop=/usr/bin/podman kube down %I
13 | Type=notify
14 | NotifyAccess=all
15 |
16 | [Install]
17 | WantedBy=default.target
18 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman-restart.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Podman Start All Containers With Restart Policy Set To Always
3 | Documentation=man:podman-start(1)
4 | StartLimitIntervalSec=0
5 | Wants=network-online.target
6 | After=network-online.target
7 |
8 | [Service]
9 | Type=oneshot
10 | RemainAfterExit=true
11 | Environment=LOGGING="--log-level=info"
12 | ExecStart=/usr/bin/podman $LOGGING start --all --filter restart-policy=always
13 | ExecStop=/bin/sh -c '/usr/bin/podman $LOGGING stop $(/usr/bin/podman container ls --filter restart-policy=always -q)'
14 |
15 | [Install]
16 | WantedBy=default.target
17 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Podman API Service
3 | Requires=podman.socket
4 | After=podman.socket
5 | Documentation=man:podman-system-service(1)
6 | StartLimitIntervalSec=0
7 |
8 | [Service]
9 | Delegate=true
10 | Type=exec
11 | KillMode=process
12 | Environment=LOGGING="--log-level=info"
13 | ExecStart=/usr/bin/podman $LOGGING system service
14 |
15 | [Install]
16 | WantedBy=default.target
17 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/systemd/system/podman.socket:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Podman API Socket
3 | Documentation=man:podman-system-service(1)
4 |
5 | [Socket]
6 | ListenStream=%t/podman/podman.sock
7 | SocketMode=0660
8 |
9 | [Install]
10 | WantedBy=sockets.target
11 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/tmpfiles.d/containers-common.conf:
--------------------------------------------------------------------------------
1 | d %S/containers 0755
2 |
--------------------------------------------------------------------------------
/podman/metadata/usr/lib/tmpfiles.d/podman.conf:
--------------------------------------------------------------------------------
1 | # /tmp/podman-run-* directory can contain content for Podman containers that have run
2 | # for many days. This following line prevents systemd from removing this content.
3 | x /tmp/podman-run-*
4 | x /tmp/storage-run-*
5 | x /tmp/containers-user-*
6 | x /tmp/run-*/libpod
7 | D! /var/lib/containers/storage/tmp 0700 root root
8 | D! /run/podman 0700 root root
9 | D! /var/lib/cni/networks
10 | # Remove /var/tmp/container_images* podman temporary directories on each
11 | # boot which are created when pulling or saving images.
12 | R! /var/tmp/container_images*
13 |
--------------------------------------------------------------------------------
/podman/src/wasm/Dockerfile:
--------------------------------------------------------------------------------
1 | # podman build -t docker.io/siji/wasm --platform wasi/wasm .
2 | FROM scratch
3 |
4 | COPY main.wasm /
5 | CMD ["/main.wasm"]
6 |
--------------------------------------------------------------------------------
/podman/src/wasm/main.go:
--------------------------------------------------------------------------------
1 | // GOOS=wasip1 GOARCH=wasm go build -o main.wasm main.go
2 | package main
3 |
4 | import "fmt"
5 |
6 | func main() {
7 | fmt.Println("Hello, WebAssembly!") // minimal WASI smoke test: proves the wasm module runs and writes to stdout
8 | }
9 |
--------------------------------------------------------------------------------
/pxe-install-os/README.mkd:
--------------------------------------------------------------------------------
1 | # PXE+TFTP+DHCP
2 |
3 | 安装操作系统一般都需要光驱,而PXE可以使你直接通过网络来安装操作系统。
4 |
5 | ## PXE是什么
6 |
7 | PXE(Preboot Execution Environment)是Intel开发的,可以使得通过网络来启动并安装操作系统。
8 |
9 | TFTP(Trivial File Transfer Protocol),这个协议是用来进行小文件传输的。
10 |
11 | DHCP(Dynamic Host Configuration Protocol),动态主机配置协议,不详说了。
12 |
13 | ## 安装软件包
14 |
15 | ```
16 | # yum -y install tftp-server dhcp httpd
17 | ```
18 |
19 | ## 配置TFTP
20 |
21 | * /etc/xinetd.d/tftp
22 |
23 | 将其中的`disable`改为`no`,详情请见本目录中的`tftp`文件
24 |
25 | * /var/lib/tftpboot
26 |
27 | 其目录树如下
28 |
29 | ```
30 | /var/lib/tftpboot
31 | |--/var/lib/tftpboot/boot.msg
32 | |--/var/lib/tftpboot/initrd.img
33 | |--/var/lib/tftpboot/pxelinux.0
34 | |--/var/lib/tftpboot/pxelinux.cfg
35 | | `--/var/lib/tftpboot/pxelinux.cfg/default
36 | |--/var/lib/tftpboot/splash.xpm.gz
37 | `--/var/lib/tftpboot/vmlinuz
38 | ```
39 |
40 | `boot.msg`, `default`见本目录下文件。
41 |
42 | `initrd.img`, `vmlinuz`在`rhel.iso/images/pxeboot/`目录中。
43 |
44 | `pxelinux.0`位置`rhel.iso/Packages/syslinux-4.02-8.el6.x86_64.rpm/usr/share/syslinux/pxelinux.0`。
45 |
46 | `splash.xpm.gz`位置`rhel.iso/EFI/BOOT/splash.xpm.gz`。
47 |
48 | ## 配置DHCP
49 |
50 | * /etc/sysconfig/dhcpd
51 |
52 | 将其中的`DHCPDARGS`设置成你的`DHCP Server`所使用的网卡名称。
53 |
54 | * /etc/dhcp/dhcpd.conf
55 |
56 | 内容请见本目录中的文件。
57 |
58 | ## 最后
59 |
60 | 我是在上一次成功配置之后总结的,这次配置并没有验证。不过,应该不会有大问题,小问题自己可以调试一下解决。
61 |
--------------------------------------------------------------------------------
/pxe-install-os/boot.msg:
--------------------------------------------------------------------------------
1 | #########################################################
2 |
3 | Welcome to The IBM Linux Installer
4 |
5 | Enter the number you want:
6 |
7 | 1. Install or upgrade an existing system
8 | 2. Rescue installed system
9 | 3. Boot from local drive
10 | 4. Memory test
11 |
12 | [F1 - Show This Main Menu]
13 |
14 | #########################################################
15 |
--------------------------------------------------------------------------------
/pxe-install-os/default:
--------------------------------------------------------------------------------
1 | default 1
2 | prompt 1
3 | timeout 200
4 |
5 | display boot.msg
6 | F1 boot.msg
7 |
8 | label 1
9 | menu label ^Install or upgrade an existing system
10 | kernel vmlinuz
11 | append ksdevice=eth1 ks=http://9.115.78.28/kickstart/rhel.cfg initrd=initrd.img
12 |
13 | label 2
14 | menu label ^Rescue installed system
15 | kernel vmlinuz
16 | append initrd=initrd.img rescue
17 |
18 | label 3
19 | menu label ^Boot from local drive
20 | localboot
21 |
22 | label 4
23 | menu label ^Memory test
24 | kernel memtest
25 | append -
26 |
--------------------------------------------------------------------------------
/pxe-install-os/dhcpd.conf:
--------------------------------------------------------------------------------
1 | #
2 | # DHCP Server Configuration file.
3 | # see /usr/share/doc/dhcp*/dhcpd.conf.sample
4 | # see 'man 5 dhcpd.conf'
5 | #
6 |
7 | option space PXE;
8 | option PXE.mtftp-ip code 1 = ip-address;
9 | option PXE.mtftp-cport code 2 = unsigned integer 16;
10 | option PXE.mtftp-sport code 3 = unsigned integer 16;
11 | option PXE.mtftp-tmout code 4 = unsigned integer 8;
12 | option PXE.mtftp-delay code 5 = unsigned integer 8;
13 | option PXE.discovery-control code 6 = unsigned integer 8;
14 | option PXE.discovery-mcast-addr code 7 = ip-address;
15 | option domain-name-servers 172.16.0.110;
16 |
17 | allow booting;allow bootp;
18 | ddns-update-style interim;
19 | ignore client-updates;
20 | max-lease-time 604800;
21 | default-lease-time 604800;
22 | next-server 9.115.78.28;
23 | deny unknown-clients;
24 |
25 | subnet 9.115.78.0 netmask 255.255.255.0 {
26 | max-lease-time 120;
27 | default-lease-time 120;
28 | # authoritative;
29 | option routers 9.115.78.1;
30 | option subnet-mask 255.255.255.0;
31 | option broadcast-address 9.115.78.28;
32 |
33 | #range 9.115.78.26 9.115.78.26;
34 | }
35 |
36 | group {
37 | filename "pxelinux.0";
38 | # host os43{
39 | # hardware ethernet E4:1F:13:EF:xx:xx;
40 | # server-name "os43";
41 | # fixed-address 9.115.78.43;
42 | # }
43 | # host os31{
44 | # hardware ethernet E4:1F:13:EF:xx:xx;
45 | # server-name "os31";
46 | # fixed-address 9.115.78.31;
47 | # }
48 | host os26{
49 | hardware ethernet E4:1F:13:EF:xx:xx;
50 | server-name "os26";
51 | fixed-address 9.115.78.26;
52 | }
53 | }
54 |
--------------------------------------------------------------------------------
/pxe-install-os/rhel.cfg:
--------------------------------------------------------------------------------
1 | # Kickstart file automatically generated by anaconda.
2 |
3 | #version=DEVEL
4 | install
5 | url --url=http://9.115.78.28/kickstart/rhel
6 | lang en_US.UTF-8
7 | keyboard us
8 | network --onboot yes --device eth0 --bootproto dhcp --noipv6
9 | network --onboot no --device eth1 --bootproto dhcp --noipv6
10 | network --onboot no --device usb0 --bootproto dhcp --noipv6
11 | rootpw --iscrypted $6$WV83nY0y/pN46p.l$x3f.lkG5VpJ6VN4WLi6rWuunJmZJPcLGuXB.2uR.em.pKfPTTyeCPFAERfO4berp3dTSgnbk1.qy8H/JhD6vc0
12 | firewall --service=ssh
13 | authconfig --enableshadow --passalgo=sha512
14 | selinux --disabled
15 | timezone --utc Asia/Shanghai
16 | skipx
17 | bootloader --location=mbr --driveorder=sda --append="crashkernel=auto rhgb quiet"
18 | # The following is the partition information you requested
19 | # Note that any partitions you deleted are not expressed
20 | # here so unless you clear all partitions first, this is
21 | # not guaranteed to work
22 | zerombr
23 | clearpart --all --initlabel
24 |
25 | part / --fstype=ext4 --size=51200
26 | part swap --size=8000
27 | part /var/log --fstype=ext4 --size=40960
28 | part /var --fstype=ext4 --grow --size=30000
29 |
30 | %packages
31 | @base
32 | @client-mgmt-tools
33 | @console-internet
34 | @core
35 | @debugging
36 | @directory-client
37 | @hardware-monitoring
38 | @java-platform
39 | @large-systems
40 | @network-file-system-client
41 | @performance
42 | @perl-runtime
43 | @server-platform
44 | @server-policy
45 | pax
46 | python-dmidecode
47 | oddjob
48 | sgpio
49 | device-mapper-persistent-data
50 | samba-winbind
51 | certmonger
52 | pam_krb5
53 | krb5-workstation
54 | perl-DBD-SQLite
55 | telnet
56 | nmap
57 | tree
58 | %end
59 |
--------------------------------------------------------------------------------
/pxe-install-os/tftp:
--------------------------------------------------------------------------------
1 | # default: off
2 | # description: The tftp server serves files using the trivial file transfer \
3 | # protocol. The tftp protocol is often used to boot diskless \
4 | # workstations, download configuration files to network-aware printers, \
5 | # and to start the installation process for some operating systems.
6 | service tftp
7 | {
8 | socket_type = dgram
9 | protocol = udp
10 | wait = yes
11 | user = root
12 | server = /usr/sbin/in.tftpd
13 | server_args = -s /var/lib/tftpboot
14 | disable = no
15 | per_source = 11
16 | cps = 100 2
17 | flags = IPv4
18 | }
19 |
--------------------------------------------------------------------------------
/pypi/README.md:
--------------------------------------------------------------------------------
1 | # 搭建 pypi 镜像
2 |
3 | 简单几步,更多用法请看文档:
4 |
5 | ## 下载包
6 |
7 | ```
8 | # pip2tgz packages/ foo==1.2
9 | # pip2tgz packages/ -r requirements.txt
10 | ```
11 |
12 | ## 创建索引
13 |
14 | ```
15 | # dir2pi packages/
16 | ```
17 |
18 | ## 创建 Web Server
19 |
20 | Web root 目录指向 `packages/simple`
21 |
--------------------------------------------------------------------------------
/python-pip/README.md:
--------------------------------------------------------------------------------
1 | # Python pip
2 |
3 | 最开始 Python 打包非常不方便,安装之后也不知道都装了哪些内容,想要删除的话非常麻烦。
4 |
5 | 后来 Python 成立了个 pypa 就是 [Python Packaging Authority][pypa]。然后就有了 pip 以及优化扩展后的打包方式。
6 |
7 | [pypa]: https://github.com/pypa
8 |
9 | 具体样例请看该目录下的`zenith`项目。
10 |
11 | 最开始我遇到一个很郁闷的事情,用`python setup.py install`安装时,代码也会安装在 EGG 目录中,一直没查到原因。后来用`pip install git+http://localhost/zenith.git`安装就会分开了,然后我也没详细查原因。
12 |
--------------------------------------------------------------------------------
/python-pip/zenith/setup.cfg:
--------------------------------------------------------------------------------
1 | [wheel]
2 | universal = 1
3 |
--------------------------------------------------------------------------------
/python-pip/zenith/setup.py:
--------------------------------------------------------------------------------
1 | try:
2 | from setuptools import setup, find_packages
3 | except ImportError:
4 | from distutils.core import setup, find_packages
5 |
6 | setup(
7 | name="zenith",
8 | version="0.0.1",
9 | packages=find_packages(exclude=["tests"]),
10 | description="Zenith sample project",
11 | install_requires=[""],
12 | entry_points={
13 | "console_scripts": [
14 | "zenith=zenith.app:main"
15 | ]
16 | }
17 | )
18 |
--------------------------------------------------------------------------------
/python-pip/zenith/zenith/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/python-pip/zenith/zenith/__init__.py
--------------------------------------------------------------------------------
/python-pip/zenith/zenith/app.py:
--------------------------------------------------------------------------------
1 | from zenith import common
2 |
3 | def main():
4 | users = common.users()
5 | print users
6 |
--------------------------------------------------------------------------------
/python-pip/zenith/zenith/common.py:
--------------------------------------------------------------------------------
1 | def users():
2 | return [{"username": "Larry"}]
3 |
--------------------------------------------------------------------------------
/python-pip/zenith/zenith/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/python-pip/zenith/zenith/tests/__init__.py
--------------------------------------------------------------------------------
/python-virtualenv/README.md:
--------------------------------------------------------------------------------
1 | # Python Virtual Environment
2 |
3 | 这个东西比较好,方便用来开发并且不会破坏系统现有的依赖。
4 |
5 | 会在你机器上指定目录下创建一套虚拟的 Python 开发环境,你可以随意的安装、卸载 Python 包,使用非常方便。
6 |
7 | 这个东西和 Ruby 的 bundle 有点像,我很喜欢。
8 |
9 |
10 | ## 用法
11 |
12 | ### 创建环境
13 |
14 | ```
15 | $ pip install virtualenv
16 | $ virtualenv .venv
17 | $ source .venv/bin/activate
18 | $ type python
19 | $ type pip
20 | $ pip install six
21 | $ ls .venv/lib/python/site-packages/six..
22 | ```
23 |
24 | 然后你使用的 pip 和 python 命令就是虚拟环境里的了,你可以去用它们来做各种操作而不去担心破坏自己操作系统的依赖了。
25 |
26 |
27 | ## 退出Python虚拟环境
28 |
29 | ```
30 | $ deactivate
31 | ```
32 |
--------------------------------------------------------------------------------
/python/README.md:
--------------------------------------------------------------------------------
1 | # Python
2 |
3 | At first, let's fuck Python before using it.
4 |
5 | ## Pep8
6 |
7 | Python code syntax format.
8 |
9 | I hate to wrap a line when it exceeds 79 characters.
10 |
11 | [pep8 document][pep8-doc]
12 |
13 | [pep8-doc]: http://pep8.readthedocs.org/en/latest/
14 |
15 | 可以顺便了解一下`flake8`。
16 |
17 | ## Python internationalization
18 |
19 | [Babel](https://pypi.python.org/pypi/Babel/)用着不错。
20 |
21 | ## Debug python code
22 |
23 | ```python
24 | def add(x, y):
25 | return x + y
26 |
27 | a = 'abc'
28 | b = 'efg'
29 | import pdb
30 | pdb.set_trace()
31 | ab = add(a, b)
32 | print ab
33 | ```
34 |
35 | When you execute these code, the pdb debugger will appear.
36 |
37 | You can use `help` in pdb to show available commands, and use `help command` to show detailed help message of the command.
38 |
--------------------------------------------------------------------------------
/python/calculate.c:
--------------------------------------------------------------------------------
1 | #include <stdio.h>
2 |
3 | int add(int m, int n)
4 | {
5 | return m + n;
6 | }
7 |
8 | int square(int n)
9 | {
10 | return n * n;
11 | }
12 |
13 | void message(char *str, int n)
14 | {
15 | printf("MSG: %s\tINT: %d\n", str, n);
16 | }
17 |
--------------------------------------------------------------------------------
/python/calculate_module.c:
--------------------------------------------------------------------------------
1 | #include <Python.h>
2 | #include "calculate.c"
3 |
4 | static PyObject *
5 | PyCal_add(PyObject *self, PyObject *args)
6 | {
7 | int m;
8 | int n;
9 | int t;
10 | int ret;
11 | if(!PyArg_ParseTuple(args, "iii", &m, &n, &t))
12 | {
13 | return NULL;
14 | }
15 | ret = add(m, n);
16 | return Py_BuildValue("i", ret);
17 | }
18 |
19 | static PyObject *
20 | PyCal_square(PyObject *self, PyObject *args)
21 | {
22 | int n;
23 | int ret;
24 | if(!PyArg_ParseTuple(args, "i", &n))
25 | {
26 | return NULL;
27 | }
28 | ret = square(n);
29 | return Py_BuildValue("i", ret);
30 | }
31 |
32 | static PyObject *
33 | PyCal_message(PyObject *self, PyObject *args)
34 | {
35 | char *str;
36 | int n;
37 | if(!PyArg_ParseTuple(args, "si", &str, &n))
38 | {
39 | return NULL;
40 | }
41 | message(str, n);
42 | return Py_None;
43 | }
44 |
45 | static PyMethodDef CalMethods[] = {
46 | {"add", PyCal_add, METH_VARARGS, "Add two integer values."},
47 | {"square", PyCal_square, METH_VARARGS, "Square of integer value."},
48 | {"message", PyCal_message, METH_VARARGS, "Output a message and an integer."},
49 | {NULL, NULL, 0, NULL}
50 | };
51 |
52 | PyMODINIT_FUNC
53 | initcalculate(void)
54 | {
55 | PyObject *m;
56 | m = Py_InitModule("calculate", CalMethods);
57 | if(m == NULL)
58 | {
59 | return;
60 | }
61 | }
62 |
--------------------------------------------------------------------------------
/redis/README.md:
--------------------------------------------------------------------------------
1 | # Redis
2 |
3 | ## Redis cluster mode
4 |
5 | Put the `redis` directory under `/etc` directory.
6 |
7 | Start Redis instances:
8 |
9 | ```
10 | docker run --name=redis7001 --net=host -d -v /etc/redis:/etc/redis redis:3.2.1 redis-server /etc/redis/7001/redis.conf
11 | ...
12 | docker run --name=redis7006 --net=host -d -v /etc/redis:/etc/redis redis:3.2.1 redis-server /etc/redis/7006/redis.conf
13 | ```
14 |
15 | Create a Redis cluster by using above instances:
16 |
17 | ```
18 | docker run --rm -it siji/redis:cluster-tool sh
19 | rcm -a redis321 create 192.168.122.20:7001 192.168.122.20:7002 192.168.122.20:7003 192.168.122.20:7004 192.168.122.20:7005 192.168.122.20:7006
20 | ```
21 |
22 | If you want to disable authentication, you can remove the `requirepass redis321` in all `redis.conf` files, and run:
23 |
24 | ```
25 | docker run --rm -it siji/redis:cluster-tool sh
26 | rcm create 192.168.122.20:7001 192.168.122.20:7002 192.168.122.20:7003 192.168.122.20:7004 192.168.122.20:7005 192.168.122.20:7006
27 | ```
28 |
29 | or
30 |
31 | ```
32 | docker run --rm -it siji/redis:cluster-tool sh
33 | redis-trib.rb create --replicas 1 192.168.122.20:7001:redis321 192.168.122.20:7002 192.168.122.20:7003 192.168.122.20:7004 192.168.122.20:7005 192.168.122.20:7006
34 | ```
35 |
36 | Then using following command to connect:
37 |
38 | ```
39 | redis-cli -c -h 192.168.122.20 -p 7001
40 | ```
41 |
--------------------------------------------------------------------------------
/redis/redis/7001/redis.conf:
--------------------------------------------------------------------------------
1 | port 7001
2 | cluster-enabled yes
3 | cluster-config-file nodes.conf
4 | cluster-node-timeout 5000
5 | appendonly yes
6 | requirepass redis321
7 |
--------------------------------------------------------------------------------
/redis/redis/7002/redis.conf:
--------------------------------------------------------------------------------
1 | port 7002
2 | cluster-enabled yes
3 | cluster-config-file nodes.conf
4 | cluster-node-timeout 5000
5 | appendonly yes
6 | requirepass redis321
7 |
--------------------------------------------------------------------------------
/redis/redis/7003/redis.conf:
--------------------------------------------------------------------------------
1 | port 7003
2 | cluster-enabled yes
3 | cluster-config-file nodes.conf
4 | cluster-node-timeout 5000
5 | appendonly yes
6 | requirepass redis321
7 |
--------------------------------------------------------------------------------
/redis/redis/7004/redis.conf:
--------------------------------------------------------------------------------
1 | port 7004
2 | cluster-enabled yes
3 | cluster-config-file nodes.conf
4 | cluster-node-timeout 5000
5 | appendonly yes
6 | requirepass redis321
7 |
--------------------------------------------------------------------------------
/redis/redis/7005/redis.conf:
--------------------------------------------------------------------------------
1 | port 7005
2 | cluster-enabled yes
3 | cluster-config-file nodes.conf
4 | cluster-node-timeout 5000
5 | appendonly yes
6 | requirepass redis321
7 |
--------------------------------------------------------------------------------
/redis/redis/7006/redis.conf:
--------------------------------------------------------------------------------
1 | port 7006
2 | cluster-enabled yes
3 | cluster-config-file nodes.conf
4 | cluster-node-timeout 5000
5 | appendonly yes
6 | requirepass redis321
7 |
--------------------------------------------------------------------------------
/repository/qr_alipay_pay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/repository/qr_alipay_pay.png
--------------------------------------------------------------------------------
/repository/qr_wechat_pay.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/repository/qr_wechat_pay.png
--------------------------------------------------------------------------------
/resources/README.md:
--------------------------------------------------------------------------------
1 | # 一些学习资源
2 |
3 | ## Linux
4 |
5 | [Linux tips](linux.md)
6 |
7 | ## Guide to IP Layer Network Administration with Linux
8 |
9 | 可以当作字典来查询各种命令及应用场景,有事没事多看看。
10 |
11 | Linux系统IP层网络管理指南:
12 |
13 | ## GPG Howto document
14 |
15 | GPG的中文使用文档,安全通信的必备神器。
16 |
17 | ## Nginx开发从入门到精通
18 |
19 | 淘宝核心系统服务器平台组成员共同写:
20 |
21 | ## Redis和Memcached的比较
22 |
23 | 以下几点是二者很明确的差别,可以根据自己需求来选择适合自己的。
24 |
25 | * redis persists the entire dataset to disk
26 | * redis doesn't support LRU or any similar policy for handling overload
27 | * redis doesn't support CAS (check and set) which is useful for maintaining cache consistency
28 | * redis supports richer data structures other than plain strings and can operate on them internally. For example, you can intersect two sets and return the result.
29 | * redis supports master/slave replication
30 |
31 | 如果以上几点不是你最关心的,那么你可以参考如下的对比测试:
32 |
33 | 最好你自己进行测试一下,因为这是以前低版本的测试数据。(注意:不要用虚拟机进行测试)
34 |
35 | 1.
36 | 2.
37 | 3.
38 | 4.
39 | 5.
40 |
--------------------------------------------------------------------------------
/resources/adblock.txt:
--------------------------------------------------------------------------------
1 | !-----------------
2 | !360
3 | 360.cn
4 | 360.com
5 | 360safe.com
6 | qihoo.com
7 | so.com
8 | sou.com
9 | haosou.com
10 | !-----------------
11 | !CSDN
12 | csdn.net
13 | !---------------
14 | !Sogou
15 | sogou.com
16 |
--------------------------------------------------------------------------------
/resources/chrome.md:
--------------------------------------------------------------------------------
1 | # Chrome
2 |
3 | ## Remove the search engines
4 |
5 | Sometimes you may encounter the `Search engines added by extensions`, and you can't remove them even when you uninstall the extension.
6 |
7 | The only solution is to manually open the `Web Data` file with sqlitebrowser, find the `keywords` table, and remove the corresponding rows.
8 |
--------------------------------------------------------------------------------
/resources/switchy.txt:
--------------------------------------------------------------------------------
1 | eyJjb25maWciOiJ7XCJmaXJzdFRpbWVcIjpcIjpdXCIsXCJwcm94eU1vZGVcIjpcImF1dG9cIixcInByb3h5U2VydmVyXCI6XCJcIixcInJ1bGVMaXN0VXJsXCI6XCJodHRwOi8vYXV0b3Byb3h5LWdmd2xpc3QuZ29vZ2xlY29kZS5jb20vc3ZuL3RydW5rL2dmd2xpc3QudHh0XCIsXCJydWxlTGlzdFJlbG9hZFwiOlwiMTgwXCIsXCJydWxlTGlzdFByb2ZpbGVJZFwiOlwiU1NILURcIixcInJ1bGVMaXN0QXV0b1Byb3h5XCI6dHJ1ZSxcInN3aXRjaFJ1bGVzXCI6dHJ1ZSxcInJ1bGVMaXN0RW5hYmxlZFwiOnRydWUsXCJtb25pdG9yUHJveHlDaGFuZ2VzXCI6dHJ1ZSxcInByZXZlbnRQcm94eUNoYW5nZXNcIjpmYWxzZSxcInF1aWNrU3dpdGNoXCI6ZmFsc2UsXCJzdGFydHVwUHJvZmlsZUlkXCI6XCJcIixcImNvbmZpcm1EZWxldGlvblwiOnRydWUsXCJyZWZyZXNoVGFiXCI6ZmFsc2UsXCJhdXRvUGFjU2NyaXB0UGF0aFwiOlwiOm1lbW9yeTpcIixcInBhY1NjcmlwdERhdGFcIjpcIlwiLFwicnVsZXNGaXJzdFRpbWVcIjpcIjtdXCIsXCJxdWlja1J1bGVQcm9maWxlSWRcIjpcIlNTSC1EXCIsXCJxdWlja1J1bGVQYXR0ZXJuVHlwZVwiOlwid2lsZGNhcmRcIixcInByb3h5Q29uZmlnVXJsXCI6XCI6bWVtb3J5OlwiLFwibGFzdExpc3RVcGRhdGVcIjpcIlRodSBEZWMgMTEgMjAxNCAxNTo1NzoxNSBHTVQrMDgwMCAoQ1NUKVwifSIsImRlZmF1bHRSdWxlIjoie1wiaWRcIjpcImRlZmF1bHRSdWxlXCIsXCJuYW1lXCI6XCJEZWZhdWx0IFJ1bGVcIixcInVybFBhdHRlcm5cIjpcIlwiLFwicGF0dGVyblR5cGVcIjpcIndpbGRjYXJkXCIsXCJwcm9maWxlSWRcIjpcImRpcmVjdFwifSIsInByb2ZpbGVzIjoie1wiU1NILURcIjp7XCJuYW1lXCI6XCJTU0gtRFwiLFwicHJveHlNb2RlXCI6XCJtYW51YWxcIixcInByb3h5SHR0cFwiOlwiXCIsXCJ1c2VTYW1lUHJveHlcIjpmYWxzZSxcInByb3h5SHR0cHNcIjpcIlwiLFwicHJveHlGdHBcIjpcIlwiLFwicHJveHlTb2Nrc1wiOlwiMTI3LjAuMC4xOjcwNzBcIixcInNvY2tzVmVyc2lvblwiOjUsXCJwcm94eUV4Y2VwdGlvbnNcIjpcImxvY2FsaG9zdDsgMTI3LjAuMC4xOyA8bG9jYWw+XCIsXCJwcm94eUNvbmZpZ1VybFwiOlwiXCIsXCJjb2xvclwiOlwiYmx1ZVwiLFwiaWRcIjpcIlNTSC1EXCJ9fSIsInF1aWNrU3dpdGNoUHJvZmlsZXMiOiJbXSIsInJ1bGVzIjoie30iLCJzZWxlY3RlZFByb2ZpbGUiOiJ7XCJpZFwiOlwiYXV0b1wiLFwibmFtZVwiOlwiW0F1dG8gU3dpdGNoXVwiLFwicHJveHlNb2RlXCI6XCJhdXRvXCIsXCJjb2xvclwiOlwiYXV0by1ibHVlXCIsXCJpc0F1dG9tYXRpY01vZGVQcm9maWxlXCI6dHJ1ZSxcInByb3h5Q29uZmlnVXJsXCI6XCI6bWVtb3J5OlwifSJ9
--------------------------------------------------------------------------------
/resources/wiznote.md:
--------------------------------------------------------------------------------
1 | # 为知笔记 Hack
2 |
3 | I Love WizNote.
4 |
5 | ## Custom CSS
6 |
7 | [wiznote.css](wiznote.css)
8 |
9 |
10 | ## Disable Line Number
11 |
12 | ```
13 | # sed -i 's/linenums //g' /usr/share/wiznote/files/markdown/wiznote-markdown-inject.js
14 | ```
15 |
--------------------------------------------------------------------------------
/rest-api/README.md:
--------------------------------------------------------------------------------
1 | # ReST API
2 |
3 | ## Python Rest API framework
4 |
5 | Falcon is a high-performance Python framework for building cloud APIs, smart proxies, and app backends.
6 |
7 | URL:
8 |
9 |
10 | ## PHP Rest API framework
11 |
12 | Recommend using the Laravel framework.
13 |
--------------------------------------------------------------------------------
/review-board/README.md:
--------------------------------------------------------------------------------
1 | # Review Board
2 |
3 | 和 Gerrit 比起来太逊了,懒得写。
4 |
5 | Apache 下面的项目喜欢用 Review Board,比如 Mesos 。
6 |
7 | Review Board URL:
8 |
--------------------------------------------------------------------------------
/rime/README.md:
--------------------------------------------------------------------------------
1 | # Rime Input Method Engine
2 |
3 | See here: https://github.com/chenzhiwei/rime
4 |
--------------------------------------------------------------------------------
/router/asus.md:
--------------------------------------------------------------------------------
1 | # Asus Router
2 |
3 | ## Setting guest wifi password
4 |
5 | ```
6 | nvram get wl0.1_wpa_psk
7 | nvram set wl0.1_wpa_psk=wifi-password
8 | ```
9 |
10 | ## DDns Setting
11 |
12 | DNSPod API:
13 |
14 | * /jffs/scripts/ddns-start
15 |
16 | ```
17 | #!/bin/sh
18 |
19 | IP=$1
20 | TOKEN=DNSPOD_TOKEN
21 | DOMAIN=DNSPOD_DOMAIN_ID
22 | RECORD=DNSPOD_DOMAIN_RECORD
23 | SUBDOMAIN=asus
24 | EMAIL=xxx@abc.com
25 |
26 | curl -fsk -m 3 -X POST \
27 | -A "DDNS Client/1.0.0 ($EMAIL)" \
28 | -d "login_token=$TOKEN&format=json&domain_id=$DOMAIN&record_id=$RECORD&record_line_id=0&sub_domain=$SUBDOMAIN&value=$IP" \
29 | https://dnsapi.cn/Record.Ddns
30 |
31 | /sbin/ddns_custom_updated 1
32 | ```
33 |
34 | ## Reference
35 |
36 | * https://github.com/RMerl/asuswrt-merlin/wiki
37 |
--------------------------------------------------------------------------------
/rpm-package-management/php-apc.spec:
--------------------------------------------------------------------------------
1 | %global php_extdir %(php-config --extension-dir 2>/dev/null || echo "undefined")
2 | %global php_apiver %((echo 0; php -i 2>/dev/null | sed -n 's/^PHP API => //p') | tail -1)
3 |
4 | Name: php-apc
5 | Version: 3.1.9
6 | Release: 1
7 | Summary: A module for PHP applications speeder
8 |
9 | Group: Development/Languages
10 | License: PHP
11 | URL: http://www.php.net/
12 | Source0: http://www.php.net/distributions/%{name}-%{version}.tar.gz
13 | BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
14 |
15 | BuildRequires: php-devel
16 | Requires: php(api) = %{php_apiver}
17 |
18 | %description
19 | The Alternative PHP Cache (APC) is a free and open opcode cache for PHP. Its goal is to provide a free, open, and robust framework for caching and optimizing PHP intermediate code.
20 |
21 | %prep
22 | %setup -q
23 |
24 | %build
25 | %{_bindir}/phpize
26 | %configure
27 | %{__make} %{?_smp_mflags}
28 |
29 | %install
30 | %{__rm} -rf $RPM_BUILD_ROOT
31 | %{__mkdir} -p $RPM_BUILD_ROOT%{_sysconfdir}/php.d
32 | %{__make} install INSTALL_ROOT=$RPM_BUILD_ROOT
33 | %{__cat} > $RPM_BUILD_ROOT%{_sysconfdir}/php.d/apc.ini < - 3.1.9-1
49 | - Build php-apc package
50 |
--------------------------------------------------------------------------------
/rpm-package-management/template.spec:
--------------------------------------------------------------------------------
1 | Name:
2 | Version:
3 | Release: 1%{?dist}
4 | Summary:
5 |
6 | Group:
7 | License:
8 | URL:
9 | Source0:
10 | BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
11 |
12 | BuildRequires:
13 | Requires:
14 | AutoReq: 0
15 | AutoProv: 0
16 | AutoReqProv: 0
17 |
18 | %description
19 |
20 | %prep
21 | %setup -q
22 |
23 | %build
24 | %configure
25 | make %{?_smp_mflags}
26 |
27 | %install
28 | rm -rf $RPM_BUILD_ROOT
29 | make install DESTDIR=$RPM_BUILD_ROOT
30 |
31 | %clean
32 | rm -rf $RPM_BUILD_ROOT
33 |
34 | %pre
35 | echo "before install"
36 |
37 | %post
38 | echo "after install"
39 |
40 | %preun
41 | echo "before uninstall"
42 |
43 | %postun
44 | echo "after uninstall"
45 |
46 | %files
47 | %defattr(-,root,root,-)
48 | %attr(0600, root, root) %config(noreplace) %{_sysconfdir}/application/app.conf
49 | %attr(0755, root, root) /etc/rc.d/init.d/application
50 | %doc
51 |
52 | %changelog
53 |
--------------------------------------------------------------------------------
/rsync/rsyncd.conf:
--------------------------------------------------------------------------------
1 | uid = nobody
2 | gid = nobody
3 | use chroot = yes
4 | max connections = 20
5 | read only = yes
6 | timeout = 300
7 |
8 | [ftp]
9 | path = /var/ftp
10 | auth users = zhiwei, foobar
11 | read only = no
12 | list = yes
13 | secrets file = /etc/rsyncd.secrets
14 | hosts allow = 10.0.0.0/8
15 |
16 | [www]
17 | path = /var/www
18 | list = no
19 | write only = yes
20 | hosts deny = 172.16.0.0/12
21 |
--------------------------------------------------------------------------------
/rsync/rsyncd.secrets:
--------------------------------------------------------------------------------
1 | zhiwei:123456
2 | foobar:password
3 |
--------------------------------------------------------------------------------
/ruby/Gemfile:
--------------------------------------------------------------------------------
1 | # source 'https://rubygems.org'
2 | # source 'https://ruby.taobao.org/'
3 | source 'http://mirrors.aliyun.com/rubygems/'
4 |
5 | gem 'sass'
6 |
--------------------------------------------------------------------------------
/ruby/README.md:
--------------------------------------------------------------------------------
1 | # Install Ruby Gems
2 |
3 |
4 | ## Install with bundler
5 |
6 | Gemfile
7 |
8 | ```
9 | source 'https://ruby.taobao.org/'
10 | # source 'http://mirrors.aliyun.com/rubygems/'
11 | gem 'rails'
12 | ```
13 |
14 | Command
15 |
16 | ```
17 | # bundle install --system
18 | ```
19 |
20 | ## Install with gem
21 |
22 | Command
23 |
24 | ```
25 | # gem sources --remove https://rubygems.org/
26 | # gem sources -a https://ruby.taobao.org/
27 | # gem sources -l
28 | # gem install rails
29 | ```
30 |
--------------------------------------------------------------------------------
/sar/README.md:
--------------------------------------------------------------------------------
1 | # SAR, Linux 性能监视工具
2 |
3 | SAR(System Activity Reporter,即系统活动报告)是Linux系统上比较牛逼的性能分析工具,可以监测系统以下内容:文件的读写情况、系统调用的使用情况、磁盘I/O、CPU效率、内存使用情况、进程活动以及IPC(InterProcess Communication,进程间通信)有关活动。该命令在`sysstat`包里面,默认是计划任务(/etc/cron.d/sysstat)方式定期运行并收集数据存放在`/var/log/sa`目录下。
4 |
5 | ## sar 命令格式
6 |
7 | sar [options] [-o output_file] t [n]
8 |
9 | * options 的常用选项如下
10 |
11 | -A 指所有报告的总和
12 | -n 网络流量情况
13 | -u CPU利用率
14 | -v 进程、I节点、文件和锁表状态
15 | -d 磁盘使用报告
16 | -r 没有使用的内存页面和磁盘
17 | -g 串口I/O的情况
18 | -b 缓冲区使用情况
19 | -a 文件读写情况
20 | -c 系统调用情况
21 | -R 进程的活动情况
22 | -y 终端设备活动情况
23 | -w 系统交换活动
24 |
25 | * -o 是指将采样结果以二进制形式写入某文件中
26 |
27 | * t [n] 分别表示每隔多久采样一次和采样的次数
28 |
29 | ## sar 命令使用示例
30 |
31 | ### 分析网卡流量
32 |
33 | 每五秒采样一次所有网卡的流入、流出流量,总共记录十次。
34 |
35 | sar -n DEV 5 10
36 |
37 | 09:10:39 PM IFACE rxpck/s txpck/s rxkB/s txkB/s rxcmp/s txcmp/s rxmcst/s
38 | 09:10:41 PM lo 0.00 0.00 0.00 0.00 0.00 0.00 0.00
39 | 09:10:41 PM usb0 0.00 0.00 0.00 0.00 0.00 0.00 0.00
40 | 09:10:41 PM eth0 18.27 6.60 1.85 1.01 0.00 0.00 8.12
41 | 09:10:41 PM eth1 10.15 7.11 0.79 2.08 0.00 0.00 0.00
42 | 09:10:41 PM br0 17.26 5.08 1.25 0.48 0.00 0.00 8.63
43 |
44 | * IFACE, network interface,即网卡名字。
45 |
46 | * rxpck/s, Total number of packets received per second,每秒接收的数据包的总数。
47 |
48 | * txpck/s, Total number of packets transmitted per second.
49 |
50 | * rxkB/s, Total number of kilobytes received per second.
51 |
52 | * txkB/s, Total number of kilobytes transmitted per second.
53 |
54 | * rxcmp/s, Number of compressed packets received per second (for cslip etc.).
55 |
56 | * txcmp/s, Number of compressed packets transmitted per second.
57 |
58 | * rxmcst/s, Number of multicast packets received per second.
59 |
60 | ### 分析历史记录
61 |
62 | 假设sar命令保存的二进制文件名为 sa15 ,那么可以用 `sar [options] -f sa15`来查看里面的内容。
63 |
64 | ### In The End
65 |
66 | 今天本来哥还想多写一些,可是太忙了。。。
67 |
--------------------------------------------------------------------------------
/sasl/README.md:
--------------------------------------------------------------------------------
1 | # SASL
2 |
3 | SASL(Simple Authentication and Security Layer) is a framework for application protocols to add authentication and data security support.
4 |
5 | The SASL framework does not specify the technology used to perform the authentication, that is the responsibility for each SASL mechanism. The [supported mechanisms][supported-mechanisms] are CRAM-MD5, GSSAPI, PLAIN...
6 |
7 | [supported-mechanisms]: https://en.wikipedia.org/wiki/Simple_Authentication_and_Security_Layer#SASL_mechanisms
8 |
9 | ## Authentication exchange
10 |
11 | Typically a SASL negotiation works as follows. First the client requests authentication (possibly implicitly by connecting to the server). The server responds with a list of supported mechanisms. The client chose one of the mechanisms. The client and server then exchange data, one round-trip at a time, until authentication either succeeds or fails. After that, the client and server knows more about who is on the other end of the channel.
12 |
13 | Client: Request authentication exchange
14 | Server: Initial challenge
15 | Client: Initial response
16 | Server: Outcome of authentication exchange
17 |
18 | 具体的交换方式跟认证机制有关,有空了我写个简单的样例。
19 |
20 | ## Reference
21 |
22 | * SASL RFC:
23 | * SASL Programming:
24 |
--------------------------------------------------------------------------------
/screen/README.md:
--------------------------------------------------------------------------------
1 | # screen 命令简单用法
2 |
3 | 现在很多时候我们的开发环境都已经部署到云端了,直接通过SSH来登录到云端服务器进行开发测试以及运行各种命令,一旦网络中断,通过SSH运行的命令也会退出,这很让人发疯。
4 |
5 | 好在有screen命令,它可以解决这些问题。我使用screen命令已经有三年多的时间了,感觉还不错。
6 |
7 | ## 新建一个Screen Session
8 |
9 | ```
10 | $ screen -S screen_session_name
11 | ```
12 |
13 | ## 将当前Screen Session放到后台
14 |
15 | ```
16 | $ CTRL + A + D
17 | ```
18 |
19 | ## 唤起一个Screen Session
20 |
21 | ```
22 | $ screen -r screen_session_name
23 | ```
24 |
25 | ## 分享一个Screen Session
26 |
27 | ```
28 | $ screen -x screen_session_name
29 | ```
30 |
31 | 通常你想和别人分享你在终端里的操作时可以用此命令。
32 |
33 | ## 终止一个Screen Session
34 |
35 | ```
36 | $ exit
37 | $ CTRL + D
38 | ```
39 |
40 | ## 查看一个screen里的输出
41 |
42 | 当你进入一个screen时你只能看到一屏内容,如果想看之前的内容可以如下:
43 |
44 | ```
45 | $ Ctrl + a ESC
46 | ```
47 |
48 | 以上意思是进入Copy mode,拷贝模式,然后你就可以像操作VIM一样查看screen session里的内容了。
49 |
50 | 可以 Page Up 也可以 Page Down。
51 |
52 | ## screen进阶
53 |
54 | 对我来说,以上就足够了,有特定需求时再说。
55 |
56 | ## screenrc
57 |
58 | [.screenrc](.screenrc)
59 |
60 | 如果没有就在~下新建该文件
61 |
62 | 然后添加一行配置
63 |
64 | ```
65 | escape ^Bt
66 | ```
67 |
68 | 表示其他(例如ctrl+B)替换掉默认的ctrl+A,
69 |
70 | 因为ctrl+A往往是快速回到命令的头部的快捷键,非常常用和方便
71 |
72 | ## End
73 |
74 | screen命令很好用,但是最让人头痛的是`CTRL+A`命令和BASH里的快捷键重复了,我不觉得替换一下快捷键是个很好的解决方案,所以这个问题一直存在我这里。
75 |
76 | 这里有更详细的说明:
77 |
--------------------------------------------------------------------------------
/shell/test.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -o errexit
4 | set -o nounset
5 | set -o pipefail
6 |
7 | ROOT_PATH="$(cd "$(dirname "${BASH_SOURCE}")" && pwd -P)"
8 | SUB_PATH="${SUB_PATH:-sub}"
9 | FULL_PATH="${ROOT_PATH}/${SUB_PATH}"
10 |
11 | readonly SERVICE_PORT=5600
12 |
13 | function app::component::action() {
14 | local version=1.1.0
15 | if [[ "$version" == "1.1.0" ]]; then
16 | return $version
17 | fi
18 | return 0.0
19 | }
20 |
21 | function app::component::stdout() {
22 | cat <<EOF >&1
23 |
24 | Hello stdout!
25 |
26 | This is the FULL_PATH: ${FULL_PATH}
27 |
28 | EOF
29 | }
30 |
31 | function app::component::stderr() {
32 | cat <<EOF >&2
33 |
34 | Hello stderr!
35 |
36 | This is the FULL_PATH: ${FULL_PATH}
37 |
38 | EOF
39 | }
40 |
41 | function app::has_ping() {
42 | which ping &>/dev/null
43 | }
44 |
45 | function app::string_operation() {
46 | echo
47 | # string in array
48 | local str="are"
49 | local arr=(how are you)
50 | regx_arr=$(echo ${arr[@]} | sed 's/ /|/g')
51 | if [[ $str =~ ($regx_arr) ]]; then
52 | echo string is in an array
53 | fi
54 | if [[ $str =~ (how|are|you) ]]; then
55 | echo string is in an array
56 | fi
57 | echo
58 | }
59 |
60 | function app::array_operation() {
61 | local factor=(0:9 1:8 2:7 3:6 4:5)
62 | for fact in ${factor[@]}; do
63 | key=${fact%:*}
64 | value=${fact##*:}
65 | echo "key: $key, value: $value"
66 | done
67 | echo
68 | for fact in ${factor[@]}; do
69 | arr=(${fact//:/ })
70 | key=${arr[0]}
71 | value=${arr[1]}
72 | echo "key: $key, value: $value"
73 | done
74 | echo
75 | for fact in ${factor[@]}; do
76 | arr=${fact//:/ }
77 | set -- $arr
78 | key=$1
79 | value=$2
80 | echo "key: $key, value: $value"
81 | done
82 | }
83 |
84 |
85 | if app::has_ping; then
86 | echo there is ping
87 | fi
88 |
89 | app::component::stdout
90 | app::component::stderr
91 | app::array_operation
92 | app::string_operation
93 |
--------------------------------------------------------------------------------
/shell/text.md:
--------------------------------------------------------------------------------
1 | # 文本操作
2 |
3 | ## 汉字正则
4 |
5 | ```
6 | # 匹配汉字
7 | grep -P '[^\x00-\x7F]' test.txt
8 |
9 | # 匹配三到五个汉字
10 | vim: /[^\x00-\x7F]\{3,5}
11 | ```
12 |
13 | ## 打印前5行内容
14 |
15 | ```
16 | head -5 test.txt
17 | head -n 5 test.txt
18 |
19 | sed -n '1,5p' test.txt
20 |
21 | awk 'NR<6' test.txt
22 | ```
23 |
24 | ## 打印第5行内容
25 |
26 | ```
27 | sed -n '5p' test.txt
28 |
29 | awk 'NR==5' test.txt
30 | ```
31 |
32 | ## 打印第3行到第5行内容
33 |
34 | ```
35 | sed -n '3,5p' test.txt
36 |
37 | awk 'NR>2 && NR<6' test.txt
38 | ```
39 |
40 | ## 跨行打印:打印第 3 行 和 5~7 行内容
41 |
42 | ```
43 | sed -n '3p;5,7p' test.txt
44 |
45 | awk 'NR==3 || (NR>4 && NR<8)' test.txt
46 | ```
47 |
48 | ## 打印奇偶行内容
49 |
50 | ```
51 | # 打印奇数行内容
52 | ## NR 表示行号
53 | $ awk 'NR%2!=0' test.txt
54 | $ awk 'NR%2' test.txt
55 |
56 | ## i 为变量,未定义变量初始值为 0,对于字符运算,未定义变量初值为空字符串
57 | ## 读取第 1 行记录,进行模式匹配:i=!0(!表示取反)。! 右边是个布尔值,0 为假,非 0 为真,!0 就是真,因此 i=1,条件为真打印第一条记录。
58 | ## 读取第 2 行记录,进行模式匹配:i=!1(因为上次 i 的值由 0 变成了 1),条件为假不打印。
59 | ## 读取第 3 行记录,因为上次条件为假,i 恢复初值为 0,继续打印。以此类推...
60 | ## 上述运算并没有真正的判断记录,而是布尔值真假判断。
61 | $ awk 'i=!i' test.txt
62 |
63 | ## m~np:m 表示起始行;~2 表示:步长
64 | $ sed -n '1~2p' test.txt
65 |
66 | ## 先打印第 1 行,执行 n 命令读取当前行的下一行,放到模式空间,后面再没有打印模式空间行操作,所以只保存不打印,同等方式继续打印第 3 行。
67 | $ sed -n '1,$p;n' test.txt
68 | $ sed -n 'p;n' test.txt
69 |
70 | # 打印偶数行内容
71 | $ awk 'NR%2==0' test.txt
72 | $ awk '!(NR%2)' test.txt
73 | $ awk '!(i=!i)' test.txt
74 | $ sed -n 'n;p' test.txt
75 | $ sed -n '2~2p' test.txt
76 | $ sed -n '1,$n;p' test.txt
77 | ```
78 |
79 | ## 打印最后5行内容
80 |
81 | ```
82 | tail -n 5 test.txt
83 | ```
84 |
85 | ## 打印匹配行内容
86 |
87 | ```
88 | # 打印以 "1" 开头的行内容
89 | $ sed -n '/^1/p' test.txt
90 | $ grep "^1" test.txt
91 |
92 | # 打印不以 "1" 开头的行内容
93 | $ sed -n '/^1/!p' test.txt
94 | $ grep -v "^1" test.txt
95 |
96 | # 从匹配 "03" 行到第 5 行内容
97 | $ sed -n '/03/,5p' test.txt
98 |
99 | # 打印匹配 "03" 行 到匹配 "05" 行内容
100 | $ sed -n '/03/,/05/p' test.txt
101 | ```
102 |
--------------------------------------------------------------------------------
/skydns/README.md:
--------------------------------------------------------------------------------
1 | # SkyDNS
2 |
3 | SkyDNS is a distributed service for announcement and discovery of services built on top of etcd.
4 |
5 | ## 编译安装
6 |
7 | ```
8 | $ go get github.com/skynetservices/skydns
9 | $ cd $GOPATH/src/github.com/skynetservices/skydns
10 | $ go build -v
11 | ```
12 |
13 | ## 启动使用
14 |
15 | 安装配置etcd请查看[etcd文档](../etcd/)。
16 |
17 | ### Linux amd64
18 |
19 | ```
20 | # cd $GOPATH/bin
21 | # ./skydns
22 | ```
23 |
24 | ### Docker
25 |
26 | ```
27 | # docker run -d -p 172.17.42.1:53:53/udp --name skydns skynetservices/skydns -machines="http://172.17.42.1:4001" -addr="0.0.0.0:53" -nameservers="8.8.8.8:53"
28 | ```
29 |
30 | ## 设置一个域名
31 |
32 | SkyDNS默认domain是`skydns.local`,当然你也可以在启动时用`-domain='custom.local'`来指定。
33 |
34 | ### abcd.skydns.local 解析到 10.0.0.1
35 |
36 | ```
37 | $ curl -X PUT http://172.17.42.1:4001/v2/keys/skydns/local/skydns/abcd -d value='{"host": "10.0.0.1"}'
38 | $ dig @127.0.0.1 abcd.skydns.local
39 | ```
40 |
41 | ### xyz.abc.skydns.local 解析到 10.0.0.2
42 |
43 | ```
44 | $ curl -X PUT http://172.17.42.1:4001/v2/keys/skydns/local/skydns/abc/xyz -d value='{"host": "10.0.0.2"}'
45 | $ dig @127.0.0.1 xyz.abc.skydns.local
46 | ```
47 |
48 | ### 设置/etc/resolv.conf
49 |
50 | 如果你想让你的机器能直接通过`abcd`和`xyz.abc`来访问,你可以设置`/etc/resolv.conf`为以下内容。
51 |
52 | ```
53 | domain skydns.local
54 | nameserver skydns-ip-address
55 | ```
56 |
57 | ## SkyDNS
58 |
59 | SkyDNS其实是个非常简单的DNS发现服务,不要企求有太多功能。
60 |
61 | ## Reference
62 |
63 | 1. https://github.com/skynetservices/skydns
64 |
--------------------------------------------------------------------------------
/terraform/README.md:
--------------------------------------------------------------------------------
1 | # Terraform
2 |
3 | A quick start of terraform.
4 |
5 |
6 | ## Code Structure
7 |
8 | A terraform project usually contain following files:
9 |
10 | * input.tfvars - the input variables that applied
11 | * main.tf - the main logic
12 | * each-component.tf - if the project contains too many components, then separate them into individual file
13 | * variables.tf - to declare the variables that used in the project
14 | * versions.tf - to restrict the terraform and provider versions
15 | * backend.tf - to define the terraform state file location(local or in remote)
16 | * output.tf - to define the terraform project output
17 |
18 |
19 | ## Commands
20 |
21 | ```
22 | gcloud auth application-default login
23 |
24 | terraform init
25 | terraform plan -var-file input.tfvars
26 | terraform apply -var-file input.tfvars
27 | terraform apply -var-file input.tfvars -backend-config=xx=xx
28 | ```
29 |
30 |
31 | ## How to use a module
32 |
33 | Suppose this project is in a git repo:
34 |
35 | ```
36 | module "simple_mig" {
37 | source = "https://github.com/chenzhiwei/terraform-mig.git?ref=master"
38 | project = "your-project"
39 | region = "asia-east2"
40 | xxx = xxx
41 | }
42 | ```
43 |
--------------------------------------------------------------------------------
/terraform/backend.tf:
--------------------------------------------------------------------------------
# Store terraform state in a GCS bucket under the given object prefix.
# The bucket name is intentionally not hard-coded; supply it at init
# time, e.g.: terraform init -backend-config="bucket=my-bucket"
terraform {
  backend "gcs" {
    prefix = "my-name"
  }
}
6 |
--------------------------------------------------------------------------------
/terraform/input.tfvars:
--------------------------------------------------------------------------------
1 | project = "my-project"
2 | region = "asia-east2"
3 | mig_name = "test-mig"
4 | mig_size = 1
5 | machine_type = "n1-standard-1"
6 | service_account_email = "my@sa.com"
7 | metadata = {
8 | owner = "my-name"
9 | }
10 | labels = {
11 | app = "kafka"
12 | }
13 |
14 | tags = [
15 | "firewall-tag"
16 | ]
17 |
18 | interfaces = [
19 | {
20 | subnetwork = "projects/my-project/regions/asia-east2/subnetworks/vpc1"
21 | },
22 | {
23 | subnetwork = "projects/my-project/regions/asia-east2/subnetworks/vpc2"
24 | }
25 | ]
26 |
27 | disks = [
28 | {
29 | boot = true
30 | auto_delete = true
31 | mode = "READ_WRITE"
32 | type = "pd-standard"
33 | size_gb = 64
34 | image_family = "projects/my-project/global/images/family/ubuntu"
35 | },
36 | {
37 | boot = false
38 | auto_delete = false
39 | mode = "READ_WRITE"
40 | type = "pd-ssd"
41 | size_gb = 100
42 | image_family = null
43 | }
44 | ]
45 |
--------------------------------------------------------------------------------
/terraform/output.tf:
--------------------------------------------------------------------------------
# Self link of the regional managed instance group, exposed so callers of
# this module can reference the MIG created here.
output "mig_self_link" {
  value = google_compute_region_instance_group_manager.default.self_link
}
4 |
--------------------------------------------------------------------------------
/terraform/variables.tf:
--------------------------------------------------------------------------------
# GCP project the resources are created in.
variable "project" {
  type        = string
  default     = "my-project"
  description = "the gcp project name"
}

# Network tags attached to instances; firewall rules match on these.
variable "tags" {
  type        = list(string)
  description = "firewall tags"
}

# Instance metadata key/value pairs (e.g. owner labels, startup keys).
variable "metadata" {
  type        = map(string)
  description = "metadata"
}

# One network interface per entry; each entry names the subnetwork to
# attach (full resource path expected by the caller — see input.tfvars).
variable "interfaces" {
  type = list(object({
    subnetwork = string
  }))
}

# One disk per entry.  image_family is only meaningful for the boot disk;
# non-boot disks pass null.
variable "disks" {
  type = list(object({
    boot         = bool
    auto_delete  = bool
    mode         = string
    type         = string
    size_gb      = number
    image_family = string
  }))
}

# OAuth scopes granted to the instance service account.
variable "service_account_scopes" {
  type = list(string)
  default = [
    "https://www.googleapis.com/auth/compute"
  ]
}
40 |
--------------------------------------------------------------------------------
/terraform/versions.tf:
--------------------------------------------------------------------------------
# Terraform core must be newer than 1.6.0; the google provider is pinned
# to an exact release for reproducible plans/applies.
terraform {
  required_version = "> 1.6.0"
  required_providers {
    google = {
      source  = "hashicorp/google"
      version = "4.18.0"
    }
  }
}
10 |
--------------------------------------------------------------------------------
/timezone/README.md:
--------------------------------------------------------------------------------
1 | # 时区及时间
2 |
3 | 时区就是时间区域,主要是为了克服时间上的混乱,统一各地时间。地球上共有 24 个时区,东西各 12 个时区(东12与西12合二为一)。
4 |
5 | ## UTC 和 GMT
6 |
7 | 时区通常写成`+0800`,有时也写成`GMT +0800`,其实这两个是相同的概念。
8 |
9 | GMT 是格林尼治标准时间(Greenwich Mean Time)。
10 |
11 | UTC 是协调世界时间(Universal Time Coordinated),又叫世界标准时间,其实就是`0000`时区的时间。
12 |
13 | ## 时间换算
14 |
15 | 通常当有跨区域的会议时,一般大家都用 UTC 来公布,比如某个会议在`UTC 20:00 周三`开始,按照时间换算规则:
16 |
17 | 计算的区时=已知区时-(已知区时的时区-要计算区时的时区)
18 |
19 | 中国北京时间是:20:00 - ( 0 - 8 ) = 1天 + 04:00,即北京时间周四早上 04:00 。
20 |
21 | ## Linux 下调整时区
22 |
23 | ```
24 | # ls /usr/share/zoneinfo
25 | # ln -sf /usr/share/zoneinfo/PRC /etc/localtime
26 | # ln -sf /usr/share/zoneinfo/Asia/Taipei /etc/localtime
27 | ```
28 |
29 | 临时调整一下:
30 |
31 | ```
32 | $ TZ=PRC
33 | $ date -R
34 | $ TZ=Asia/Taipei
35 | $ date -R
36 | ```
37 |
38 | ## 世界时区表
39 |
40 | 
41 |
--------------------------------------------------------------------------------
/timezone/timezone.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/chenzhiwei/linux/8cdb9aa837747b12ea6efb240e7ebab89ec7a767/timezone/timezone.jpg
--------------------------------------------------------------------------------
/tips/README.md:
--------------------------------------------------------------------------------
1 | # Some Tips
2 |
3 | ## Add epel repo
4 |
5 | ```
6 | yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
7 | ```
8 |
9 | ## Add git repo
10 |
11 | ```
12 | # /etc/yum.repos.d/git.repo
13 |
14 | [git]
15 | name=Git Packages for Enterprise Linux 7 - $basearch
16 | baseurl=http://opensource.wandisco.com/centos/7/git/x86_64/
17 | enabled=1
18 | gpgcheck=0
19 | ```
20 |
21 | ## Install Docker
22 |
23 | ```
24 | wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo
25 | yum install docker-ce
26 | ```
27 |
28 | ```
29 | curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
30 | add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
31 | apt install docker-ce
32 | ```
33 |
34 | ```
35 | vim /etc/docker/daemon.json
36 | {
37 | "log-driver": "journald",
38 | "exec-opts": ["native.cgroupdriver=systemd"]
39 | }
40 |
41 | {
42 | "log-driver": "json-file",
43 | "log-opts": {
44 | "max-size": "100m",
45 | "max-file": "3"
46 | },
47 | "registry-mirrors": []
48 | }
49 | ```
50 |
--------------------------------------------------------------------------------
/travis-ci/.travis.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | language: python
4 | os: linux
5 | dist: trusty
6 |
7 | python: "2.7"
8 |
9 | install:
10 | - pip install yamllint ansible
11 |
12 | script:
13 | - git diff-tree --check $(git hash-object -t tree /dev/null) HEAD !(exclude-dir|*.patch)
14 | - yamllint $(git ls-files '*.yaml' '*.yml')
15 | - ansible-playbook -e @cluster/config.yaml site.yaml --syntax-check
16 |
--------------------------------------------------------------------------------
/travis-ci/.yamllint:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | rules:
4 | braces:
5 | min-spaces-inside: 0
6 | max-spaces-inside: 0
7 | brackets:
8 | min-spaces-inside: 0
9 | max-spaces-inside: 0
10 | colons:
11 | max-spaces-before: 0
12 | max-spaces-after: 1
13 | commas:
14 | max-spaces-before: 0
15 | min-spaces-after: 1
16 | max-spaces-after: 1
17 | comments:
18 | level: warning
19 | require-starting-space: yes
20 | min-spaces-from-content: 2
21 | comments-indentation:
22 | level: warning
23 | document-end: disable
24 | document-start:
25 | level: warning
26 | present: yes
27 | empty-lines:
28 | max: 2
29 | max-start: 0
30 | max-end: 0
31 | hyphens:
32 | max-spaces-after: 1
33 | indentation:
34 | spaces: 2
35 | indent-sequences: yes
36 | check-multi-line-strings: no
37 | key-duplicates: enable
38 | line-length: disable
39 | new-line-at-end-of-file: enable
40 | new-lines:
41 | type: unix
42 | trailing-spaces: enable
43 |
--------------------------------------------------------------------------------
/travis-ci/README.md:
--------------------------------------------------------------------------------
1 | # Travis CI
2 |
3 | ## Build Lifecycle
4 |
5 | 1. Install `apt addons`
6 | 2. `before_install`
7 | 3. `install` install any dependencies required
8 | 4. `before_script`
9 | 5. `script` run the build script
10 | 6. `after_success` or `after_failure`
11 | 7. OPTIONAL `before_deploy`
12 | 8. OPTIONAL `deploy`
13 | 9. OPTIONAL `after_deploy`
14 | 10. `after_script`
15 |
--------------------------------------------------------------------------------
/ubuntu/.fonts.conf:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | serif
8 |
9 |
10 | WenQuanYi Micro Hei
11 | HYSong
12 | AR PL UMing CN
13 | AR PL UMing HK
14 | AR PL New Sung
15 | WenQuanYi Bitmap Song
16 | AR PL UKai CN
17 | AR PL ZenKai Uni
18 |
19 |
20 |
21 |
22 | sans-serif
23 |
24 |
25 | WenQuanYi Micro Hei
26 | Droid Sans Fallback
27 | WenQuanYi Zen Hei
28 | HYSong
29 | AR PL UMing CN
30 | AR PL UMing HK
31 | AR PL New Sung
32 | AR PL UKai CN
33 | AR PL ZenKai Uni
34 |
35 |
36 |
37 |
38 | monospace
39 |
40 |
41 | WenQuanYi Micro Hei
42 | Droid Sans Fallback
43 | WenQuanYi Zen Hei Mono
44 | HYSong
45 | AR PL UMing CN
46 | AR PL UMing HK
47 | AR PL New Sung
48 | AR PL UKai CN
49 | AR PL ZenKai Uni
50 |
51 |
52 |
53 |
54 |
--------------------------------------------------------------------------------
/ubuntu/gtk.css:
--------------------------------------------------------------------------------
1 | /*
2 | * GTK 3.0 CSS file for gnome-terminal
3 | * Path: ~/.config/gtk-3.0/gtk.css
4 | * */
5 |
6 | TerminalWindow .button {
7 | /* Make the notebook tab have a smaller height */
8 | padding: 2px 0;
9 | }
10 |
11 | TerminalWindow .notebook {
12 | /* Make the notebook tab a little darker */
13 | padding: 0;
14 | background-color: #CCC;
15 | }
16 |
17 | TerminalWindow .notebook tab:active {
18 | /* Highlight the active tab */
19 | background-color: #EEE;
20 | }
21 |
22 | /* reference: /usr/share/themes/Ambiance/gtk-3.0/apps/gnome-terminal.css */
23 |
--------------------------------------------------------------------------------
/ubuntu/how-to-make-debain-package.mkd:
--------------------------------------------------------------------------------
1 | # 怎样制作debian软件包
2 |
3 | ## Python程序
4 |
5 | 具体Python程序可以参考我写的`indicator-screenshot`。
6 |
7 | ```
8 | $ git clone git://github.com/chenzhiwei/indicator-screenshot.git
9 | $ cd indicator-screenshot
10 | $ debuild -d -k98564809
11 | ```
12 |
13 | `-k` 为指定 gpg secret-key 的意思,如果不指定可能会出现`secret key not available`错误。
14 |
15 | ## 参考这里
16 |
17 |
18 |
--------------------------------------------------------------------------------
/ucarp/README.md:
--------------------------------------------------------------------------------
1 | # UCarp
2 |
3 | ## Start UCarp
4 |
5 | * vip: 10.0.0.10
6 | * node1: 10.0.0.11
7 | * node2: 10.0.0.12
8 |
9 | ### On node 10.0.0.11
10 |
11 | ```
12 | ucarp --interface=eth0 --srcip=10.0.0.11 --vhid=10 --pass=10.0.0.10 --addr=10.0.0.10 --upscript=/etc/vip-up.sh --downscript=/etc/vip-down.sh
13 | ```
14 |
15 | ### On node 10.0.0.12
16 |
17 | ```
18 | ucarp --interface=eth0 --srcip=10.0.0.12 --vhid=10 --pass=10.0.0.10 --addr=10.0.0.10 --upscript=/etc/vip-up.sh --downscript=/etc/vip-down.sh
19 | ```
20 |
--------------------------------------------------------------------------------
/ucarp/vip-down.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# ucarp down-script: remove the virtual IP from the interface when this
# node loses master state.  ucarp invokes it as: vip-down.sh <iface> <vip>

iface=$1
ip=$2

# Normalize a bare address to CIDR form so it matches `ip addr` output.
# The quoted regex operand makes "/" a literal match.
if [[ ! "$ip" =~ "/" ]]; then
    ip=$ip/32
fi

# Delete the address only if it is currently assigned.  -F matches the
# address as a fixed string so its dots are not regex wildcards, and the
# quoted expansions survive unusual IFS settings.
if ip addr show "$iface" | grep -wqF "$ip"; then
    ip addr del "$ip" dev "$iface"
fi
--------------------------------------------------------------------------------
/ucarp/vip-up.sh:
--------------------------------------------------------------------------------
#!/bin/bash
#
# ucarp up-script: assign the virtual IP to the interface when this node
# becomes master.  ucarp invokes it as: vip-up.sh <iface> <vip>

iface=$1
ip=$2

# Normalize a bare address to CIDR form so it matches `ip addr` output.
# The quoted regex operand makes "/" a literal match.
if [[ ! "$ip" =~ "/" ]]; then
    ip=$ip/32
fi

# Add the address only if it is not already assigned.  -F matches the
# address as a fixed string so its dots are not regex wildcards, and the
# quoted expansions survive unusual IFS settings.
if ! ip addr show "$iface" | grep -wqF "$ip"; then
    ip addr add "$ip" dev "$iface"
fi
--------------------------------------------------------------------------------
/vagrant/README.md:
--------------------------------------------------------------------------------
1 | # Vagrant with libvirt
2 |
3 | ## Installing
4 |
5 | Deb/RPM: https://releases.hashicorp.com/vagrant/
6 |
7 | * Ubuntu/Debian
8 |
9 | ```
10 | sudo dpkg -i vagrant.deb
11 | sudo apt install libvirt-daemon-system qemu-utils qemu-system-x86 libvirt-dev --no-install-recommends
12 | vagrant plugin install vagrant-libvirt
13 |
14 | # logout & login to let the usermod take effect
15 | sudo usermod --append --groups kvm,libvirt $USER
16 | ```
17 |
18 | * RHEL/CentOS/Fedora
19 |
20 | ```
21 | $ sudo rpm -i vagrant.rpm
22 | $ sudo yum install libxslt-devel libxml2-devel libvirt-devel libguestfs-tools-c
23 | $ export PATH=/opt/vagrant/embedded/bin:$PATH
24 | $ vagrant plugin install vagrant-libvirt
25 | ```
26 |
27 | ## Vagrant disksize plugin
28 |
29 | ```
30 | vagrant plugin install vagrant-disksize
31 | ```
32 |
33 | Set disksize like following:
34 |
35 | ```
36 | vagrant.configure('2') do |config|
37 | config.vm.box = 'ubuntu/bionic64'
38 | config.disksize.size = '50GB'
39 | end
40 | ```
41 |
42 | ## Using
43 |
44 | ```
45 | $ mkdir vagrant
46 | $ cd vagrant
47 | $ vagrant init centos/7
48 | $ vagrant up --provider libvirt
49 | $ export VAGRANT_DEFAULT_PROVIDER=libvirt
50 | $ vagrant up
51 | $ vagrant ssh dcos1
52 | ```
53 |
54 | ## Set the box size
55 |
56 | ```
57 | cd ~/.vagrant.d/boxes/ubuntu-VAGRANTSLASH-xenial64/20170803.0.0/virtualbox
58 | VBoxManage clonehd ubuntu-xenial-16.04-cloudimg.vmdk tmp.vdi --format vdi
59 | VBoxManage modifyhd tmp.vdi --resize 102400
60 | VBoxManage clonehd tmp.vdi tmp.vmdk --format vmdk
61 | mv tmp.vmdk ubuntu-xenial-16.04-cloudimg.vmdk
62 | rm -f tmp.vdi
63 | ```
64 |
65 | ## More
66 |
67 | I don't like GUI, so there is no VirtualBox here.
68 |
69 |
70 |
--------------------------------------------------------------------------------
/vagrant/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Single-node definition: hostname, static IP on the libvirt default
# network, base box, and resource sizing (disk size in GB).
node = {:hostname => "debian", :ip => "192.168.122.10", :box => "debian/bullseye64", :cpus => 4, :memory => 4096, :disk => 50}

Vagrant.configure("2") do |config|
  config.vm.box_check_update = true
  # No shared folder is needed; disabling it avoids NFS/9p requirements.
  config.vm.synced_folder '.', '/vagrant', disabled: true

  config.vm.box = node[:box]
  config.vm.hostname = node[:hostname]

  # libvirt provider settings: CPU/memory sizing, and grow the box image
  # to the requested size (the guest filesystem must still be expanded
  # inside the VM).
  config.vm.provider "libvirt" do |libvirt|
    libvirt.management_network_address = "192.168.122.0/24" # public_network
    libvirt.cpus = node[:cpus]
    libvirt.memory = node[:memory]
    libvirt.machine_virtual_size = node[:disk]
  end

  # Bridge the VM onto virbr0 with the fixed IP defined above.
  config.vm.network :public_network,
    :dev => "virbr0",
    :mode => "bridge",
    :type => "bridge",
    :hostname => true,
    :ip => node[:ip]

  # Post-boot provisioning: unlock root and install the author's SSH key.
  config.vm.provision "shell", inline: <<-SHELL
    passwd -u root
    apt update && apt -y install curl ca-certificates
    mkdir -p /root/.ssh
    curl -sSL -o /root/.ssh/authorized_keys https://chenzhiwei.cn/keys.txt
  SHELL
end
--------------------------------------------------------------------------------
/vegeta/README.md:
--------------------------------------------------------------------------------
1 | # Vegeta
2 |
3 | ## 单台机器压测
4 |
5 | ```
6 | echo "GET http://localhost/index.html" | vegeta attack -rate=1000 -duration=120s > result.bin
7 | vegeta report -inputs=result.bin
8 | ```
9 |
10 | ## 多台机器压测
11 |
12 | 同步机器的时间:
13 |
14 | ```
15 | pdsh -R ssh -l root -b -w "10.0.0.10,10.0.0.11,10.0.0.12" 'ntpdate 0.pool.ntp.org'
16 | pdsh -R ssh -l root -b -w "10.0.0.10,10.0.0.11,10.0.0.12" 'date +%s'
17 | ```
18 |
19 | 开始测试:
20 |
21 | ```
22 | pdsh -R ssh -l root -b -w "10.0.0.10,10.0.0.11,10.0.0.12" 'echo "GET http://10.0.0.12/index.html" | vegeta attack -rate=1000 -duration=120s > result.bin'
23 | for host in 10.0.0.10 10.0.0.11 10.0.0.12; do scp $host:~/result.bin $host; done
24 | vegeta report -inputs=10.0.0.10,10.0.0.11,10.0.0.12
25 | ```
26 |
27 | ## Vegeta
28 |
29 | https://github.com/tsenart/vegeta
30 |
31 | ## pdsh
32 |
33 | ```
34 | ./configure --with-ssh
35 | make
36 | make install
37 | ```
38 |
39 | https://github.com/grondo/pdsh
40 |
--------------------------------------------------------------------------------
/yarn/README.md:
--------------------------------------------------------------------------------
1 | # Yarn
2 |
3 | Yarn 可以说是新一代的 Hadoop MapReduce,用来做资源管理、调度和监控。
4 |
5 | ## Yarn 几个概念
6 |
7 | ### Resource Manager
8 |
9 | 全局资源管理器。有两个主要的组件:Scheduler 和 ApplicationsManager。
10 |
11 | Scheduler 就只是单纯的 Scheduler 并支持插件,只用来分配资源,不做监控和追踪工作。
12 |
13 | ApplicationsManager 用来接受作业提交请求并执行 Application 特定的 ApplicationMaster,并且还提供当 ApplicationMaster 失败时重启 ApplicationMaster 的服务。
14 |
15 | ### Application Master
16 |
17 | 每个 Application 都有一个 Application Master,这里的 Application 可以是`single job`或`DAG of jobs`。用来从 Scheduler 选择合适的资源容器并追踪他们的状态和进度。
18 |
19 | ### Node Manager
20 |
21 | 去执行和监控任务(task)的。监控资源(CPU、Memory、Disk、Network)的使用情况并汇报给 ResourceManager/Scheduler。
22 |
23 |
24 | ## 图
25 |
26 | 
27 |
--------------------------------------------------------------------------------
/zookeeper/README.md:
--------------------------------------------------------------------------------
1 | # Zookeeper
2 |
3 | ## Server Start
4 |
5 | ```
6 | export JVM_OPTS="-Djava.security.auth.login.config=/etc/zookeeper/zookeeper.jaas.conf"
7 | zookeeper-server-start.sh /etc/zookeeper/zookeeper.properties
8 | ```
9 |
10 | ## Client Use
11 |
12 | ```
13 | export JVM_OPTS="-Djava.security.auth.login.config=/etc/zookeeper/zookeeper-client.jaas.conf"
14 |
15 | kafka-configs.sh --zookeeper zk1-host.local:2182 \
16 | --zk-tls-config-file zookeeper-client.properties \
17 | --alter --add-config 'SCRAM-SHA-256=[password=password-holder]' \
18 | --entity-type users --entity-name admin
19 | ```
20 |
21 | ## Certificates
22 |
23 | * zookeeper.server.keystore.jks
24 |
25 | The Zookeeper server certificate that include serverAuth in extended key usage.
26 |
27 | * zookeeper.server.truststore.jks
28 |
29 | The Zookeeper server CA certificate.
30 |
31 | * zookeeper.quorum.keystore.jks
32 |
33 | The Zookeeper quorum server certificate that include serverAuth and clientAuth in extended key usage.
34 |
35 | * zookeeper.quorum.truststore.jks
36 |
37 | The Zookeeper quorum server CA certificate.
38 |
39 | * zookeeper.client.keystore.jks
40 |
41 | The Zookeeper client authentication certificate for Zookeeper server.
42 |
43 | * zookeeper.client.truststore.jks
44 |
45 | The Zookeeper client authentication CA certificate.
46 |
--------------------------------------------------------------------------------
/zookeeper/zookeeper-client.jaas.conf:
--------------------------------------------------------------------------------
1 | Client {
2 | org.apache.zookeeper.server.auth.DigestLoginModule required
3 | username="admin"
4 | password="the-password-of-admin-user-for-client";
5 | }
6 |
--------------------------------------------------------------------------------
/zookeeper/zookeeper-client.properties:
--------------------------------------------------------------------------------
1 | zookeeper.connect=zk1-host.local:2182,zk2-host.local:2182,zk3-host.local:2182
2 |
3 | zookeeper.ssl.client.enable=true
4 | zookeeper.clientCnxnSocket=org.apache.zookeeper.ClientCnxnSocketNetty
5 | zookeeper.ssl.keystore.location=/etc/zookeeper/zookeeper.client.keystore.jks
6 | zookeeper.ssl.keystore.password=zk123
7 | zookeeper.ssl.truststore.location=/etc/zookeeper/zookeeper.client.truststore.jks
8 | zookeeper.ssl.truststore.password=zk123
9 |
--------------------------------------------------------------------------------
/zookeeper/zookeeper.jaas.conf:
--------------------------------------------------------------------------------
1 | Server {
2 | org.apache.zookeeper.server.auth.DigestLoginModule required
3 | user_admin="the-password-of-admin-user-for-client";
4 | }
5 |
6 | QuorumServer {
7 | org.apache.zookeeper.server.auth.DigestLoginModule required
8 | user_admin="the-password-of-admin-user-for-quorum";
9 | }
10 |
11 | QuorumLearner {
12 | org.apache.zookeeper.server.auth.DigestLoginModule required
13 | username="admin"
14 | password="the-password-of-admin-user-for-quorum";
15 | }
16 |
--------------------------------------------------------------------------------