├── README.md ├── mysql ├── my.cnf ├── mysql_install.sh └── xbackup.sh ├── nfs ├── nfs_client.sh └── nfs_server.sh ├── nginx ├── files │ ├── index.php │ ├── nginx │ ├── nginx.conf │ ├── temp.conf │ └── www.conf └── nginx.sh ├── php └── php_install.sh ├── redis ├── files │ ├── redis │ └── redis.conf └── redis_install.sh ├── svn ├── command.txt ├── files │ ├── authz │ ├── ext.txt │ ├── passwd │ ├── post-commit │ ├── pre-commit │ ├── svn │ └── svnserve.conf └── svn_create.sh └── zookeeper └── zk_install.sh /README.md: -------------------------------------------------------------------------------- 1 | # scripts 2 | # hp test 3 | 4 | one key setup scripts 5 | 6 | # default path 7 | 8 | if dir is not exist it will be created 9 | 10 | /data 11 | 12 | /data/server 13 | 14 | /data/logs 15 | 16 | /data/rpm 17 | 18 | /data/scripts 19 | -------------------------------------------------------------------------------- /mysql/my.cnf: -------------------------------------------------------------------------------- 1 | # http://dev.mysql.com/doc/refman/5.6/en/server-configuration-defaults.html 2 | [client] 3 | port=3306 4 | socket=/data/server/mysql/mysql.sock 5 | 6 | [mysqld] 7 | sync_binlog=1 8 | server-id=1 9 | port=3306 10 | explicit_defaults_for_timestamp=true 11 | 12 | socket=/data/server/mysql/mysql.sock 13 | pid-file=/data/server/mysql/my3306.pid 14 | user=mysql 15 | datadir=/data/server/mysql/data 16 | tmpdir=/data/server/mysql/temp 17 | log-bin=/data/logs/mysql/db01-bin.log 18 | log-error=/data/logs/mysql/error.log 19 | slow_query_log_file=/data/logs/mysql/slow.log 20 | binlog_format=row 21 | slow_query_log = 1 22 | long_query_time=2 23 | 24 | wait_timeout=1800 25 | interactive_timeout=1800 26 | max_connections=1024 27 | back_log = 100 28 | #max_user_connections=490 29 | max_connect_errors=512 30 | character_set_server=utf8 31 | collation-server=utf8_general_ci 32 | #transaction_isolation = READ-COMMITTED 33 | skip-external-locking 34 | 35 | #key_buff 4G内存参考值 256M或384 36 
| #key_buffer_size = 256M 37 | max_allowed_packet = 16M 38 | table_open_cache = 2048 39 | sort_buffer_size = 2M 40 | join_buffer_size = 2M 41 | read_buffer_size = 2M 42 | read_rnd_buffer_size = 32M 43 | #myisam_sort_buffer_size = 128M 44 | 45 | #thread_stack = 256K 46 | #thread_cache_size = 8 47 | #query_cache_size = 256M 48 | #query_cache_limit = 8M 49 | #tmp_table_size = 64M 50 | 51 | expire-logs-days=16 52 | skip-name-resolve 53 | lower_case_table_names=1 54 | log_bin_trust_function_creators=1 55 | 56 | # InnoDB 57 | innodb_data_home_dir=/data/server/mysql/data 58 | #innodb_log_group_home_dir=/data/logs/mysql 59 | #innodb_buffer_pool_size=4G 60 | #innodb_log_file_size=512M 61 | #innodb_log_buffer_size=8M 62 | innodb_lock_wait_timeout=30 63 | innodb_file_per_table 64 | innodb_file_format=barracuda 65 | innodb_strict_mode=1 66 | innodb_flush_log_at_trx_commit=1 67 | #innodb_sort_buffer_size = 67108864 68 | 69 | #sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_TRANS_TABLES 70 | sql_mode=STRICT_TRANS_TABLES,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION,NO_AUTO_VALUE_ON_ZERO 71 | 72 | #GTID 73 | #gtid_mode = on 74 | #enforce_gtid_consistency = 1 75 | log_slave_updates 76 | #relay_log = /data/logs/mysql/db01-relay.log 77 | #relay_log_recovery = 1 78 | #binlog_gtid_simple_recovery = 1 79 | slave_skip_errors = all 80 | 81 | ########semi sync replication settings######## 82 | #plugin_dir=/usr/local/mysql/lib/plugin 83 | #plugin_load = "rpl_semi_sync_master=semisync_master.so;rpl_semi_sync_slave=semisync_slave.so" 84 | #rpl_semi_sync_master_enabled=1 85 | #rpl_semi_sync_slave_enabled=1 86 | #rpl_semi_sync_master_timeout = 1000 87 | 88 | [mysqldump] 89 | quick 90 | #max_allowed_packet = 16M 91 | 92 | [mysql] 93 | no-auto-rehash 94 | 95 | #[myisamchk] 96 | #key_buffer_size = 256M 97 | #sort_buffer_size = 256M 98 | #read_buffer = 32M 99 | #write_buffer = 32M 100 | 101 | [mysqlhotcopy] 102 | interactive-timeout 
-------------------------------------------------------------------------------- /mysql/mysql_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # mysql install 3 | #set -e 4 | MYSQLDIR=/data/server/mysql 5 | 6 | if [ -d $MYSQLDIR/bin ] 7 | then 8 | echo "your linux has installed mysql!" 9 | read -p "uninstall mysql and beginning a new install?[y,n]" confirm 10 | case $confirm in 11 | yes|y|Y|Yes|YES) 12 | echo "beginning clean install..." 13 | service mysqld stop 14 | rm -rf $MYSQLDIR 15 | rm -rf /usr/local/mysql 16 | rm -rf /etc/my.cnf 17 | rm -rf /data/logs/mysql 18 | rm -rf /etc/init.d/mysqld 19 | ;; 20 | no|n|N|NO|No) 21 | exit 1 22 | ;; 23 | *) 24 | exit 1 25 | ;; 26 | esac 27 | fi 28 | echo "=============================" 29 | echo "====mysql install script=====" 30 | echo "=============================" 31 | [ ! -d /data/server ] && mkdir -p /data/server 32 | [ ! -d /data/logs ] && mkdir -p /data/logs 33 | cd /data/server 34 | read -p "please type mysql version[default:5.6.29]:" mysqlv 35 | if [ -z $mysqlv ] 36 | then 37 | mysqlv=5.6.29 38 | fi 39 | if [ -f /data/rpm/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz ] 40 | then 41 | scp /data/rpm/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz /data/server 42 | else 43 | ls /data/server|grep "mysql-$mysqlv-linux-glibc2.5"|xargs rm -rf 44 | wget http://downloads.mysql.com/archives/get/file/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz 45 | fi 46 | if [ $? -eq 1 ] 47 | then 48 | echo "your mysql version is wrong!" 49 | exit 1 50 | fi 51 | echo "=============yum install packages==============" 52 | yum install -y libaio ncurses ncurses-devel libaio-devel 53 | tar zxvf mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz 54 | mv mysql-$mysqlv-linux-glibc2.5-x86_64 mysql 55 | cd $MYSQLDIR 56 | mkdir -p temp 57 | grep "mysql" /etc/group || groupadd mysql 58 | grep "mysql" /etc/passwd || useradd -r -g mysql mysql -s /sbin/nologin 59 | chown -R mysql:mysql . 
# Finish the MySQL installation: initialize the data directory, install the
# SysV init script and config file, wire up PATH and logging, archive the
# downloaded tarball, then start the service.

# Initialize system tables under $MYSQLDIR/data as the mysql user.
"$MYSQLDIR"/scripts/mysql_install_db --user=mysql --basedir="$MYSQLDIR" --datadir="$MYSQLDIR"/data
# Install the bundled init script (cwd is $MYSQLDIR at this point).
cp support-files/mysql.server /etc/init.d/mysqld
ln -s "$MYSQLDIR" /usr/local/mysql
source /etc/profile
# Keep a dated backup of any pre-existing /etc/my.cnf before replacing it.
[ -f /etc/my.cnf ] && cp /etc/my.cnf my.cnf.$(date +%y%m%d).bak
echo "backup /etc/my.cnf my.cnf.$(date +%y%m%d).bak"
rm -rf /etc/my.cnf
rm -rf "$MYSQLDIR"/my.cnf
cp /data/scripts/mysql/my.cnf "$MYSQLDIR"
ln -s "$MYSQLDIR"/my.cnf /etc/my.cnf
[ ! -d /data/logs/mysql ] && mkdir -p /data/logs/mysql
chown -R mysql.mysql /data/logs/mysql
# Put the mysql client tools on every login shell's PATH (idempotent).
grep "$MYSQLDIR/bin" /etc/bashrc || echo "export PATH=$MYSQLDIR/bin:\$PATH" >> /etc/bashrc
source /etc/bashrc
# Archive the downloaded tarball into /data/rpm for future reinstalls.
# BUGFIX: the original mv path was missing the "mysql-" prefix
# (/data/server/$mysqlv-...), so the tarball was never moved and was
# simply deleted by the rm below instead.
[ -f /data/server/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz ] && [ ! -f /data/rpm/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz ] && mv /data/server/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz /data/rpm/
chkconfig mysqld on
# Remove any leftover copy (no-op when the tarball was archived above).
rm -rf /data/server/mysql-$mysqlv-linux-glibc2.5-x86_64.tar.gz
echo "service mysqld start"
service mysqld start
echo "mysql install successfull!!"
80 | -------------------------------------------------------------------------------- /mysql/xbackup.sh: -------------------------------------------------------------------------------- 1 | #backup.sh 2 | #!/bin/sh 3 | #on xtrabackup 2.2.8 4 | # 第一次执行它的时候它会检查是否有完全备份,否则先创建一个全库备份 5 | # 当你再次运行它的时候,它会根据脚本中的设定来基于之前的全备或增量备份进行增量备份 6 | 7 | INNOBACKUPEX_PATH=innobackupex #INNOBACKUPEX的命令 8 | INNOBACKUPEXFULL=/usr/bin/$INNOBACKUPEX_PATH #INNOBACKUPEX的命令路径 9 | 10 | #mysql目标服务器以及用户名和密码 11 | MYSQL_CMD="--host=localhost --user=root --password=gsa2088 --port=3306" 12 | MYSQL_UP=" --user=root --password='gsa2088' --port=3306 " #mysqladmin的用户名和密码 13 | TMPLOG="/data/logs/mysql/innobackupex.$$.log" 14 | MY_CNF=/usr/local/mysql/my.cnf #mysql的配置文件 15 | MYSQL=/usr/local/mysql/bin/mysql 16 | MYSQL_ADMIN=/usr/local/mysql/bin/mysqladmin 17 | BACKUP_DIR=/data/nfs2_backup/db # 备份的主目录 18 | LOG_DIR=/$BACKUP_DIR/log # 日志的主目录 19 | FULLBACKUP_DIR=$BACKUP_DIR/full # 全库备份的目录 20 | INCRBACKUP_DIR=$BACKUP_DIR/incre # 增量备份的目录 21 | FULLBACKUP_INTERVAL=86400 # 全库备份的间隔周期,时间:秒 22 | KEEP_FULLBACKUP=7 # 至少保留几个全库备份 23 | logfiledate=backup.`date +%Y%m%d%H%M`.txt 24 | 25 | #开始时间 26 | STARTED_TIME=`date +%s` 27 | 28 | ############################################################################# 29 | # 显示错误并退出 30 | ############################################################################# 31 | 32 | error() 33 | { 34 | echo "$1" 1>&2 35 | exit 1 36 | } 37 | 38 | # 检查执行环境 39 | 40 | if [ ! -x $INNOBACKUPEXFULL ]; then 41 | error "$INNOBACKUPEXFULL未安装或未链接到/usr/bin." 42 | fi 43 | 44 | if [ ! -d $BACKUP_DIR ]; then 45 | error "备份目标文件夹:$BACKUP_DIR不存在." 46 | fi 47 | 48 | mysql_status=`netstat -nl | awk 'NR>2{if ($4 ~ /.*:3306/) {print "Yes";exit 0}}'` 49 | 50 | if [ "$mysql_status" != "Yes" ];then 51 | error "MySQL 没有启动运行." 52 | fi 53 | 54 | if ! `echo 'exit' | $MYSQL -s $MYSQL_CMD` ; then 55 | error "提供的数据库用户名或密码不正确!" 
fi

# ----- backup header ---------------------------------------------------
echo "----------------------------"
echo
echo "$0: MySQL备份脚本"
echo "开始于: $(date +%F' '%T' '%w)"
echo

# Ensure the full-backup, incremental-backup and log directories exist.
mkdir -p "$FULLBACKUP_DIR"
mkdir -p "$INCRBACKUP_DIR"
mkdir -p "$LOG_DIR"

# Newest full backup: top-level dirs under $FULLBACKUP_DIR, newest first.
LATEST_FULL_BACKUP=$(find "$FULLBACKUP_DIR" -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1)

# Modification time (epoch seconds) of that newest full backup.
LATEST_FULL_BACKUP_CREATED_TIME=$(stat -c %Y "$FULLBACKUP_DIR/$LATEST_FULL_BACKUP")

# If a full backup exists and has not passed $FULLBACKUP_INTERVAL (plus a
# 5s grace period), take an incremental backup based on it; otherwise the
# else branch (further below) performs a fresh full backup.
if [ "$LATEST_FULL_BACKUP" -a $(expr $LATEST_FULL_BACKUP_CREATED_TIME + $FULLBACKUP_INTERVAL + 5) -ge $STARTED_TIME ] ; then
  # Latest full backup is still valid: incrementals for it live in a
  # directory named after it under the incremental tree.
  echo -e "完全备份$LATEST_FULL_BACKUP未过期,将根据$LATEST_FULL_BACKUP名字作为增量备份基础目录名"
  echo " "
  NEW_INCRDIR=$INCRBACKUP_DIR/$LATEST_FULL_BACKUP
  mkdir -p "$NEW_INCRDIR"

  # Base for this incremental: the most recent prior incremental if one
  # exists, otherwise the full backup itself.
  LATEST_INCR_BACKUP=$(find "$NEW_INCRDIR" -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1)
  if [ ! "$LATEST_INCR_BACKUP" ] ; then
    INCRBASEDIR=$FULLBACKUP_DIR/$LATEST_FULL_BACKUP
    echo -e "增量备份将以$INCRBASEDIR作为备份基础目录"
    echo " "
  else
    INCRBASEDIR=$INCRBACKUP_DIR/${LATEST_FULL_BACKUP}/${LATEST_INCR_BACKUP}
    echo -e "增量备份将以$INCRBASEDIR作为备份基础目录"
    echo " "
  fi

  echo "使用$INCRBASEDIR作为基础本次增量备份的基础目录."
99 | $INNOBACKUPEXFULL --defaults-file=$MY_CNF --use-memory=4G $MYSQL_CMD --incremental $NEW_INCRDIR --incremental-basedir $INCRBASEDIR > $TMPLOG 2>&1 100 | 101 | #保留一份备份的详细日志 102 | 103 | cat $TMPLOG>/$LOG_DIR/$logfiledate 104 | 105 | if [ -z "`tail -1 $TMPLOG | grep 'innobackupex: completed OK!'`" ] ; then 106 | echo "$INNOBACKUPEX命令执行失败:"; echo 107 | echo -e "---------- $INNOBACKUPEX_PATH错误 ----------" 108 | cat $TMPLOG 109 | rm -f $TMPLOG 110 | exit 1 111 | fi 112 | 113 | THISBACKUP=`awk -- "/Backup created in directory/ { split( \\\$0, p, \"'\" ) ; print p[2] }" $TMPLOG` 114 | rm -f $TMPLOG 115 | 116 | 117 | echo -n "数据库成功备份到:$THISBACKUP" 118 | echo 119 | 120 | # 提示应该保留的备份文件起点 121 | 122 | LATEST_FULL_BACKUP=`find $FULLBACKUP_DIR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1` 123 | 124 | NEW_INCRDIR=$INCRBACKUP_DIR/$LATEST_FULL_BACKUP 125 | 126 | LATEST_INCR_BACKUP=`find $NEW_INCRDIR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1` 127 | 128 | RES_FULL_BACKUP=${FULLBACKUP_DIR}/${LATEST_FULL_BACKUP} 129 | 130 | RES_INCRE_BACKUP=`dirname ${INCRBACKUP_DIR}/${LATEST_FULL_BACKUP}/${LATEST_INCR_BACKUP}` 131 | 132 | echo 133 | echo -e '\e[31m NOTE:---------------------------------------------------------------------------------.\e[m' #红色 134 | echo -e "必须保留$KEEP_FULLBACKUP份全备即全备${RES_FULL_BACKUP}和${RES_INCRE_BACKUP}目录中所有增量备份." 135 | echo -e '\e[31m NOTE:---------------------------------------------------------------------------------.\e[m' #红色 136 | echo 137 | 138 | 139 | 140 | else 141 | echo "*********************************" 142 | echo -e "正在执行全新的完全备份...请稍等..." 
143 | echo "*********************************" 144 | $INNOBACKUPEXFULL --defaults-file=$MY_CNF --use-memory=4G $MYSQL_CMD $FULLBACKUP_DIR > $TMPLOG 2>&1 145 | #保留一份备份的详细日志 146 | 147 | cat $TMPLOG>/$LOG_DIR/$logfiledate 148 | 149 | 150 | if [ -z "`tail -1 $TMPLOG | grep 'innobackupex: completed OK!'`" ] ; then 151 | echo "$INNOBACKUPEX命令执行失败:"; echo 152 | echo -e "---------- $INNOBACKUPEX_PATH错误 ----------" 153 | cat $TMPLOG 154 | rm -f $TMPLOG 155 | exit 1 156 | fi 157 | 158 | 159 | THISBACKUP=`awk -- "/Backup created in directory/ { split( \\\$0, p, \"'\" ) ; print p[2] }" $TMPLOG` 160 | rm -f $TMPLOG 161 | 162 | echo -n "数据库成功备份到:$THISBACKUP" 163 | echo 164 | 165 | # 提示应该保留的备份文件起点 166 | 167 | LATEST_FULL_BACKUP=`find $FULLBACKUP_DIR -mindepth 1 -maxdepth 1 -type d -printf "%P\n" | sort -nr | head -1` 168 | 169 | RES_FULL_BACKUP=${FULLBACKUP_DIR}/${LATEST_FULL_BACKUP} 170 | 171 | echo 172 | echo -e '\e[31m NOTE:---------------------------------------------------------------------------------.\e[m' #红色 173 | echo -e "无增量备份,必须保留$KEEP_FULLBACKUP份全备即全备${RES_FULL_BACKUP}." 174 | echo -e '\e[31m NOTE:---------------------------------------------------------------------------------.\e[m' #红色 175 | echo 176 | 177 | fi 178 | 179 | #删除过期的全备 180 | 181 | echo -e "find expire backup file...........waiting........." 182 | echo -e "寻找过期的全备文件并删除">>/$LOG_DIR/$logfiledate 183 | for efile in $(/usr/bin/find $FULLBACKUP_DIR/ -mtime +6) 184 | do 185 | if [ -d ${efile} ]; then 186 | rm -rf "${efile}" 187 | echo -e "删除过期全备文件:${efile}" >>/$LOG_DIR/$logfiledate 188 | elif [ -f ${efile} ]; then 189 | rm -rf "${efile}" 190 | echo -e "删除过期全备文件:${efile}" >>/$LOG_DIR/$logfiledate 191 | fi; 192 | 193 | done 194 | 195 | if [ $? 
-eq "0" ];then 196 | echo 197 | echo -e "未找到可以删除的过期全备文件" 198 | fi 199 | 200 | echo 201 | echo "完成于: `date +%F' '%T' '%w`" 202 | exit 0 203 | -------------------------------------------------------------------------------- /nfs/nfs_client.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | yum install -y nfs-utils 3 | chkconfig rpcbind on 4 | service rpcbind start 5 | echo "10.100.1.1 nms1" >> /etc/hosts 6 | mkdir -p /data/nms1_nfs 7 | mount -t nfs4 nms1:/data/nfs_share /data/nms1_nfs 8 | echo "mount -t nfs4 nms1:/data/nfs_share /data/nms1_nfs" >> /etc/rc.local -------------------------------------------------------------------------------- /nfs/nfs_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | yum install -y nfs-utils 3 | chkconfig rpcbind on 4 | chkconfig nfs on 5 | mkdir -p /data/nfs_share 6 | echo "/data/nfs_share 10.100.1.0/24(rw,no_root_squash,no_all_squash,sync,anonuid=501,anongid=501) 7 | " >> /etc/exports 8 | service rpcbind start 9 | service nfs start 10 | -------------------------------------------------------------------------------- /nginx/files/index.php: -------------------------------------------------------------------------------- 1 | &1 | grep "configure arguments:" | sed 's/[^*]*--user=\([^ ]*\).*/\1/g' -` 34 | if [ -z "`grep $user /etc/passwd`" ]; then 35 | useradd -M -s /bin/nologin $user 36 | fi 37 | options=`$nginx -V 2>&1 | grep 'configure arguments:'` 38 | for opt in $options; do 39 | if [ `echo $opt | grep '.*-temp-path'` ]; then 40 | value=`echo $opt | cut -d "=" -f 2` 41 | if [ ! -d "$value" ]; then 42 | # echo "creating" $value 43 | mkdir -p $value && chown -R $user $value 44 | fi 45 | fi 46 | done 47 | } 48 | 49 | start() { 50 | [ -x $nginx ] || exit 5 51 | [ -f $NGINX_CONF_FILE ] || exit 6 52 | make_dirs 53 | echo -n $"Starting $prog: " 54 | daemon $nginx -c $NGINX_CONF_FILE 55 | retval=$? 
56 | echo 57 | [ $retval -eq 0 ] && touch $lockfile 58 | return $retval 59 | } 60 | 61 | stop() { 62 | echo -n $"Stopping $prog: " 63 | killproc $prog -QUIT 64 | retval=$? 65 | echo 66 | [ $retval -eq 0 ] && rm -f $lockfile 67 | return $retval 68 | } 69 | 70 | restart() { 71 | configtest || return $? 72 | stop 73 | sleep 1 74 | start 75 | } 76 | 77 | reload() { 78 | configtest || return $? 79 | echo -n $"Reloading $prog: " 80 | killproc $nginx -HUP 81 | RETVAL=$? 82 | echo 83 | } 84 | 85 | force_reload() { 86 | restart 87 | } 88 | 89 | configtest() { 90 | $nginx -t -c $NGINX_CONF_FILE 91 | } 92 | 93 | rh_status() { 94 | status $prog 95 | } 96 | 97 | rh_status_q() { 98 | rh_status >/dev/null 2>&1 99 | } 100 | 101 | case "$1" in 102 | start) 103 | rh_status_q && exit 0 104 | $1 105 | ;; 106 | stop) 107 | rh_status_q || exit 0 108 | $1 109 | ;; 110 | restart|configtest) 111 | $1 112 | ;; 113 | reload) 114 | rh_status_q || exit 7 115 | $1 116 | ;; 117 | force-reload) 118 | force_reload 119 | ;; 120 | status) 121 | rh_status 122 | ;; 123 | condrestart|try-restart) 124 | rh_status_q || exit 0 125 | ;; 126 | *) 127 | echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}" 128 | exit 2 129 | esac 130 | -------------------------------------------------------------------------------- /nginx/files/nginx.conf: -------------------------------------------------------------------------------- 1 | user nobody nobody; 2 | 3 | worker_processes 2; 4 | 5 | error_log /data/logs/nginx/nginx_error.log crit; 6 | 7 | pid /data/server/nginx/nginx.pid; 8 | 9 | #Specifies the value for maximum file descriptors that can be opened by this process. 
10 | worker_rlimit_nofile 51200; 11 | events 12 | { 13 | use epoll; 14 | worker_connections 51200; 15 | } 16 | 17 | http 18 | { 19 | include mime.types; 20 | default_type application/octet-stream; 21 | 22 | 23 | server_names_hash_bucket_size 128; 24 | client_header_buffer_size 32k; 25 | large_client_header_buffers 4 32k; 26 | client_max_body_size 8m; 27 | 28 | sendfile on; 29 | tcp_nopush on; 30 | 31 | keepalive_timeout 15; 32 | 33 | tcp_nodelay on; 34 | 35 | fastcgi_connect_timeout 300; 36 | fastcgi_send_timeout 300; 37 | fastcgi_read_timeout 300; 38 | fastcgi_buffer_size 64k; 39 | fastcgi_buffers 4 64k; 40 | fastcgi_busy_buffers_size 128k; 41 | fastcgi_temp_file_write_size 256k; 42 | 43 | proxy_connect_timeout 600; 44 | proxy_read_timeout 600; 45 | proxy_send_timeout 600; 46 | proxy_buffer_size 32k; 47 | proxy_buffers 4 32k; 48 | proxy_busy_buffers_size 64k; 49 | # proxy_temp_file_write_size 1024m; 50 | 51 | gzip on; 52 | gzip_min_length 1k; 53 | gzip_buffers 4 16k; 54 | gzip_http_version 1.1; 55 | gzip_comp_level 6; 56 | gzip_types text/plain application/x-javascript text/css application/xml; 57 | gzip_vary on; 58 | 59 | #limit_zone crawler $binary_remote_addr 10m; 60 | server_tokens off; 61 | #log format 62 | log_format access '$http_x_forwarded_for - $remote_user [$time_local] "$request" ' 63 | '$status $body_bytes_sent "$http_referer" ' 64 | '"$http_user_agent" $remote_addr'; 65 | include vhost/*.conf; 66 | } -------------------------------------------------------------------------------- /nginx/files/temp.conf: -------------------------------------------------------------------------------- 1 | server 2 | { 3 | listen 80; 4 | server_name {{domain}}; 5 | index index.html index.htm index.php; 6 | if ($request_uri ~ ".*/.svn/.*") 7 | { 8 | return 404; 9 | } 10 | location / 11 | { 12 | proxy_pass http://172.16.1.38:8001; 13 | proxy_set_header Host $host:$server_port; 14 | proxy_set_header X-Real-IP $remote_addr; 15 | proxy_set_header X-Forwarded-For 
$proxy_add_x_forwarded_for; 16 | 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /nginx/files/www.conf: -------------------------------------------------------------------------------- 1 | server 2 | { 3 | listen 80; 4 | server_name localhost; 5 | index index.html index.htm index.php; 6 | root /data/www; 7 | 8 | if ($request_uri ~ ".*/.svn/.*") 9 | { 10 | return 404; 11 | } 12 | location ~ \.php$ 13 | { 14 | try_files $uri =404; 15 | fastcgi_pass 127.0.0.1:9000; 16 | fastcgi_index index.php; 17 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 18 | include fastcgi_params; 19 | } 20 | access_log /data/logs/nginx/access.log access; 21 | } 22 | -------------------------------------------------------------------------------- /nginx/nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | [ -f /data/server/nginx/bin/nginx ] && echo "nginx has installed" && exit 0 3 | yum -y install gcc gcc-c++ autoconf automake zlib zlib-devel openssl openssl-devel pcre-devel 4 | cd /data/rpm 5 | [ -f /data/rpm/nginx-1.8.1.tar.gz ] || wget http://nginx.org/download/nginx-1.8.1.tar.gz 6 | tar zxvf nginx-1.8.1.tar.gz 7 | mkdir -p /data/logs/nginx 8 | chown -R nobody.nobody /data/logs/nginx 9 | cd nginx-1.8.1 10 | ./configure \ 11 | --prefix=/data/server/nginx \ 12 | --user=nobody \ 13 | --group=nobody \ 14 | --error-log-path=/data/logs/nginx/error.log \ 15 | --http-log-path=/data/logs/nginx/access.log \ 16 | --with-http_realip_module \ 17 | --with-http_ssl_module \ 18 | --with-http_flv_module \ 19 | --with-http_gzip_static_module \ 20 | --with-http_stub_status_module \ 21 | --with-pcre 22 | make && make install 23 | ln -s /data/server/nginx/sbin/nginx /usr/sbin/nginx 24 | mkdir -p /data/server/nginx/conf/vhost 25 | cp /data/scripts/nginx/files/nginx /etc/init.d 26 | rm -rf /data/server/nginx/conf/nginx.conf 27 | cp /data/scripts/nginx/files/nginx.conf 
/data/server/nginx/conf 28 | cp /data/scripts/nginx/files/temp.conf /data/server/nginx/conf/vhost/temp.backup 29 | chmod +x /etc/init.d/nginx 30 | chkconfig --add nginx 31 | chkconfig nginx on 32 | rm -rf /data/rpm/nginx-1.8.1 33 | mkdir -p /data/www 34 | scp /data/scripts/files/index.php /data/www 35 | scp /data/scripts/files/www.conf /data/server/nginx/conf/vhost 36 | service nginx restart 37 | echo "nginx installed successfully!" 38 | -------------------------------------------------------------------------------- /php/php_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #php_easy_install 20160423 by GuoHongze 3 | set -e 4 | if [ -f /usr/bin/php ] 5 | then 6 | echo "your linux has install php!" 7 | exit 1 8 | fi 9 | read -p "please type php version[default:5.6.20]:" phpversion 10 | if [ ! $phpversion ] 11 | then 12 | phpversion=5.6.20 13 | fi 14 | read -p "please input php-fpm max children(default:5):" max 15 | if [ -z $max ] 16 | then 17 | max=5 18 | fi 19 | read -p "please confirm you will install php-$phpversion and fpm children $max [y/n]" con1 20 | case $con1 in 21 | yes|y) 22 | echo "beginning install...." 
23 | ;; 24 | no|n) 25 | exit 1 26 | ;; 27 | *) 28 | exit 1 29 | ;; 30 | esac 31 | if [ -d /data/src ] 32 | then 33 | echo 34 | else 35 | mkdir /data/src 36 | fi 37 | cd /data/src 38 | [ -f /data/src/php-$phpversion.tar.gz ] || wget http://cn2.php.net/distributions/php-$phpversion.tar.gz 39 | tar zxvf php-$phpversion.tar.gz 40 | cd /data/src/php-$phpversion 41 | yum install -y gmp-devel \ 42 | readline-devel \ 43 | gcc \ 44 | gcc-c++ \ 45 | libicu-devel libicu \ 46 | ncurses ncurses-devel \ 47 | pcre pcre-devel \ 48 | libjpeg libjpeg-devel \ 49 | libpng libpng-devel \ 50 | freetype freetype-devel \ 51 | gettext gettext-devel \ 52 | libtiff libtiff-devel \ 53 | libxml2 libxml2-devel \ 54 | zlib zlib-devel \ 55 | glibc glibc-devel \ 56 | glib2 glib2-devel \ 57 | bzip2 bzip2-devel \ 58 | curl curl-devel \ 59 | openssl openssl-devel \ 60 | openldap openldap-devel \ 61 | libXpm libXpm-devel \ 62 | gd gd-devel \ 63 | libmcrypt libmcrypt-devel \ 64 | libtool \ 65 | fontconfig fontconfig-devel 66 | sleep 1 67 | ./configure \ 68 | --prefix=/data/server/php \ 69 | --with-config-file-path=/data/server/php/etc \ 70 | --disable-rpath \ 71 | --disable-debug \ 72 | --enable-fpm \ 73 | --with-fpm-user=nobody \ 74 | --with-fpm-group=nobody \ 75 | --with-mysqli=mysqlnd \ 76 | --with-pdo-mysql=mysqlnd \ 77 | --with-mysql=mysqlnd \ 78 | --with-libxml-dir \ 79 | --with-gd \ 80 | --with-jpeg-dir \ 81 | --with-png-dir \ 82 | --with-freetype-dir \ 83 | --enable-calendar \ 84 | --with-iconv-dir \ 85 | --enable-bcmath \ 86 | --with-zlib \ 87 | --with-zlib-dir \ 88 | --with-mcrypt \ 89 | --with-mhash \ 90 | --enable-opcache \ 91 | --enable-soap \ 92 | --enable-gd-native-ttf \ 93 | --enable-ftp \ 94 | --enable-mbstring \ 95 | --enable-exif \ 96 | --disable-ipv6 \ 97 | --with-pear \ 98 | --with-curl \ 99 | --enable-sockets \ 100 | --with-xpm-dir \ 101 | --with-openssl \ 102 | --enable-pcntl \ 103 | --enable-shmop \ 104 | --enable-sysvmsg \ 105 | --enable-sysvsem \ 106 | --enable-sysvshm \ 107 | 
--with-gettext \ 108 | --with-bz2 \ 109 | --enable-zip \ 110 | --enable-intl \ 111 | --with-xmlrpc \ 112 | --enable-inline-optimization 113 | make && make install 114 | cp php.ini-production /data/server/php/etc/php.ini 115 | ln -s /data/server/php/etc/php.ini /etc/php.ini 116 | cp /data/server/php/etc/php-fpm.conf.default /data/server/php/etc/php-fpm.conf 117 | cp sapi/fpm/init.d.php-fpm /etc/init.d/php-fpm 118 | ln -s /data/server/php/bin/php /usr/bin/php 119 | chmod +x /etc/init.d/php-fpm 120 | chkconfig php-fpm on 121 | 122 | #config file 123 | php_config=/data/server/php/etc/php.ini 124 | fpm_config=/data/server/php/etc/php-fpm.conf 125 | prc="date.timezone = PRC" 126 | sed -i "926a $prc" $php_config 127 | sed -i "1872a opcache.enable=1" $php_config 128 | sed -i "1873a opcache.enable_cli=1" $php_config 129 | sed -i "1874a opcache.memory_consumption=1024" $php_config 130 | sed -i "1875a opcache.interned_strings_buffer=8" $php_config 131 | sed -i "1876a opcache.max_accelerated_files=4000" $php_config 132 | sed -i "1877a opcache.revalidate_freq=60" $php_config 133 | sed -i "1878a opcache.fast_shutdown=1" $php_config 134 | sed -i "1879a zend_extension=opcache.so" $php_config 135 | sed -i "s/pm = dynamic/pm = static/g" $fpm_config 136 | sed -i "s/pm.max_children = 5/pm.max_children = $max/g" $fpm_config 137 | service php-fpm start 138 | chkconfig php-fpm on 139 | echo "install successful!" 140 | -------------------------------------------------------------------------------- /redis/files/redis: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Simple Redis init.d script conceived to work on Linux systems 4 | # as it does use of the /proc filesystem. 
5 | # chkconfig: - 85 15 6 | REDISPORT=6379 7 | EXEC=/data/server/redis/src/redis-server 8 | CLIEXEC=/data/server/redis/src/redis-cli 9 | 10 | PIDFILE=/var/run/redis.pid 11 | CONF="/data/server/redis/redis.conf" 12 | 13 | case "$1" in 14 | start) 15 | if [ -f $PIDFILE ] 16 | then 17 | echo "$PIDFILE exists, process is already running or crashed" 18 | else 19 | echo "Starting Redis server..." 20 | $EXEC $CONF 21 | fi 22 | ;; 23 | stop) 24 | if [ ! -f $PIDFILE ] 25 | then 26 | echo "$PIDFILE does not exist, process is not running" 27 | else 28 | PID=$(cat $PIDFILE) 29 | echo "Stopping ..." 30 | $CLIEXEC -p $REDISPORT shutdown 31 | while [ -x /proc/${PID} ] 32 | do 33 | echo "Waiting for Redis to shutdown ..." 34 | sleep 1 35 | done 36 | echo "Redis stopped" 37 | fi 38 | ;; 39 | status) 40 | if [ -f $PIDFILE ] 41 | then 42 | echo "redis server is running....." 43 | else 44 | echo "redis is stopped" 45 | fi 46 | ;; 47 | *) 48 | echo "Please use start or stop or status" 49 | ;; 50 | esac 51 | -------------------------------------------------------------------------------- /redis/files/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. 
This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## NETWORK ##################################### 39 | 40 | # By default, if no "bind" configuration directive is specified, Redis listens 41 | # for connections from all the network interfaces available on the server. 42 | # It is possible to listen to just one or multiple selected interfaces using 43 | # the "bind" configuration directive, followed by one or more IP addresses. 44 | # 45 | # Examples: 46 | # 47 | # bind 192.168.1.100 10.0.0.1 48 | # bind 127.0.0.1 ::1 49 | # 50 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 51 | # internet, binding to all the interfaces is dangerous and will expose the 52 | # instance to everybody on the internet. So by default we uncomment the 53 | # following bind directive, that will force Redis to listen only into 54 | # the IPv4 lookback interface address (this means Redis will be able to 55 | # accept connections only from clients running into the same computer it 56 | # is running). 57 | # 58 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 59 | # JUST COMMENT THE FOLLOWING LINE. 
60 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | bind 127.0.0.1 62 | 63 | # Protected mode is a layer of security protection, in order to avoid that 64 | # Redis instances left open on the internet are accessed and exploited. 65 | # 66 | # When protected mode is on and if: 67 | # 68 | # 1) The server is not binding explicitly to a set of addresses using the 69 | # "bind" directive. 70 | # 2) No password is configured. 71 | # 72 | # The server only accepts connections from clients connecting from the 73 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 74 | # sockets. 75 | # 76 | # By default protected mode is enabled. You should disable it only if 77 | # you are sure you want clients from other hosts to connect to Redis 78 | # even if no authentication is configured, nor a specific set of interfaces 79 | # are explicitly listed using the "bind" directive. 80 | protected-mode yes 81 | 82 | # Accept connections on the specified port, default is 6379 (IANA #815344). 83 | # If port 0 is specified Redis will not listen on a TCP socket. 84 | port 6379 85 | 86 | # TCP listen() backlog. 87 | # 88 | # In high requests-per-second environments you need an high backlog in order 89 | # to avoid slow clients connections issues. Note that the Linux kernel 90 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 91 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 92 | # in order to get the desired effect. 93 | tcp-backlog 511 94 | 95 | # Unix socket. 96 | # 97 | # Specify the path for the Unix socket that will be used to listen for 98 | # incoming connections. There is no default, so Redis will not listen 99 | # on a unix socket when not specified. 100 | # 101 | # unixsocket /tmp/redis.sock 102 | # unixsocketperm 700 103 | 104 | # Close the connection after a client is idle for N seconds (0 to disable) 105 | timeout 0 106 | 107 | # TCP keepalive. 
108 | # 109 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 110 | # of communication. This is useful for two reasons: 111 | # 112 | # 1) Detect dead peers. 113 | # 2) Take the connection alive from the point of view of network 114 | # equipment in the middle. 115 | # 116 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 117 | # Note that to close the connection the double of the time is needed. 118 | # On other kernels the period depends on the kernel configuration. 119 | # 120 | # A reasonable value for this option is 60 seconds. 121 | tcp-keepalive 0 122 | 123 | ################################# GENERAL ##################################### 124 | 125 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 126 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 127 | daemonize yes 128 | 129 | # If you run Redis from upstart or systemd, Redis can interact with your 130 | # supervision tree. Options: 131 | # supervised no - no supervision interaction 132 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 133 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 134 | # supervised auto - detect upstart or systemd method based on 135 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 136 | # Note: these supervision methods only signal "process is ready." 137 | # They do not enable continuous liveness pings back to your supervisor. 138 | supervised no 139 | 140 | # If a pid file is specified, Redis writes it where specified at startup 141 | # and removes it at exit. 142 | # 143 | # When the server runs non daemonized, no pid file is created if none is 144 | # specified in the configuration. When the server is daemonized, the pid file 145 | # is used even if not specified, defaulting to "/var/run/redis.pid". 
146 | # 147 | # Creating a pid file is best effort: if Redis is not able to create it 148 | # nothing bad happens, the server will start and run normally. 149 | pidfile /var/run/redis.pid 150 | 151 | # Specify the server verbosity level. 152 | # This can be one of: 153 | # debug (a lot of information, useful for development/testing) 154 | # verbose (many rarely useful info, but not a mess like the debug level) 155 | # notice (moderately verbose, what you want in production probably) 156 | # warning (only very important / critical messages are logged) 157 | loglevel notice 158 | 159 | # Specify the log file name. Also the empty string can be used to force 160 | # Redis to log on the standard output. Note that if you use standard 161 | # output for logging but daemonize, logs will be sent to /dev/null 162 | logfile "" 163 | 164 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 165 | # and optionally update the other syslog parameters to suit your needs. 166 | # syslog-enabled no 167 | 168 | # Specify the syslog identity. 169 | # syslog-ident redis 170 | 171 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 172 | # syslog-facility local0 173 | 174 | # Set the number of databases. The default database is DB 0, you can select 175 | # a different one on a per-connection basis using SELECT <dbid> where 176 | # dbid is a number between 0 and 'databases'-1 177 | databases 16 178 | 179 | ################################ SNAPSHOTTING ################################ 180 | # 181 | # Save the DB on disk: 182 | # 183 | # save <seconds> <changes> 184 | # 185 | # Will save the DB if both the given number of seconds and the given 186 | # number of write operations against the DB occurred. 
187 | # 188 | # In the example below the behaviour will be to save: 189 | # after 900 sec (15 min) if at least 1 key changed 190 | # after 300 sec (5 min) if at least 10 keys changed 191 | # after 60 sec if at least 10000 keys changed 192 | # 193 | # Note: you can disable saving completely by commenting out all "save" lines. 194 | # 195 | # It is also possible to remove all the previously configured save 196 | # points by adding a save directive with a single empty string argument 197 | # like in the following example: 198 | # 199 | # save "" 200 | 201 | save 900 1 202 | save 300 10 203 | save 60 10000 204 | 205 | # By default Redis will stop accepting writes if RDB snapshots are enabled 206 | # (at least one save point) and the latest background save failed. 207 | # This will make the user aware (in a hard way) that data is not persisting 208 | # on disk properly, otherwise chances are that no one will notice and some 209 | # disaster will happen. 210 | # 211 | # If the background saving process will start working again Redis will 212 | # automatically allow writes again. 213 | # 214 | # However if you have setup your proper monitoring of the Redis server 215 | # and persistence, you may want to disable this feature so that Redis will 216 | # continue to work as usual even if there are problems with disk, 217 | # permissions, and so forth. 218 | stop-writes-on-bgsave-error yes 219 | 220 | # Compress string objects using LZF when dump .rdb databases? 221 | # For default that's set to 'yes' as it's almost always a win. 222 | # If you want to save some CPU in the saving child set it to 'no' but 223 | # the dataset will likely be bigger if you have compressible values or keys. 224 | rdbcompression yes 225 | 226 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 
227 | # This makes the format more resistant to corruption but there is a performance 228 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 229 | # for maximum performances. 230 | # 231 | # RDB files created with checksum disabled have a checksum of zero that will 232 | # tell the loading code to skip the check. 233 | rdbchecksum yes 234 | 235 | # The filename where to dump the DB 236 | dbfilename dump.rdb 237 | 238 | # The working directory. 239 | # 240 | # The DB will be written inside this directory, with the filename specified 241 | # above using the 'dbfilename' configuration directive. 242 | # 243 | # The Append Only File will also be created inside this directory. 244 | # 245 | # Note that you must specify a directory here, not a file name. 246 | dir ./ 247 | 248 | ################################# REPLICATION ################################# 249 | 250 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 251 | # another Redis server. A few things to understand ASAP about Redis replication. 252 | # 253 | # 1) Redis replication is asynchronous, but you can configure a master to 254 | # stop accepting writes if it appears to be not connected with at least 255 | # a given number of slaves. 256 | # 2) Redis slaves are able to perform a partial resynchronization with the 257 | # master if the replication link is lost for a relatively small amount of 258 | # time. You may want to configure the replication backlog size (see the next 259 | # sections of this file) with a sensible value depending on your needs. 260 | # 3) Replication is automatic and does not need user intervention. After a 261 | # network partition slaves automatically try to reconnect to masters 262 | # and resynchronize with them. 
263 | # 264 | # slaveof <masterip> <masterport> 265 | 266 | # If the master is password protected (using the "requirepass" configuration 267 | # directive below) it is possible to tell the slave to authenticate before 268 | # starting the replication synchronization process, otherwise the master will 269 | # refuse the slave request. 270 | # 271 | # masterauth <master-password> 272 | 273 | # When a slave loses its connection with the master, or when the replication 274 | # is still in progress, the slave can act in two different ways: 275 | # 276 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 277 | # still reply to client requests, possibly with out of date data, or the 278 | # data set may just be empty if this is the first synchronization. 279 | # 280 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 281 | # an error "SYNC with master in progress" to all the kind of commands 282 | # but to INFO and SLAVEOF. 283 | # 284 | slave-serve-stale-data yes 285 | 286 | # You can configure a slave instance to accept writes or not. Writing against 287 | # a slave instance may be useful to store some ephemeral data (because data 288 | # written on a slave will be easily deleted after resync with the master) but 289 | # may also cause problems if clients are writing to it because of a 290 | # misconfiguration. 291 | # 292 | # Since Redis 2.6 by default slaves are read-only. 293 | # 294 | # Note: read only slaves are not designed to be exposed to untrusted clients 295 | # on the internet. It's just a protection layer against misuse of the instance. 296 | # Still a read only slave exports by default all the administrative commands 297 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 298 | # security of read only slaves using 'rename-command' to shadow all the 299 | # administrative / dangerous commands. 300 | slave-read-only yes 301 | 302 | # Replication SYNC strategy: disk or socket. 
303 | # 304 | # ------------------------------------------------------- 305 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 306 | # ------------------------------------------------------- 307 | # 308 | # New slaves and reconnecting slaves that are not able to continue the replication 309 | # process just receiving differences, need to do what is called a "full 310 | # synchronization". An RDB file is transmitted from the master to the slaves. 311 | # The transmission can happen in two different ways: 312 | # 313 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 314 | # file on disk. Later the file is transferred by the parent 315 | # process to the slaves incrementally. 316 | # 2) Diskless: The Redis master creates a new process that directly writes the 317 | # RDB file to slave sockets, without touching the disk at all. 318 | # 319 | # With disk-backed replication, while the RDB file is generated, more slaves 320 | # can be queued and served with the RDB file as soon as the current child producing 321 | # the RDB file finishes its work. With diskless replication instead once 322 | # the transfer starts, new slaves arriving will be queued and a new transfer 323 | # will start when the current one terminates. 324 | # 325 | # When diskless replication is used, the master waits a configurable amount of 326 | # time (in seconds) before starting the transfer in the hope that multiple slaves 327 | # will arrive and the transfer can be parallelized. 328 | # 329 | # With slow disks and fast (large bandwidth) networks, diskless replication 330 | # works better. 331 | repl-diskless-sync no 332 | 333 | # When diskless replication is enabled, it is possible to configure the delay 334 | # the server waits in order to spawn the child that transfers the RDB via socket 335 | # to the slaves. 
336 | # 337 | # This is important since once the transfer starts, it is not possible to serve 338 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 339 | # waits a delay in order to let more slaves arrive. 340 | # 341 | # The delay is specified in seconds, and by default is 5 seconds. To disable 342 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 343 | repl-diskless-sync-delay 5 344 | 345 | # Slaves send PINGs to server in a predefined interval. It's possible to change 346 | # this interval with the repl_ping_slave_period option. The default value is 10 347 | # seconds. 348 | # 349 | # repl-ping-slave-period 10 350 | 351 | # The following option sets the replication timeout for: 352 | # 353 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 354 | # 2) Master timeout from the point of view of slaves (data, pings). 355 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 356 | # 357 | # It is important to make sure that this value is greater than the value 358 | # specified for repl-ping-slave-period otherwise a timeout will be detected 359 | # every time there is low traffic between the master and the slave. 360 | # 361 | # repl-timeout 60 362 | 363 | # Disable TCP_NODELAY on the slave socket after SYNC? 364 | # 365 | # If you select "yes" Redis will use a smaller number of TCP packets and 366 | # less bandwidth to send data to slaves. But this can add a delay for 367 | # the data to appear on the slave side, up to 40 milliseconds with 368 | # Linux kernels using a default configuration. 369 | # 370 | # If you select "no" the delay for data to appear on the slave side will 371 | # be reduced but more bandwidth will be used for replication. 372 | # 373 | # By default we optimize for low latency, but in very high traffic conditions 374 | # or when the master and slaves are many hops away, turning this to "yes" may 375 | # be a good idea. 
376 | repl-disable-tcp-nodelay no 377 | 378 | # Set the replication backlog size. The backlog is a buffer that accumulates 379 | # slave data when slaves are disconnected for some time, so that when a slave 380 | # wants to reconnect again, often a full resync is not needed, but a partial 381 | # resync is enough, just passing the portion of data the slave missed while 382 | # disconnected. 383 | # 384 | # The bigger the replication backlog, the longer the time the slave can be 385 | # disconnected and later be able to perform a partial resynchronization. 386 | # 387 | # The backlog is only allocated once there is at least a slave connected. 388 | # 389 | # repl-backlog-size 1mb 390 | 391 | # After a master has no longer connected slaves for some time, the backlog 392 | # will be freed. The following option configures the amount of seconds that 393 | # need to elapse, starting from the time the last slave disconnected, for 394 | # the backlog buffer to be freed. 395 | # 396 | # A value of 0 means to never release the backlog. 397 | # 398 | # repl-backlog-ttl 3600 399 | 400 | # The slave priority is an integer number published by Redis in the INFO output. 401 | # It is used by Redis Sentinel in order to select a slave to promote into a 402 | # master if the master is no longer working correctly. 403 | # 404 | # A slave with a low priority number is considered better for promotion, so 405 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 406 | # pick the one with priority 10, that is the lowest. 407 | # 408 | # However a special priority of 0 marks the slave as not able to perform the 409 | # role of master, so a slave with priority of 0 will never be selected by 410 | # Redis Sentinel for promotion. 411 | # 412 | # By default the priority is 100. 413 | slave-priority 100 414 | 415 | # It is possible for a master to stop accepting writes if there are less than 416 | # N slaves connected, having a lag less or equal than M seconds. 
417 | # 418 | # The N slaves need to be in "online" state. 419 | # 420 | # The lag in seconds, that must be <= the specified value, is calculated from 421 | # the last ping received from the slave, that is usually sent every second. 422 | # 423 | # This option does not GUARANTEE that N replicas will accept the write, but 424 | # will limit the window of exposure for lost writes in case not enough slaves 425 | # are available, to the specified number of seconds. 426 | # 427 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 428 | # 429 | # min-slaves-to-write 3 430 | # min-slaves-max-lag 10 431 | # 432 | # Setting one or the other to 0 disables the feature. 433 | # 434 | # By default min-slaves-to-write is set to 0 (feature disabled) and 435 | # min-slaves-max-lag is set to 10. 436 | 437 | ################################## SECURITY ################################### 438 | 439 | # Require clients to issue AUTH before processing any other 440 | # commands. This might be useful in environments in which you do not trust 441 | # others with access to the host running redis-server. 442 | # 443 | # This should stay commented out for backward compatibility and because most 444 | # people do not need auth (e.g. they run their own servers). 445 | # 446 | # Warning: since Redis is pretty fast an outside user can try up to 447 | # 150k passwords per second against a good box. This means that you should 448 | # use a very strong password otherwise it will be very easy to break. 449 | # 450 | # requirepass foobared 451 | 452 | # Command renaming. 453 | # 454 | # It is possible to change the name of dangerous commands in a shared 455 | # environment. For instance the CONFIG command may be renamed into something 456 | # hard to guess so that it will still be available for internal-use tools 457 | # but not available for general clients. 
458 | # 459 | # Example: 460 | # 461 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 462 | # 463 | # It is also possible to completely kill a command by renaming it into 464 | # an empty string: 465 | # 466 | # rename-command CONFIG "" 467 | # 468 | # Please note that changing the name of commands that are logged into the 469 | # AOF file or transmitted to slaves may cause problems. 470 | 471 | ################################### LIMITS #################################### 472 | 473 | # Set the max number of connected clients at the same time. By default 474 | # this limit is set to 10000 clients, however if the Redis server is not 475 | # able to configure the process file limit to allow for the specified limit 476 | # the max number of allowed clients is set to the current file limit 477 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 478 | # 479 | # Once the limit is reached Redis will close all the new connections sending 480 | # an error 'max number of clients reached'. 481 | # 482 | # maxclients 10000 483 | 484 | # Don't use more memory than the specified amount of bytes. 485 | # When the memory limit is reached Redis will try to remove keys 486 | # according to the eviction policy selected (see maxmemory-policy). 487 | # 488 | # If Redis can't remove keys according to the policy, or if the policy is 489 | # set to 'noeviction', Redis will start to reply with errors to commands 490 | # that would use more memory, like SET, LPUSH, and so on, and will continue 491 | # to reply to read-only commands like GET. 492 | # 493 | # This option is usually useful when using Redis as an LRU cache, or to set 494 | # a hard memory limit for an instance (using the 'noeviction' policy). 
495 | # 496 | # WARNING: If you have slaves attached to an instance with maxmemory on, 497 | # the size of the output buffers needed to feed the slaves are subtracted 498 | # from the used memory count, so that network problems / resyncs will 499 | # not trigger a loop where keys are evicted, and in turn the output 500 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 501 | # of more keys, and so forth until the database is completely emptied. 502 | # 503 | # In short... if you have slaves attached it is suggested that you set a lower 504 | # limit for maxmemory so that there is some free RAM on the system for slave 505 | # output buffers (but this is not needed if the policy is 'noeviction'). 506 | # 507 | # maxmemory <bytes> 508 | 509 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 510 | # is reached. You can select among five behaviors: 511 | # 512 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 513 | # allkeys-lru -> remove any key according to the LRU algorithm 514 | # volatile-random -> remove a random key with an expire set 515 | # allkeys-random -> remove a random key, any key 516 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 517 | # noeviction -> don't expire at all, just return an error on write operations 518 | # 519 | # Note: with any of the above policies, Redis will return an error on write 520 | # operations, when there are no suitable keys for eviction. 
521 | # 522 | # At the date of writing these commands are: set setnx setex append 523 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 524 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 525 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 526 | # getset mset msetnx exec sort 527 | # 528 | # The default is: 529 | # 530 | # maxmemory-policy noeviction 531 | 532 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 533 | # algorithms (in order to save memory), so you can tune it for speed or 534 | # accuracy. For default Redis will check five keys and pick the one that was 535 | # used less recently, you can change the sample size using the following 536 | # configuration directive. 537 | # 538 | # The default of 5 produces good enough results. 10 Approximates very closely 539 | # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. 540 | # 541 | # maxmemory-samples 5 542 | 543 | ############################## APPEND ONLY MODE ############################### 544 | 545 | # By default Redis asynchronously dumps the dataset on disk. This mode is 546 | # good enough in many applications, but an issue with the Redis process or 547 | # a power outage may result into a few minutes of writes lost (depending on 548 | # the configured save points). 549 | # 550 | # The Append Only File is an alternative persistence mode that provides 551 | # much better durability. For instance using the default data fsync policy 552 | # (see later in the config file) Redis can lose just one second of writes in a 553 | # dramatic event like a server power outage, or a single write if something 554 | # wrong with the Redis process itself happens, but the operating system is 555 | # still running correctly. 556 | # 557 | # AOF and RDB persistence can be enabled at the same time without problems. 
558 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 559 | # with the better durability guarantees. 560 | # 561 | # Please check http://redis.io/topics/persistence for more information. 562 | 563 | appendonly no 564 | 565 | # The name of the append only file (default: "appendonly.aof") 566 | 567 | appendfilename "appendonly.aof" 568 | 569 | # The fsync() call tells the Operating System to actually write data on disk 570 | # instead of waiting for more data in the output buffer. Some OS will really flush 571 | # data on disk, some other OS will just try to do it ASAP. 572 | # 573 | # Redis supports three different modes: 574 | # 575 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 576 | # always: fsync after every write to the append only log. Slow, Safest. 577 | # everysec: fsync only one time every second. Compromise. 578 | # 579 | # The default is "everysec", as that's usually the right compromise between 580 | # speed and data safety. It's up to you to understand if you can relax this to 581 | # "no" that will let the operating system flush the output buffer when 582 | # it wants, for better performances (but if you can live with the idea of 583 | # some data loss consider the default persistence mode that's snapshotting), 584 | # or on the contrary, use "always" that's very slow but a bit safer than 585 | # everysec. 586 | # 587 | # More details please check the following article: 588 | # http://antirez.com/post/redis-persistence-demystified.html 589 | # 590 | # If unsure, use "everysec". 591 | 592 | # appendfsync always 593 | appendfsync everysec 594 | # appendfsync no 595 | 596 | # When the AOF fsync policy is set to always or everysec, and a background 597 | # saving process (a background save or AOF log background rewriting) is 598 | # performing a lot of I/O against the disk, in some Linux configurations 599 | # Redis may block too long on the fsync() call. 
Note that there is no fix for 600 | # this currently, as even performing fsync in a different thread will block 601 | # our synchronous write(2) call. 602 | # 603 | # In order to mitigate this problem it's possible to use the following option 604 | # that will prevent fsync() from being called in the main process while a 605 | # BGSAVE or BGREWRITEAOF is in progress. 606 | # 607 | # This means that while another child is saving, the durability of Redis is 608 | # the same as "appendfsync none". In practical terms, this means that it is 609 | # possible to lose up to 30 seconds of log in the worst scenario (with the 610 | # default Linux settings). 611 | # 612 | # If you have latency problems turn this to "yes". Otherwise leave it as 613 | # "no" that is the safest pick from the point of view of durability. 614 | 615 | no-appendfsync-on-rewrite no 616 | 617 | # Automatic rewrite of the append only file. 618 | # Redis is able to automatically rewrite the log file implicitly calling 619 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 620 | # 621 | # This is how it works: Redis remembers the size of the AOF file after the 622 | # latest rewrite (if no rewrite has happened since the restart, the size of 623 | # the AOF at startup is used). 624 | # 625 | # This base size is compared to the current size. If the current size is 626 | # bigger than the specified percentage, the rewrite is triggered. Also 627 | # you need to specify a minimal size for the AOF file to be rewritten, this 628 | # is useful to avoid rewriting the AOF file even if the percentage increase 629 | # is reached but it is still pretty small. 630 | # 631 | # Specify a percentage of zero in order to disable the automatic AOF 632 | # rewrite feature. 
633 | 634 | auto-aof-rewrite-percentage 100 635 | auto-aof-rewrite-min-size 64mb 636 | 637 | # An AOF file may be found to be truncated at the end during the Redis 638 | # startup process, when the AOF data gets loaded back into memory. 639 | # This may happen when the system where Redis is running 640 | # crashes, especially when an ext4 filesystem is mounted without the 641 | # data=ordered option (however this can't happen when Redis itself 642 | # crashes or aborts but the operating system still works correctly). 643 | # 644 | # Redis can either exit with an error when this happens, or load as much 645 | # data as possible (the default now) and start if the AOF file is found 646 | # to be truncated at the end. The following option controls this behavior. 647 | # 648 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 649 | # the Redis server starts emitting a log to inform the user of the event. 650 | # Otherwise if the option is set to no, the server aborts with an error 651 | # and refuses to start. When the option is set to no, the user requires 652 | # to fix the AOF file using the "redis-check-aof" utility before to restart 653 | # the server. 654 | # 655 | # Note that if the AOF file will be found to be corrupted in the middle 656 | # the server will still exit with an error. This option only applies when 657 | # Redis will try to read more data from the AOF file but not enough bytes 658 | # will be found. 659 | aof-load-truncated yes 660 | 661 | ################################ LUA SCRIPTING ############################### 662 | 663 | # Max execution time of a Lua script in milliseconds. 664 | # 665 | # If the maximum execution time is reached Redis will log that a script is 666 | # still in execution after the maximum allowed time and will start to 667 | # reply to queries with an error. 
668 | # 669 | # When a long running script exceeds the maximum execution time only the 670 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 671 | # used to stop a script that did not yet called write commands. The second 672 | # is the only way to shut down the server in the case a write command was 673 | # already issued by the script but the user doesn't want to wait for the natural 674 | # termination of the script. 675 | # 676 | # Set it to 0 or a negative value for unlimited execution without warnings. 677 | lua-time-limit 5000 678 | 679 | ################################ REDIS CLUSTER ############################### 680 | # 681 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 682 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 683 | # in order to mark it as "mature" we need to wait for a non trivial percentage 684 | # of users to deploy it in production. 685 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 686 | # 687 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 688 | # started as cluster nodes can. In order to start a Redis instance as a 689 | # cluster node enable the cluster support uncommenting the following: 690 | # 691 | # cluster-enabled yes 692 | 693 | # Every cluster node has a cluster configuration file. This file is not 694 | # intended to be edited by hand. It is created and updated by Redis nodes. 695 | # Every Redis Cluster node requires a different cluster configuration file. 696 | # Make sure that instances running in the same system do not have 697 | # overlapping cluster configuration file names. 698 | # 699 | # cluster-config-file nodes-6379.conf 700 | 701 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 702 | # for it to be considered in failure state. 703 | # Most other internal time limits are multiple of the node timeout. 
704 | # 705 | # cluster-node-timeout 15000 706 | 707 | # A slave of a failing master will avoid to start a failover if its data 708 | # looks too old. 709 | # 710 | # There is no simple way for a slave to actually have a exact measure of 711 | # its "data age", so the following two checks are performed: 712 | # 713 | # 1) If there are multiple slaves able to failover, they exchange messages 714 | # in order to try to give an advantage to the slave with the best 715 | # replication offset (more data from the master processed). 716 | # Slaves will try to get their rank by offset, and apply to the start 717 | # of the failover a delay proportional to their rank. 718 | # 719 | # 2) Every single slave computes the time of the last interaction with 720 | # its master. This can be the last ping or command received (if the master 721 | # is still in the "connected" state), or the time that elapsed since the 722 | # disconnection with the master (if the replication link is currently down). 723 | # If the last interaction is too old, the slave will not try to failover 724 | # at all. 725 | # 726 | # The point "2" can be tuned by user. Specifically a slave will not perform 727 | # the failover if, since the last interaction with the master, the time 728 | # elapsed is greater than: 729 | # 730 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 731 | # 732 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 733 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 734 | # slave will not try to failover if it was not able to talk with the master 735 | # for longer than 310 seconds. 736 | # 737 | # A large slave-validity-factor may allow slaves with too old data to failover 738 | # a master, while a too small value may prevent the cluster from being able to 739 | # elect a slave at all. 
740 | # 741 | # For maximum availability, it is possible to set the slave-validity-factor 742 | # to a value of 0, which means, that slaves will always try to failover the 743 | # master regardless of the last time they interacted with the master. 744 | # (However they'll always try to apply a delay proportional to their 745 | # offset rank). 746 | # 747 | # Zero is the only value able to guarantee that when all the partitions heal 748 | # the cluster will always be able to continue. 749 | # 750 | # cluster-slave-validity-factor 10 751 | 752 | # Cluster slaves are able to migrate to orphaned masters, that are masters 753 | # that are left without working slaves. This improves the cluster ability 754 | # to resist to failures as otherwise an orphaned master can't be failed over 755 | # in case of failure if it has no working slaves. 756 | # 757 | # Slaves migrate to orphaned masters only if there are still at least a 758 | # given number of other working slaves for their old master. This number 759 | # is the "migration barrier". A migration barrier of 1 means that a slave 760 | # will migrate only if there is at least 1 other working slave for its master 761 | # and so forth. It usually reflects the number of slaves you want for every 762 | # master in your cluster. 763 | # 764 | # Default is 1 (slaves migrate only if their masters remain with at least 765 | # one slave). To disable migration just set it to a very large value. 766 | # A value of 0 can be set but is useful only for debugging and dangerous 767 | # in production. 768 | # 769 | # cluster-migration-barrier 1 770 | 771 | # By default Redis Cluster nodes stop accepting queries if they detect there 772 | # is at least an hash slot uncovered (no available node is serving it). 773 | # This way if the cluster is partially down (for example a range of hash slots 774 | # are no longer covered) all the cluster becomes, eventually, unavailable. 
775 | # It automatically returns available as soon as all the slots are covered again. 776 | # 777 | # However sometimes you want the subset of the cluster which is working, 778 | # to continue to accept queries for the part of the key space that is still 779 | # covered. In order to do so, just set the cluster-require-full-coverage 780 | # option to no. 781 | # 782 | # cluster-require-full-coverage yes 783 | 784 | # In order to setup your cluster make sure to read the documentation 785 | # available at http://redis.io web site. 786 | 787 | ################################## SLOW LOG ################################### 788 | 789 | # The Redis Slow Log is a system to log queries that exceeded a specified 790 | # execution time. The execution time does not include the I/O operations 791 | # like talking with the client, sending the reply and so forth, 792 | # but just the time needed to actually execute the command (this is the only 793 | # stage of command execution where the thread is blocked and can not serve 794 | # other requests in the meantime). 795 | # 796 | # You can configure the slow log with two parameters: one tells Redis 797 | # what is the execution time, in microseconds, to exceed in order for the 798 | # command to get logged, and the other parameter is the length of the 799 | # slow log. When a new command is logged the oldest one is removed from the 800 | # queue of logged commands. 801 | 802 | # The following time is expressed in microseconds, so 1000000 is equivalent 803 | # to one second. Note that a negative number disables the slow log, while 804 | # a value of zero forces the logging of every command. 805 | slowlog-log-slower-than 10000 806 | 807 | # There is no limit to this length. Just be aware that it will consume memory. 808 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 
809 | slowlog-max-len 128 810 | 811 | ################################ LATENCY MONITOR ############################## 812 | 813 | # The Redis latency monitoring subsystem samples different operations 814 | # at runtime in order to collect data related to possible sources of 815 | # latency of a Redis instance. 816 | # 817 | # Via the LATENCY command this information is available to the user that can 818 | # print graphs and obtain reports. 819 | # 820 | # The system only logs operations that were performed in a time equal or 821 | # greater than the amount of milliseconds specified via the 822 | # latency-monitor-threshold configuration directive. When its value is set 823 | # to zero, the latency monitor is turned off. 824 | # 825 | # By default latency monitoring is disabled since it is mostly not needed 826 | # if you don't have latency issues, and collecting data has a performance 827 | # impact, that while very small, can be measured under big load. Latency 828 | # monitoring can easily be enabled at runtime using the command 829 | # "CONFIG SET latency-monitor-threshold " if needed. 830 | latency-monitor-threshold 0 831 | 832 | ############################# EVENT NOTIFICATION ############################## 833 | 834 | # Redis can notify Pub/Sub clients about events happening in the key space. 835 | # This feature is documented at http://redis.io/topics/notifications 836 | # 837 | # For instance if keyspace events notification is enabled, and a client 838 | # performs a DEL operation on key "foo" stored in the Database 0, two 839 | # messages will be published via Pub/Sub: 840 | # 841 | # PUBLISH __keyspace@0__:foo del 842 | # PUBLISH __keyevent@0__:del foo 843 | # 844 | # It is possible to select the events that Redis will notify among a set 845 | # of classes. Every class is identified by a single character: 846 | # 847 | # K Keyspace events, published with __keyspace@__ prefix. 848 | # E Keyevent events, published with __keyevent@__ prefix. 
849 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 850 | # $ String commands 851 | # l List commands 852 | # s Set commands 853 | # h Hash commands 854 | # z Sorted set commands 855 | # x Expired events (events generated every time a key expires) 856 | # e Evicted events (events generated when a key is evicted for maxmemory) 857 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 858 | # 859 | # The "notify-keyspace-events" takes as argument a string that is composed 860 | # of zero or multiple characters. The empty string means that notifications 861 | # are disabled. 862 | # 863 | # Example: to enable list and generic events, from the point of view of the 864 | # event name, use: 865 | # 866 | # notify-keyspace-events Elg 867 | # 868 | # Example 2: to get the stream of the expired keys subscribing to channel 869 | # name __keyevent@0__:expired use: 870 | # 871 | # notify-keyspace-events Ex 872 | # 873 | # By default all notifications are disabled because most users don't need 874 | # this feature and the feature has some overhead. Note that if you don't 875 | # specify at least one of K or E, no events will be delivered. 876 | notify-keyspace-events "" 877 | 878 | ############################### ADVANCED CONFIG ############################### 879 | 880 | # Hashes are encoded using a memory efficient data structure when they have a 881 | # small number of entries, and the biggest entry does not exceed a given 882 | # threshold. These thresholds can be configured using the following directives. 883 | hash-max-ziplist-entries 512 884 | hash-max-ziplist-value 64 885 | 886 | # Lists are also encoded in a special way to save a lot of space. 887 | # The number of entries allowed per internal list node can be specified 888 | # as a fixed maximum size or a maximum number of elements. 
889 | # For a fixed maximum size, use -5 through -1, meaning: 890 | # -5: max size: 64 Kb <-- not recommended for normal workloads 891 | # -4: max size: 32 Kb <-- not recommended 892 | # -3: max size: 16 Kb <-- probably not recommended 893 | # -2: max size: 8 Kb <-- good 894 | # -1: max size: 4 Kb <-- good 895 | # Positive numbers mean store up to _exactly_ that number of elements 896 | # per list node. 897 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 898 | # but if your use case is unique, adjust the settings as necessary. 899 | list-max-ziplist-size -2 900 | 901 | # Lists may also be compressed. 902 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 903 | # the list to *exclude* from compression. The head and tail of the list 904 | # are always uncompressed for fast push/pop operations. Settings are: 905 | # 0: disable all list compression 906 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 907 | # going from either the head or tail" 908 | # So: [head]->node->node->...->node->[tail] 909 | # [head], [tail] will always be uncompressed; inner nodes will compress. 910 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 911 | # 2 here means: don't compress head or head->next or tail->prev or tail, 912 | # but compress all nodes between them. 913 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 914 | # etc. 915 | list-compress-depth 0 916 | 917 | # Sets have a special encoding in just one case: when a set is composed 918 | # of just strings that happen to be integers in radix 10 in the range 919 | # of 64 bit signed integers. 920 | # The following configuration setting sets the limit in the size of the 921 | # set in order to use this special memory saving encoding. 922 | set-max-intset-entries 512 923 | 924 | # Similarly to hashes and lists, sorted sets are also specially encoded in 925 | # order to save a lot of space. 
This encoding is only used when the length and 926 | # elements of a sorted set are below the following limits: 927 | zset-max-ziplist-entries 128 928 | zset-max-ziplist-value 64 929 | 930 | # HyperLogLog sparse representation bytes limit. The limit includes the 931 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 932 | # this limit, it is converted into the dense representation. 933 | # 934 | # A value greater than 16000 is totally useless, since at that point the 935 | # dense representation is more memory efficient. 936 | # 937 | # The suggested value is ~ 3000 in order to have the benefits of 938 | # the space efficient encoding without slowing down too much PFADD, 939 | # which is O(N) with the sparse encoding. The value can be raised to 940 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 941 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 942 | hll-sparse-max-bytes 3000 943 | 944 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 945 | # order to help rehashing the main Redis hash table (the one mapping top-level 946 | # keys to values). The hash table implementation Redis uses (see dict.c) 947 | # performs a lazy rehashing: the more operation you run into a hash table 948 | # that is rehashing, the more rehashing "steps" are performed, so if the 949 | # server is idle the rehashing is never complete and some more memory is used 950 | # by the hash table. 951 | # 952 | # The default is to use this millisecond 10 times every second in order to 953 | # actively rehash the main dictionaries, freeing memory when possible. 954 | # 955 | # If unsure: 956 | # use "activerehashing no" if you have hard latency requirements and it is 957 | # not a good thing in your environment that Redis can reply from time to time 958 | # to queries with 2 milliseconds delay. 
959 | # 960 | # use "activerehashing yes" if you don't have such hard requirements but 961 | # want to free memory asap when possible. 962 | activerehashing yes 963 | 964 | # The client output buffer limits can be used to force disconnection of clients 965 | # that are not reading data from the server fast enough for some reason (a 966 | # common reason is that a Pub/Sub client can't consume messages as fast as the 967 | # publisher can produce them). 968 | # 969 | # The limit can be set differently for the three different classes of clients: 970 | # 971 | # normal -> normal clients including MONITOR clients 972 | # slave -> slave clients 973 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 974 | # 975 | # The syntax of every client-output-buffer-limit directive is the following: 976 | # 977 | # client-output-buffer-limit 978 | # 979 | # A client is immediately disconnected once the hard limit is reached, or if 980 | # the soft limit is reached and remains reached for the specified number of 981 | # seconds (continuously). 982 | # So for instance if the hard limit is 32 megabytes and the soft limit is 983 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 984 | # if the size of the output buffers reach 32 megabytes, but will also get 985 | # disconnected if the client reaches 16 megabytes and continuously overcomes 986 | # the limit for 10 seconds. 987 | # 988 | # By default normal clients are not limited because they don't receive data 989 | # without asking (in a push way), but just after a request, so only 990 | # asynchronous clients may create a scenario where data is requested faster 991 | # than it can read. 992 | # 993 | # Instead there is a default limit for pubsub and slave clients, since 994 | # subscribers and slaves receive data in a push fashion. 995 | # 996 | # Both the hard or the soft limit can be disabled by setting them to zero. 
997 | client-output-buffer-limit normal 0 0 0 998 | client-output-buffer-limit slave 256mb 64mb 60 999 | client-output-buffer-limit pubsub 32mb 8mb 60 1000 | 1001 | # Redis calls an internal function to perform many background tasks, like 1002 | # closing connections of clients in timeout, purging expired keys that are 1003 | # never requested, and so forth. 1004 | # 1005 | # Not all tasks are performed with the same frequency, but Redis checks for 1006 | # tasks to perform according to the specified "hz" value. 1007 | # 1008 | # By default "hz" is set to 10. Raising the value will use more CPU when 1009 | # Redis is idle, but at the same time will make Redis more responsive when 1010 | # there are many keys expiring at the same time, and timeouts may be 1011 | # handled with more precision. 1012 | # 1013 | # The range is between 1 and 500, however a value over 100 is usually not 1014 | # a good idea. Most users should use the default of 10 and raise this up to 1015 | # 100 only in environments where very low latency is required. 1016 | hz 10 1017 | 1018 | # When a child rewrites the AOF file, if the following option is enabled 1019 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1020 | # in order to commit the file to the disk more incrementally and avoid 1021 | # big latency spikes. 
1022 | aof-rewrite-incremental-fsync yes 1023 | -------------------------------------------------------------------------------- /redis/redis_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | mkdir -p /data/rpm 3 | cd /data/rpm 4 | [ -f /data/rpm/redis-3.2.0.tar.gz ] || wget http://download.redis.io/releases/redis-3.2.0.tar.gz 5 | tar zxvf redis-3.2.0.tar.gz 6 | mkdir -p /data/server && mv redis-3.2.0 /data/server/redis # fix: /data/server may not exist yet (README: missing dirs are created) 7 | cd /data/server/redis 8 | make 9 | cp /data/scripts/redis/files/redis /etc/init.d 10 | rm -f /data/server/redis/redis.conf # fix: target is a plain file, -r was unnecessary/dangerous 11 | cp /data/scripts/redis/files/redis.conf /data/server/redis/redis.conf 12 | chmod +x /etc/init.d/redis 13 | ln -sf /data/server/redis/src/redis-cli /usr/bin/redis-cli # -f so a re-run does not fail on an existing link 14 | chkconfig redis on 15 | service redis start 16 | service redis status -------------------------------------------------------------------------------- /svn/command.txt: -------------------------------------------------------------------------------- 1 | svn import ansible file:///data/svn/config/op.hb2.ali.prod.ipusoft.com/ansible -m "import ansible" 2 | svn import scripts file:///data/svn/config/op.hb2.ali.prod.ipusoft.com/scripts -m "import scripts" 3 | svn co file:///data/svn/config/op.hb2.ali.prod.ipusoft.com/scripts scripts 4 | svn co file:///data/svn/config/op.hb2.ali.prod.ipusoft.com/ansible ansible 5 | cp post-commit pre-commit /data/svn/config/hooks/ 6 | cd /data/svn/config/hooks/ 7 | chmod +x post-commit pre-commit -------------------------------------------------------------------------------- /svn/files/authz: -------------------------------------------------------------------------------- 1 | [groups] 2 | admin=admin 3 | -------------------------------------------------------------------------------- /svn/files/ext.txt: -------------------------------------------------------------------------------- 1 | storage/* 2 | .env 3 | .svn 4 | 
-------------------------------------------------------------------------------- /svn/files/passwd: -------------------------------------------------------------------------------- 1 | [users] 2 | admin = admin123 3 | -------------------------------------------------------------------------------- /svn/files/post-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | REPOS="$1" 4 | REV="$2" 5 | 6 | export LANG=zh_CN.UTF-8 7 | PRODIR=/usr/bin # fix: was PRODIR=$PRODIR/ -- $PRODIR is undefined, so $SVN resolved to "/svn" and the update never ran 8 | SVN=$PRODIR/svn 9 | WEB=/data/www/website_v2 10 | LOG=/data/svn/auto_svn.log 11 | SVNLOOK=$PRODIR/svnlook 12 | #update the code from the SVN 13 | sleep 1 14 | $SVN update $WEB 15 | 16 | -------------------------------------------------------------------------------- /svn/files/pre-commit: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # reject commits whose log message is too short (fix: removed duplicate shebang line) 3 | REPOS="$1" 4 | TXN="$2" 5 | SVNLOOK=/usr/bin/svnlook 6 | # check that logmessage contains at least 10 alphanumeric characters 7 | LOGMSG=`$SVNLOOK log -t "$TXN" "$REPOS" | tr -d ' ' | wc -c` 8 | if [ "$LOGMSG" -lt 8 ]; 9 | then 10 | echo -e "\n至少输入7个注释字符!" 1>&2 11 | exit 1 12 | fi 13 | -------------------------------------------------------------------------------- /svn/files/svn: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # chkconfig: 345 10 90 3 | # description: svn server 4 | . /etc/rc.d/init.d/functions 5 | 6 | 7 | exec=/usr/bin/svnserve 8 | prog=svnserve 9 | logfile=/data/svn/svn.log 10 | pidfile=/data/svn/svn.pid 11 | [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog 12 | 13 | lockfile=/var/lock/subsys/$prog 14 | 15 | start() { 16 | [ -x $exec ] || exit 5 17 | [ -f $config ] || exit 6 18 | echo -n $"Starting $prog: " 19 | $exec -d -r /data/svn --log-file $logfile --pid-file $pidfile 20 | action "" /bin/true 21 | retval=$?
22 | echo 23 | if [ $retval -eq 0 ]; then 24 | touch $lockfile || retval=4 25 | fi 26 | return $retval 27 | } 28 | 29 | stop() { 30 | echo -n $"Stopping $prog: " 31 | killproc -p ${pidfile} $prog # killproc comes from /etc/rc.d/init.d/functions sourced above 32 | retval=$? 33 | echo 34 | [ $retval -eq 0 ] && rm -f $lockfile 35 | return $retval 36 | } 37 | 38 | restart() { 39 | stop 40 | start 41 | } 42 | 43 | reload() { 44 | restart 45 | } 46 | 47 | force_reload() { 48 | restart 49 | } 50 | 51 | rh_status() { 52 | # run checks to determine if the service is running or use generic status 53 | status -p ${pidfile} $prog 54 | } 55 | 56 | rh_status_q() { 57 | rh_status >/dev/null 2>&1 58 | } 59 | 60 | case "$1" in 61 | start) 62 | rh_status_q && exit 0 63 | $1 # "$1" is the literal word "start" here, so this invokes the start() function 64 | ;; 65 | stop) 66 | rh_status_q || exit 0 67 | $1 # likewise, invokes stop() 68 | ;; 69 | restart) 70 | $1 71 | ;; 72 | reload) 73 | rh_status_q || exit 7 74 | $1 75 | ;; 76 | force-reload) 77 | force_reload 78 | ;; 79 | status) 80 | rh_status 81 | ;; 82 | condrestart|try-restart) 83 | rh_status_q || exit 0 84 | restart 85 | ;; 86 | *) 87 | echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" 88 | exit 2 89 | esac 90 | exit $?
91 | -------------------------------------------------------------------------------- /svn/files/svnserve.conf: -------------------------------------------------------------------------------- 1 | [general] 2 | anon-access = none 3 | auth-access = write 4 | password-db = /data/svn/conf/passwd 5 | authz-db = /data/svn/conf/authz 6 | realm = website_v2 7 | 8 | [sasl] 9 | -------------------------------------------------------------------------------- /svn/svn_create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | REPO=$1 3 | error() 4 | { 5 | echo "$1" 1>&2 6 | exit 1 7 | } 8 | if [ -z "$REPO" ];then # fix: quote $REPO so the test is well-formed with odd/multi-word arguments (SC2086) 9 | error "shell script exit,please give a repo name!example: ./svn_create.sh repo_name" 10 | fi 11 | SVN_CONF=/data/scripts/svn/files/svnserve.conf 12 | svnadmin create /data/svn/"$REPO" 13 | sed "s/website_v2/$REPO/g" $SVN_CONF > /data/svn/"$REPO"/conf/svnserve.conf 14 | echo "[$REPO:/]" >> /data/svn/conf/authz 15 | echo "@admin = rw" >> /data/svn/conf/authz 16 | echo "* =" >> /data/svn/conf/authz 17 | #cp -f /data/shell/src/pre-commit /data/svn/$REPO/hooks/. 18 | #sed "s/website_v2/$REPO/g" /data/shell/src/post-commit > /data/svn/$REPO/hooks/post-commit 19 | #chmod +x /data/svn/$REPO/hooks/post-commit 20 | #mkdir -p /data/www/$REPO 21 | #svn co file:///data/svn/$REPO /data/www/$REPO 22 | #mkdir -p /data/www/$REPO/trunk 23 | #mkdir -p /data/www/$REPO/brunches 24 | #mkdir -p /data/www/$REPO/tags 25 | #svn add /data/www/$REPO/* 26 | #svn ci /data/www/$REPO/* -m 'create brunches' 27 | echo "create svn $REPO repo finished."
28 | -------------------------------------------------------------------------------- /zookeeper/zk_install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | yum install -y java-1.6.0 java-1.6.0-openjdk-devel 3 | mkdir -p /data/server && cp /nm1_share/zookeeper-3.4.6.tar.gz /data/server/ # ensure /data/server exists (README: missing dirs are created) 4 | cd /data/server/ 5 | tar zxvf zookeeper-3.4.6.tar.gz 6 | cd zookeeper-3.4.6 7 | cp conf/zoo_sample.cfg conf/zoo.cfg # fix: we already cd'd into zookeeper-3.4.6; the old nested path never existed, so zoo.cfg was never created 8 | /data/server/zookeeper-3.4.6/bin/zkServer.sh start 9 | echo "/data/server/zookeeper-3.4.6/bin/zkServer.sh start" >> /etc/rc.local 10 | --------------------------------------------------------------------------------