├── README.md
├── canal
│   ├── docker-compose.yml
│   ├── run_cannal_admin.sh
│   └── run_cannal_server.sh
├── curator
│   └── curator使用.md
├── docker_install
│   ├── docker-compose
│   │   └── docker-compose-Linux-x86_64_1.5.2
│   ├── docker-io-1.7.1-2.el6.x86_64.rpm
│   ├── docker_install.sh
│   ├── epel-release-6-8.noarch.rpm
│   ├── libcgroup-0.40.rc1-23.el6.x86_64.rpm
│   ├── libcgroup-0.40.rc1-24.el6_9.x86_64.rpm
│   ├── lua-alt-getopt-0.7.0-1.el6.noarch.rpm
│   ├── lua-filesystem-1.4.2-1.el6.x86_64.rpm
│   ├── lua-lxc-1.0.11-1.el6.x86_64.rpm
│   ├── lxc-1.0.11-1.el6.x86_64.rpm
│   └── lxc-libs-1.0.11-1.el6.x86_64.rpm
├── elk_ver1
│   ├── README.md
│   ├── master
│   │   └── docker-compose.yml
│   ├── slave1
│   │   └── docker-compose.yml
│   └── slave2
│       └── docker-compose.yml
├── metricbeat
│   ├── metricbeat.yml
│   └── system.yml
├── mysql
│   ├── mysqld.cnf
│   └── 启动mysql.md
├── nessus
│   └── docker安装Nessus.md
├── nginx
│   └── docker-compose.yml
├── ntopng
│   ├── command.md
│   ├── edited
│   │   ├── Dockerfile
│   │   └── redis.conf
│   └── origin
│       └── Dockerfile
└── snort
    ├── centos6
    │   ├── Dockerfile
    │   ├── docker-compose.yml
    │   └── rsyslog.conf
    ├── centos7
    │   ├── Dockerfile
    │   └── rsyslog.conf
    └── command.txt
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
## Dockerfiles I have used myself
### Overview
- curator
  Scheduled cleanup of ES indices
- docker_install
  One-click installation of docker and docker-compose
- elk_ver1
  ELK cluster
- nessus
  Vulnerability scanning
- nginx
  An example nginx service
- ntopng
  ntopng traffic monitoring
- snort
  snort intrusion detection
--------------------------------------------------------------------------------
/canal/docker-compose.yml:
--------------------------------------------------------------------------------
version: '2'
services:
  canal-server:
    image: canal/canal-server:v1.1.4
    container_name: canal-server
    restart: always
    ports:
      - 11110:11110
      - 11111:11111
      - 11112:11112
    environment:
      - canal.instance.mysql.slaveId=1234
      - canal.instance.master.address=192.168.1.36:3306
      - canal.instance.dbUsername=canal
      - canal.instance.dbPassword=canal
      - canal.instance.connectionCharset=UTF-8
      - canal.instance.filter.regex=.*\..*
    volumes:
      - ./canal-logs:/home/admin/canal-server/logs
--------------------------------------------------------------------------------
/canal/run_cannal_admin.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# install docker
dockercount=`rpm -qa|grep docker|wc -l`
if [ "$dockercount" -eq 0 ]; then
    cd /opt/docker_install
    rpm -Uvh epel-release-6-8.noarch.rpm
    rpm -ivh lxc-libs-1.0.11-1.el6.x86_64.rpm
    rpm -ivh lua-filesystem-1.4.2-1.el6.x86_64.rpm
    rpm -ivh lua-alt-getopt-0.7.0-1.el6.noarch.rpm
    rpm -ivh lua-lxc-1.0.11-1.el6.x86_64.rpm
    rpm -ivh lxc-1.0.11-1.el6.x86_64.rpm
    rpm -ivh libcgroup-0.40.rc1-24.el6_9.x86_64.rpm
    rpm -ivh docker-io-1.7.1-2.el6.x86_64.rpm
    chkconfig docker on
    service docker start
fi

# check whether the docker service is running
dockerpid=`ps -ef|grep /usr/bin/docker|grep -v grep|awk '{print $2}'`
if [ "$dockerpid" != "" ]; then
    echo "docker is running!"
else
    service docker start
fi

# install docker-compose
if which docker-compose >/dev/null 2>&1; then
    echo "docker-compose exists!"
else
    echo "nope, no docker-compose installed."
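    # fall back to the offline binary bundled in ./docker-compose: copy it onto the PATH and make it executable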
    cp ./docker-compose/docker-compose-Linux-x86_64_1.5.2 /usr/bin/docker-compose
    chmod +x /usr/bin/docker-compose
    docker-compose --version
fi
--------------------------------------------------------------------------------
/canal/run_cannal_server.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# install docker
dockercount=`rpm -qa|grep docker|wc -l`
if [ "$dockercount" -eq 0 ]; then
    cd /opt/docker_install
    rpm -Uvh epel-release-6-8.noarch.rpm
    rpm -ivh lxc-libs-1.0.11-1.el6.x86_64.rpm
    rpm -ivh lua-filesystem-1.4.2-1.el6.x86_64.rpm
    rpm -ivh lua-alt-getopt-0.7.0-1.el6.noarch.rpm
    rpm -ivh lua-lxc-1.0.11-1.el6.x86_64.rpm
    rpm -ivh lxc-1.0.11-1.el6.x86_64.rpm
    rpm -ivh libcgroup-0.40.rc1-24.el6_9.x86_64.rpm
    rpm -ivh docker-io-1.7.1-2.el6.x86_64.rpm
    chkconfig docker on
    service docker start
fi

# check whether the docker service is running
dockerpid=`ps -ef|grep /usr/bin/docker|grep -v grep|awk '{print $2}'`
if [ "$dockerpid" != "" ]; then
    echo "docker is running!"
else
    service docker start
fi

# install docker-compose
if which docker-compose >/dev/null 2>&1; then
    echo "docker-compose exists!"
else
    echo "nope, no docker-compose installed."
    cp ./docker-compose/docker-compose-Linux-x86_64_1.5.2 /usr/bin/docker-compose
    chmod +x /usr/bin/docker-compose
    docker-compose --version
fi
--------------------------------------------------------------------------------
/curator/curator使用.md:
--------------------------------------------------------------------------------
#### Introduction
Elasticsearch Curator helps you curate, or manage, your Elasticsearch indices and snapshots by:
- obtaining the full list of indices (or snapshots) from the cluster as the actionable list
- iterating through a list of user-defined filters to progressively remove indices (or snapshots) from this actionable list as needed
- performing various actions on the items that remain in the user-defined action list (including Create Index, Delete Indices, Reindex, Snapshot, etc.)
#### Usage
The curator command-line syntax is:
````
curator [--config CONFIG.YML] [--dry-run] ACTION_FILE.YML
````
- CONFIG.YML: basic ES connection settings
- ACTION_FILE.YML: the concrete action script to execute
- --dry-run: curator simulates the actions in ACTION_FILE.YML as closely as possible without actually making any changes
A concrete example follows:
curator.yml:
````
client:
  hosts:
    - 192.168.1.188
  port: 9200
  url_prefix:
  use_ssl: False
  certificate:
  client_cert:
  client_key:
  ssl_no_validate: False
  http_auth:
  timeout: 30
  master_only: False

logging:
  loglevel: INFO
  logfile:
  logformat: default
  blacklist: ['elasticsearch', 'urllib3']
````
deleteIndecies.yml:
````
actions:
  1:
    action: delete_indices
    description: >-
      Delete indices older than 120 days (based on index name), for nessus-
      prefixed indices. Ignore the error if the filter does not result in an
      actionable list of indices (ignore_empty_list) and exit cleanly.
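    # ignore_empty_list below makes the run exit cleanly instead of erroring when the filters match nothing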
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: prefix
      value: nessus-
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: 120
````

**Finally, run the following command (remember to remove --dry-run in real use)**

```
docker run --rm -v /opt/curator-docker:/curator-docker fang/curator --dry-run /curator-docker/deleteIndecies.yml
```

For comparison, the equivalent non-docker command is:
```
curator --config /opt/elasticsearch-curator/curator.yml --dry-run /opt/elasticsearch-curator/deleteIndecies.yml
```

For other features, see the [official documentation](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/index.html) or the [usage examples](https://www.elastic.co/guide/en/elasticsearch/client/curator/current/ex_delete_indices.html).

--------------------------------------------------------------------------------
/docker_install/docker-compose/docker-compose-Linux-x86_64_1.5.2:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/docker-compose/docker-compose-Linux-x86_64_1.5.2
--------------------------------------------------------------------------------
/docker_install/docker-io-1.7.1-2.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/docker-io-1.7.1-2.el6.x86_64.rpm
--------------------------------------------------------------------------------
/docker_install/docker_install.sh:
--------------------------------------------------------------------------------
#!/bin/bash
# install docker
dockercount=`rpm -qa|grep docker|wc -l`
if [ "$dockercount" -eq 0 ]; then
    cd /opt/docker_install
    rpm -Uvh epel-release-6-8.noarch.rpm
    rpm -ivh lxc-libs-1.0.11-1.el6.x86_64.rpm
    rpm -ivh lua-filesystem-1.4.2-1.el6.x86_64.rpm
    rpm -ivh lua-alt-getopt-0.7.0-1.el6.noarch.rpm
    rpm -ivh lua-lxc-1.0.11-1.el6.x86_64.rpm
    rpm -ivh lxc-1.0.11-1.el6.x86_64.rpm
    rpm -ivh libcgroup-0.40.rc1-24.el6_9.x86_64.rpm
    rpm -ivh docker-io-1.7.1-2.el6.x86_64.rpm
    chkconfig docker on
    service docker start
fi

# check whether the docker service is running
dockerpid=`ps -ef|grep /usr/bin/docker|grep -v grep|awk '{print $2}'`
if [ "$dockerpid" != "" ]; then
    echo "docker is running!"
else
    service docker start
fi

# install docker-compose
if which docker-compose >/dev/null 2>&1; then
    echo "docker-compose exists!"
else
    echo "nope, no docker-compose installed."
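    # install the offline docker-compose 1.5.2 binary shipped alongside this script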
    cp ./docker-compose/docker-compose-Linux-x86_64_1.5.2 /usr/bin/docker-compose
    chmod +x /usr/bin/docker-compose
    docker-compose --version
fi
--------------------------------------------------------------------------------
/docker_install/epel-release-6-8.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/epel-release-6-8.noarch.rpm
--------------------------------------------------------------------------------
/docker_install/libcgroup-0.40.rc1-23.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/libcgroup-0.40.rc1-23.el6.x86_64.rpm
--------------------------------------------------------------------------------
/docker_install/libcgroup-0.40.rc1-24.el6_9.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/libcgroup-0.40.rc1-24.el6_9.x86_64.rpm
--------------------------------------------------------------------------------
/docker_install/lua-alt-getopt-0.7.0-1.el6.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/lua-alt-getopt-0.7.0-1.el6.noarch.rpm
--------------------------------------------------------------------------------
/docker_install/lua-filesystem-1.4.2-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/lua-filesystem-1.4.2-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/docker_install/lua-lxc-1.0.11-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/lua-lxc-1.0.11-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/docker_install/lxc-1.0.11-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/lxc-1.0.11-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/docker_install/lxc-libs-1.0.11-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/gui66497/dockerfiles/08d2decb54a4c95903635209174cace1da392617/docker_install/lxc-libs-1.0.11-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/elk_ver1/README.md:
--------------------------------------------------------------------------------
ELK docker-compose files in the version 1 compose file format
docker version: 1.7.1
docker-compose version: 1.5.2
ELK version: 6.2.2
--------------------------------------------------------------------------------
/elk_ver1/master/docker-compose.yml:
--------------------------------------------------------------------------------
es1:
  image: zzjz/es:6.2.2
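  # compose file format v1, as used by docker-compose 1.5.2: services at the top level, no version:/services: keys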
  container_name: es
  restart: always
  ports:
    - 9200:9200
    - 9300:9300
  environment:
    - cluster.name=soc
    - bootstrap.memory_lock=true
    - bootstrap.system_call_filter=false
    - "ES_JAVA_OPTS=-Xms4G -Xmx4G"
  ulimits:
    memlock:
      soft: -1
      hard: -1
    nofile:
      soft: 65536
      hard: 65536
  volumes:
    - ./x-pack-6.2.2.zip:/x-pack-6.2.2.zip
    - ./config/es1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    - ./datas/es/:/usr/share/elasticsearch/data

kibana:
  image: zzjz/kibana:6.2.2
  container_name: kibana
  restart: always
  ports:
    - 5601:5601
  environment:
    - "ELASTICSEARCH_URL=http://es1:9200"
  volumes:
    - ./x-pack-6.2.2.zip:/x-pack-6.2.2.zip
    - ./kibana/optimize/:/usr/share/kibana/optimize/
    - ./kibana/src/:/usr/share/kibana/src/
    - ./kibana/ui_framework/:/usr/share/kibana/ui_framework/
    - ./kibana/fonts:/usr/share/fonts/chinese
  links:
    - es1

logstash:
  image: zzjz/logstash:6.2.2
  container_name: logstash
  restart: always
  privileged: true
  user: root
  ports:
    - 10010:10010
    - 10010:10010/udp
    - 10011:10011
    - 10012:10012
    - 10014:10014
    - 10015:10015
    - 5055:5055
    - 7777:7777
    - 3456:3456
    - 5140:5140
    - 5140:5140/udp
    - 5047:5047
    - 7435:7435
    - 5144:5144/udp
    - 5679:5679/udp

  volumes:
    - ./x-pack-6.2.2.zip:/x-pack-6.2.2.zip
    - ./mysql-connector-java-5.1.42.jar:/mysql-connector-java-5.1.42.jar
    - ./config/logstash.yml:/usr/share/logstash/config/logstash.yml
    - ./logstash/scripts:/scripts/
    - ./logstash/plugin:/plugin/
    - ./logstash/lastrun:/usr/share/logstash/lastrun/
  links:
    - es1

metricbeat:
  image: metricbeat:6.2.2
  container_name: metricbeat
  restart: always
  hostname: master
  environment:
    - "system.hostfs=/hostfs"
  volumes:
    - /proc:/hostfs/proc:ro
    - /cgroup:/hostfs/cgroup:ro
    - /:/hostfs:ro
    - ./config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml
    - ./config/system.yml:/usr/share/metricbeat/modules.d/system.yml
  net: host
--------------------------------------------------------------------------------
/elk_ver1/slave1/docker-compose.yml:
--------------------------------------------------------------------------------
es1:
  image: zzjz/es:6.2.2
  container_name: es
  restart: always
  ports:
    - 9200:9200
    - 9300:9300
  environment:
    - cluster.name=soc
    - bootstrap.memory_lock=true
    - bootstrap.system_call_filter=false
    - "ES_JAVA_OPTS=-Xms4G -Xmx4G"
  ulimits:
    memlock:
      soft: -1
      hard: -1
    nofile:
      soft: 65536
      hard: 65536
  volumes:
    - ./x-pack-6.2.2.zip:/x-pack-6.2.2.zip
    - ./config/es1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    - ./datas/es/:/usr/share/elasticsearch/data

metricbeat:
  image: metricbeat:6.2.2
  container_name: metricbeat
  restart: always
  hostname: datanode2
  environment:
    - "system.hostfs=/hostfs"
  volumes:
    - /proc:/hostfs/proc:ro
    - /cgroup:/hostfs/cgroup:ro
    - /:/hostfs:ro
    - ./config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml
    - ./config/system.yml:/usr/share/metricbeat/modules.d/system.yml
  net: host

heartbeat:
  image: heartbeat:6.2.2
  container_name: heartbeat
  restart: always
  hostname: datanode2
  volumes:
    - ./config/heartbeat.yml:/usr/share/heartbeat/heartbeat.yml
--------------------------------------------------------------------------------
/elk_ver1/slave2/docker-compose.yml:
--------------------------------------------------------------------------------
es1:
  image: zzjz/es:6.2.2
  container_name: es
  restart: always
  ports:
    - 9200:9200
    - 9300:9300
  environment:
    - cluster.name=soc
    - bootstrap.memory_lock=true
    - bootstrap.system_call_filter=false
    - "ES_JAVA_OPTS=-Xms4G -Xmx4G"
  ulimits:
    memlock:
      soft: -1
      hard: -1
    nofile:
      soft: 65536
      hard: 65536
  volumes:
    - ./x-pack-6.2.2.zip:/x-pack-6.2.2.zip
    - ./config/es1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
    - ./datas/es/:/usr/share/elasticsearch/data

metricbeat:
  image: metricbeat:6.2.2
  container_name: metricbeat
  restart: always
  hostname: datanode1
  environment:
    - "system.hostfs=/hostfs"
  volumes:
    - /proc:/hostfs/proc:ro
    - /cgroup:/hostfs/cgroup:ro
    - /:/hostfs:ro
    - ./config/metricbeat.yml:/usr/share/metricbeat/metricbeat.yml
    - ./config/system.yml:/usr/share/metricbeat/modules.d/system.yml
  net: host

packetbeat:
  image: packetbeat:6.2.2
  container_name: packetbeat
  restart: always
  hostname: datanode1
  cap_add:
    - NET_ADMIN
  volumes:
    - ./config/packetbeat.yml:/usr/share/packetbeat/packetbeat.yml
  net: host
--------------------------------------------------------------------------------
/metricbeat/metricbeat.yml:
--------------------------------------------------------------------------------
###################### Metricbeat Configuration Example #######################

# This file is an example configuration file highlighting only the most common
# options. The metricbeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/metricbeat/index.html

#========================== Modules configuration ============================

metricbeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml

  # Set to true to enable config reloading
  reload.enabled: false

  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================

setup.template.settings:
  index.number_of_shards: 1
  index.codec: best_compression
  #_source.enabled: false

#================================ General =====================================

# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:

# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]

# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging


#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here, or by using the `-setup` CLI flag or the `setup` command.
#setup.dashboards.enabled: false

# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:

#============================== Kibana =====================================

# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:

  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "datanode1:5601"

#============================= Elastic Cloud ==================================

# These settings simplify using metricbeat with the Elastic Cloud (https://cloud.elastic.co/).

# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:

# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:

#================================ Outputs =====================================

# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
#output.elasticsearch:
  # Array of hosts to connect to.
  #hosts: ["192.168.1.216:9200"]

  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
output.logstash:
  # The Logstash hosts
  hosts: ["datanode1:5055"]

  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]

  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"

  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Logging =====================================

# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug

# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]

#============================== Xpack Monitoring ===============================
# metricbeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.

# Set to true to enable the monitoring reporter.
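# Enabled here: the metrics go to the monitoring cluster configured under xpack.monitoring.elasticsearch below.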
xpack.monitoring.enabled: true

# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well. Any setting that is not set is
# automatically inherited from the Elasticsearch output configuration, so if you
# have the Elasticsearch output configured, you can simply uncomment the
# following line.
xpack.monitoring.elasticsearch:
  hosts: ["datanode1:9200"]
  username: "elastic"
  password: "zzjz1234"
--------------------------------------------------------------------------------
/metricbeat/system.yml:
--------------------------------------------------------------------------------
- module: system
  period: 10s
  metricsets:
    - cpu
    - load
    - memory
    - network
    - process
    - process_summary
    #- core
    #- diskio
    - socket
  processes: ['.*']
  process.include_top_n:
    by_cpu: 5      # include top 5 processes by CPU
    by_memory: 5   # include top 5 processes by memory
  fields:
    ip: "10.1.242.78"

- module: system
  period: 1m
  metricsets:
    - filesystem
    - fsstat
  processors:
  - drop_event.when.regexp:
      system.filesystem.mount_point: '^/(sys|cgroup|proc|dev|etc|host|lib)($|/)'
  fields:
    ip: "10.1.242.78"

- module: system
  period: 15m
  metricsets:
    - uptime
  fields:
    ip: "10.1.242.78"
--------------------------------------------------------------------------------
/mysql/mysqld.cnf:
--------------------------------------------------------------------------------
# Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0,
# as published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an additional
# permission to link the program and your derivative works with the
# separately licensed software that they have included with MySQL.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

#
# The MySQL Server configuration file.
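# Note: log-bin/binlog-format=ROW/server_id below enable row-based binary logging (needed e.g. by canal).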
#
# For explanations see
# http://dev.mysql.com/doc/mysql/en/server-system-variables.html

[mysqld]
pid-file        = /var/run/mysqld/mysqld.pid
socket          = /var/run/mysqld/mysqld.sock
datadir         = /var/lib/mysql
#log-error      = /var/log/mysql/error.log
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0

log_bin_trust_function_creators = 1

log-bin=mysql-bin
binlog-format=ROW
server_id=1
--------------------------------------------------------------------------------
/mysql/启动mysql.md:
--------------------------------------------------------------------------------
Startup command
```
docker run --name mysql5.6 --restart always -p 3306:3306 -e MYSQL_ROOT_PASSWORD=root \
-v /home/soc/mysql/data:/var/lib/mysql -d mysql:5.6 --lower_case_table_names=1
```
Parameter notes
- --name mysql5.6: name the running container
- --restart always: restart the container automatically after it exits unexpectedly
- -p 3306:3306: map host port 3306 to container port 3306
- -e MYSQL_ROOT_PASSWORD=root: set the mysql root password; this parameter is required
- -v /home/soc/mysql/data:/var/lib/mysql: persist mysql data by mounting the host directory /home/soc/mysql/data onto the container directory /var/lib/mysql
- --lower_case_table_names=1: make table names case-insensitive

docker-compose version
```
version: '3'
services:
  mysql:
    image: mysql:5.6
    container_name: mysql
    restart: always
    ports:
      - 3306:3306
    command:
      --lower_case_table_names=1
    volumes:
      - /home/soc/mysql/data:/var/lib/mysql
    environment:
      MYSQL_ROOT_PASSWORD: root
```
--------------------------------------------------------------------------------
/nessus/docker安装Nessus.md:
--------------------------------------------------------------------------------
* Run the following commands to import the nessus image
```
docker load < nessus7.0.3.tar   //import into the local docker
docker tag XX nessus:7.0.3      //set the tag (XX is the ID of the image just loaded)
```
* Because version 6.11.1 only returns scan results for a single machine, version 7.0.3 is recommended; with it, results can likewise be retrieved through a python script
* Once the image is loaded, start the container
```
docker run -d -p 8834:8834 --restart=always --name nessus sometheycallme/docker-nessus
```
* Visit http://www.tenable.com/products/nessus-home and enter an email address to obtain an activation code (a [disposable mailbox](http://24mail.chacuo.net/) can be used here)
* Open https://ip:8834 in a browser to reach the Nessus web UI; choose the offline activation method, which produces a challenge code
* Then visit https://plugins.nessus.org/v2/offline.php and enter the challenge code just obtained together with the activation code requested by email (note that each activation code can only be used once)
* On success you receive a license and a plugins package (all-2.0.tar.gz, 199MB); enter the license in the browser to activate
* Copy the downloaded all-2.0.tar.gz to the Nessus server on the internal network, then log in to Nessus → Setting → Software Update and click Manual Software Update in the top right corner
* Choose "Upload your own plugin archive", click next, select the uploaded all-2.0.tar.gz, and wait a while for the upgrade to finish. Watch the CPU usage: it runs high while plugins are being upgraded, and once it drops back down the upgrade is complete.

References: [Nessus offline installation and plugin upgrade](https://www.jianshu.com/p/6a1ec52d216a) | [nessus-docker](https://github.com/fuku2014/nessus-docker)
--------------------------------------------------------------------------------
/nginx/docker-compose.yml:
--------------------------------------------------------------------------------
web:
  image: nginx
  container_name: nsaweb
  volumes:
    - ./nsaweb:/html/nsaweb
    - ./nginx.conf:/etc/nginx/nginx.conf
  ports:
    - "6080:6080"
  restart: always
--------------------------------------------------------------------------------
/ntopng/command.md:
--------------------------------------------------------------------------------
### Differences
The origin version is completely unmodified; the edited version changes the port of the redis service it depends on from the default 6379 to 6377. If security is enabled on ES, append the user:password credentials at the end of the -F string.
### Commands
#### origin
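Runs against the unmodified image; ntopng talks to the bundled redis on its default port 6379: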
```
docker run --net=host -t -p 3000:3000 --restart=always --name ntopng fang/ntopng \
-i eth0 -F "es;ntopng;ntopng-%Y.%m.%d;http://192.168.1.188:9200/_bulk;"
```
#### edited
```
docker run --net=host -t -p 3000:3000 --restart=always --name ntopng fang/ntopng \
-i eth0 -F "es;ntopng;ntopng-%Y.%m.%d;http://192.168.1.188:9200/_bulk;" \
-r localhost:6377
```
#### With ES authentication
```
docker run --net=host -t -p 3000:3000 --restart=always --name ntopng fang/ntopng \
-i eth0 -F "es;ntopng;ntopng-%Y.%m.%d;http://192.168.1.188:9200/_bulk;elastic:zzjz1234;"
```

### Option details
#### --net=host
Host networking mode: docker does not create a separate network namespace for the container but shares the host's network namespace, so the container can directly access all of the host's network interfaces.

#### --restart=always
The container is restarted together with the host after a reboot; without this setting, containers are not restarted automatically.
--------------------------------------------------------------------------------
/ntopng/edited/Dockerfile:
--------------------------------------------------------------------------------
FROM ubuntu:16.04
MAINTAINER fgt

RUN apt-get update
RUN apt-get -y -q install wget lsb-core
RUN wget http://apt-stable.ntop.org/16.04/all/apt-ntop-stable.deb
RUN dpkg -i apt-ntop-stable.deb

RUN apt-get clean all
RUN apt-get update
RUN apt-get -y -q install pfring nprobe ntopng ntopng-data n2disk cento

RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*

EXPOSE 3000

# replace the default redis config file (the port was changed)
COPY redis.conf /etc/redis/redis.conf

RUN echo '#!/bin/bash\n/etc/init.d/redis-server start\nntopng "$@"' > /tmp/run.sh
RUN chmod +x /tmp/run.sh

ENTRYPOINT ["/tmp/run.sh"]
--------------------------------------------------------------------------------
/ntopng/edited/redis.conf:
--------------------------------------------------------------------------------
# Redis configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
# ./redis-server /path/to/redis.conf

# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.

################################## INCLUDES ###################################

# Include one or more other config files here. This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################ GENERAL ##################################### 39 | 40 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 41 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 42 | daemonize yes 43 | 44 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 45 | # default. You can specify a custom pid file location here. 46 | pidfile /var/run/redis/redis-server.pid 47 | 48 | # Accept connections on the specified port, default is 6379. 49 | # If port 0 is specified Redis will not listen on a TCP socket. 50 | port 6377 51 | 52 | # TCP listen() backlog. 53 | # 54 | # In high requests-per-second environments you need an high backlog in order 55 | # to avoid slow clients connections issues. Note that the Linux kernel 56 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 57 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 58 | # in order to get the desired effect. 59 | tcp-backlog 511 60 | 61 | # By default Redis listens for connections from all the network interfaces 62 | # available on the server. It is possible to listen to just one or multiple 63 | # interfaces using the "bind" configuration directive, followed by one or 64 | # more IP addresses. 65 | # 66 | # Examples: 67 | # 68 | # bind 192.168.1.100 10.0.0.1 69 | bind 127.0.0.1 70 | 71 | # Specify the path for the Unix socket that will be used to listen for 72 | # incoming connections. There is no default, so Redis will not listen 73 | # on a unix socket when not specified. 74 | # 75 | # unixsocket /var/run/redis/redis.sock 76 | # unixsocketperm 700 77 | 78 | # Close the connection after a client is idle for N seconds (0 to disable) 79 | timeout 0 80 | 81 | # TCP keepalive. 82 | # 83 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 84 | # of communication. This is useful for two reasons: 85 | # 86 | # 1) Detect dead peers. 87 | # 2) Take the connection alive from the point of view of network 88 | # equipment in the middle. 89 | # 90 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 91 | # Note that to close the connection the double of the time is needed. 92 | # On other kernels the period depends on the kernel configuration. 93 | # 94 | # A reasonable value for this option is 60 seconds. 95 | tcp-keepalive 0 96 | 97 | # Specify the server verbosity level. 98 | # This can be one of: 99 | # debug (a lot of information, useful for development/testing) 100 | # verbose (many rarely useful info, but not a mess like the debug level) 101 | # notice (moderately verbose, what you want in production probably) 102 | # warning (only very important / critical messages are logged) 103 | loglevel notice 104 | 105 | # Specify the log file name. Also the empty string can be used to force 106 | # Redis to log on the standard output. Note that if you use standard 107 | # output for logging but daemonize, logs will be sent to /dev/null 108 | logfile /var/log/redis/redis-server.log 109 | 110 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 111 | # and optionally update the other syslog parameters to suit your needs. 112 | # syslog-enabled no 113 | 114 | # Specify the syslog identity. 115 | # syslog-ident redis 116 | 117 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 
118 | # syslog-facility local0 119 | 120 | # Set the number of databases. The default database is DB 0, you can select 121 | # a different one on a per-connection basis using SELECT where 122 | # dbid is a number between 0 and 'databases'-1 123 | databases 16 124 | 125 | ################################ SNAPSHOTTING ################################ 126 | # 127 | # Save the DB on disk: 128 | # 129 | # save 130 | # 131 | # Will save the DB if both the given number of seconds and the given 132 | # number of write operations against the DB occurred. 133 | # 134 | # In the example below the behaviour will be to save: 135 | # after 900 sec (15 min) if at least 1 key changed 136 | # after 300 sec (5 min) if at least 10 keys changed 137 | # after 60 sec if at least 10000 keys changed 138 | # 139 | # Note: you can disable saving completely by commenting out all "save" lines. 140 | # 141 | # It is also possible to remove all the previously configured save 142 | # points by adding a save directive with a single empty string argument 143 | # like in the following example: 144 | # 145 | # save "" 146 | 147 | save 900 1 148 | save 300 10 149 | save 60 10000 150 | 151 | # By default Redis will stop accepting writes if RDB snapshots are enabled 152 | # (at least one save point) and the latest background save failed. 153 | # This will make the user aware (in a hard way) that data is not persisting 154 | # on disk properly, otherwise chances are that no one will notice and some 155 | # disaster will happen. 156 | # 157 | # If the background saving process will start working again Redis will 158 | # automatically allow writes again. 159 | # 160 | # However if you have setup your proper monitoring of the Redis server 161 | # and persistence, you may want to disable this feature so that Redis will 162 | # continue to work as usual even if there are problems with disk, 163 | # permissions, and so forth. 164 | stop-writes-on-bgsave-error yes 165 | 166 | # Compress string objects using LZF when dump .rdb databases? 167 | # For default that's set to 'yes' as it's almost always a win. 168 | # If you want to save some CPU in the saving child set it to 'no' but 169 | # the dataset will likely be bigger if you have compressible values or keys. 170 | rdbcompression yes 171 | 172 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 173 | # This makes the format more resistant to corruption but there is a performance 174 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 175 | # for maximum performances. 176 | # 177 | # RDB files created with checksum disabled have a checksum of zero that will 178 | # tell the loading code to skip the check. 179 | rdbchecksum yes 180 | 181 | # The filename where to dump the DB 182 | dbfilename dump.rdb 183 | 184 | # The working directory. 185 | # 186 | # The DB will be written inside this directory, with the filename specified 187 | # above using the 'dbfilename' configuration directive. 188 | # 189 | # The Append Only File will also be created inside this directory. 190 | # 191 | # Note that you must specify a directory here, not a file name. 192 | dir /var/lib/redis 193 | 194 | ################################# REPLICATION ################################# 195 | 196 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 197 | # another Redis server. A few things to understand ASAP about Redis replication. 
198 | # 199 | # 1) Redis replication is asynchronous, but you can configure a master to 200 | # stop accepting writes if it appears to be not connected with at least 201 | # a given number of slaves. 202 | # 2) Redis slaves are able to perform a partial resynchronization with the 203 | # master if the replication link is lost for a relatively small amount of 204 | # time. You may want to configure the replication backlog size (see the next 205 | # sections of this file) with a sensible value depending on your needs. 206 | # 3) Replication is automatic and does not need user intervention. After a 207 | # network partition slaves automatically try to reconnect to masters 208 | # and resynchronize with them. 209 | # 210 | # slaveof 211 | 212 | # If the master is password protected (using the "requirepass" configuration 213 | # directive below) it is possible to tell the slave to authenticate before 214 | # starting the replication synchronization process, otherwise the master will 215 | # refuse the slave request. 216 | # 217 | # masterauth 218 | 219 | # When a slave loses its connection with the master, or when the replication 220 | # is still in progress, the slave can act in two different ways: 221 | # 222 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 223 | # still reply to client requests, possibly with out of date data, or the 224 | # data set may just be empty if this is the first synchronization. 225 | # 226 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 227 | # an error "SYNC with master in progress" to all the kind of commands 228 | # but to INFO and SLAVEOF. 229 | # 230 | slave-serve-stale-data yes 231 | 232 | # You can configure a slave instance to accept writes or not. Writing against 233 | # a slave instance may be useful to store some ephemeral data (because data 234 | # written on a slave will be easily deleted after resync with the master) but 235 | # may also cause problems if clients are writing to it because of a 236 | # misconfiguration. 237 | # 238 | # Since Redis 2.6 by default slaves are read-only. 239 | # 240 | # Note: read only slaves are not designed to be exposed to untrusted clients 241 | # on the internet. It's just a protection layer against misuse of the instance. 242 | # Still a read only slave exports by default all the administrative commands 243 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 244 | # security of read only slaves using 'rename-command' to shadow all the 245 | # administrative / dangerous commands. 246 | slave-read-only yes 247 | 248 | # Replication SYNC strategy: disk or socket. 249 | # 250 | # ------------------------------------------------------- 251 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 252 | # ------------------------------------------------------- 253 | # 254 | # New slaves and reconnecting slaves that are not able to continue the replication 255 | # process just receiving differences, need to do what is called a "full 256 | # synchronization". An RDB file is transmitted from the master to the slaves. 257 | # The transmission can happen in two different ways: 258 | # 259 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 260 | # file on disk. Later the file is transferred by the parent 261 | # process to the slaves incrementally. 262 | # 2) Diskless: The Redis master creates a new process that directly writes the 263 | # RDB file to slave sockets, without touching the disk at all. 
264 | # 265 | # With disk-backed replication, while the RDB file is generated, more slaves 266 | # can be queued and served with the RDB file as soon as the current child producing 267 | # the RDB file finishes its work. With diskless replication instead once 268 | # the transfer starts, new slaves arriving will be queued and a new transfer 269 | # will start when the current one terminates. 270 | # 271 | # When diskless replication is used, the master waits a configurable amount of 272 | # time (in seconds) before starting the transfer in the hope that multiple slaves 273 | # will arrive and the transfer can be parallelized. 274 | # 275 | # With slow disks and fast (large bandwidth) networks, diskless replication 276 | # works better. 277 | repl-diskless-sync no 278 | 279 | # When diskless replication is enabled, it is possible to configure the delay 280 | # the server waits in order to spawn the child that transfers the RDB via socket 281 | # to the slaves. 282 | # 283 | # This is important since once the transfer starts, it is not possible to serve 284 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 285 | # waits a delay in order to let more slaves arrive. 286 | # 287 | # The delay is specified in seconds, and by default is 5 seconds. To disable 288 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 289 | repl-diskless-sync-delay 5 290 | 291 | # Slaves send PINGs to server in a predefined interval. It's possible to change 292 | # this interval with the repl_ping_slave_period option. The default value is 10 293 | # seconds. 294 | # 295 | # repl-ping-slave-period 10 296 | 297 | # The following option sets the replication timeout for: 298 | # 299 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 300 | # 2) Master timeout from the point of view of slaves (data, pings). 301 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 302 | # 303 | # It is important to make sure that this value is greater than the value 304 | # specified for repl-ping-slave-period otherwise a timeout will be detected 305 | # every time there is low traffic between the master and the slave. 306 | # 307 | # repl-timeout 60 308 | 309 | # Disable TCP_NODELAY on the slave socket after SYNC? 310 | # 311 | # If you select "yes" Redis will use a smaller number of TCP packets and 312 | # less bandwidth to send data to slaves. But this can add a delay for 313 | # the data to appear on the slave side, up to 40 milliseconds with 314 | # Linux kernels using a default configuration. 315 | # 316 | # If you select "no" the delay for data to appear on the slave side will 317 | # be reduced but more bandwidth will be used for replication. 318 | # 319 | # By default we optimize for low latency, but in very high traffic conditions 320 | # or when the master and slaves are many hops away, turning this to "yes" may 321 | # be a good idea. 322 | repl-disable-tcp-nodelay no 323 | 324 | # Set the replication backlog size. The backlog is a buffer that accumulates 325 | # slave data when slaves are disconnected for some time, so that when a slave 326 | # wants to reconnect again, often a full resync is not needed, but a partial 327 | # resync is enough, just passing the portion of data the slave missed while 328 | # disconnected. 329 | # 330 | # The bigger the replication backlog, the longer the time the slave can be 331 | # disconnected and later be able to perform a partial resynchronization. 
332 | # 333 | # The backlog is only allocated once there is at least a slave connected. 334 | # 335 | # repl-backlog-size 1mb 336 | 337 | # After a master has no longer connected slaves for some time, the backlog 338 | # will be freed. The following option configures the amount of seconds that 339 | # need to elapse, starting from the time the last slave disconnected, for 340 | # the backlog buffer to be freed. 341 | # 342 | # A value of 0 means to never release the backlog. 343 | # 344 | # repl-backlog-ttl 3600 345 | 346 | # The slave priority is an integer number published by Redis in the INFO output. 347 | # It is used by Redis Sentinel in order to select a slave to promote into a 348 | # master if the master is no longer working correctly. 349 | # 350 | # A slave with a low priority number is considered better for promotion, so 351 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 352 | # pick the one with priority 10, that is the lowest. 353 | # 354 | # However a special priority of 0 marks the slave as not able to perform the 355 | # role of master, so a slave with priority of 0 will never be selected by 356 | # Redis Sentinel for promotion. 357 | # 358 | # By default the priority is 100. 359 | slave-priority 100 360 | 361 | # It is possible for a master to stop accepting writes if there are less than 362 | # N slaves connected, having a lag less or equal than M seconds. 363 | # 364 | # The N slaves need to be in "online" state. 365 | # 366 | # The lag in seconds, that must be <= the specified value, is calculated from 367 | # the last ping received from the slave, that is usually sent every second. 368 | # 369 | # This option does not GUARANTEE that N replicas will accept the write, but 370 | # will limit the window of exposure for lost writes in case not enough slaves 371 | # are available, to the specified number of seconds. 372 | # 373 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 374 | # 375 | # min-slaves-to-write 3 376 | # min-slaves-max-lag 10 377 | # 378 | # Setting one or the other to 0 disables the feature. 379 | # 380 | # By default min-slaves-to-write is set to 0 (feature disabled) and 381 | # min-slaves-max-lag is set to 10. 382 | 383 | ################################## SECURITY ################################### 384 | 385 | # Require clients to issue AUTH before processing any other 386 | # commands. This might be useful in environments in which you do not trust 387 | # others with access to the host running redis-server. 388 | # 389 | # This should stay commented out for backward compatibility and because most 390 | # people do not need auth (e.g. they run their own servers). 391 | # 392 | # Warning: since Redis is pretty fast an outside user can try up to 393 | # 150k passwords per second against a good box. This means that you should 394 | # use a very strong password otherwise it will be very easy to break. 395 | # 396 | # requirepass foobared 397 | 398 | # Command renaming. 399 | # 400 | # It is possible to change the name of dangerous commands in a shared 401 | # environment. For instance the CONFIG command may be renamed into something 402 | # hard to guess so that it will still be available for internal-use tools 403 | # but not available for general clients. 
404 | # 405 | # Example: 406 | # 407 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 408 | # 409 | # It is also possible to completely kill a command by renaming it into 410 | # an empty string: 411 | # 412 | # rename-command CONFIG "" 413 | # 414 | # Please note that changing the name of commands that are logged into the 415 | # AOF file or transmitted to slaves may cause problems. 416 | 417 | ################################### LIMITS #################################### 418 | 419 | # Set the max number of connected clients at the same time. By default 420 | # this limit is set to 10000 clients, however if the Redis server is not 421 | # able to configure the process file limit to allow for the specified limit 422 | # the max number of allowed clients is set to the current file limit 423 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 424 | # 425 | # Once the limit is reached Redis will close all the new connections sending 426 | # an error 'max number of clients reached'. 427 | # 428 | # maxclients 10000 429 | 430 | # Don't use more memory than the specified amount of bytes. 431 | # When the memory limit is reached Redis will try to remove keys 432 | # according to the eviction policy selected (see maxmemory-policy). 433 | # 434 | # If Redis can't remove keys according to the policy, or if the policy is 435 | # set to 'noeviction', Redis will start to reply with errors to commands 436 | # that would use more memory, like SET, LPUSH, and so on, and will continue 437 | # to reply to read-only commands like GET. 438 | # 439 | # This option is usually useful when using Redis as an LRU cache, or to set 440 | # a hard memory limit for an instance (using the 'noeviction' policy). 441 | # 442 | # WARNING: If you have slaves attached to an instance with maxmemory on, 443 | # the size of the output buffers needed to feed the slaves are subtracted 444 | # from the used memory count, so that network problems / resyncs will 445 | # not trigger a loop where keys are evicted, and in turn the output 446 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 447 | # of more keys, and so forth until the database is completely emptied. 448 | # 449 | # In short... if you have slaves attached it is suggested that you set a lower 450 | # limit for maxmemory so that there is some free RAM on the system for slave 451 | # output buffers (but this is not needed if the policy is 'noeviction'). 452 | # 453 | # maxmemory 454 | 455 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 456 | # is reached. You can select among five behaviors: 457 | # 458 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 459 | # allkeys-lru -> remove any key according to the LRU algorithm 460 | # volatile-random -> remove a random key with an expire set 461 | # allkeys-random -> remove a random key, any key 462 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 463 | # noeviction -> don't expire at all, just return an error on write operations 464 | # 465 | # Note: with any of the above policies, Redis will return an error on write 466 | # operations, when there are no suitable keys for eviction. 
467 | # 468 | # At the date of writing these commands are: set setnx setex append 469 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 470 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 471 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 472 | # getset mset msetnx exec sort 473 | # 474 | # The default is: 475 | # 476 | # maxmemory-policy noeviction 477 | 478 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 479 | # algorithms (in order to save memory), so you can tune it for speed or 480 | # accuracy. For default Redis will check five keys and pick the one that was 481 | # used less recently, you can change the sample size using the following 482 | # configuration directive. 483 | # 484 | # The default of 5 produces good enough results. 10 Approximates very closely 485 | # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. 486 | # 487 | # maxmemory-samples 5 488 | 489 | ############################## APPEND ONLY MODE ############################### 490 | 491 | # By default Redis asynchronously dumps the dataset on disk. This mode is 492 | # good enough in many applications, but an issue with the Redis process or 493 | # a power outage may result into a few minutes of writes lost (depending on 494 | # the configured save points). 495 | # 496 | # The Append Only File is an alternative persistence mode that provides 497 | # much better durability. For instance using the default data fsync policy 498 | # (see later in the config file) Redis can lose just one second of writes in a 499 | # dramatic event like a server power outage, or a single write if something 500 | # wrong with the Redis process itself happens, but the operating system is 501 | # still running correctly. 502 | # 503 | # AOF and RDB persistence can be enabled at the same time without problems. 504 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 505 | # with the better durability guarantees. 506 | # 507 | # Please check http://redis.io/topics/persistence for more information. 508 | 509 | appendonly no 510 | 511 | # The name of the append only file (default: "appendonly.aof") 512 | 513 | appendfilename "appendonly.aof" 514 | 515 | # The fsync() call tells the Operating System to actually write data on disk 516 | # instead of waiting for more data in the output buffer. Some OS will really flush 517 | # data on disk, some other OS will just try to do it ASAP. 518 | # 519 | # Redis supports three different modes: 520 | # 521 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 522 | # always: fsync after every write to the append only log. Slow, Safest. 523 | # everysec: fsync only one time every second. Compromise. 524 | # 525 | # The default is "everysec", as that's usually the right compromise between 526 | # speed and data safety. It's up to you to understand if you can relax this to 527 | # "no" that will let the operating system flush the output buffer when 528 | # it wants, for better performances (but if you can live with the idea of 529 | # some data loss consider the default persistence mode that's snapshotting), 530 | # or on the contrary, use "always" that's very slow but a bit safer than 531 | # everysec. 532 | # 533 | # More details please check the following article: 534 | # http://antirez.com/post/redis-persistence-demystified.html 535 | # 536 | # If unsure, use "everysec". 
537 | 
538 | # appendfsync always
539 | appendfsync everysec
540 | # appendfsync no
541 | 
542 | # When the AOF fsync policy is set to always or everysec, and a background
543 | # saving process (a background save or AOF log background rewriting) is
544 | # performing a lot of I/O against the disk, in some Linux configurations
545 | # Redis may block too long on the fsync() call. Note that there is no fix for
546 | # this currently, as even performing fsync in a different thread will block
547 | # our synchronous write(2) call.
548 | #
549 | # In order to mitigate this problem it's possible to use the following option
550 | # that will prevent fsync() from being called in the main process while a
551 | # BGSAVE or BGREWRITEAOF is in progress.
552 | #
553 | # This means that while another child is saving, the durability of Redis is
554 | # the same as "appendfsync none". In practical terms, this means that it is
555 | # possible to lose up to 30 seconds of log in the worst scenario (with the
556 | # default Linux settings).
557 | #
558 | # If you have latency problems turn this to "yes". Otherwise leave it as
559 | # "no", which is the safest pick from the point of view of durability.
560 | 
561 | no-appendfsync-on-rewrite no
562 | 
563 | # Automatic rewrite of the append only file.
564 | # Redis is able to automatically rewrite the log file by implicitly calling
565 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
566 | #
567 | # This is how it works: Redis remembers the size of the AOF file after the
568 | # latest rewrite (if no rewrite has happened since the restart, the size of
569 | # the AOF at startup is used).
570 | #
571 | # This base size is compared to the current size. If the current size is
572 | # bigger than the specified percentage, the rewrite is triggered. Also,
573 | # you need to specify a minimal size for the AOF file to be rewritten, which
574 | # is useful to avoid rewriting the AOF file even if the percentage increase
575 | # is reached but it is still pretty small.
576 | #
577 | # Specify a percentage of zero in order to disable the automatic AOF
578 | # rewrite feature.
579 | 
580 | auto-aof-rewrite-percentage 100
581 | auto-aof-rewrite-min-size 64mb
582 | 
583 | # An AOF file may be found to be truncated at the end during the Redis
584 | # startup process, when the AOF data gets loaded back into memory.
585 | # This may happen when the system where Redis is running
586 | # crashes, especially when an ext4 filesystem is mounted without the
587 | # data=ordered option (however this can't happen when Redis itself
588 | # crashes or aborts but the operating system still works correctly).
589 | #
590 | # Redis can either exit with an error when this happens, or load as much
591 | # data as possible (the default now) and start if the AOF file is found
592 | # to be truncated at the end. The following option controls this behavior.
593 | #
594 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
595 | # the Redis server starts emitting a log to inform the user of the event.
596 | # Otherwise, if the option is set to no, the server aborts with an error
597 | # and refuses to start. When the option is set to no, the user must
598 | # fix the AOF file using the "redis-check-aof" utility before restarting
599 | # the server.
600 | #
601 | # Note that if the AOF file is found to be corrupted in the middle,
602 | # the server will still exit with an error. This option only applies when
603 | # Redis tries to read more data from the AOF file but not enough bytes
604 | # are found.
605 | aof-load-truncated yes
606 | 
607 | ################################ LUA SCRIPTING ###############################
608 | 
609 | # Max execution time of a Lua script in milliseconds.
610 | #
611 | # If the maximum execution time is reached Redis will log that a script is
612 | # still in execution after the maximum allowed time and will start to
613 | # reply to queries with an error.
614 | #
615 | # When a long running script exceeds the maximum execution time only the
616 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
617 | # used to stop a script that has not yet called write commands. The second
618 | # is the only way to shut down the server in case a write command was
619 | # already issued by the script but the user doesn't want to wait for the natural
620 | # termination of the script.
621 | #
622 | # Set it to 0 or a negative value for unlimited execution without warnings.
623 | lua-time-limit 5000
624 | 
625 | ################################ REDIS CLUSTER ###############################
626 | #
627 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
628 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
629 | # in order to mark it as "mature" we need to wait for a non-trivial percentage
630 | # of users to deploy it in production.
631 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
632 | #
633 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
634 | # started as cluster nodes can. In order to start a Redis instance as a
635 | # cluster node, enable cluster support by uncommenting the following:
636 | #
637 | # cluster-enabled yes
638 | 
639 | # Every cluster node has a cluster configuration file. This file is not
640 | # intended to be edited by hand. It is created and updated by Redis nodes.
641 | # Every Redis Cluster node requires a different cluster configuration file.
642 | # Make sure that instances running on the same system do not have
643 | # overlapping cluster configuration file names.
644 | #
645 | # cluster-config-file nodes-6379.conf
646 | 
647 | # Cluster node timeout is the amount of milliseconds a node must be unreachable
648 | # for it to be considered in failure state.
649 | # Most other internal time limits are multiples of the node timeout.
650 | #
651 | # cluster-node-timeout 15000
652 | 
653 | # A slave of a failing master will avoid starting a failover if its data
654 | # looks too old.
655 | #
656 | # There is no simple way for a slave to actually have an exact measure of
657 | # its "data age", so the following two checks are performed:
658 | #
659 | # 1) If there are multiple slaves able to fail over, they exchange messages
660 | # in order to try to give an advantage to the slave with the best
661 | # replication offset (more data from the master processed).
662 | # Slaves will try to get their rank by offset, and apply to the start
663 | # of the failover a delay proportional to their rank.
664 | #
665 | # 2) Every single slave computes the time of the last interaction with
666 | # its master. This can be the last ping or command received (if the master
667 | # is still in the "connected" state), or the time that elapsed since the
668 | # disconnection with the master (if the replication link is currently down).
669 | # If the last interaction is too old, the slave will not try to fail over
670 | # at all.
671 | #
672 | # Point "2" can be tuned by the user. Specifically, a slave will not perform
673 | # the failover if, since the last interaction with the master, the time
674 | # elapsed is greater than:
675 | #
676 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period
677 | #
678 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor
679 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
680 | # slave will not try to fail over if it was not able to talk with the master
681 | # for longer than 310 seconds.
682 | #
683 | # A large slave-validity-factor may allow slaves with too old data to fail over
684 | # a master, while too small a value may prevent the cluster from being able to
685 | # elect a slave at all.
686 | #
687 | # For maximum availability, it is possible to set the slave-validity-factor
688 | # to a value of 0, which means that slaves will always try to fail over the
689 | # master regardless of the last time they interacted with the master.
690 | # (However they'll always try to apply a delay proportional to their
691 | # offset rank).
692 | #
693 | # Zero is the only value able to guarantee that when all the partitions heal
694 | # the cluster will always be able to continue.
695 | #
696 | # cluster-slave-validity-factor 10
697 | 
698 | # Cluster slaves are able to migrate to orphaned masters, that is, masters
699 | # that are left without working slaves. This improves the cluster's ability
700 | # to resist failures, as otherwise an orphaned master can't be failed over
701 | # if it has no working slaves.
702 | #
703 | # Slaves migrate to orphaned masters only if there are still at least a
704 | # given number of other working slaves for their old master. This number
705 | # is the "migration barrier". A migration barrier of 1 means that a slave
706 | # will migrate only if there is at least 1 other working slave for its master,
707 | # and so forth. It usually reflects the number of slaves you want for every
708 | # master in your cluster.
709 | #
710 | # Default is 1 (slaves migrate only if their masters remain with at least
711 | # one slave). To disable migration just set it to a very large value.
712 | # A value of 0 can be set but is useful only for debugging and dangerous
713 | # in production.
714 | #
715 | # cluster-migration-barrier 1
716 | 
717 | # By default Redis Cluster nodes stop accepting queries if they detect there
718 | # is at least one hash slot uncovered (no available node is serving it).
719 | # This way if the cluster is partially down (for example a range of hash slots
720 | # is no longer covered) the whole cluster eventually becomes unavailable.
721 | # It automatically becomes available again as soon as all the slots are covered.
722 | #
723 | # However, sometimes you want the subset of the cluster that is working
724 | # to continue to accept queries for the part of the key space that is still
725 | # covered. In order to do so, just set the cluster-require-full-coverage
726 | # option to no.
727 | #
728 | # cluster-require-full-coverage yes
729 | 
730 | # In order to set up your cluster make sure to read the documentation
731 | # available at the http://redis.io web site.
732 | 
733 | ################################## SLOW LOG ###################################
734 | 
735 | # The Redis Slow Log is a system to log queries that exceeded a specified
736 | # execution time. The execution time does not include the I/O operations
737 | # like talking with the client, sending the reply and so forth,
738 | # but just the time needed to actually execute the command (this is the only
739 | # stage of command execution where the thread is blocked and cannot serve
740 | # other requests in the meantime).
741 | #
742 | # You can configure the slow log with two parameters: one tells Redis
743 | # the execution time, in microseconds, that a command must exceed in order
744 | # to get logged, and the other parameter is the length of the
745 | # slow log. When a new command is logged the oldest one is removed from the
746 | # queue of logged commands.
747 | 
748 | # The following time is expressed in microseconds, so 1000000 is equivalent
749 | # to one second. Note that a negative number disables the slow log, while
750 | # a value of zero forces the logging of every command.
751 | slowlog-log-slower-than 10000
752 | 
753 | # There is no limit to this length. Just be aware that it will consume memory.
754 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
755 | slowlog-max-len 128
756 | 
757 | ################################ LATENCY MONITOR ##############################
758 | 
759 | # The Redis latency monitoring subsystem samples different operations
760 | # at runtime in order to collect data related to possible sources of
761 | # latency of a Redis instance.
762 | #
763 | # Via the LATENCY command this information is available to the user, who can
764 | # print graphs and obtain reports.
765 | #
766 | # The system only logs operations that were performed in a time equal to or
767 | # greater than the amount of milliseconds specified via the
768 | # latency-monitor-threshold configuration directive. When its value is set
769 | # to zero, the latency monitor is turned off.
770 | #
771 | # By default latency monitoring is disabled since it is mostly not needed
772 | # if you don't have latency issues, and collecting data has a performance
773 | # impact that, while very small, can be measured under heavy load. Latency
774 | # monitoring can easily be enabled at runtime using the command
775 | # "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
776 | latency-monitor-threshold 0
777 | 
778 | ############################# EVENT NOTIFICATION ##############################
779 | 
780 | # Redis can notify Pub/Sub clients about events happening in the key space.
781 | # This feature is documented at http://redis.io/topics/notifications
782 | #
783 | # For instance if keyspace events notification is enabled, and a client
784 | # performs a DEL operation on key "foo" stored in database 0, two
785 | # messages will be published via Pub/Sub:
786 | #
787 | # PUBLISH __keyspace@0__:foo del
788 | # PUBLISH __keyevent@0__:del foo
789 | #
790 | # It is possible to select the events that Redis will notify among a set
791 | # of classes. Every class is identified by a single character:
792 | #
793 | # K Keyspace events, published with __keyspace@<db>__ prefix.
794 | # E Keyevent events, published with __keyevent@<db>__ prefix.
795 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
796 | # $ String commands
797 | # l List commands
798 | # s Set commands
799 | # h Hash commands
800 | # z Sorted set commands
801 | # x Expired events (events generated every time a key expires)
802 | # e Evicted events (events generated when a key is evicted for maxmemory)
803 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
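#
# For illustration only (a client-side command, not a config directive):
# once events are enabled, they can be observed from redis-cli, e.g.
# assuming database 0 and one of the flag combinations shown below:
#
#   redis-cli PSUBSCRIBE '__keyevent@0__:*'
#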
804 | #
805 | # The "notify-keyspace-events" directive takes as argument a string that is
806 | # composed of zero or more characters. The empty string means that notifications
807 | # are disabled.
808 | #
809 | # Example: to enable list and generic events, from the point of view of the
810 | # event name, use:
811 | #
812 | # notify-keyspace-events Elg
813 | #
814 | # Example 2: to get the stream of the expired keys subscribing to channel
815 | # name __keyevent@0__:expired, use:
816 | #
817 | # notify-keyspace-events Ex
818 | #
819 | # By default all notifications are disabled because most users don't need
820 | # this feature and the feature has some overhead. Note that if you don't
821 | # specify at least one of K or E, no events will be delivered.
822 | notify-keyspace-events ""
823 | 
824 | ############################### ADVANCED CONFIG ###############################
825 | 
826 | # Hashes are encoded using a memory efficient data structure when they have a
827 | # small number of entries, and the biggest entry does not exceed a given
828 | # threshold. These thresholds can be configured using the following directives.
829 | hash-max-ziplist-entries 512
830 | hash-max-ziplist-value 64
831 | 
832 | # Similarly to hashes, small lists are also encoded in a special way in order
833 | # to save a lot of space. The special representation is only used when
834 | # you are under the following limits:
835 | list-max-ziplist-entries 512
836 | list-max-ziplist-value 64
837 | 
838 | # Sets have a special encoding in just one case: when a set is composed
839 | # of just strings that happen to be integers in radix 10 in the range
840 | # of 64 bit signed integers.
841 | # The following configuration setting sets the limit on the size of the
842 | # set in order to use this special memory saving encoding.
843 | set-max-intset-entries 512
844 | 
845 | # Similarly to hashes and lists, sorted sets are also specially encoded in
846 | # order to save a lot of space. This encoding is only used when the length and
847 | # elements of a sorted set are below the following limits:
848 | zset-max-ziplist-entries 128
849 | zset-max-ziplist-value 64
850 | 
851 | # HyperLogLog sparse representation bytes limit. The limit includes the
852 | # 16 bytes header. When a HyperLogLog using the sparse representation crosses
853 | # this limit, it is converted into the dense representation.
854 | #
855 | # A value greater than 16000 is totally useless, since at that point the
856 | # dense representation is more memory efficient.
857 | #
858 | # The suggested value is ~ 3000 in order to have the benefits of
859 | # the space efficient encoding without slowing down too much PFADD,
860 | # which is O(N) with the sparse encoding. The value can be raised to
861 | # ~ 10000 when CPU is not a concern, but space is, and the data set is
862 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
863 | hll-sparse-max-bytes 3000
864 | 
865 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
866 | # order to help rehashing the main Redis hash table (the one mapping top-level
867 | # keys to values). The hash table implementation Redis uses (see dict.c)
868 | # performs a lazy rehashing: the more operations you run against a hash table
869 | # that is rehashing, the more rehashing "steps" are performed, so if the
870 | # server is idle the rehashing is never complete and some more memory is used
871 | # by the hash table.
872 | #
873 | # The default is to use this millisecond 10 times every second in order to
874 | # actively rehash the main dictionaries, freeing memory when possible.
875 | #
876 | # If unsure:
877 | # use "activerehashing no" if you have hard latency requirements and it is
878 | # not a good thing in your environment that Redis can reply from time to time
879 | # to queries with a 2 millisecond delay.
880 | #
881 | # use "activerehashing yes" if you don't have such hard requirements but
882 | # want to free memory ASAP when possible.
883 | activerehashing yes
884 | 
885 | # The client output buffer limits can be used to force disconnection of clients
886 | # that are not reading data from the server fast enough for some reason (a
887 | # common reason is that a Pub/Sub client can't consume messages as fast as the
888 | # publisher can produce them).
889 | #
890 | # The limit can be set differently for the three different classes of clients:
891 | #
892 | # normal -> normal clients including MONITOR clients
893 | # slave -> slave clients
894 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
895 | #
896 | # The syntax of every client-output-buffer-limit directive is the following:
897 | #
898 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
899 | #
900 | # A client is immediately disconnected once the hard limit is reached, or if
901 | # the soft limit is reached and remains reached for the specified number of
902 | # seconds (continuously).
903 | # So for instance if the hard limit is 32 megabytes and the soft limit is
904 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
905 | # if the size of the output buffers reaches 32 megabytes, but will also get
906 | # disconnected if the client reaches 16 megabytes and continuously stays over
907 | # the limit for 10 seconds.
908 | #
909 | # By default normal clients are not limited because they don't receive data
910 | # without asking (in a push way), but just after a request, so only
911 | # asynchronous clients may create a scenario where data is requested faster
912 | # than it can be read.
913 | #
914 | # Instead there is a default limit for pubsub and slave clients, since
915 | # subscribers and slaves receive data in a push fashion.
916 | #
917 | # Both the hard and the soft limit can be disabled by setting them to zero.
918 | client-output-buffer-limit normal 0 0 0
919 | client-output-buffer-limit slave 256mb 64mb 60
920 | client-output-buffer-limit pubsub 32mb 8mb 60
921 | 
922 | # Redis calls an internal function to perform many background tasks, like
923 | # closing connections of clients in timeout, purging expired keys that are
924 | # never requested, and so forth.
925 | #
926 | # Not all tasks are performed with the same frequency, but Redis checks for
927 | # tasks to perform according to the specified "hz" value.
928 | #
929 | # By default "hz" is set to 10. Raising the value will use more CPU when
930 | # Redis is idle, but at the same time will make Redis more responsive when
931 | # there are many keys expiring at the same time, and timeouts may be
932 | # handled with more precision.
933 | #
934 | # The range is between 1 and 500, however a value over 100 is usually not
935 | # a good idea. Most users should use the default of 10 and raise this up to
936 | # 100 only in environments where very low latency is required.
937 | hz 10
938 | 
939 | # When a child rewrites the AOF file, if the following option is enabled
940 | # the file will be fsync-ed every 32 MB of data generated. This is useful
941 | # in order to commit the file to the disk more incrementally and avoid
942 | # big latency spikes.
943 | aof-rewrite-incremental-fsync yes
--------------------------------------------------------------------------------
/ntopng/origin/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM ubuntu:16.04
 2 | MAINTAINER fgt
 3 | 
 4 | RUN apt-get update
 5 | RUN apt-get -y -q install wget lsb-core
 6 | #RUN wget http://apt-stable.ntop.org/16.04/all/apt-ntop-stable.deb
 7 | RUN wget https://packages.ntop.org/apt-stable/16.04/all/apt-ntop-stable.deb
 8 | #RUN dpkg -i apt-ntop-stable.deb
 9 | RUN apt install -y ./apt-ntop-stable.deb
10 | 
11 | RUN apt-get clean
12 | RUN apt-get update
13 | RUN apt-get -y -q install pfring nprobe ntopng ntopng-data n2disk cento
14 | 
15 | RUN apt-get clean && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*
16 | 
17 | EXPOSE 3000
18 | 
19 | RUN echo '#!/bin/bash\n/etc/init.d/redis-server start\nntopng "$@"' > /tmp/run.sh
20 | RUN chmod +x /tmp/run.sh
21 | 
22 | ENTRYPOINT ["/tmp/run.sh"]
--------------------------------------------------------------------------------
/snort/centos6/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM centos:6.7
 2 | MAINTAINER fgt
 3 | 
 4 | ENV DAQ_VERSION 2.0.6
 5 | ENV SNORT_VERSION 2.9.8.3
 6 | ENV DNET_VERSION 1.12
 7 | 
 8 | RUN yum -y install yum-plugin-ovl
 9 | RUN yum -y install epel-release
10 | RUN yum -y install rsyslog
11 | RUN yum -y install gcc gcc-c++ flex libpcap* pcre* bison libpcre-devel zlib-devel wget tar
12 | 
13 | #set the working directory
14 | WORKDIR /opt
15 | 
16 | #build and install libdnet from source
17 | RUN wget https://github.com/dugsong/libdnet/archive/libdnet-${DNET_VERSION}.tar.gz \
18 |     && tar xvfz libdnet-${DNET_VERSION}.tar.gz \
19 |     && cd libdnet-libdnet-${DNET_VERSION} \
20 |     && ./configure && make && make install
21 | 
22 | #build and install DAQ from source
23 | RUN wget https://www.snort.org/downloads/archive/snort/daq-${DAQ_VERSION}.tar.gz \
24 |     && tar xvfz daq-${DAQ_VERSION}.tar.gz \
25 |     && cd daq-${DAQ_VERSION} \
26 |     && ./configure && make && make install
27 | 
28 | #build and install snort from source
29 | RUN wget https://www.snort.org/downloads/archive/snort/snort-${SNORT_VERSION}.tar.gz \
30 |     && tar xvfz snort-${SNORT_VERSION}.tar.gz \
31 |     && cd snort-${SNORT_VERSION} \
32 |     && ./configure && make && make install
33 | 
34 | #configure syslog forwarding
35 | COPY ./rsyslog.conf /etc/rsyslog.conf
36 | 
37 | #restarting syslog in a RUN step has no effect, so it is left commented out
38 | #RUN service rsyslog start
39 | 
40 | #clean up caches and temp files
41 | RUN yum clean all && \
42 |     rm -rf /var/log/* \
43 |            /var/tmp/* \
44 |            /tmp/* || true
45 | 
46 | RUN snort -V
47 | 
48 | #ENTRYPOINT service rsyslog restart && /usr/local/bin/snort
49 | CMD service rsyslog restart && /usr/local/bin/snort
--------------------------------------------------------------------------------
/snort/centos6/docker-compose.yml:
--------------------------------------------------------------------------------
 1 | snort:
 2 |   image: fang/snort_new
 3 |   container_name: snort
 4 |   command: sh -c "service rsyslog restart && /usr/local/bin/snort -c /home/snort/etc/snort.conf -i eth1"
 5 |   volumes:
 6 |     - /opt/snortrules-snapshot-back:/home/snort
 7 |   cap_add:
 8 |     - NET_ADMIN
 9 |   net: host
10 |   restart: always
--------------------------------------------------------------------------------
/snort/centos6/rsyslog.conf:
--------------------------------------------------------------------------------
 1 | # rsyslog v5 configuration file
 2 | 
 3 | # For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
 4 | # If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
 5 | 
 6 | #### MODULES ####
 7 | 
 8 | $ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
 9 | $ModLoad imklog # provides kernel logging support (previously done by rklogd)
10 | #$ModLoad immark # provides --MARK-- message capability
11 | 
12 | # Provides UDP syslog reception
13 | #$ModLoad imudp
14 | #$UDPServerRun 514
15 | 
16 | # Provides TCP syslog reception
17 | #$ModLoad imtcp
18 | #$InputTCPServerRun 514
19 | 
20 | 
21 | #### GLOBAL DIRECTIVES ####
22 | 
23 | # Use default timestamp format
24 | $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
25 | 
26 | # File syncing capability is disabled by default. This feature is usually not required,
27 | # not useful and an extreme performance hit
28 | #$ActionFileEnableSync on
29 | 
30 | # Include all config files in /etc/rsyslog.d/
31 | $IncludeConfig /etc/rsyslog.d/*.conf
32 | 
33 | 
34 | #### RULES ####
35 | 
36 | # Log all kernel messages to the console.
37 | # Logging much else clutters up the screen.
38 | #kern.* /dev/console
39 | 
40 | # Log anything (except mail) of level info or higher.
41 | # Don't log private authentication messages!
42 | *.info;mail.none;authpriv.none;cron.none /var/log/messages
43 | 
44 | # The authpriv file has restricted access.
45 | authpriv.* /var/log/secure
46 | 
47 | # snort alert messages are forwarded over UDP to port 5140 on the remote host
48 | local5.* @syslogPC:5140
49 | #auth.* /var/log/snort.log
50 | 
51 | # Log all the mail messages in one place.
52 | mail.* -/var/log/maillog
53 | 
54 | 
55 | # Log cron stuff
56 | cron.* /var/log/cron
57 | 
58 | # Everybody gets emergency messages
59 | *.emerg *
60 | 
61 | # Save news errors of level crit and higher in a special file.
62 | uucp,news.crit /var/log/spooler
63 | 
64 | # Save boot messages also to boot.log
65 | local7.* /var/log/boot.log
66 | 
67 | 
68 | # ### begin forwarding rule ###
69 | # The statements between the begin ... end markers define a SINGLE forwarding
70 | # rule. They belong together, do NOT split them. If you create multiple
71 | # forwarding rules, duplicate the whole block!
72 | # Remote Logging (we use TCP for reliable delivery)
73 | #
74 | # An on-disk queue is created for this action. If the remote host is
75 | # down, messages are spooled to disk and sent when it is up again.
76 | #$WorkDirectory /var/lib/rsyslog # where to place spool files
77 | #$ActionQueueFileName fwdRule1 # unique name prefix for spool files
78 | #$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
79 | #$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
80 | #$ActionQueueType LinkedList # run asynchronously
81 | #$ActionResumeRetryCount -1 # infinite retries if host is down
82 | # remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
83 | #*.* @@remote-host:514
84 | # ### end of the forwarding rule ###
--------------------------------------------------------------------------------
/snort/centos7/Dockerfile:
--------------------------------------------------------------------------------
 1 | FROM centos:7.2.1511
 2 | MAINTAINER fgt
 3 | 
 4 | ENV DAQ_VERSION 2.0.6
 5 | ENV SNORT_VERSION 2.9.8.3
 6 | 
 7 | RUN yum -y install rsyslog
 8 | RUN yum -y install epel-release
 9 | RUN yum -y install \
10 |     https://www.snort.org/downloads/archive/snort/daq-${DAQ_VERSION}-1.centos7.x86_64.rpm \
11 |     https://www.snort.org/downloads/archive/snort/snort-${SNORT_VERSION}-1.centos7.x86_64.rpm
12 | 
13 | RUN ln -s /usr/lib64/snort-${SNORT_VERSION}_dynamicengine \
14 |     /usr/local/lib/snort_dynamicengine && \
15 |     ln -s /usr/lib64/snort-${SNORT_VERSION}_dynamicpreprocessor \
16 |     /usr/local/lib/snort_dynamicpreprocessor
17 | 
18 | #configure syslog forwarding
19 | COPY ./rsyslog.conf /etc/rsyslog.conf
20 | 
21 | #restarting syslog fails with "Failed to get D-Bus connection: Operation not permitted",
22 | #so the service cannot be restarted at build time and this image has a known issue
23 | #RUN systemctl restart rsyslog.service
24 | 
25 | #clean up caches and temp files
26 | RUN yum clean all && \
27 |     rm -rf /var/log/* \
28 |            /var/tmp/* \
29 |            /tmp/* || true
30 | 
31 | RUN /usr/sbin/snort -V
32 | 
33 | CMD [ "snort", "-c", "/home/snort/etc/snort.conf" ]
--------------------------------------------------------------------------------
/snort/centos7/rsyslog.conf:
--------------------------------------------------------------------------------
 1 | # rsyslog configuration file
 2 | 
 3 | # For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
 4 | # If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
 5 | 
 6 | #### MODULES ####
 7 | 
 8 | # The imjournal module below is now used as a message source instead of imuxsock.
 9 | $ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
10 | $ModLoad imjournal # provides access to the systemd journal
11 | #$ModLoad imklog # reads kernel messages (the same are read from journald)
12 | #$ModLoad immark # provides --MARK-- message capability
13 | 
14 | # Provides UDP syslog reception
15 | #$ModLoad imudp
16 | #$UDPServerRun 514
17 | 
18 | # Provides TCP syslog reception
19 | #$ModLoad imtcp
20 | #$InputTCPServerRun 514
21 | 
22 | 
23 | #### GLOBAL DIRECTIVES ####
24 | 
25 | # Where to place auxiliary files
26 | $WorkDirectory /var/lib/rsyslog
27 | 
28 | # Use default timestamp format
29 | $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
30 | 
31 | # File syncing capability is disabled by default. This feature is usually not required,
32 | # not useful and an extreme performance hit
33 | #$ActionFileEnableSync on
34 | 
35 | # Include all config files in /etc/rsyslog.d/
36 | $IncludeConfig /etc/rsyslog.d/*.conf
37 | 
38 | # Turn off message reception via local log socket;
39 | # local messages are retrieved through imjournal now.
40 | $OmitLocalLogging on
41 | 
42 | # File to store the position in the journal
43 | $IMJournalStateFile imjournal.state
44 | 
45 | 
46 | #### RULES ####
47 | 
48 | # Log all kernel messages to the console.
49 | # Logging much else clutters up the screen.
50 | #kern.* /dev/console
51 | 
52 | # Log anything (except mail) of level info or higher.
53 | # Don't log private authentication messages!
54 | *.info;mail.none;authpriv.none;cron.none /var/log/messages
55 | 
56 | # The authpriv file has restricted access.
57 | authpriv.* /var/log/secure
58 | auth.* @@192.168.1.188:5140
59 | #auth.* /var/log/snort.log
60 | 
61 | # Log all the mail messages in one place.
62 | mail.* -/var/log/maillog
63 | 
64 | 
65 | # Log cron stuff
66 | cron.* /var/log/cron
67 | 
68 | # Everybody gets emergency messages
69 | *.emerg :omusrmsg:*
70 | 
71 | # Save news errors of level crit and higher in a special file.
72 | uucp,news.crit /var/log/spooler
73 | 
74 | # Save boot messages also to boot.log
75 | local7.* /var/log/boot.log
76 | 
77 | 
78 | # ### begin forwarding rule ###
79 | # The statements between the begin ... end markers define a SINGLE forwarding
80 | # rule. They belong together, do NOT split them. If you create multiple
81 | # forwarding rules, duplicate the whole block!
82 | # Remote Logging (we use TCP for reliable delivery)
83 | #
84 | # An on-disk queue is created for this action. If the remote host is
85 | # down, messages are spooled to disk and sent when it is up again.
86 | #$ActionQueueFileName fwdRule1 # unique name prefix for spool files
87 | #$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
88 | #$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
89 | #$ActionQueueType LinkedList # run asynchronously
90 | #$ActionResumeRetryCount -1 # infinite retries if host is down
91 | # remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
92 | #*.* @@remote-host:514
93 | # ### end of the forwarding rule ###
94 | 
--------------------------------------------------------------------------------
/snort/command.txt:
--------------------------------------------------------------------------------
 1 | #enter the container for debugging
 2 | docker run -it --rm --net=host -v /opt/snortrules-snapshot-back:/home/snort fang/snort /bin/bash
 3 | #start snort
 4 | docker run -it -d --net=host --privileged=true -v /opt/snortrules-snapshot-back:/home/snort --name snort fang/snort -i enp2s0
 5 | 
 6 | #print output to the console
 7 | -A console
 8 | 
 9 | #start rsyslog and snort together
10 | docker run -it --rm --net=host --privileged=true -v /opt/snortrules-snapshot-back:/home/snort --name snort fang/snort6.7 \
11 |     sh -c "service rsyslog restart && /usr/local/bin/snort -c /home/snort/etc/snort.conf -i eth0"
--------------------------------------------------------------------------------