├── .gitignore
├── multiproc
│   ├── __init__.py
│   ├── requirements.txt
│   ├── ssdc.conf
│   ├── run_compute.sh
│   ├── compare.py
│   ├── README.md
│   ├── compute.py
│   ├── import_dir.py
│   ├── lib.py
│   └── redis.conf
├── setup.py
├── LICENSE
├── README.txt
├── README.md
└── ssdc

/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | 
--------------------------------------------------------------------------------
/multiproc/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/multiproc/requirements.txt:
--------------------------------------------------------------------------------
1 | redis
2 | networkx
3 | pydeep
4 | 
--------------------------------------------------------------------------------
/multiproc/ssdc.conf:
--------------------------------------------------------------------------------
1 | [redis]
2 | host=127.0.0.1
3 | port=6389
4 | 
--------------------------------------------------------------------------------
/multiproc/run_compute.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | set -e
4 | set -x
5 | 
6 | seq 24 | parallel ./compute.py
7 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python2
2 | 
3 | from distutils.core import setup
4 | 
5 | setup(name='ssdc',
6 |       version='1.1.0',
7 |       description='Clusters files based on their ssdeep hash',
8 |       author='Brian Wallace',
9 |       author_email='bwall@ballastsecurity.net',
10 |       url='https://github.com/bwall/ssdc',
11 |       requires=['pydeep'],
12 |       scripts=['ssdc'],
13 |       )
--------------------------------------------------------------------------------
/multiproc/compare.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # -*- coding: utf-8 -*-
3 | 
4 | import time
5 | from lib import init_redis, make_groups, make_graph, display
6 | 
7 | 
8 | if __name__ == "__main__":
9 |     r = init_redis('ssdc.conf')
10 |     while not r.exists('stop'):
11 |         if not r.exists('to_process') and not r.exists('all_keys_similar_new'):
12 |             make_groups(r)
13 |             break
14 |         else:
15 |             time.sleep(1)
16 |     make_graph(r)
17 |     display(r, True)
18 | 
--------------------------------------------------------------------------------
/multiproc/README.md:
--------------------------------------------------------------------------------
1 | Why
2 | ===
3 | 
4 | This code uses the same ideas as the standalone ssdc code, but it lets you store everything
5 | in a Redis database and compute the hashes and compare the samples with multiple processes at once.
6 | 
7 | Installation
8 | ============
9 | 
10 | `pip install -r requirements.txt`
11 | 
12 | Usage
13 | =====
14 | 
15 | 1. Run redis with the config file provided: `redis-server redis.conf`
16 | 2. Edit `run_compute.sh` and change the seq value to the number of CPU cores on the machine
17 | 3. Run `run_compute.sh`
18 | 4. Run `import_dir.py -r `
19 | 5. 
When finished (check if the `compute.py` processes are working), run `compare.py` 20 | -------------------------------------------------------------------------------- /multiproc/compute.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import time 5 | from lib import init_redis, prepare_hashes, find_matches 6 | 7 | 8 | def compute(r): 9 | while not r.exists('stop'): 10 | path_key = r.spop('to_process') 11 | if path_key is not None: 12 | buf = r.get(path_key) 13 | prepare_hashes(r, buf, path_key) 14 | r.delete(path_key) 15 | elif r.exists('all_keys_similar_new'): 16 | find_matches(r.spop('all_keys_similar_new'), r) 17 | else: 18 | time.sleep(1) 19 | 20 | 21 | if __name__ == "__main__": 22 | r = init_redis('ssdc.conf') 23 | r.delete('stop') 24 | compute(r) 25 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Brian Wallace 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /README.txt: -------------------------------------------------------------------------------- 1 | ssdeep Cluster 2 | ============== 3 | ssdeep Cluster clusters files using ssdeep as a comparison algorithm. Results are written out to a tar file, which puts 4 | the files into a directory with the files its comparable to. A file can be in multiple groups. I have found this tool 5 | to be helpful when needing to analyze a large number of samples, with an ever decreasing amount of time to do it in. 6 | 7 | Included in the resulting tar file is a .gexf file. This can be used to visualize the results in [Gephi](https://github.com/gephi/gephi). 8 | 9 | Installation 10 | ============ 11 | git clone https://github.com/bwall/ssdc.git 12 | cd ssdc 13 | sudo python setup.py install 14 | 15 | Examples 16 | ======== 17 | 18 | help 19 | ---- 20 | bwall@highwind:~$ ssdc -h 21 | usage: /usr/local/bin/ssdc [-h] [-v] [-r] [-o [output]] [-s] path [path ...] 
22 | 23 | Clusters files based on their ssdeep hash 24 | 25 | positional arguments: 26 | path Paths to files or directories to scan 27 | 28 | optional arguments: 29 | -h, --help show this help message and exit 30 | -v, --version show program's version number and exit 31 | -r, --recursive Scan paths recursively 32 | -o [output], --output [output] 33 | Path to write the resulting tarball to 34 | (default=output.tar) 35 | -s, --storefiles Store files in output tar 36 | 37 | /usr/local/bin/ssdc v1.1.0 by Brian Wallace (@botnet_hunter) 38 | -------------------------------------------------------------------------------- /multiproc/import_dir.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | from os.path import abspath, isfile, isdir, join 5 | from glob import iglob 6 | from argparse import ArgumentParser 7 | from lib import init_redis 8 | 9 | REDIS_HOST = '127.0.0.1' 10 | REDIS_PORT = 6389 11 | 12 | 13 | def enumerate_paths(path_list, recursive_scan): 14 | ret_paths = [] 15 | while path_list: 16 | path = path_list.pop() 17 | file_path = abspath(path) 18 | if isfile(file_path): 19 | ret_paths.append(file_path) 20 | elif isdir(file_path): 21 | for p in iglob(join(file_path, "*")): 22 | p = join(file_path, p) 23 | if isfile(p) or (isdir(p) and recursive_scan): 24 | path_list.append(p) 25 | return ret_paths 26 | 27 | if __name__ == "__main__": 28 | parser = ArgumentParser(description="Push a directory into redis.") 29 | parser.add_argument('path', metavar='path', type=str, nargs='+', help="Paths to files or directories to scan") 30 | parser.add_argument('-r', '--recursive', default=False, required=False, action='store_true', 31 | help="Scan paths recursively") 32 | args = parser.parse_args() 33 | root_path = args.path 34 | paths = enumerate_paths(root_path, args.recursive) 35 | 36 | r = init_redis('ssdc.conf') 37 | 38 | for p in paths: 39 | try: 40 | r.set(p, open(p, 'r').read()) 41 | r.sadd('to_process', p) 42 | except Exception as e: 43 | print(e) 44 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ssdeep Cluster 2 | ============== 3 | ssdeep Cluster clusters files using ssdeep as a comparison algorithm. Results are written out to a tar file, which puts 4 | the files into a directory with the files its comparable to. A file can be in multiple groups. I have found this tool 5 | to be helpful when needing to analyze a large number of samples, with an ever decreasing amount of time to do it in. 6 | 7 | Included in the resulting tar file is a .gexf file. This can be used to visualize the results in [Gephi](https://github.com/gephi/gephi). 8 | 9 | Installation 10 | ============ 11 | git clone https://github.com/bwall/ssdc.git 12 | cd ssdc 13 | sudo python setup.py install 14 | 15 | Examples 16 | ======== 17 | 18 | help 19 | ---- 20 | bwall@highwind:~$ ssdc -h 21 | usage: /usr/local/bin/ssdc [-h] [-v] [-r] [-o [output]] [-s] path [path ...] 
22 | 23 | Clusters files based on their ssdeep hash 24 | 25 | positional arguments: 26 | path Paths to files or directories to scan 27 | 28 | optional arguments: 29 | -h, --help show this help message and exit 30 | -v, --version show program's version number and exit 31 | -r, --recursive Scan paths recursively 32 | -o [output], --output [output] 33 | Path to write the resulting tarball to 34 | (default=output.tar) 35 | -s, --storefiles Store files in output tar 36 | 37 | /usr/local/bin/ssdc v1.1.0 by Brian Wallace (@botnet_hunter) 38 | -------------------------------------------------------------------------------- /multiproc/lib.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import networkx 5 | import hashlib 6 | import pydeep 7 | from struct import unpack 8 | import base64 9 | import re 10 | from ConfigParser import SafeConfigParser 11 | import redis 12 | 13 | # https://www.virusbtn.com/virusbulletin/archive/2015/11/vb201511-ssDeep 14 | # https://github.com/trendmicro/tlsh 15 | # https://drive.google.com/file/d/0B6FS3SVQ1i0GTXk5eDl3Y29QWlk/edit 16 | # https://www.usenix.org/system/files/conference/cset15/cset15-li.pdf 17 | 18 | 19 | def init_redis(configfile): 20 | config = SafeConfigParser() 21 | config.read('ssdc.conf') 22 | return redis.StrictRedis(host=config.get('redis', 'host'), port=config.get('redis', 'port')) 23 | 24 | 25 | def get_all_7_char_chunks(h): 26 | return set((unpack(" 1: 63 | cur_hash = similar_hashes.pop() 64 | cur_ssdeep = r.hget(cur_hash, 'ssdeep') 65 | p = r.pipeline(False) 66 | for sha256 in similar_hashes: 67 | score = pydeep.compare(cur_ssdeep, r.hget(sha256, 'ssdeep')) 68 | if score > 0: 69 | p.zadd('matches_{}'.format(cur_hash), score, sha256) 70 | p.zadd('matches_{}'.format(sha256), score, cur_hash) 71 | p.execute() 72 | 73 | 74 | def compute_all_similarities(r): 75 | for key in r.smembers('all_keys_similar'): 76 | find_matches(key, r) 77 | 78 | 79 | def clean_groups(r): 80 | for g in r.smembers('groups'): 81 | r.delete(g) 82 | r.delete('groups') 83 | r.delete('no_matches') 84 | 85 | 86 | def make_groups(r): 87 | clean_groups(r) 88 | all_hashes = r.smembers('hashes') 89 | while all_hashes: 90 | cur_hash = all_hashes.pop() 91 | matches = r.zrange('matches_{}'.format(cur_hash), 0, -1) 92 | if matches: 93 | if isinstance(matches, list): 94 | matches = set(matches) 95 | else: 96 | matches = set([matches]) 97 | all_hashes -= matches 98 | matches |= set([cur_hash]) 99 | else: 100 | # NOTE: Should we make a group? 
101 | # matches = set([cur_hash]) 102 | r.sadd('no_matches', cur_hash) 103 | continue 104 | key = 'group_{}'.format(r.scard('groups')) 105 | r.sadd('groups', key) 106 | r.sadd(key, *matches) 107 | 108 | 109 | def display(r, verbose=False): 110 | print("{0} files are in no group ".format(r.scard('no_matches'))) 111 | print("{0} files organized into {1} groups".format(r.scard('hashes') - r.scard('no_matches'), r.scard('groups'))) 112 | print("Groups distribution:") 113 | for group in r.smembers('groups'): 114 | if r.scard(group) > 1: 115 | print("Group {0} has {1} files".format(group, r.scard(group))) 116 | if verbose: 117 | for sha256 in r.smembers(group): 118 | print("\t{}".format(r.hget(sha256, 'path'))) 119 | 120 | 121 | def make_graph(r): 122 | g = networkx.Graph() 123 | groups = r.smembers('groups') 124 | for group in groups: 125 | if r.scard(group) < 2: 126 | continue 127 | g.add_node(group) 128 | for h in r.smembers(group): 129 | g.add_edge(group, h) 130 | 131 | networkx.write_gexf(g, './test.gexf') 132 | -------------------------------------------------------------------------------- /ssdc: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | import pydeep 3 | from os.path import abspath, isfile, isdir, join 4 | import json 5 | import StringIO 6 | import tarfile 7 | import hashlib 8 | import base64 9 | from struct import unpack 10 | from glob import iglob 11 | 12 | 13 | def get_all_7_char_chunks(h): 14 | return set((unpack("' \ 50 | '' \ 51 | 'bwall' \ 52 | '' \ 53 | '' \ 54 | '' \ 55 | '' \ 56 | '' \ 57 | '' \ 58 | '' 59 | 60 | key_index = 0 61 | for path_keys in bin_scores.keys(): 62 | gexf += '' \ 63 | '' \ 64 | ''.format(key_index, path_keys) 65 | ids[path_keys] = key_index 66 | key_index += 1 67 | 68 | gexf += '' \ 69 | '' 70 | 71 | edge_index = 0 72 | for path_key in bin_scores.keys(): 73 | for other_key in bin_scores[path_key].keys(): 74 | gexf += ''.format(edge_index, ids[path_key], 75 | ids[other_key], 76 | float(bin_scores[path_key] 77 | [other_key]) / 100) 78 | edge_index += 1 79 | 80 | gexf += '' \ 81 | '' \ 82 | '' 83 | 84 | return gexf 85 | 86 | 87 | def get_version(): 88 | return "1.1.0" 89 | 90 | if __name__ == "__main__": 91 | from argparse import ArgumentParser 92 | 93 | parser = ArgumentParser( 94 | prog=__file__, 95 | description="Clusters files based on their ssdeep hash", 96 | version="%(prog)s v" + get_version() + " by Brian Wallace (@botnet_hunter)", 97 | epilog="%(prog)s v" + get_version() + " by Brian Wallace (@botnet_hunter)" 98 | ) 99 | parser.add_argument('path', metavar='path', type=str, nargs='+', help="Paths to files or directories to scan") 100 | parser.add_argument('-r', '--recursive', default=False, required=False, action='store_true', 101 | help="Scan paths recursively") 102 | parser.add_argument('-o', '--output', metavar='output', type=str, nargs='?', default="output.tar", 103 | help="Path to write the resulting tarball to (default=output.tar)") 104 | parser.add_argument('-s', '--storefiles', default=False, required=False, action='store_true', 105 | help="Store files in output tar") 106 | 107 | args = parser.parse_args() 108 | root_path = args.path 109 | paths = enumerate_paths(root_path, args.recursive) 110 | hashes = {} 111 | sha256s = {} 112 | integerdb = {} 113 | 114 | matches = {} 115 | scores = {} 116 | 117 | def add_to_integer_db(block_size, chunk, path): 118 | global integerdb 119 | if block_size not in integerdb: 120 | integerdb[block_size] = {} 121 | 122 | similar_to = set() 123 
| for i in chunk: 124 | if i not in integerdb[block_size]: 125 | integerdb[block_size][i] = set() 126 | else: 127 | similar_to |= integerdb[block_size][i] 128 | integerdb[block_size][i].add(path) 129 | 130 | return similar_to 131 | 132 | for path in paths: 133 | hashes[path] = pydeep.hash_file(path) 134 | sha256s[path] = hashlib.sha256(file(path, 'rb').read()).hexdigest() 135 | block_size, chunk, double_chunk = preprocess_hash(hashes[path]) 136 | 137 | similar_to = add_to_integer_db(block_size, chunk, path) | add_to_integer_db(block_size * 2, double_chunk, path) 138 | 139 | h = hashes[path] 140 | matches[path] = set() 141 | for other in similar_to: 142 | score = pydeep.compare(h, hashes[other]) 143 | if score > 0: 144 | matches[path].add(other) 145 | matches[other].add(path) 146 | if path not in scores: 147 | scores[path] = {} 148 | if other not in scores[path]: 149 | scores[path][other] = score 150 | 151 | if other not in scores: 152 | scores[other] = {} 153 | if path not in scores[other]: 154 | scores[other][path] = score 155 | 156 | print "{0}\tSHA256: {1}\tssdeep: {2}".format(path, sha256s[path], hashes[path]) 157 | 158 | groups = [] 159 | for path in matches.keys(): 160 | in_a_group = False 161 | for g in xrange(len(groups)): 162 | if path in groups[g]: 163 | in_a_group = True 164 | continue 165 | should_add = True 166 | for h in groups[g]: 167 | if h not in matches[path]: 168 | should_add = False 169 | if should_add: 170 | groups[g].append(path) 171 | in_a_group = True 172 | if not in_a_group: 173 | groups.append([path]) 174 | 175 | for g in xrange(len(groups)): 176 | groups[g].sort() 177 | 178 | # Copy files to zipfile 179 | with tarfile.TarFile(args.output, mode='a') as mytar: 180 | if args.storefiles: 181 | for group in xrange(len(groups)): 182 | for path in groups[group]: 183 | mytar.add(path, arcname=join(str(group), sha256s[path])) 184 | 185 | gexf_tar_file = StringIO.StringIO() 186 | gexf_tar_file.write(generate_gexf(scores)) 187 | gexf_tar_file.seek(0) 188 | info = tarfile.TarInfo(name="file_distance.gexf") 189 | info.size = len(gexf_tar_file.buf) 190 | mytar.addfile(tarinfo=info, fileobj=gexf_tar_file) 191 | 192 | json_tar_file = StringIO.StringIO() 193 | json_tar_file.write(json.dumps({"groups": groups, "hashes": hashes})) 194 | json_tar_file.seek(0) 195 | info = tarfile.TarInfo(name="groups.json") 196 | info.size = len(json_tar_file.buf) 197 | mytar.addfile(tarinfo=info, fileobj=json_tar_file) 198 | 199 | print "{0} files organized into {1} groups".format(len(hashes), len(groups)) 200 | print "Groups distribution:" 201 | for group in xrange(len(groups)): 202 | print "Group {0} has {1} files".format(group, len(groups[group])) -------------------------------------------------------------------------------- /multiproc/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 
19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################ GENERAL ##################################### 39 | 40 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 41 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 42 | daemonize no 43 | 44 | # When running daemonized, Redis writes a pid file in /var/run/redis.pid by 45 | # default. You can specify a custom pid file location here. 46 | pidfile /var/run/redis.pid 47 | 48 | # Accept connections on the specified port, default is 6379. 49 | # If port 0 is specified Redis will not listen on a TCP socket. 50 | port 6389 51 | 52 | # TCP listen() backlog. 53 | # 54 | # In high requests-per-second environments you need an high backlog in order 55 | # to avoid slow clients connections issues. Note that the Linux kernel 56 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 57 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 58 | # in order to get the desired effect. 59 | tcp-backlog 511 60 | 61 | # By default Redis listens for connections from all the network interfaces 62 | # available on the server. It is possible to listen to just one or multiple 63 | # interfaces using the "bind" configuration directive, followed by one or 64 | # more IP addresses. 65 | # 66 | # Examples: 67 | # 68 | # bind 192.168.1.100 10.0.0.1 69 | # bind 127.0.0.1 70 | 71 | # Specify the path for the Unix socket that will be used to listen for 72 | # incoming connections. There is no default, so Redis will not listen 73 | # on a unix socket when not specified. 74 | # 75 | # unixsocket /tmp/redis.sock 76 | # unixsocketperm 700 77 | 78 | # Close the connection after a client is idle for N seconds (0 to disable) 79 | timeout 0 80 | 81 | # TCP keepalive. 82 | # 83 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 84 | # of communication. This is useful for two reasons: 85 | # 86 | # 1) Detect dead peers. 87 | # 2) Take the connection alive from the point of view of network 88 | # equipment in the middle. 89 | # 90 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 91 | # Note that to close the connection the double of the time is needed. 92 | # On other kernels the period depends on the kernel configuration. 93 | # 94 | # A reasonable value for this option is 60 seconds. 95 | tcp-keepalive 0 96 | 97 | # Specify the server verbosity level. 
98 | # This can be one of: 99 | # debug (a lot of information, useful for development/testing) 100 | # verbose (many rarely useful info, but not a mess like the debug level) 101 | # notice (moderately verbose, what you want in production probably) 102 | # warning (only very important / critical messages are logged) 103 | loglevel notice 104 | 105 | # Specify the log file name. Also the empty string can be used to force 106 | # Redis to log on the standard output. Note that if you use standard 107 | # output for logging but daemonize, logs will be sent to /dev/null 108 | logfile "" 109 | 110 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 111 | # and optionally update the other syslog parameters to suit your needs. 112 | # syslog-enabled no 113 | 114 | # Specify the syslog identity. 115 | # syslog-ident redis 116 | 117 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 118 | # syslog-facility local0 119 | 120 | # Set the number of databases. The default database is DB 0, you can select 121 | # a different one on a per-connection basis using SELECT where 122 | # dbid is a number between 0 and 'databases'-1 123 | databases 16 124 | 125 | ################################ SNAPSHOTTING ################################ 126 | # 127 | # Save the DB on disk: 128 | # 129 | # save 130 | # 131 | # Will save the DB if both the given number of seconds and the given 132 | # number of write operations against the DB occurred. 133 | # 134 | # In the example below the behaviour will be to save: 135 | # after 900 sec (15 min) if at least 1 key changed 136 | # after 300 sec (5 min) if at least 10 keys changed 137 | # after 60 sec if at least 10000 keys changed 138 | # 139 | # Note: you can disable saving completely by commenting out all "save" lines. 140 | # 141 | # It is also possible to remove all the previously configured save 142 | # points by adding a save directive with a single empty string argument 143 | # like in the following example: 144 | # 145 | # save "" 146 | 147 | #save 900 1 148 | #save 300 10 149 | #save 60 10000 150 | 151 | # By default Redis will stop accepting writes if RDB snapshots are enabled 152 | # (at least one save point) and the latest background save failed. 153 | # This will make the user aware (in a hard way) that data is not persisting 154 | # on disk properly, otherwise chances are that no one will notice and some 155 | # disaster will happen. 156 | # 157 | # If the background saving process will start working again Redis will 158 | # automatically allow writes again. 159 | # 160 | # However if you have setup your proper monitoring of the Redis server 161 | # and persistence, you may want to disable this feature so that Redis will 162 | # continue to work as usual even if there are problems with disk, 163 | # permissions, and so forth. 164 | stop-writes-on-bgsave-error yes 165 | 166 | # Compress string objects using LZF when dump .rdb databases? 167 | # For default that's set to 'yes' as it's almost always a win. 168 | # If you want to save some CPU in the saving child set it to 'no' but 169 | # the dataset will likely be bigger if you have compressible values or keys. 170 | rdbcompression yes 171 | 172 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 173 | # This makes the format more resistant to corruption but there is a performance 174 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 175 | # for maximum performances. 
176 | # 177 | # RDB files created with checksum disabled have a checksum of zero that will 178 | # tell the loading code to skip the check. 179 | rdbchecksum yes 180 | 181 | # The filename where to dump the DB 182 | dbfilename dump.rdb 183 | 184 | # The working directory. 185 | # 186 | # The DB will be written inside this directory, with the filename specified 187 | # above using the 'dbfilename' configuration directive. 188 | # 189 | # The Append Only File will also be created inside this directory. 190 | # 191 | # Note that you must specify a directory here, not a file name. 192 | dir ./ 193 | 194 | ################################# REPLICATION ################################# 195 | 196 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 197 | # another Redis server. A few things to understand ASAP about Redis replication. 198 | # 199 | # 1) Redis replication is asynchronous, but you can configure a master to 200 | # stop accepting writes if it appears to be not connected with at least 201 | # a given number of slaves. 202 | # 2) Redis slaves are able to perform a partial resynchronization with the 203 | # master if the replication link is lost for a relatively small amount of 204 | # time. You may want to configure the replication backlog size (see the next 205 | # sections of this file) with a sensible value depending on your needs. 206 | # 3) Replication is automatic and does not need user intervention. After a 207 | # network partition slaves automatically try to reconnect to masters 208 | # and resynchronize with them. 209 | # 210 | # slaveof 211 | 212 | # If the master is password protected (using the "requirepass" configuration 213 | # directive below) it is possible to tell the slave to authenticate before 214 | # starting the replication synchronization process, otherwise the master will 215 | # refuse the slave request. 216 | # 217 | # masterauth 218 | 219 | # When a slave loses its connection with the master, or when the replication 220 | # is still in progress, the slave can act in two different ways: 221 | # 222 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 223 | # still reply to client requests, possibly with out of date data, or the 224 | # data set may just be empty if this is the first synchronization. 225 | # 226 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 227 | # an error "SYNC with master in progress" to all the kind of commands 228 | # but to INFO and SLAVEOF. 229 | # 230 | slave-serve-stale-data yes 231 | 232 | # You can configure a slave instance to accept writes or not. Writing against 233 | # a slave instance may be useful to store some ephemeral data (because data 234 | # written on a slave will be easily deleted after resync with the master) but 235 | # may also cause problems if clients are writing to it because of a 236 | # misconfiguration. 237 | # 238 | # Since Redis 2.6 by default slaves are read-only. 239 | # 240 | # Note: read only slaves are not designed to be exposed to untrusted clients 241 | # on the internet. It's just a protection layer against misuse of the instance. 242 | # Still a read only slave exports by default all the administrative commands 243 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 244 | # security of read only slaves using 'rename-command' to shadow all the 245 | # administrative / dangerous commands. 246 | slave-read-only yes 247 | 248 | # Replication SYNC strategy: disk or socket. 
249 | # 250 | # ------------------------------------------------------- 251 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 252 | # ------------------------------------------------------- 253 | # 254 | # New slaves and reconnecting slaves that are not able to continue the replication 255 | # process just receiving differences, need to do what is called a "full 256 | # synchronization". An RDB file is transmitted from the master to the slaves. 257 | # The transmission can happen in two different ways: 258 | # 259 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 260 | # file on disk. Later the file is transferred by the parent 261 | # process to the slaves incrementally. 262 | # 2) Diskless: The Redis master creates a new process that directly writes the 263 | # RDB file to slave sockets, without touching the disk at all. 264 | # 265 | # With disk-backed replication, while the RDB file is generated, more slaves 266 | # can be queued and served with the RDB file as soon as the current child producing 267 | # the RDB file finishes its work. With diskless replication instead once 268 | # the transfer starts, new slaves arriving will be queued and a new transfer 269 | # will start when the current one terminates. 270 | # 271 | # When diskless replication is used, the master waits a configurable amount of 272 | # time (in seconds) before starting the transfer in the hope that multiple slaves 273 | # will arrive and the transfer can be parallelized. 274 | # 275 | # With slow disks and fast (large bandwidth) networks, diskless replication 276 | # works better. 277 | repl-diskless-sync no 278 | 279 | # When diskless replication is enabled, it is possible to configure the delay 280 | # the server waits in order to spawn the child that transfers the RDB via socket 281 | # to the slaves. 282 | # 283 | # This is important since once the transfer starts, it is not possible to serve 284 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 285 | # waits a delay in order to let more slaves arrive. 286 | # 287 | # The delay is specified in seconds, and by default is 5 seconds. To disable 288 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 289 | repl-diskless-sync-delay 5 290 | 291 | # Slaves send PINGs to server in a predefined interval. It's possible to change 292 | # this interval with the repl_ping_slave_period option. The default value is 10 293 | # seconds. 294 | # 295 | # repl-ping-slave-period 10 296 | 297 | # The following option sets the replication timeout for: 298 | # 299 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 300 | # 2) Master timeout from the point of view of slaves (data, pings). 301 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 302 | # 303 | # It is important to make sure that this value is greater than the value 304 | # specified for repl-ping-slave-period otherwise a timeout will be detected 305 | # every time there is low traffic between the master and the slave. 306 | # 307 | # repl-timeout 60 308 | 309 | # Disable TCP_NODELAY on the slave socket after SYNC? 310 | # 311 | # If you select "yes" Redis will use a smaller number of TCP packets and 312 | # less bandwidth to send data to slaves. But this can add a delay for 313 | # the data to appear on the slave side, up to 40 milliseconds with 314 | # Linux kernels using a default configuration. 
315 | # 316 | # If you select "no" the delay for data to appear on the slave side will 317 | # be reduced but more bandwidth will be used for replication. 318 | # 319 | # By default we optimize for low latency, but in very high traffic conditions 320 | # or when the master and slaves are many hops away, turning this to "yes" may 321 | # be a good idea. 322 | repl-disable-tcp-nodelay no 323 | 324 | # Set the replication backlog size. The backlog is a buffer that accumulates 325 | # slave data when slaves are disconnected for some time, so that when a slave 326 | # wants to reconnect again, often a full resync is not needed, but a partial 327 | # resync is enough, just passing the portion of data the slave missed while 328 | # disconnected. 329 | # 330 | # The bigger the replication backlog, the longer the time the slave can be 331 | # disconnected and later be able to perform a partial resynchronization. 332 | # 333 | # The backlog is only allocated once there is at least a slave connected. 334 | # 335 | # repl-backlog-size 1mb 336 | 337 | # After a master has no longer connected slaves for some time, the backlog 338 | # will be freed. The following option configures the amount of seconds that 339 | # need to elapse, starting from the time the last slave disconnected, for 340 | # the backlog buffer to be freed. 341 | # 342 | # A value of 0 means to never release the backlog. 343 | # 344 | # repl-backlog-ttl 3600 345 | 346 | # The slave priority is an integer number published by Redis in the INFO output. 347 | # It is used by Redis Sentinel in order to select a slave to promote into a 348 | # master if the master is no longer working correctly. 349 | # 350 | # A slave with a low priority number is considered better for promotion, so 351 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 352 | # pick the one with priority 10, that is the lowest. 353 | # 354 | # However a special priority of 0 marks the slave as not able to perform the 355 | # role of master, so a slave with priority of 0 will never be selected by 356 | # Redis Sentinel for promotion. 357 | # 358 | # By default the priority is 100. 359 | slave-priority 100 360 | 361 | # It is possible for a master to stop accepting writes if there are less than 362 | # N slaves connected, having a lag less or equal than M seconds. 363 | # 364 | # The N slaves need to be in "online" state. 365 | # 366 | # The lag in seconds, that must be <= the specified value, is calculated from 367 | # the last ping received from the slave, that is usually sent every second. 368 | # 369 | # This option does not GUARANTEE that N replicas will accept the write, but 370 | # will limit the window of exposure for lost writes in case not enough slaves 371 | # are available, to the specified number of seconds. 372 | # 373 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 374 | # 375 | # min-slaves-to-write 3 376 | # min-slaves-max-lag 10 377 | # 378 | # Setting one or the other to 0 disables the feature. 379 | # 380 | # By default min-slaves-to-write is set to 0 (feature disabled) and 381 | # min-slaves-max-lag is set to 10. 382 | 383 | ################################## SECURITY ################################### 384 | 385 | # Require clients to issue AUTH before processing any other 386 | # commands. This might be useful in environments in which you do not trust 387 | # others with access to the host running redis-server. 
388 | # 389 | # This should stay commented out for backward compatibility and because most 390 | # people do not need auth (e.g. they run their own servers). 391 | # 392 | # Warning: since Redis is pretty fast an outside user can try up to 393 | # 150k passwords per second against a good box. This means that you should 394 | # use a very strong password otherwise it will be very easy to break. 395 | # 396 | # requirepass foobared 397 | 398 | # Command renaming. 399 | # 400 | # It is possible to change the name of dangerous commands in a shared 401 | # environment. For instance the CONFIG command may be renamed into something 402 | # hard to guess so that it will still be available for internal-use tools 403 | # but not available for general clients. 404 | # 405 | # Example: 406 | # 407 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 408 | # 409 | # It is also possible to completely kill a command by renaming it into 410 | # an empty string: 411 | # 412 | # rename-command CONFIG "" 413 | # 414 | # Please note that changing the name of commands that are logged into the 415 | # AOF file or transmitted to slaves may cause problems. 416 | 417 | ################################### LIMITS #################################### 418 | 419 | # Set the max number of connected clients at the same time. By default 420 | # this limit is set to 10000 clients, however if the Redis server is not 421 | # able to configure the process file limit to allow for the specified limit 422 | # the max number of allowed clients is set to the current file limit 423 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 424 | # 425 | # Once the limit is reached Redis will close all the new connections sending 426 | # an error 'max number of clients reached'. 427 | # 428 | # maxclients 10000 429 | 430 | # Don't use more memory than the specified amount of bytes. 431 | # When the memory limit is reached Redis will try to remove keys 432 | # according to the eviction policy selected (see maxmemory-policy). 433 | # 434 | # If Redis can't remove keys according to the policy, or if the policy is 435 | # set to 'noeviction', Redis will start to reply with errors to commands 436 | # that would use more memory, like SET, LPUSH, and so on, and will continue 437 | # to reply to read-only commands like GET. 438 | # 439 | # This option is usually useful when using Redis as an LRU cache, or to set 440 | # a hard memory limit for an instance (using the 'noeviction' policy). 441 | # 442 | # WARNING: If you have slaves attached to an instance with maxmemory on, 443 | # the size of the output buffers needed to feed the slaves are subtracted 444 | # from the used memory count, so that network problems / resyncs will 445 | # not trigger a loop where keys are evicted, and in turn the output 446 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 447 | # of more keys, and so forth until the database is completely emptied. 448 | # 449 | # In short... if you have slaves attached it is suggested that you set a lower 450 | # limit for maxmemory so that there is some free RAM on the system for slave 451 | # output buffers (but this is not needed if the policy is 'noeviction'). 452 | # 453 | # maxmemory 454 | 455 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 456 | # is reached. 
You can select among five behaviors: 457 | # 458 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 459 | # allkeys-lru -> remove any key according to the LRU algorithm 460 | # volatile-random -> remove a random key with an expire set 461 | # allkeys-random -> remove a random key, any key 462 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 463 | # noeviction -> don't expire at all, just return an error on write operations 464 | # 465 | # Note: with any of the above policies, Redis will return an error on write 466 | # operations, when there are no suitable keys for eviction. 467 | # 468 | # At the date of writing these commands are: set setnx setex append 469 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 470 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 471 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 472 | # getset mset msetnx exec sort 473 | # 474 | # The default is: 475 | # 476 | # maxmemory-policy noeviction 477 | 478 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 479 | # algorithms (in order to save memory), so you can tune it for speed or 480 | # accuracy. For default Redis will check five keys and pick the one that was 481 | # used less recently, you can change the sample size using the following 482 | # configuration directive. 483 | # 484 | # The default of 5 produces good enough results. 10 Approximates very closely 485 | # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. 486 | # 487 | # maxmemory-samples 5 488 | 489 | ############################## APPEND ONLY MODE ############################### 490 | 491 | # By default Redis asynchronously dumps the dataset on disk. This mode is 492 | # good enough in many applications, but an issue with the Redis process or 493 | # a power outage may result into a few minutes of writes lost (depending on 494 | # the configured save points). 495 | # 496 | # The Append Only File is an alternative persistence mode that provides 497 | # much better durability. For instance using the default data fsync policy 498 | # (see later in the config file) Redis can lose just one second of writes in a 499 | # dramatic event like a server power outage, or a single write if something 500 | # wrong with the Redis process itself happens, but the operating system is 501 | # still running correctly. 502 | # 503 | # AOF and RDB persistence can be enabled at the same time without problems. 504 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 505 | # with the better durability guarantees. 506 | # 507 | # Please check http://redis.io/topics/persistence for more information. 508 | 509 | appendonly no 510 | 511 | # The name of the append only file (default: "appendonly.aof") 512 | 513 | appendfilename "appendonly.aof" 514 | 515 | # The fsync() call tells the Operating System to actually write data on disk 516 | # instead of waiting for more data in the output buffer. Some OS will really flush 517 | # data on disk, some other OS will just try to do it ASAP. 518 | # 519 | # Redis supports three different modes: 520 | # 521 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 522 | # always: fsync after every write to the append only log. Slow, Safest. 523 | # everysec: fsync only one time every second. Compromise. 524 | # 525 | # The default is "everysec", as that's usually the right compromise between 526 | # speed and data safety. 
It's up to you to understand if you can relax this to 527 | # "no" that will let the operating system flush the output buffer when 528 | # it wants, for better performances (but if you can live with the idea of 529 | # some data loss consider the default persistence mode that's snapshotting), 530 | # or on the contrary, use "always" that's very slow but a bit safer than 531 | # everysec. 532 | # 533 | # More details please check the following article: 534 | # http://antirez.com/post/redis-persistence-demystified.html 535 | # 536 | # If unsure, use "everysec". 537 | 538 | # appendfsync always 539 | appendfsync everysec 540 | # appendfsync no 541 | 542 | # When the AOF fsync policy is set to always or everysec, and a background 543 | # saving process (a background save or AOF log background rewriting) is 544 | # performing a lot of I/O against the disk, in some Linux configurations 545 | # Redis may block too long on the fsync() call. Note that there is no fix for 546 | # this currently, as even performing fsync in a different thread will block 547 | # our synchronous write(2) call. 548 | # 549 | # In order to mitigate this problem it's possible to use the following option 550 | # that will prevent fsync() from being called in the main process while a 551 | # BGSAVE or BGREWRITEAOF is in progress. 552 | # 553 | # This means that while another child is saving, the durability of Redis is 554 | # the same as "appendfsync none". In practical terms, this means that it is 555 | # possible to lose up to 30 seconds of log in the worst scenario (with the 556 | # default Linux settings). 557 | # 558 | # If you have latency problems turn this to "yes". Otherwise leave it as 559 | # "no" that is the safest pick from the point of view of durability. 560 | 561 | no-appendfsync-on-rewrite no 562 | 563 | # Automatic rewrite of the append only file. 564 | # Redis is able to automatically rewrite the log file implicitly calling 565 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 566 | # 567 | # This is how it works: Redis remembers the size of the AOF file after the 568 | # latest rewrite (if no rewrite has happened since the restart, the size of 569 | # the AOF at startup is used). 570 | # 571 | # This base size is compared to the current size. If the current size is 572 | # bigger than the specified percentage, the rewrite is triggered. Also 573 | # you need to specify a minimal size for the AOF file to be rewritten, this 574 | # is useful to avoid rewriting the AOF file even if the percentage increase 575 | # is reached but it is still pretty small. 576 | # 577 | # Specify a percentage of zero in order to disable the automatic AOF 578 | # rewrite feature. 579 | 580 | auto-aof-rewrite-percentage 100 581 | auto-aof-rewrite-min-size 64mb 582 | 583 | # An AOF file may be found to be truncated at the end during the Redis 584 | # startup process, when the AOF data gets loaded back into memory. 585 | # This may happen when the system where Redis is running 586 | # crashes, especially when an ext4 filesystem is mounted without the 587 | # data=ordered option (however this can't happen when Redis itself 588 | # crashes or aborts but the operating system still works correctly). 589 | # 590 | # Redis can either exit with an error when this happens, or load as much 591 | # data as possible (the default now) and start if the AOF file is found 592 | # to be truncated at the end. The following option controls this behavior. 
593 | # 594 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 595 | # the Redis server starts emitting a log to inform the user of the event. 596 | # Otherwise if the option is set to no, the server aborts with an error 597 | # and refuses to start. When the option is set to no, the user requires 598 | # to fix the AOF file using the "redis-check-aof" utility before to restart 599 | # the server. 600 | # 601 | # Note that if the AOF file will be found to be corrupted in the middle 602 | # the server will still exit with an error. This option only applies when 603 | # Redis will try to read more data from the AOF file but not enough bytes 604 | # will be found. 605 | aof-load-truncated yes 606 | 607 | ################################ LUA SCRIPTING ############################### 608 | 609 | # Max execution time of a Lua script in milliseconds. 610 | # 611 | # If the maximum execution time is reached Redis will log that a script is 612 | # still in execution after the maximum allowed time and will start to 613 | # reply to queries with an error. 614 | # 615 | # When a long running script exceeds the maximum execution time only the 616 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 617 | # used to stop a script that did not yet called write commands. The second 618 | # is the only way to shut down the server in the case a write command was 619 | # already issued by the script but the user doesn't want to wait for the natural 620 | # termination of the script. 621 | # 622 | # Set it to 0 or a negative value for unlimited execution without warnings. 623 | lua-time-limit 5000 624 | 625 | ################################ REDIS CLUSTER ############################### 626 | # 627 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 628 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 629 | # in order to mark it as "mature" we need to wait for a non trivial percentage 630 | # of users to deploy it in production. 631 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 632 | # 633 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 634 | # started as cluster nodes can. In order to start a Redis instance as a 635 | # cluster node enable the cluster support uncommenting the following: 636 | # 637 | # cluster-enabled yes 638 | 639 | # Every cluster node has a cluster configuration file. This file is not 640 | # intended to be edited by hand. It is created and updated by Redis nodes. 641 | # Every Redis Cluster node requires a different cluster configuration file. 642 | # Make sure that instances running in the same system do not have 643 | # overlapping cluster configuration file names. 644 | # 645 | # cluster-config-file nodes-6379.conf 646 | 647 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 648 | # for it to be considered in failure state. 649 | # Most other internal time limits are multiple of the node timeout. 650 | # 651 | # cluster-node-timeout 15000 652 | 653 | # A slave of a failing master will avoid to start a failover if its data 654 | # looks too old. 
655 | # 656 | # There is no simple way for a slave to actually have a exact measure of 657 | # its "data age", so the following two checks are performed: 658 | # 659 | # 1) If there are multiple slaves able to failover, they exchange messages 660 | # in order to try to give an advantage to the slave with the best 661 | # replication offset (more data from the master processed). 662 | # Slaves will try to get their rank by offset, and apply to the start 663 | # of the failover a delay proportional to their rank. 664 | # 665 | # 2) Every single slave computes the time of the last interaction with 666 | # its master. This can be the last ping or command received (if the master 667 | # is still in the "connected" state), or the time that elapsed since the 668 | # disconnection with the master (if the replication link is currently down). 669 | # If the last interaction is too old, the slave will not try to failover 670 | # at all. 671 | # 672 | # The point "2" can be tuned by user. Specifically a slave will not perform 673 | # the failover if, since the last interaction with the master, the time 674 | # elapsed is greater than: 675 | # 676 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 677 | # 678 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 679 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 680 | # slave will not try to failover if it was not able to talk with the master 681 | # for longer than 310 seconds. 682 | # 683 | # A large slave-validity-factor may allow slaves with too old data to failover 684 | # a master, while a too small value may prevent the cluster from being able to 685 | # elect a slave at all. 686 | # 687 | # For maximum availability, it is possible to set the slave-validity-factor 688 | # to a value of 0, which means, that slaves will always try to failover the 689 | # master regardless of the last time they interacted with the master. 690 | # (However they'll always try to apply a delay proportional to their 691 | # offset rank). 692 | # 693 | # Zero is the only value able to guarantee that when all the partitions heal 694 | # the cluster will always be able to continue. 695 | # 696 | # cluster-slave-validity-factor 10 697 | 698 | # Cluster slaves are able to migrate to orphaned masters, that are masters 699 | # that are left without working slaves. This improves the cluster ability 700 | # to resist to failures as otherwise an orphaned master can't be failed over 701 | # in case of failure if it has no working slaves. 702 | # 703 | # Slaves migrate to orphaned masters only if there are still at least a 704 | # given number of other working slaves for their old master. This number 705 | # is the "migration barrier". A migration barrier of 1 means that a slave 706 | # will migrate only if there is at least 1 other working slave for its master 707 | # and so forth. It usually reflects the number of slaves you want for every 708 | # master in your cluster. 709 | # 710 | # Default is 1 (slaves migrate only if their masters remain with at least 711 | # one slave). To disable migration just set it to a very large value. 712 | # A value of 0 can be set but is useful only for debugging and dangerous 713 | # in production. 714 | # 715 | # cluster-migration-barrier 1 716 | 717 | # By default Redis Cluster nodes stop accepting queries if they detect there 718 | # is at least an hash slot uncovered (no available node is serving it). 
719 | # This way if the cluster is partially down (for example a range of hash slots 720 | # are no longer covered) all the cluster becomes, eventually, unavailable. 721 | # It automatically returns available as soon as all the slots are covered again. 722 | # 723 | # However sometimes you want the subset of the cluster which is working, 724 | # to continue to accept queries for the part of the key space that is still 725 | # covered. In order to do so, just set the cluster-require-full-coverage 726 | # option to no. 727 | # 728 | # cluster-require-full-coverage yes 729 | 730 | # In order to setup your cluster make sure to read the documentation 731 | # available at http://redis.io web site. 732 | 733 | ################################## SLOW LOG ################################### 734 | 735 | # The Redis Slow Log is a system to log queries that exceeded a specified 736 | # execution time. The execution time does not include the I/O operations 737 | # like talking with the client, sending the reply and so forth, 738 | # but just the time needed to actually execute the command (this is the only 739 | # stage of command execution where the thread is blocked and can not serve 740 | # other requests in the meantime). 741 | # 742 | # You can configure the slow log with two parameters: one tells Redis 743 | # what is the execution time, in microseconds, to exceed in order for the 744 | # command to get logged, and the other parameter is the length of the 745 | # slow log. When a new command is logged the oldest one is removed from the 746 | # queue of logged commands. 747 | 748 | # The following time is expressed in microseconds, so 1000000 is equivalent 749 | # to one second. Note that a negative number disables the slow log, while 750 | # a value of zero forces the logging of every command. 751 | slowlog-log-slower-than 10000 752 | 753 | # There is no limit to this length. Just be aware that it will consume memory. 754 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 755 | slowlog-max-len 128 756 | 757 | ################################ LATENCY MONITOR ############################## 758 | 759 | # The Redis latency monitoring subsystem samples different operations 760 | # at runtime in order to collect data related to possible sources of 761 | # latency of a Redis instance. 762 | # 763 | # Via the LATENCY command this information is available to the user that can 764 | # print graphs and obtain reports. 765 | # 766 | # The system only logs operations that were performed in a time equal or 767 | # greater than the amount of milliseconds specified via the 768 | # latency-monitor-threshold configuration directive. When its value is set 769 | # to zero, the latency monitor is turned off. 770 | # 771 | # By default latency monitoring is disabled since it is mostly not needed 772 | # if you don't have latency issues, and collecting data has a performance 773 | # impact, that while very small, can be measured under big load. Latency 774 | # monitoring can easily be enabled at runtime using the command 775 | # "CONFIG SET latency-monitor-threshold " if needed. 776 | latency-monitor-threshold 0 777 | 778 | ############################# EVENT NOTIFICATION ############################## 779 | 780 | # Redis can notify Pub/Sub clients about events happening in the key space. 
############################# EVENT NOTIFICATION ##############################

# Redis can notify Pub/Sub clients about events happening in the key space.
# This feature is documented at http://redis.io/topics/notifications
#
# For instance, if keyspace events notification is enabled and a client
# performs a DEL operation on the key "foo" stored in database 0, two
# messages will be published via Pub/Sub:
#
# PUBLISH __keyspace@0__:foo del
# PUBLISH __keyevent@0__:del foo
#
# It is possible to select the events that Redis will notify among a set
# of classes. Every class is identified by a single character:
#
#  K     Keyspace events, published with __keyspace@<db>__ prefix.
#  E     Keyevent events, published with __keyevent@<db>__ prefix.
#  g     Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
#  $     String commands
#  l     List commands
#  s     Set commands
#  h     Hash commands
#  z     Sorted set commands
#  x     Expired events (events generated every time a key expires)
#  e     Evicted events (events generated when a key is evicted for maxmemory)
#  A     Alias for g$lshzxe, so that the "AKE" string means all the events.
#
# The "notify-keyspace-events" directive takes as its argument a string
# composed of zero or more of the characters above. The empty string means
# that notifications are disabled.
#
# Example: to enable list and generic events, from the point of view of the
# event name, use:
#
#  notify-keyspace-events Elg
#
# Example 2: to get the stream of expired keys by subscribing to the channel
# name __keyevent@0__:expired, use:
#
#  notify-keyspace-events Ex
#
# By default all notifications are disabled because most users don't need
# this feature and the feature has some overhead. Note that if you don't
# specify at least one of K or E, no events will be delivered.
notify-keyspace-events ""

############################### ADVANCED CONFIG ###############################

# Hashes are encoded using a memory-efficient data structure when they have a
# small number of entries, and the biggest entry does not exceed a given
# threshold. These thresholds can be configured using the following directives.
hash-max-ziplist-entries 512
hash-max-ziplist-value 64

# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64

# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit on the size of the
# set in order to use this special memory-saving encoding.
set-max-intset-entries 512

# Similarly to hashes and lists, sorted sets are also specially encoded in
# order to save a lot of space. This encoding is only used when the length and
# elements of a sorted set are below the following limits:
zset-max-ziplist-entries 128
zset-max-ziplist-value 64

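# To check which encoding a given key is currently using, the OBJECT ENCODING
# command can be handy (an illustrative redis-cli session with made-up key
# names; the reported encoding names apply to the Redis versions this file
# targets):
#
#   redis-cli rpush mylist a b c
#   redis-cli object encoding mylist     -> "ziplist"
#   redis-cli sadd myints 1 2 3
#   redis-cli object encoding myints     -> "intset"
#
# Once a key grows past the limits above, Redis transparently converts it to
# the general-purpose (larger) encoding.
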
# HyperLogLog sparse representation bytes limit. The limit includes the
# 16 byte header. When a HyperLogLog using the sparse representation crosses
# this limit, it is converted into the dense representation.
#
# A value greater than 16000 is totally useless, since at that point the
# dense representation is more memory efficient.
#
# The suggested value is ~ 3000 in order to have the benefits of
# the space-efficient encoding without slowing down too much PFADD,
# which is O(N) with the sparse encoding. The value can be raised to
# ~ 10000 when CPU is not a concern, but space is, and the data set is
# composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
hll-sparse-max-bytes 3000

# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never completed and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with a 2 millisecond delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes

# The client output buffer limits can be used to force disconnection of clients
# that are not reading data from the server fast enough for some reason (a
# common reason is that a Pub/Sub client can't consume messages as fast as the
# publisher can produce them).
#
# The limit can be set differently for the three different classes of clients:
#
# normal -> normal clients including MONITOR clients
# slave  -> slave clients
# pubsub -> clients subscribed to at least one pubsub channel or pattern
#
# The syntax of every client-output-buffer-limit directive is the following:
#
# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
#
# A client is immediately disconnected once the hard limit is reached, or if
# the soft limit is reached and remains reached for the specified number of
# seconds (continuously).
# So for instance if the hard limit is 32 megabytes and the soft limit is
# 16 megabytes / 10 seconds, the client will get disconnected immediately
# if the size of the output buffers reaches 32 megabytes, but will also get
# disconnected if the client reaches 16 megabytes and continuously stays over
# the limit for 10 seconds.
#
# By default normal clients are not limited because they don't receive data
# without asking (in a push way), but just after a request, so only
# asynchronous clients may create a scenario where data is requested faster
# than it can be read.
#
# Instead there is a default limit for pubsub and slave clients, since
# subscribers and slaves receive data in a push fashion.
#
# Both the hard and the soft limit can be disabled by setting them to zero.
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

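# As an illustration of the syntax documented above (hypothetical values, not
# a tuning recommendation), a stricter policy for slow subscribers could be
# written as:
#
#   client-output-buffer-limit pubsub 16mb 4mb 30
#
# i.e. disconnect a subscriber as soon as its pending output reaches 16 MB, or
# if it stays above 4 MB for 30 consecutive seconds.
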
# Redis calls an internal function to perform many background tasks, like
# closing connections of clients that have timed out, purging expired keys
# that are never requested, and so forth.
#
# Not all tasks are performed with the same frequency, but Redis checks for
# tasks to perform according to the specified "hz" value.
#
# By default "hz" is set to 10. Raising the value will use more CPU when
# Redis is idle, but at the same time will make Redis more responsive when
# there are many keys expiring at the same time, and timeouts may be
# handled with more precision.
#
# The range is between 1 and 500; however, a value over 100 is usually not
# a good idea. Most users should use the default of 10 and raise this up to
# 100 only in environments where very low latency is required.
hz 10

# When a child rewrites the AOF file, if the following option is enabled
# the file will be fsync-ed every 32 MB of data generated. This is useful
# in order to commit the file to the disk more incrementally and avoid
# big latency spikes.
aof-rewrite-incremental-fsync yes

--------------------------------------------------------------------------------