├── Dockerfile ├── README.md ├── config ├── comment.sty ├── gsad ├── redis.conf └── texlive.repo ├── openvas-docker-setup.sh └── run.sh /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:latest 2 | MAINTAINER Support 3 | 4 | #VOLUME ["/var/lib/openvas"] 5 | 6 | ADD run.sh /run.sh 7 | ADD openvas-docker-setup.sh /openvas-docker-setup.sh 8 | ADD config/redis.conf /etc/redis.conf 9 | ADD config/texlive.repo /etc/yum.repos.d/texlive.repo 10 | 11 | 12 | 13 | RUN /openvas-docker-setup.sh && rm -f /openvas-docker-setup.sh 14 | 15 | CMD /run.sh 16 | EXPOSE 443 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **Openvas Docker container** 2 | 3 | This container is based on Centos 7 for FIPS-140-2 compliance. It is a self contained Openvas Scanner with web console on port 443. 4 | 5 | 6 | **Launch** 7 | 8 | docker run -d -p 443:443 --name openvas atomicorp/openvas 9 | 10 | 11 | https://<host>/ 12 | Default login / password: admin / admin 13 | 14 | **Launch with a Volume** 15 | 16 | docker volume create openvas 17 | 18 | docker run -d -p 443:443 -v openvas:/var/lib/openvas/mgr --name openvas atomicorp/openvas 19 | 20 | **Set Admin Password** 21 | 22 | docker run -d -p 443:443 -e OV_PASSWORD=iliketurtles --name openvas atomicorp/openvas 23 | 24 | **Update NVT data** 25 | 26 | Note: This process may take some time. 
27 | 28 | docker run -d -p 443:443 -e OV_UPDATE=yes --name openvas atomicorp/openvas 29 | 30 | 31 | **Attach to running** 32 | 33 | docker exec -it openvas bash 34 | 35 | 36 | **Thanks** 37 | 38 | Michael Meyer @Greenbone 39 | 40 | Jan-Oliver Wagner @Greenbone 41 | 42 | Everyone at Greenbone that made this project possible 43 | 44 | The Arachni Project 45 | 46 | Openvas Docker creators used as a reference: Mike Splain, William Collani, Serge Katzmann, and Daniel Popescu 47 | -------------------------------------------------------------------------------- /config/comment.sty: -------------------------------------------------------------------------------- 1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 2 | % Comment.sty version 3.8, July 2016 3 | % copyright 1998-2016 Victor Eijkhout 4 | % 5 | % Purpose: 6 | % selectively in/exclude pieces of text: the user can define new 7 | % comment versions, and each is controlled separately. 8 | % Special comments can be defined where the user specifies the 9 | % action that is to be taken with each comment line. 10 | % 11 | % Author 12 | % Victor Eijkhout 13 | % Texas Advanced Computing Center 14 | % The University of Texas at Austin 15 | % Austin TX 78758 16 | % USA 17 | % 18 | % victor@eijkhout.net 19 | % 20 | % This program is free software; you can redistribute it and/or 21 | % modify it under the terms of the GNU General Public License 22 | % as published by the Free Software Foundation; either version 2 23 | % of the License, or (at your option) any later version. 24 | % 25 | % This program is distributed in the hope that it will be useful, 26 | % but WITHOUT ANY WARRANTY; without even the implied warranty of 27 | % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 28 | % GNU General Public License for more details. 
29 | % 30 | % For a copy of the GNU General Public License, write to the 31 | % Free Software Foundation, Inc., 32 | % 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA, 33 | % or find it on the net, for instance at 34 | % http://www.gnu.org/copyleft/gpl.html 35 | % 36 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% 37 | % 38 | % Usage: all text included between 39 | % \begin{comment} 40 | % ... 41 | % \end{comment} 42 | % is discarded. 43 | % 44 | % The opening and closing commands should appear on a line 45 | % of their own. No starting spaces, nothing after it. 46 | % This environment should work with arbitrary amounts 47 | % of comment, and the comment can be arbitrary text. 48 | % 49 | % Other `comment' environments are defined by 50 | % and are selected/deselected with 51 | % \includecomment{versiona} 52 | % \excludecoment{versionb} 53 | % 54 | % These environments are used as 55 | % \begin{versiona} ... \end{versiona} 56 | % with the opening and closing commands again on a line of 57 | % their own. 58 | % 59 | % This is not a LaTeX environment: for an included comment, the 60 | % \begin and \end lines act as if they don't exist. 61 | % In particular, they don't imply grouping, so assignments 62 | % &c are not local. 63 | % 64 | %% 65 | %% Guide to special effects 66 | %% 67 | % To understand what happens here, you need to know just a bit about 68 | % the implementation. Lines inside a comment are scooped up one at a 69 | % time, and written to an external file. This file can then be 70 | % included, or ignored. Or you can do stuff with it as you'll see now. 71 | % 72 | % Special comments are defined as 73 | % \specialcomment{name}{before commands}{after commands} 74 | % where the second and third arguments are executed before 75 | % and after each comment block. You can use this for global 76 | % formatting commands. 
77 | % To keep definitions &c local, you can include \begingroup 78 | % in the `before commands' and \endgroup in the `after commands'. 79 | % ex: 80 | % \specialcomment{smalltt} 81 | % {\begingroup\ttfamily\footnotesize}{\endgroup} 82 | % Of course, in this case you could probably have used the standard 83 | % LaTeX \newenvironment. 84 | % 85 | % With \specialcomment you do *not* have to do an additional 86 | % \includecomment{smalltt} 87 | % To remove 'smalltt' blocks, give \excludecomment{smalltt} 88 | % after the definition. 89 | % 90 | % The comment environments use two auxiliary commands. You can get 91 | % nifty special effects by redefining them. 92 | % 1/ the commented text is written to an external file. Default definition: 93 | % \def\CommentCutFile{comment.cut} 94 | % 2/ included comments are processed like this: 95 | % \def\ProcessCutFile{\input{\CommentCutFile}\relax} 96 | % and excluded files have 97 | % \def\ProcessCutFile{} 98 | % 99 | % Fun use of special comments: the inclusion of the comment is done 100 | % by \ProcessCutFile, so you can redefine that: 101 | % \specialcomment{mathexamplewithcode} 102 | % {\begingroup\def\ProcessCutFile{}} %1 103 | % {\verbatiminput{\CommentCutFile} %2 104 | % \endgroup 105 | % This gives: 106 | % \begin{equation} \input{\CommentCutFile} \end{equation} 107 | % } 108 | % 1: do not standard include the file 109 | % 2: input it verbatim, then again inside display math 110 | % 111 | % You can also apply processing to each line. 112 | % By defining a control sequence 113 | % \def\Thiscomment##1{...} in the before commands the user can 114 | % specify what is to be done with each comment line. 
If something 115 | % needs to be written to file, use \WriteCommentLine{the stuff} 116 | % Example: 117 | % \specialcomment{underlinecomment} 118 | % {\def\ThisComment##1{\WriteCommentLine{\underline{##1}\par}} 119 | % \par} 120 | % {\par} 121 | % 122 | % Trick for short in/exclude macros (such as \maybe{this snippet}): 123 | %\includecomment{cond} 124 | %\newcommand{\maybe}[1]{} 125 | %\begin{cond} 126 | %\renewcommand{\maybe}[1]{#1} 127 | %\end{cond} 128 | % 129 | % Changes in 3.8 130 | % - utf8 is now correctly handled, at least if you use eTeX. 131 | % (Thanks Henry Gregory for the solution) 132 | % Changes in 3.7 133 | % - only LaTeX support from now on 134 | % - code cleanup, and improvements on \specialcomment 135 | % - cleanup of the docs. 136 | % Changed in 3.6 137 | % - documentation update 138 | % - comment file inclusion is now a customizable command 139 | % Changes in 3.5 140 | % - corrected typo in header. 141 | % - changed author email 142 | % - corrected \specialcomment yet again. 143 | % - fixed excludecomment of an earlier defined environment. 144 | % Changes in 3.4 145 | % - added GNU public license 146 | % - added \processcomment, because Ivo's fix (above) brought an 147 | % inconsistency to light. 148 | % Changes in 3.3 149 | % - updated author's address again 150 | % - parametrised \CommentCutFile 151 | % Changes in 3.2 152 | % - \specialcomment brought up to date (thanks to Ivo Welch). 153 | % Changes in version 3.1 154 | % - updated author's address 155 | % - cleaned up some code 156 | % - trailing contents on \begin{env} line is always discarded 157 | % even if you've done \includecomment{env} 158 | % - comments no longer define grouping!! you can even 159 | % \includecomment{env} 160 | % \begin{env} 161 | % \begin{itemize} 162 | % \end{env} 163 | % Isn't that something ... 164 | % - included comments are written to file and input again. 
165 | % 166 | % Known bugs: 167 | % - excludecomment leads to one superfluous space 168 | % - processcomment leads to a superfluous line break at the start 169 | % 170 | \def\makeinnocent#1{\catcode`#1=12 } 171 | \def\csarg#1#2{\expandafter#1\csname#2\endcsname} 172 | \def\latexname{lplain}\def\latexename{LaTeX2e} 173 | \newwrite\CommentStream 174 | \def\DefaultCutFileName{\def\CommentCutFile{comment.cut}} 175 | \DefaultCutFileName 176 | 177 | % begin / end processing 178 | % 179 | % this contains the only real begin/endgroup commands, to keep the 180 | % catcode changes local. 181 | \def\ProcessComment#1% start it all of 182 | {\def\CurrentComment{#1}% 183 | \begingroup 184 | \let\do\makeinnocent \dospecials 185 | \makeinnocent\^^L% and whatever other special cases 186 | \endlinechar`\^^M\relax \catcode`\^^M=12\relax \xComment} 187 | {\catcode`\^^M=12 \endlinechar=-1 % 188 | \gdef\xComment#1^^M{\ProcessCommentLine} 189 | \gdef\ProcessCommentLine#1^^M{\def\test{#1} 190 | \csarg\ifx{End\CurrentComment Test}\test 191 | \edef\next{\endgroup\noexpand\EndOfComment{\CurrentComment}}% 192 | \else \ThisComment{#1}\let\next\ProcessCommentLine 193 | \fi \next} 194 | } 195 | 196 | %% 197 | %% Initial action: SetUpCutFile opens the CommentCutFile 198 | %% hook for initial actions: PrepareCutFile, default null 199 | %% 200 | \def\SetUpCutFile 201 | {\immediate\openout\CommentStream=\CommentCutFile 202 | \PrepareCutFile} 203 | \def\PrepareCutFile{} 204 | 205 | %% 206 | %% Each line action: ThisComment, 207 | %% default: WriteCommentLine on line contents 208 | %% version 3.8: write unexpanded if using eTeX 209 | %% 210 | \expandafter\ifx\csname eTeXversion\endcsname\relax 211 | \long\def\WriteCommentLine#1{\immediate\write\CommentStream{#1}} 212 | \else 213 | \long\def\WriteCommentLine#1{\immediate\write\CommentStream{\unexpanded{#1}}} 214 | \fi 215 | \let\ThisComment\WriteCommentLine 216 | 217 | %% 218 | %% Final action: ProcessCutFile 219 | %% hook for final action before file 
closing: FinalizeCutFile, default null 220 | %% 221 | \def\ProcessCutFile 222 | {\message{Straight input of \CommentCutFile.}% 223 | \input{\CommentCutFile}\relax} 224 | \def\CloseAndInputCutFile 225 | {\FinalizeCutFile 226 | \immediate\closeout\CommentStream 227 | \ProcessCutFile} 228 | \def\FinalizeCutFile{} 229 | 230 | %% 231 | %% Define the different comment types 232 | %% 233 | % included comments: all the default actions 234 | \def\includecomment 235 | #1{\message{Include comment '#1'}% 236 | \csarg\def{After#1Comment}{\CloseAndInputCutFile} 237 | \csarg\def{#1}{\endgroup \message{Including '#1' comment.}% 238 | \DefaultCutFileName \SetUpCutFile \ProcessComment{#1}}% 239 | \CommentEndDef{#1}} 240 | % excluded comment: also default 241 | \def\excludecomment 242 | #1{\message{Excluding comment '#1'}% 243 | \csarg\def{#1}{\endgroup \message{Excluding '#1' comment.}% 244 | \begingroup 245 | \DefaultCutFileName \def\ProcessCutFile{}% 246 | \def\ThisComment####1{}\ProcessComment{#1}}% 247 | \csarg\def{After#1Comment}{\CloseAndInputCutFile \endgroup} 248 | \CommentEndDef{#1}} 249 | % special comment 250 | \long\def\specialcomment 251 | #1#2#3{\message{Special comment '#1'}% 252 | \csarg\def{#1}{\endgroup \message{Processing '#1' comment.}% 253 | \DefaultCutFileName 254 | #2\relax \SetUpCutFile 255 | % #2 before SetUp, so we can do renaming. 
256 | \message{Comment '#1' writing to \CommentCutFile.}% 257 | \ProcessComment{#1}}% 258 | \csarg\def{After#1Comment}{\CloseAndInputCutFile #3}% 259 | \CommentEndDef{#1}} 260 | \long\def\generalcomment 261 | #1#2#3{\message{General comment '#1'}% 262 | \csarg\def{#1}{\endgroup % counter the environment open of LaTeX 263 | #2 \relax \SetUpCutFile \ProcessComment{#1}}% 264 | \csarg\def{After#1Comment}{\CloseAndInputCutFile #3}% 265 | \CommentEndDef{#1}} 266 | \long\def\processcomment 267 | #1#2#3#4{\message{Lines-Processing comment '#1'}% 268 | \csarg\def{#1}{\endgroup \SetUpCutFile #2\relax 269 | \ProcessComment{#1}}% 270 | \csarg\def{After#1Comment}{#3\CloseAndInputCutFile #4}% 271 | \CommentEndDef{#1}} 272 | \def\leveledcomment 273 | #1#2{\message{Include comment '#1' up to level '#2'}% 274 | %\csarg\newif{if#1IsStreamingComment} 275 | %\csarg\newif{if#1IsLeveledComment} 276 | %\csname #1IsLeveledCommenttrue\endcsname 277 | \csarg\let{After#1Comment}\CloseAndInputCutFile 278 | \csarg\def{#1}{\SetUpCutFile 279 | \ProcessCommentWithArg{#1}}% 280 | \CommentEndDef{#1}} 281 | 282 | \makeatletter 283 | \def\EndOfComment#1{% 284 | \csname After#1Comment\endcsname 285 | % sabotage LaTeX's environment testing 286 | \begingroup\def\@currenvir{#1}\end{#1}} 287 | \def\CommentEndDef#1{{\escapechar=-1\relax 288 | \csarg\xdef{End#1Test}{\string\\end\string\{#1\string\}}% 289 | }} 290 | \makeatother 291 | 292 | \excludecomment{comment} 293 | 294 | \endinput 295 | -------------------------------------------------------------------------------- /config/gsad: -------------------------------------------------------------------------------- 1 | OPTIONS="" 2 | # 3 | # The address the Greenbone Security Assistant will listen on. 4 | # 5 | GSA_ADDRESS=0.0.0.0 6 | # 7 | # The port the Greenbone Security Assistant will listen on. 
8 | # 9 | GSA_PORT=443 10 | 11 | 12 | -------------------------------------------------------------------------------- /config/redis.conf: -------------------------------------------------------------------------------- 1 | # Redis configuration file example. 2 | # 3 | # Note that in order to read the configuration file, Redis must be 4 | # started with the file path as first argument: 5 | # 6 | # ./redis-server /path/to/redis.conf 7 | 8 | # Note on units: when memory size is needed, it is possible to specify 9 | # it in the usual form of 1k 5GB 4M and so forth: 10 | # 11 | # 1k => 1000 bytes 12 | # 1kb => 1024 bytes 13 | # 1m => 1000000 bytes 14 | # 1mb => 1024*1024 bytes 15 | # 1g => 1000000000 bytes 16 | # 1gb => 1024*1024*1024 bytes 17 | # 18 | # units are case insensitive so 1GB 1Gb 1gB are all the same. 19 | 20 | ################################## INCLUDES ################################### 21 | 22 | # Include one or more other config files here. This is useful if you 23 | # have a standard template that goes to all Redis servers but also need 24 | # to customize a few per-server settings. Include files can include 25 | # other files, so use this wisely. 26 | # 27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE" 28 | # from admin or Redis Sentinel. Since Redis always uses the last processed 29 | # line as value of a configuration directive, you'd better put includes 30 | # at the beginning of this file to avoid overwriting config change at runtime. 31 | # 32 | # If instead you are interested in using includes to override configuration 33 | # options, it is better to use include as the last line. 34 | # 35 | # include /path/to/local.conf 36 | # include /path/to/other.conf 37 | 38 | ################################## NETWORK ##################################### 39 | 40 | # By default, if no "bind" configuration directive is specified, Redis listens 41 | # for connections from all the network interfaces available on the server. 
42 | # It is possible to listen to just one or multiple selected interfaces using 43 | # the "bind" configuration directive, followed by one or more IP addresses. 44 | # 45 | # Examples: 46 | # 47 | # bind 192.168.1.100 10.0.0.1 48 | # bind 127.0.0.1 ::1 49 | # 50 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the 51 | # internet, binding to all the interfaces is dangerous and will expose the 52 | # instance to everybody on the internet. So by default we uncomment the 53 | # following bind directive, that will force Redis to listen only into 54 | # the IPv4 loopback interface address (this means Redis will be able to 55 | # accept connections only from clients running into the same computer it 56 | # is running). 57 | # 58 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 59 | # JUST COMMENT THE FOLLOWING LINE. 60 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 61 | bind 127.0.0.1 62 | 63 | # Protected mode is a layer of security protection, in order to avoid that 64 | # Redis instances left open on the internet are accessed and exploited. 65 | # 66 | # When protected mode is on and if: 67 | # 68 | # 1) The server is not binding explicitly to a set of addresses using the 69 | # "bind" directive. 70 | # 2) No password is configured. 71 | # 72 | # The server only accepts connections from clients connecting from the 73 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 74 | # sockets. 75 | # 76 | # By default protected mode is enabled. You should disable it only if 77 | # you are sure you want clients from other hosts to connect to Redis 78 | # even if no authentication is configured, nor a specific set of interfaces 79 | # are explicitly listed using the "bind" directive. 80 | # protected-mode yes 81 | 82 | # Accept connections on the specified port, default is 6379 (IANA #815344). 83 | # If port 0 is specified Redis will not listen on a TCP socket. 
84 | # port 6379 85 | port 0 86 | 87 | # TCP listen() backlog. 88 | # 89 | # In high requests-per-second environments you need an high backlog in order 90 | # to avoid slow clients connections issues. Note that the Linux kernel 91 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 92 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 93 | # in order to get the desired effect. 94 | tcp-backlog 511 95 | 96 | # Unix socket. 97 | # 98 | # Specify the path for the Unix socket that will be used to listen for 99 | # incoming connections. There is no default, so Redis will not listen 100 | # on a unix socket when not specified. 101 | # 102 | # unixsocket /tmp/redis.sock 103 | # unixsocketperm 700 104 | unixsocket /tmp/redis.sock 105 | unixsocketperm 700 106 | 107 | # Close the connection after a client is idle for N seconds (0 to disable) 108 | timeout 0 109 | 110 | # TCP keepalive. 111 | # 112 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 113 | # of communication. This is useful for two reasons: 114 | # 115 | # 1) Detect dead peers. 116 | # 2) Take the connection alive from the point of view of network 117 | # equipment in the middle. 118 | # 119 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 120 | # Note that to close the connection the double of the time is needed. 121 | # On other kernels the period depends on the kernel configuration. 122 | # 123 | # A reasonable value for this option is 300 seconds, which is the new 124 | # Redis default starting with Redis 3.2.1. 125 | tcp-keepalive 300 126 | 127 | ################################# GENERAL ##################################### 128 | 129 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 130 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 
131 | daemonize no 132 | 133 | # If you run Redis from upstart or systemd, Redis can interact with your 134 | # supervision tree. Options: 135 | # supervised no - no supervision interaction 136 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 137 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 138 | # supervised auto - detect upstart or systemd method based on 139 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 140 | # Note: these supervision methods only signal "process is ready." 141 | # They do not enable continuous liveness pings back to your supervisor. 142 | #supervised no 143 | 144 | # If a pid file is specified, Redis writes it where specified at startup 145 | # and removes it at exit. 146 | # 147 | # When the server runs non daemonized, no pid file is created if none is 148 | # specified in the configuration. When the server is daemonized, the pid file 149 | # is used even if not specified, defaulting to "/var/run/redis.pid". 150 | # 151 | # Creating a pid file is best effort: if Redis is not able to create it 152 | # nothing bad happens, the server will start and run normally. 153 | pidfile /var/run/redis/redis.pid 154 | 155 | # Specify the server verbosity level. 156 | # This can be one of: 157 | # debug (a lot of information, useful for development/testing) 158 | # verbose (many rarely useful info, but not a mess like the debug level) 159 | # notice (moderately verbose, what you want in production probably) 160 | # warning (only very important / critical messages are logged) 161 | loglevel notice 162 | 163 | # Specify the log file name. Also the empty string can be used to force 164 | # Redis to log on the standard output. 
Note that if you use standard 165 | # output for logging but daemonize, logs will be sent to /dev/null 166 | logfile /var/log/redis/redis.log 167 | 168 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 169 | # and optionally update the other syslog parameters to suit your needs. 170 | # syslog-enabled no 171 | 172 | # Specify the syslog identity. 173 | # syslog-ident redis 174 | 175 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 176 | # syslog-facility local0 177 | 178 | # Set the number of databases. The default database is DB 0, you can select 179 | # a different one on a per-connection basis using SELECT where 180 | # dbid is a number between 0 and 'databases'-1 181 | # databases 16 182 | databases 513 183 | 184 | ################################ SNAPSHOTTING ################################ 185 | # 186 | # Save the DB on disk: 187 | # 188 | # save 189 | # 190 | # Will save the DB if both the given number of seconds and the given 191 | # number of write operations against the DB occurred. 192 | # 193 | # In the example below the behaviour will be to save: 194 | # after 900 sec (15 min) if at least 1 key changed 195 | # after 300 sec (5 min) if at least 10 keys changed 196 | # after 60 sec if at least 10000 keys changed 197 | # 198 | # Note: you can disable saving completely by commenting out all "save" lines. 199 | # 200 | # It is also possible to remove all the previously configured save 201 | # points by adding a save directive with a single empty string argument 202 | # like in the following example: 203 | # 204 | # save "" 205 | 206 | # Using save statements causing a freeze/hang in the connection between openvas-scanner to redis 207 | #save 900 1 208 | #save 300 10 209 | #save 60 10000 210 | 211 | # By default Redis will stop accepting writes if RDB snapshots are enabled 212 | # (at least one save point) and the latest background save failed. 
213 | # This will make the user aware (in a hard way) that data is not persisting 214 | # on disk properly, otherwise chances are that no one will notice and some 215 | # disaster will happen. 216 | # 217 | # If the background saving process will start working again Redis will 218 | # automatically allow writes again. 219 | # 220 | # However if you have setup your proper monitoring of the Redis server 221 | # and persistence, you may want to disable this feature so that Redis will 222 | # continue to work as usual even if there are problems with disk, 223 | # permissions, and so forth. 224 | stop-writes-on-bgsave-error yes 225 | 226 | # Compress string objects using LZF when dump .rdb databases? 227 | # For default that's set to 'yes' as it's almost always a win. 228 | # If you want to save some CPU in the saving child set it to 'no' but 229 | # the dataset will likely be bigger if you have compressible values or keys. 230 | rdbcompression yes 231 | 232 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 233 | # This makes the format more resistant to corruption but there is a performance 234 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 235 | # for maximum performances. 236 | # 237 | # RDB files created with checksum disabled have a checksum of zero that will 238 | # tell the loading code to skip the check. 239 | rdbchecksum yes 240 | 241 | # The filename where to dump the DB 242 | dbfilename dump.rdb 243 | 244 | # The working directory. 245 | # 246 | # The DB will be written inside this directory, with the filename specified 247 | # above using the 'dbfilename' configuration directive. 248 | # 249 | # The Append Only File will also be created inside this directory. 250 | # 251 | # Note that you must specify a directory here, not a file name. 252 | dir /var/lib/redis 253 | 254 | ################################# REPLICATION ################################# 255 | 256 | # Master-Slave replication. 
Use slaveof to make a Redis instance a copy of 257 | # another Redis server. A few things to understand ASAP about Redis replication. 258 | # 259 | # 1) Redis replication is asynchronous, but you can configure a master to 260 | # stop accepting writes if it appears to be not connected with at least 261 | # a given number of slaves. 262 | # 2) Redis slaves are able to perform a partial resynchronization with the 263 | # master if the replication link is lost for a relatively small amount of 264 | # time. You may want to configure the replication backlog size (see the next 265 | # sections of this file) with a sensible value depending on your needs. 266 | # 3) Replication is automatic and does not need user intervention. After a 267 | # network partition slaves automatically try to reconnect to masters 268 | # and resynchronize with them. 269 | # 270 | # slaveof 271 | 272 | # If the master is password protected (using the "requirepass" configuration 273 | # directive below) it is possible to tell the slave to authenticate before 274 | # starting the replication synchronization process, otherwise the master will 275 | # refuse the slave request. 276 | # 277 | # masterauth 278 | 279 | # When a slave loses its connection with the master, or when the replication 280 | # is still in progress, the slave can act in two different ways: 281 | # 282 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 283 | # still reply to client requests, possibly with out of date data, or the 284 | # data set may just be empty if this is the first synchronization. 285 | # 286 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 287 | # an error "SYNC with master in progress" to all the kind of commands 288 | # but to INFO and SLAVEOF. 289 | # 290 | slave-serve-stale-data yes 291 | 292 | # You can configure a slave instance to accept writes or not. 
Writing against 293 | # a slave instance may be useful to store some ephemeral data (because data 294 | # written on a slave will be easily deleted after resync with the master) but 295 | # may also cause problems if clients are writing to it because of a 296 | # misconfiguration. 297 | # 298 | # Since Redis 2.6 by default slaves are read-only. 299 | # 300 | # Note: read only slaves are not designed to be exposed to untrusted clients 301 | # on the internet. It's just a protection layer against misuse of the instance. 302 | # Still a read only slave exports by default all the administrative commands 303 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 304 | # security of read only slaves using 'rename-command' to shadow all the 305 | # administrative / dangerous commands. 306 | slave-read-only yes 307 | 308 | # Replication SYNC strategy: disk or socket. 309 | # 310 | # ------------------------------------------------------- 311 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 312 | # ------------------------------------------------------- 313 | # 314 | # New slaves and reconnecting slaves that are not able to continue the replication 315 | # process just receiving differences, need to do what is called a "full 316 | # synchronization". An RDB file is transmitted from the master to the slaves. 317 | # The transmission can happen in two different ways: 318 | # 319 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 320 | # file on disk. Later the file is transferred by the parent 321 | # process to the slaves incrementally. 322 | # 2) Diskless: The Redis master creates a new process that directly writes the 323 | # RDB file to slave sockets, without touching the disk at all. 324 | # 325 | # With disk-backed replication, while the RDB file is generated, more slaves 326 | # can be queued and served with the RDB file as soon as the current child producing 327 | # the RDB file finishes its work. 
With diskless replication instead once 328 | # the transfer starts, new slaves arriving will be queued and a new transfer 329 | # will start when the current one terminates. 330 | # 331 | # When diskless replication is used, the master waits a configurable amount of 332 | # time (in seconds) before starting the transfer in the hope that multiple slaves 333 | # will arrive and the transfer can be parallelized. 334 | # 335 | # With slow disks and fast (large bandwidth) networks, diskless replication 336 | # works better. 337 | repl-diskless-sync no 338 | 339 | # When diskless replication is enabled, it is possible to configure the delay 340 | # the server waits in order to spawn the child that transfers the RDB via socket 341 | # to the slaves. 342 | # 343 | # This is important since once the transfer starts, it is not possible to serve 344 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 345 | # waits a delay in order to let more slaves arrive. 346 | # 347 | # The delay is specified in seconds, and by default is 5 seconds. To disable 348 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 349 | repl-diskless-sync-delay 5 350 | 351 | # Slaves send PINGs to server in a predefined interval. It's possible to change 352 | # this interval with the repl_ping_slave_period option. The default value is 10 353 | # seconds. 354 | # 355 | # repl-ping-slave-period 10 356 | 357 | # The following option sets the replication timeout for: 358 | # 359 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 360 | # 2) Master timeout from the point of view of slaves (data, pings). 361 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 362 | # 363 | # It is important to make sure that this value is greater than the value 364 | # specified for repl-ping-slave-period otherwise a timeout will be detected 365 | # every time there is low traffic between the master and the slave. 
366 | # 367 | # repl-timeout 60 368 | 369 | # Disable TCP_NODELAY on the slave socket after SYNC? 370 | # 371 | # If you select "yes" Redis will use a smaller number of TCP packets and 372 | # less bandwidth to send data to slaves. But this can add a delay for 373 | # the data to appear on the slave side, up to 40 milliseconds with 374 | # Linux kernels using a default configuration. 375 | # 376 | # If you select "no" the delay for data to appear on the slave side will 377 | # be reduced but more bandwidth will be used for replication. 378 | # 379 | # By default we optimize for low latency, but in very high traffic conditions 380 | # or when the master and slaves are many hops away, turning this to "yes" may 381 | # be a good idea. 382 | repl-disable-tcp-nodelay no 383 | 384 | # Set the replication backlog size. The backlog is a buffer that accumulates 385 | # slave data when slaves are disconnected for some time, so that when a slave 386 | # wants to reconnect again, often a full resync is not needed, but a partial 387 | # resync is enough, just passing the portion of data the slave missed while 388 | # disconnected. 389 | # 390 | # The bigger the replication backlog, the longer the time the slave can be 391 | # disconnected and later be able to perform a partial resynchronization. 392 | # 393 | # The backlog is only allocated once there is at least a slave connected. 394 | # 395 | # repl-backlog-size 1mb 396 | 397 | # After a master has no longer connected slaves for some time, the backlog 398 | # will be freed. The following option configures the amount of seconds that 399 | # need to elapse, starting from the time the last slave disconnected, for 400 | # the backlog buffer to be freed. 401 | # 402 | # A value of 0 means to never release the backlog. 403 | # 404 | # repl-backlog-ttl 3600 405 | 406 | # The slave priority is an integer number published by Redis in the INFO output. 
407 | # It is used by Redis Sentinel in order to select a slave to promote into a 408 | # master if the master is no longer working correctly. 409 | # 410 | # A slave with a low priority number is considered better for promotion, so 411 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 412 | # pick the one with priority 10, that is the lowest. 413 | # 414 | # However a special priority of 0 marks the slave as not able to perform the 415 | # role of master, so a slave with priority of 0 will never be selected by 416 | # Redis Sentinel for promotion. 417 | # 418 | # By default the priority is 100. 419 | slave-priority 100 420 | 421 | # It is possible for a master to stop accepting writes if there are less than 422 | # N slaves connected, having a lag less or equal than M seconds. 423 | # 424 | # The N slaves need to be in "online" state. 425 | # 426 | # The lag in seconds, that must be <= the specified value, is calculated from 427 | # the last ping received from the slave, that is usually sent every second. 428 | # 429 | # This option does not GUARANTEE that N replicas will accept the write, but 430 | # will limit the window of exposure for lost writes in case not enough slaves 431 | # are available, to the specified number of seconds. 432 | # 433 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 434 | # 435 | # min-slaves-to-write 3 436 | # min-slaves-max-lag 10 437 | # 438 | # Setting one or the other to 0 disables the feature. 439 | # 440 | # By default min-slaves-to-write is set to 0 (feature disabled) and 441 | # min-slaves-max-lag is set to 10. 442 | 443 | # A Redis master is able to list the address and port of the attached 444 | # slaves in different ways. For example the "INFO replication" section 445 | # offers this information, which is used, among other tools, by 446 | # Redis Sentinel in order to discover slave instances. 
447 | # Another place where this info is available is in the output of the 448 | # "ROLE" command of a master. 449 | # 450 | # The listed IP address and port normally reported by a slave is obtained 451 | # in the following way: 452 | # 453 | # IP: The address is auto detected by checking the peer address 454 | # of the socket used by the slave to connect with the master. 455 | # 456 | # Port: The port is communicated by the slave during the replication 457 | # handshake, and is normally the port that the slave is using to 458 | # listen for connections. 459 | # 460 | # However when port forwarding or Network Address Translation (NAT) is 461 | # used, the slave may be actually reachable via different IP and port 462 | # pairs. The following two options can be used by a slave in order to 463 | # report to its master a specific set of IP and port, so that both INFO 464 | # and ROLE will report those values. 465 | # 466 | # There is no need to use both the options if you need to override just 467 | # the port or the IP address. 468 | # 469 | # slave-announce-ip 5.5.5.5 470 | # slave-announce-port 1234 471 | 472 | ################################## SECURITY ################################### 473 | 474 | # Require clients to issue AUTH before processing any other 475 | # commands. This might be useful in environments in which you do not trust 476 | # others with access to the host running redis-server. 477 | # 478 | # This should stay commented out for backward compatibility and because most 479 | # people do not need auth (e.g. they run their own servers). 480 | # 481 | # Warning: since Redis is pretty fast an outside user can try up to 482 | # 150k passwords per second against a good box. This means that you should 483 | # use a very strong password otherwise it will be very easy to break. 484 | # 485 | # requirepass foobared 486 | 487 | # Command renaming. 488 | # 489 | # It is possible to change the name of dangerous commands in a shared 490 | # environment. 
For instance the CONFIG command may be renamed into something 491 | # hard to guess so that it will still be available for internal-use tools 492 | # but not available for general clients. 493 | # 494 | # Example: 495 | # 496 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 497 | # 498 | # It is also possible to completely kill a command by renaming it into 499 | # an empty string: 500 | # 501 | # rename-command CONFIG "" 502 | # 503 | # Please note that changing the name of commands that are logged into the 504 | # AOF file or transmitted to slaves may cause problems. 505 | 506 | ################################### LIMITS #################################### 507 | 508 | # Set the max number of connected clients at the same time. By default 509 | # this limit is set to 10000 clients, however if the Redis server is not 510 | # able to configure the process file limit to allow for the specified limit 511 | # the max number of allowed clients is set to the current file limit 512 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 513 | # 514 | # Once the limit is reached Redis will close all the new connections sending 515 | # an error 'max number of clients reached'. 516 | # 517 | # maxclients 10000 518 | 519 | # Don't use more memory than the specified amount of bytes. 520 | # When the memory limit is reached Redis will try to remove keys 521 | # according to the eviction policy selected (see maxmemory-policy). 522 | # 523 | # If Redis can't remove keys according to the policy, or if the policy is 524 | # set to 'noeviction', Redis will start to reply with errors to commands 525 | # that would use more memory, like SET, LPUSH, and so on, and will continue 526 | # to reply to read-only commands like GET. 527 | # 528 | # This option is usually useful when using Redis as an LRU cache, or to set 529 | # a hard memory limit for an instance (using the 'noeviction' policy). 
530 | # 531 | # WARNING: If you have slaves attached to an instance with maxmemory on, 532 | # the size of the output buffers needed to feed the slaves are subtracted 533 | # from the used memory count, so that network problems / resyncs will 534 | # not trigger a loop where keys are evicted, and in turn the output 535 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 536 | # of more keys, and so forth until the database is completely emptied. 537 | # 538 | # In short... if you have slaves attached it is suggested that you set a lower 539 | # limit for maxmemory so that there is some free RAM on the system for slave 540 | # output buffers (but this is not needed if the policy is 'noeviction'). 541 | # 542 | # maxmemory 543 | 544 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 545 | # is reached. You can select among five behaviors: 546 | # 547 | # volatile-lru -> remove the key with an expire set using an LRU algorithm 548 | # allkeys-lru -> remove any key according to the LRU algorithm 549 | # volatile-random -> remove a random key with an expire set 550 | # allkeys-random -> remove a random key, any key 551 | # volatile-ttl -> remove the key with the nearest expire time (minor TTL) 552 | # noeviction -> don't expire at all, just return an error on write operations 553 | # 554 | # Note: with any of the above policies, Redis will return an error on write 555 | # operations, when there are no suitable keys for eviction. 
556 | # 557 | # At the date of writing these commands are: set setnx setex append 558 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 559 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 560 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 561 | # getset mset msetnx exec sort 562 | # 563 | # The default is: 564 | # 565 | # maxmemory-policy noeviction 566 | 567 | # LRU and minimal TTL algorithms are not precise algorithms but approximated 568 | # algorithms (in order to save memory), so you can tune it for speed or 569 | # accuracy. By default Redis will check five keys and pick the one that was 570 | # used less recently, you can change the sample size using the following 571 | # configuration directive. 572 | # 573 | # The default of 5 produces good enough results. 10 approximates very closely 574 | # true LRU but costs a bit more CPU. 3 is very fast but not very accurate. 575 | # 576 | # maxmemory-samples 5 577 | 578 | ############################## APPEND ONLY MODE ############################### 579 | 580 | # By default Redis asynchronously dumps the dataset on disk. This mode is 581 | # good enough in many applications, but an issue with the Redis process or 582 | # a power outage may result in a few minutes of writes lost (depending on 583 | # the configured save points). 584 | # 585 | # The Append Only File is an alternative persistence mode that provides 586 | # much better durability. For instance using the default data fsync policy 587 | # (see later in the config file) Redis can lose just one second of writes in a 588 | # dramatic event like a server power outage, or a single write if something 589 | # wrong with the Redis process itself happens, but the operating system is 590 | # still running correctly. 591 | # 592 | # AOF and RDB persistence can be enabled at the same time without problems. 
593 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 594 | # with the better durability guarantees. 595 | # 596 | # Please check http://redis.io/topics/persistence for more information. 597 | 598 | appendonly no 599 | 600 | # The name of the append only file (default: "appendonly.aof") 601 | 602 | appendfilename "appendonly.aof" 603 | 604 | # The fsync() call tells the Operating System to actually write data on disk 605 | # instead of waiting for more data in the output buffer. Some OS will really flush 606 | # data on disk, some other OS will just try to do it ASAP. 607 | # 608 | # Redis supports three different modes: 609 | # 610 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 611 | # always: fsync after every write to the append only log. Slow, Safest. 612 | # everysec: fsync only one time every second. Compromise. 613 | # 614 | # The default is "everysec", as that's usually the right compromise between 615 | # speed and data safety. It's up to you to understand if you can relax this to 616 | # "no" that will let the operating system flush the output buffer when 617 | # it wants, for better performances (but if you can live with the idea of 618 | # some data loss consider the default persistence mode that's snapshotting), 619 | # or on the contrary, use "always" that's very slow but a bit safer than 620 | # everysec. 621 | # 622 | # More details please check the following article: 623 | # http://antirez.com/post/redis-persistence-demystified.html 624 | # 625 | # If unsure, use "everysec". 626 | 627 | # appendfsync always 628 | appendfsync everysec 629 | # appendfsync no 630 | 631 | # When the AOF fsync policy is set to always or everysec, and a background 632 | # saving process (a background save or AOF log background rewriting) is 633 | # performing a lot of I/O against the disk, in some Linux configurations 634 | # Redis may block too long on the fsync() call. 
Note that there is no fix for 635 | # this currently, as even performing fsync in a different thread will block 636 | # our synchronous write(2) call. 637 | # 638 | # In order to mitigate this problem it's possible to use the following option 639 | # that will prevent fsync() from being called in the main process while a 640 | # BGSAVE or BGREWRITEAOF is in progress. 641 | # 642 | # This means that while another child is saving, the durability of Redis is 643 | # the same as "appendfsync none". In practical terms, this means that it is 644 | # possible to lose up to 30 seconds of log in the worst scenario (with the 645 | # default Linux settings). 646 | # 647 | # If you have latency problems turn this to "yes". Otherwise leave it as 648 | # "no" that is the safest pick from the point of view of durability. 649 | 650 | no-appendfsync-on-rewrite no 651 | 652 | # Automatic rewrite of the append only file. 653 | # Redis is able to automatically rewrite the log file implicitly calling 654 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 655 | # 656 | # This is how it works: Redis remembers the size of the AOF file after the 657 | # latest rewrite (if no rewrite has happened since the restart, the size of 658 | # the AOF at startup is used). 659 | # 660 | # This base size is compared to the current size. If the current size is 661 | # bigger than the specified percentage, the rewrite is triggered. Also 662 | # you need to specify a minimal size for the AOF file to be rewritten, this 663 | # is useful to avoid rewriting the AOF file even if the percentage increase 664 | # is reached but it is still pretty small. 665 | # 666 | # Specify a percentage of zero in order to disable the automatic AOF 667 | # rewrite feature. 
668 | 669 | auto-aof-rewrite-percentage 100 670 | auto-aof-rewrite-min-size 64mb 671 | 672 | # An AOF file may be found to be truncated at the end during the Redis 673 | # startup process, when the AOF data gets loaded back into memory. 674 | # This may happen when the system where Redis is running 675 | # crashes, especially when an ext4 filesystem is mounted without the 676 | # data=ordered option (however this can't happen when Redis itself 677 | # crashes or aborts but the operating system still works correctly). 678 | # 679 | # Redis can either exit with an error when this happens, or load as much 680 | # data as possible (the default now) and start if the AOF file is found 681 | # to be truncated at the end. The following option controls this behavior. 682 | # 683 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 684 | # the Redis server starts emitting a log to inform the user of the event. 685 | # Otherwise if the option is set to no, the server aborts with an error 686 | # and refuses to start. When the option is set to no, the user requires 687 | # to fix the AOF file using the "redis-check-aof" utility before to restart 688 | # the server. 689 | # 690 | # Note that if the AOF file will be found to be corrupted in the middle 691 | # the server will still exit with an error. This option only applies when 692 | # Redis will try to read more data from the AOF file but not enough bytes 693 | # will be found. 694 | aof-load-truncated yes 695 | 696 | ################################ LUA SCRIPTING ############################### 697 | 698 | # Max execution time of a Lua script in milliseconds. 699 | # 700 | # If the maximum execution time is reached Redis will log that a script is 701 | # still in execution after the maximum allowed time and will start to 702 | # reply to queries with an error. 
703 | # 704 | # When a long running script exceeds the maximum execution time only the 705 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 706 | # used to stop a script that has not yet called write commands. The second 707 | # is the only way to shut down the server in the case a write command was 708 | # already issued by the script but the user doesn't want to wait for the natural 709 | # termination of the script. 710 | # 711 | # Set it to 0 or a negative value for unlimited execution without warnings. 712 | lua-time-limit 5000 713 | 714 | ################################ REDIS CLUSTER ############################### 715 | # 716 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 717 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 718 | # in order to mark it as "mature" we need to wait for a non trivial percentage 719 | # of users to deploy it in production. 720 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 721 | # 722 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 723 | # started as cluster nodes can. In order to start a Redis instance as a 724 | # cluster node enable the cluster support uncommenting the following: 725 | # 726 | # cluster-enabled yes 727 | 728 | # Every cluster node has a cluster configuration file. This file is not 729 | # intended to be edited by hand. It is created and updated by Redis nodes. 730 | # Every Redis Cluster node requires a different cluster configuration file. 731 | # Make sure that instances running in the same system do not have 732 | # overlapping cluster configuration file names. 733 | # 734 | # cluster-config-file nodes-6379.conf 735 | 736 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 737 | # for it to be considered in failure state. 738 | # Most other internal time limits are multiple of the node timeout. 
739 | # 740 | # cluster-node-timeout 15000 741 | 742 | # A slave of a failing master will avoid starting a failover if its data 743 | # looks too old. 744 | # 745 | # There is no simple way for a slave to actually have an exact measure of 746 | # its "data age", so the following two checks are performed: 747 | # 748 | # 1) If there are multiple slaves able to failover, they exchange messages 749 | # in order to try to give an advantage to the slave with the best 750 | # replication offset (more data from the master processed). 751 | # Slaves will try to get their rank by offset, and apply to the start 752 | # of the failover a delay proportional to their rank. 753 | # 754 | # 2) Every single slave computes the time of the last interaction with 755 | # its master. This can be the last ping or command received (if the master 756 | # is still in the "connected" state), or the time that elapsed since the 757 | # disconnection with the master (if the replication link is currently down). 758 | # If the last interaction is too old, the slave will not try to failover 759 | # at all. 760 | # 761 | # The point "2" can be tuned by the user. Specifically a slave will not perform 762 | # the failover if, since the last interaction with the master, the time 763 | # elapsed is greater than: 764 | # 765 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 766 | # 767 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 768 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 769 | # slave will not try to failover if it was not able to talk with the master 770 | # for longer than 310 seconds. 771 | # 772 | # A large slave-validity-factor may allow slaves with too old data to failover 773 | # a master, while a too small value may prevent the cluster from being able to 774 | # elect a slave at all. 
775 | # 776 | # For maximum availability, it is possible to set the slave-validity-factor 777 | # to a value of 0, which means, that slaves will always try to failover the 778 | # master regardless of the last time they interacted with the master. 779 | # (However they'll always try to apply a delay proportional to their 780 | # offset rank). 781 | # 782 | # Zero is the only value able to guarantee that when all the partitions heal 783 | # the cluster will always be able to continue. 784 | # 785 | # cluster-slave-validity-factor 10 786 | 787 | # Cluster slaves are able to migrate to orphaned masters, that are masters 788 | # that are left without working slaves. This improves the cluster ability 789 | # to resist to failures as otherwise an orphaned master can't be failed over 790 | # in case of failure if it has no working slaves. 791 | # 792 | # Slaves migrate to orphaned masters only if there are still at least a 793 | # given number of other working slaves for their old master. This number 794 | # is the "migration barrier". A migration barrier of 1 means that a slave 795 | # will migrate only if there is at least 1 other working slave for its master 796 | # and so forth. It usually reflects the number of slaves you want for every 797 | # master in your cluster. 798 | # 799 | # Default is 1 (slaves migrate only if their masters remain with at least 800 | # one slave). To disable migration just set it to a very large value. 801 | # A value of 0 can be set but is useful only for debugging and dangerous 802 | # in production. 803 | # 804 | # cluster-migration-barrier 1 805 | 806 | # By default Redis Cluster nodes stop accepting queries if they detect there 807 | # is at least an hash slot uncovered (no available node is serving it). 808 | # This way if the cluster is partially down (for example a range of hash slots 809 | # are no longer covered) all the cluster becomes, eventually, unavailable. 
810 | # It automatically returns available as soon as all the slots are covered again. 811 | # 812 | # However sometimes you want the subset of the cluster which is working, 813 | # to continue to accept queries for the part of the key space that is still 814 | # covered. In order to do so, just set the cluster-require-full-coverage 815 | # option to no. 816 | # 817 | # cluster-require-full-coverage yes 818 | 819 | # In order to setup your cluster make sure to read the documentation 820 | # available at http://redis.io web site. 821 | 822 | ################################## SLOW LOG ################################### 823 | 824 | # The Redis Slow Log is a system to log queries that exceeded a specified 825 | # execution time. The execution time does not include the I/O operations 826 | # like talking with the client, sending the reply and so forth, 827 | # but just the time needed to actually execute the command (this is the only 828 | # stage of command execution where the thread is blocked and can not serve 829 | # other requests in the meantime). 830 | # 831 | # You can configure the slow log with two parameters: one tells Redis 832 | # what is the execution time, in microseconds, to exceed in order for the 833 | # command to get logged, and the other parameter is the length of the 834 | # slow log. When a new command is logged the oldest one is removed from the 835 | # queue of logged commands. 836 | 837 | # The following time is expressed in microseconds, so 1000000 is equivalent 838 | # to one second. Note that a negative number disables the slow log, while 839 | # a value of zero forces the logging of every command. 840 | slowlog-log-slower-than 10000 841 | 842 | # There is no limit to this length. Just be aware that it will consume memory. 843 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 
844 | slowlog-max-len 128 845 | 846 | ################################ LATENCY MONITOR ############################## 847 | 848 | # The Redis latency monitoring subsystem samples different operations 849 | # at runtime in order to collect data related to possible sources of 850 | # latency of a Redis instance. 851 | # 852 | # Via the LATENCY command this information is available to the user that can 853 | # print graphs and obtain reports. 854 | # 855 | # The system only logs operations that were performed in a time equal or 856 | # greater than the amount of milliseconds specified via the 857 | # latency-monitor-threshold configuration directive. When its value is set 858 | # to zero, the latency monitor is turned off. 859 | # 860 | # By default latency monitoring is disabled since it is mostly not needed 861 | # if you don't have latency issues, and collecting data has a performance 862 | # impact, that while very small, can be measured under big load. Latency 863 | # monitoring can easily be enabled at runtime using the command 864 | # "CONFIG SET latency-monitor-threshold " if needed. 865 | latency-monitor-threshold 0 866 | 867 | ############################# EVENT NOTIFICATION ############################## 868 | 869 | # Redis can notify Pub/Sub clients about events happening in the key space. 870 | # This feature is documented at http://redis.io/topics/notifications 871 | # 872 | # For instance if keyspace events notification is enabled, and a client 873 | # performs a DEL operation on key "foo" stored in the Database 0, two 874 | # messages will be published via Pub/Sub: 875 | # 876 | # PUBLISH __keyspace@0__:foo del 877 | # PUBLISH __keyevent@0__:del foo 878 | # 879 | # It is possible to select the events that Redis will notify among a set 880 | # of classes. Every class is identified by a single character: 881 | # 882 | # K Keyspace events, published with __keyspace@__ prefix. 883 | # E Keyevent events, published with __keyevent@__ prefix. 
884 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 885 | # $ String commands 886 | # l List commands 887 | # s Set commands 888 | # h Hash commands 889 | # z Sorted set commands 890 | # x Expired events (events generated every time a key expires) 891 | # e Evicted events (events generated when a key is evicted for maxmemory) 892 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 893 | # 894 | # The "notify-keyspace-events" takes as argument a string that is composed 895 | # of zero or multiple characters. The empty string means that notifications 896 | # are disabled. 897 | # 898 | # Example: to enable list and generic events, from the point of view of the 899 | # event name, use: 900 | # 901 | # notify-keyspace-events Elg 902 | # 903 | # Example 2: to get the stream of the expired keys subscribing to channel 904 | # name __keyevent@0__:expired use: 905 | # 906 | # notify-keyspace-events Ex 907 | # 908 | # By default all notifications are disabled because most users don't need 909 | # this feature and the feature has some overhead. Note that if you don't 910 | # specify at least one of K or E, no events will be delivered. 911 | notify-keyspace-events "" 912 | 913 | ############################### ADVANCED CONFIG ############################### 914 | 915 | # Hashes are encoded using a memory efficient data structure when they have a 916 | # small number of entries, and the biggest entry does not exceed a given 917 | # threshold. These thresholds can be configured using the following directives. 918 | hash-max-ziplist-entries 512 919 | hash-max-ziplist-value 64 920 | 921 | # Lists are also encoded in a special way to save a lot of space. 922 | # The number of entries allowed per internal list node can be specified 923 | # as a fixed maximum size or a maximum number of elements. 
924 | # For a fixed maximum size, use -5 through -1, meaning: 925 | # -5: max size: 64 Kb <-- not recommended for normal workloads 926 | # -4: max size: 32 Kb <-- not recommended 927 | # -3: max size: 16 Kb <-- probably not recommended 928 | # -2: max size: 8 Kb <-- good 929 | # -1: max size: 4 Kb <-- good 930 | # Positive numbers mean store up to _exactly_ that number of elements 931 | # per list node. 932 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 933 | # but if your use case is unique, adjust the settings as necessary. 934 | # list-max-ziplist-size -2 935 | 936 | # Lists may also be compressed. 937 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 938 | # the list to *exclude* from compression. The head and tail of the list 939 | # are always uncompressed for fast push/pop operations. Settings are: 940 | # 0: disable all list compression 941 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 942 | # going from either the head or tail" 943 | # So: [head]->node->node->...->node->[tail] 944 | # [head], [tail] will always be uncompressed; inner nodes will compress. 945 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 946 | # 2 here means: don't compress head or head->next or tail->prev or tail, 947 | # but compress all nodes between them. 948 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 949 | # etc. 950 | # list-compress-depth 0 951 | 952 | # Sets have a special encoding in just one case: when a set is composed 953 | # of just strings that happen to be integers in radix 10 in the range 954 | # of 64 bit signed integers. 955 | # The following configuration setting sets the limit in the size of the 956 | # set in order to use this special memory saving encoding. 957 | set-max-intset-entries 512 958 | 959 | # Similarly to hashes and lists, sorted sets are also specially encoded in 960 | # order to save a lot of space. 
This encoding is only used when the length and 961 | # elements of a sorted set are below the following limits: 962 | zset-max-ziplist-entries 128 963 | zset-max-ziplist-value 64 964 | 965 | # HyperLogLog sparse representation bytes limit. The limit includes the 966 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 967 | # this limit, it is converted into the dense representation. 968 | # 969 | # A value greater than 16000 is totally useless, since at that point the 970 | # dense representation is more memory efficient. 971 | # 972 | # The suggested value is ~ 3000 in order to have the benefits of 973 | # the space efficient encoding without slowing down too much PFADD, 974 | # which is O(N) with the sparse encoding. The value can be raised to 975 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 976 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 977 | hll-sparse-max-bytes 3000 978 | 979 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 980 | # order to help rehashing the main Redis hash table (the one mapping top-level 981 | # keys to values). The hash table implementation Redis uses (see dict.c) 982 | # performs a lazy rehashing: the more operation you run into a hash table 983 | # that is rehashing, the more rehashing "steps" are performed, so if the 984 | # server is idle the rehashing is never complete and some more memory is used 985 | # by the hash table. 986 | # 987 | # The default is to use this millisecond 10 times every second in order to 988 | # actively rehash the main dictionaries, freeing memory when possible. 989 | # 990 | # If unsure: 991 | # use "activerehashing no" if you have hard latency requirements and it is 992 | # not a good thing in your environment that Redis can reply from time to time 993 | # to queries with 2 milliseconds delay. 
994 | # 995 | # use "activerehashing yes" if you don't have such hard requirements but 996 | # want to free memory asap when possible. 997 | activerehashing yes 998 | 999 | # The client output buffer limits can be used to force disconnection of clients 1000 | # that are not reading data from the server fast enough for some reason (a 1001 | # common reason is that a Pub/Sub client can't consume messages as fast as the 1002 | # publisher can produce them). 1003 | # 1004 | # The limit can be set differently for the three different classes of clients: 1005 | # 1006 | # normal -> normal clients including MONITOR clients 1007 | # slave -> slave clients 1008 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 1009 | # 1010 | # The syntax of every client-output-buffer-limit directive is the following: 1011 | # 1012 | # client-output-buffer-limit 1013 | # 1014 | # A client is immediately disconnected once the hard limit is reached, or if 1015 | # the soft limit is reached and remains reached for the specified number of 1016 | # seconds (continuously). 1017 | # So for instance if the hard limit is 32 megabytes and the soft limit is 1018 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 1019 | # if the size of the output buffers reach 32 megabytes, but will also get 1020 | # disconnected if the client reaches 16 megabytes and continuously overcomes 1021 | # the limit for 10 seconds. 1022 | # 1023 | # By default normal clients are not limited because they don't receive data 1024 | # without asking (in a push way), but just after a request, so only 1025 | # asynchronous clients may create a scenario where data is requested faster 1026 | # than it can read. 1027 | # 1028 | # Instead there is a default limit for pubsub and slave clients, since 1029 | # subscribers and slaves receive data in a push fashion. 1030 | # 1031 | # Both the hard or the soft limit can be disabled by setting them to zero. 
1032 | client-output-buffer-limit normal 0 0 0 1033 | client-output-buffer-limit slave 256mb 64mb 60 1034 | client-output-buffer-limit pubsub 32mb 8mb 60 1035 | 1036 | # Redis calls an internal function to perform many background tasks, like 1037 | # closing connections of clients in timeout, purging expired keys that are 1038 | # never requested, and so forth. 1039 | # 1040 | # Not all tasks are performed with the same frequency, but Redis checks for 1041 | # tasks to perform according to the specified "hz" value. 1042 | # 1043 | # By default "hz" is set to 10. Raising the value will use more CPU when 1044 | # Redis is idle, but at the same time will make Redis more responsive when 1045 | # there are many keys expiring at the same time, and timeouts may be 1046 | # handled with more precision. 1047 | # 1048 | # The range is between 1 and 500, however a value over 100 is usually not 1049 | # a good idea. Most users should use the default of 10 and raise this up to 1050 | # 100 only in environments where very low latency is required. 1051 | hz 10 1052 | 1053 | # When a child rewrites the AOF file, if the following option is enabled 1054 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1055 | # in order to commit the file to the disk more incrementally and avoid 1056 | # big latency spikes. 
1057 | aof-rewrite-incremental-fsync yes 1058 | -------------------------------------------------------------------------------- /config/texlive.repo: -------------------------------------------------------------------------------- 1 | [TeXLive] 2 | name=TeXLive Packages for CentOS 7 - $basearch 3 | baseurl=https://raw.githubusercontent.com/FluidityProject/yum-centos7-texlive/master/$basearch 4 | enabled=1 5 | gpgcheck=0 6 | 7 | -------------------------------------------------------------------------------- /openvas-docker-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | 4 | # set up GSAD config 5 | cat << EOF > /etc/sysconfig/gsad 6 | OPTIONS="" 7 | # 8 | # The address the Greenbone Security Assistant will listen on. 9 | # 10 | GSA_ADDRESS=0.0.0.0 11 | # 12 | # The port the Greenbone Security Assistant will listen on. 13 | # 14 | GSA_PORT=443 15 | EOF 16 | 17 | # Proxy optimization 18 | sed -i "s/^mirrorlist/#mirrorlist/g" /etc/yum.repos.d/CentOS-Base.repo 19 | sed -i "s/^#base/base/g" /etc/yum.repos.d/CentOS-Base.repo 20 | 21 | 22 | yum -y install wget 23 | cd /root; NON_INT=1 wget -q -O - https://updates.atomicorp.com/installers/atomic |sh 24 | yum clean all 25 | yum -y update 26 | yum -y install alien bzip2 useradd net-tools openssh texlive-changepage texlive-titlesec texlive-collection-latexextra 27 | 28 | mkdir -p /usr/share/texlive/texmf-local/tex/latex/comment 29 | texhash 30 | 31 | yum -y install openvas OSPd-nmap OSPd 32 | 33 | 34 | wget https://github.com/Arachni/arachni/releases/download/v1.5.1/arachni-1.5.1-0.5.12-linux-x86_64.tar.gz && \ 35 | tar xvf arachni-1.5.1-0.5.12-linux-x86_64.tar.gz && \ 36 | mv arachni-1.5.1-0.5.12 /opt/arachni && \ 37 | ln -s /opt/arachni/bin/* /usr/local/bin/ && \ 38 | rm -rf arachni* 39 | 40 | 41 | /usr/sbin/greenbone-nvt-sync 42 | /usr/sbin/greenbone-certdata-sync 43 | /usr/sbin/greenbone-scapdata-sync 44 | BUILD=true /run.sh 45 | 46 | rm -rf 
/var/cache/yum/* 47 | 48 | 49 | 50 | -------------------------------------------------------------------------------- /run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DATAVOL=/var/lib/openvas/ 4 | OV_PASSWORD=${OV_PASSWORD:-admin} 5 | OV_UPDATE=${OV_UPDATE:0} 6 | ADDRESS=127.0.0.1 7 | KEY_FILE=/var/lib/openvas/private/CA/clientkey.pem 8 | CERT_FILE=/var/lib/openvas/CA/clientcert.pem 9 | CA_FILE=/var/lib/openvas/CA/cacert.pem 10 | 11 | 12 | redis-server /etc/redis.conf & 13 | 14 | echo "Testing redis status..." 15 | X="$(redis-cli -s /tmp/redis.sock ping)" 16 | while [ "${X}" != "PONG" ]; do 17 | echo "Redis not yet ready..." 18 | sleep 1 19 | X="$(redis-cli -s /tmp/redis.sock ping)" 20 | done 21 | echo "Redis ready." 22 | 23 | #echo 24 | #echo "Initializing persistent directory layout" 25 | #pushd /var/lib/openvas 26 | # 27 | #DATA_DIRS="CA cert-data mgr private/CA plugins scap-data" 28 | #for dir in $DATA_DIRS; do 29 | # if [ ! -d $dir ]; then 30 | # mkdir $dir 31 | # fi 32 | #done 33 | #popd 34 | 35 | 36 | # Check certs 37 | if [ ! -f /var/lib/openvas/CA/cacert.pem ]; then 38 | /usr/bin/openvas-manage-certs -a 39 | fi 40 | 41 | if [ "$OV_UPDATE" == "yes" ];then 42 | /usr/sbin/greenbone-nvt-sync 43 | /usr/sbin/greenbone-certdata-sync 44 | /usr/sbin/greenbone-scapdata-sync 45 | fi 46 | 47 | if [ ! -d /usr/share/openvas/gsa/locale ]; then 48 | mkdir -p /usr/share/openvas/gsa/locale 49 | fi 50 | 51 | echo "Restarting services" 52 | /usr/sbin/openvassd 53 | /usr/sbin/openvasmd 54 | /usr/sbin/gsad 55 | 56 | echo 57 | echo -n "Checking for scanners: " 58 | SCANNER=$(/usr/sbin/openvasmd --get-scanners) 59 | echo "Done" 60 | 61 | if ! 
echo $SCANNER | grep -q nmap ; then 62 | echo "Adding nmap scanner" 63 | /usr/bin/ospd-nmap --bind-address $ADDRESS --port 40001 --key-file $KEY_FILE --cert-file $CERT_FILE --ca-file $CA_FILE & 64 | /usr/sbin/openvasmd --create-scanner=ospd-nmap --scanner-host=localhost --scanner-port=40001 --scanner-type=OSP --scanner-ca-pub=/var/lib/openvas/CA/cacert.pem --scanner-key-pub=/var/lib/openvas/CA/clientcert.pem --scanner-key-priv=/var/lib/openvas/private/CA/clientkey.pem 65 | echo 66 | else 67 | /usr/bin/ospd-nmap --bind-address $ADDRESS --port 40001 --key-file $KEY_FILE --cert-file $CERT_FILE --ca-file $CA_FILE & 68 | 69 | fi 70 | 71 | 72 | echo "Reloading NVTs" 73 | openvasmd --rebuild --progress 74 | 75 | # Check for users, and create admin 76 | if ! [[ $(openvasmd --get-users) ]] ; then 77 | /usr/sbin/openvasmd openvasmd --create-user=admin 78 | /usr/sbin/openvasmd --user=admin --new-password=$OV_PASSWORD 79 | fi 80 | 81 | if [ -n "$OV_PASSWORD" ]; then 82 | echo "Setting admin password" 83 | /usr/sbin/openvasmd --user=admin --new-password=$OV_PASSWORD 84 | fi 85 | 86 | echo "Checking setup" 87 | /usr/bin/openvas-check-setup --v9 88 | 89 | 90 | if [ -z "$BUILD" ]; then 91 | echo "Tailing logs" 92 | tail -F /var/log/openvas/* 93 | fi 94 | 95 | --------------------------------------------------------------------------------