108 | When conversion of an IPFS hash is requested, a subprocess is spawned
109 | that downloads the file and converts it with FFmpeg. During this time,
110 | the endpoint can be continuously queried by a script (polling). The
111 | `status` property indicates the state of the processing.
112 |
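A minimal polling loop on the client side could look like the sketch below. The `/convert/:hash` route and the `done`/`error` status values are assumptions here, stand-ins for whatever the endpoint actually returns:

```js
// Illustrative sketch only — the route and the status strings are assumptions.
async function waitForConversion(hash, intervalMs = 2000) {
  for (;;) {
    const res = await fetch(`/convert/${hash}`);
    const job = await res.json();
    if (job.status === 'done') return job;                  // conversion finished
    if (job.status === 'error') throw new Error('conversion failed');
    await new Promise(resolve => setTimeout(resolve, intervalMs));
  }
}
```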
164 | It's important to use the folder hash (usually the last entry in the
165 | `files` array) as the value of `testhash`.
166 |
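For instance, with the `ipfs-api` client the folder hash can be picked out of the add result roughly like this (a sketch; `ipfs` is assumed to be the client instance and `content` the files being added):

```js
// Sketch: files.add() resolves to an array of { path, hash, size } entries;
// the wrapping directory is normally the last one, so its hash is the folder hash.
const files = await ipfs.files.add(content, { wrapWithDirectory: true });
const testhash = files[files.length - 1].hash;
```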
167 |
175 |
176 | );
177 | }
178 | }
179 |
180 | const domContainer = document.querySelector('.root');
181 | ReactDOM.render(<App />, domContainer);
182 |
--------------------------------------------------------------------------------
/client/styles.css:
--------------------------------------------------------------------------------
1 | *, *:before, *:after {
2 | box-sizing: border-box;
3 | }
4 |
5 | body, input {
6 | font-family: 'Inconsolata', monospace;
7 | }
8 |
9 | h1 {
10 | font-weight: bold;
11 | font-size: 2.5em;
12 | }
13 |
14 | h2 {
15 | margin-top: 1.5em;
16 | text-decoration: underline;
17 | }
18 |
19 | .wrapper {
20 | text-align: center;
21 | }
22 |
23 | .root {
24 | display: flex;
25 | align-items: center;
26 | justify-content: center;
27 | }
28 |
29 | iframe {
30 | display: inline-block;
31 | }
32 |
33 | @media screen and (min-width: 900px) {
34 | .app {
35 | width: 50%;
36 | }
37 | }
38 |
39 | .app input {
40 | width: 87%;
41 | }
42 |
43 | .app input, .app .btn {
44 | border: 1px solid black;
45 | font-size: 14px;
46 | padding: 10px 2px;
47 | }
48 |
49 | .app .btn {
50 | cursor: pointer;
51 | background-color: white;
52 | margin-left: 1%;
53 | width: 12%;
54 | display: block;
55 | float: right;
56 | text-align: center;
57 | }
58 |
59 | .app .btn:hover {
60 | filter: invert(100%);
61 | }
62 |
63 |
64 | *:focus {
65 | outline: none;
66 | }
67 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | web:
4 | build: .
5 | ports:
6 | - "80:3000"
7 | volumes:
8 | - .:/usr/src/app
9 | links:
10 | - redis
11 | ipfs:
12 | build: ipfs/
13 | volumes:
14 | - /tmp/ipfs-docker-staging:/export
15 | - /tmp/ipfs-docker-data:/data/ipfs
16 | ports:
17 | - 8080:8080
18 | - 4001:4001
19 | - 127.0.0.1:5001:5001
20 | # NOTE: THIS DISABLES LOGGING FOR THE IPFS SERVICE
21 | logging:
22 | driver: none
23 | redis:
24 | image: 'bitnami/redis:latest'
25 | restart: "always"
26 | command: redis-server /usr/local/etc/redis/redis.conf --requirepass ${REDIS_PSWD}
27 | environment:
28 | - ALLOW_EMPTY_PASSWORD=yes
29 | ports:
30 | - '6379:6379'
31 | volumes:
32 | - /tmp/redis:/bitnami/redis/data
33 | - ./redis/redis.conf:/usr/local/etc/redis/redis.conf
34 |
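Assuming `REDIS_PSWD` is exported in the shell (or provided through an `.env` file next to the compose file), the stack can be brought up with `docker-compose up --build`. This publishes the web app on port 80, the IPFS gateway on 8080, the swarm port on 4001, and the IPFS API on 127.0.0.1:5001 only.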
--------------------------------------------------------------------------------
/ipfs/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ipfs/go-ipfs:latest
2 | COPY start_ipfs.sh /usr/local/bin/start_ipfs
3 | RUN ["chmod", "+x", "/usr/local/bin/start_ipfs"]
4 | ENTRYPOINT ["/usr/local/bin/start_ipfs"]
5 |
--------------------------------------------------------------------------------
/ipfs/start_ipfs.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | ipfs init
4 | ipfs config Addresses.API /ip4/0.0.0.0/tcp/5001
5 | ipfs config --json API.HTTPHeaders.Access-Control-Allow-Origin '["*"]'
6 | ipfs config --json API.HTTPHeaders.Access-Control-Allow-Methods '["PUT", "GET", "POST"]'
7 |
8 | exec env IPFS_LOGGING=info ipfs daemon --enable-gc
9 |
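Note that on every start after the first, `ipfs init` exits with an error because a repo already exists in the mounted `/data/ipfs` volume; since the script does not use `set -e`, execution continues and the daemon starts against the existing repo. Once it is up, the API can be checked from the host with something like `curl -X POST http://127.0.0.1:5001/api/v0/id` or `docker-compose exec ipfs ipfs id`.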
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "ipfs-convert",
3 | "version": "0.0.0",
4 | "private": true,
5 | "scripts": {
6 | "start": "concurrently 'nodemon --ignore client/ --ignore public/ ./bin/www' 'webpack --config webpack.config.js'"
7 | },
8 | "dependencies": {
9 | "bull": "^3.4.8",
10 | "concurrently": "^4.0.1",
11 | "cookie-parser": "~1.4.3",
12 | "debug": "~2.6.9",
13 | "dotenv": "^6.0.0",
14 | "express": "~4.16.0",
15 | "fs": "0.0.1-security",
16 | "html-webpack-plugin": "^3.2.0",
17 | "http-errors": "~1.6.2",
18 | "ipfs-api": "^24.0.1",
19 | "jade": "~1.11.0",
20 | "morgan": "~1.9.0",
21 | "pouchdb-node": "^7.0.0",
22 | "rc-progress": "^2.2.6",
23 | "react": "^16.5.2",
24 | "react-dom": "^16.5.2",
25 | "react-highlight": "^0.12.0",
26 | "rimraf": "^2.6.2",
27 | "webpack-dev-middleware": "2.0.6",
28 | "webpack-hot-middleware": "^2.24.2",
29 | "webpack-middleware-hmr": "^1.0.3"
30 | },
31 | "devDependencies": {
32 | "@babel/core": "^7.1.2",
33 | "@babel/plugin-proposal-class-properties": "^7.1.0",
34 | "@babel/preset-env": "^7.1.0",
35 | "@babel/preset-react": "^7.0.0",
36 | "babel-loader": "^8.0.4",
37 | "css-loader": "^1.0.0",
38 | "style-loader": "^0.23.0",
39 | "webpack": "^4.20.2",
40 | "webpack-cli": "^3.1.1"
41 | }
42 | }
43 |
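`bull` is the Redis-backed job queue in the dependency list. A rough sketch of how a conversion queue could be wired to the `redis` service from docker-compose.yml is shown below; the queue name, connection URL, and handler body are assumptions, not the project's actual code:

```js
// Sketch only — names and the Redis URL are assumptions.
const Queue = require('bull');

// Connects to the redis service defined in docker-compose.yml.
const convertQueue = new Queue('convert', `redis://:${process.env.REDIS_PSWD}@redis:6379`);

// Producer side: enqueue an IPFS hash for conversion.
function enqueue(hash) {
  return convertQueue.add({ hash });
}

// Consumer side: download the file, run FFmpeg, report progress.
convertQueue.process(async (job) => {
  // ...fetch job.data.hash from IPFS and spawn ffmpeg here...
  await job.progress(100);
  return { status: 'done' };
});
```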
--------------------------------------------------------------------------------
/redis/redis.conf:
--------------------------------------------------------------------------------
1 | # Redis configuration file example.
2 | #
3 | # Note that in order to read the configuration file, Redis must be
4 | # started with the file path as first argument:
5 | #
6 | # ./redis-server /path/to/redis.conf
7 |
8 | # Note on units: when memory size is needed, it is possible to specify
9 | # it in the usual form of 1k 5GB 4M and so forth:
10 | #
11 | # 1k => 1000 bytes
12 | # 1kb => 1024 bytes
13 | # 1m => 1000000 bytes
14 | # 1mb => 1024*1024 bytes
15 | # 1g => 1000000000 bytes
16 | # 1gb => 1024*1024*1024 bytes
17 | #
18 | # units are case insensitive so 1GB 1Gb 1gB are all the same.
19 |
20 | ################################## INCLUDES ###################################
21 |
22 | # Include one or more other config files here. This is useful if you
23 | # have a standard template that goes to all Redis servers but also need
24 | # to customize a few per-server settings. Include files can include
25 | # other files, so use this wisely.
26 | #
27 | # Notice option "include" won't be rewritten by command "CONFIG REWRITE"
28 | # from admin or Redis Sentinel. Since Redis always uses the last processed
29 | # line as value of a configuration directive, you'd better put includes
30 | # at the beginning of this file to avoid overwriting config change at runtime.
31 | #
32 | # If instead you are interested in using includes to override configuration
33 | # options, it is better to use include as the last line.
34 | #
35 | # include /path/to/local.conf
36 | # include /path/to/other.conf
37 |
38 | ################################## MODULES #####################################
39 |
40 | # Load modules at startup. If the server is not able to load modules
41 | # it will abort. It is possible to use multiple loadmodule directives.
42 | #
43 | # loadmodule /path/to/my_module.so
44 | # loadmodule /path/to/other_module.so
45 |
46 | ################################## NETWORK #####################################
47 |
48 | # By default, if no "bind" configuration directive is specified, Redis listens
49 | # for connections from all the network interfaces available on the server.
50 | # It is possible to listen to just one or multiple selected interfaces using
51 | # the "bind" configuration directive, followed by one or more IP addresses.
52 | #
53 | # Examples:
54 | #
55 | # bind 192.168.1.100 10.0.0.1
56 | # bind 127.0.0.1 ::1
57 | #
58 | # ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
59 | # internet, binding to all the interfaces is dangerous and will expose the
60 | # instance to everybody on the internet. So by default we uncomment the
61 | # following bind directive, that will force Redis to listen only into
62 | # the IPv4 lookback interface address (this means Redis will be able to
63 | # accept connections only from clients running into the same computer it
64 | # is running).
65 | #
66 | # IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES
67 | # JUST COMMENT THE FOLLOWING LINE.
68 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
69 | # bind 127.0.0.1
70 |
71 | # Protected mode is a layer of security protection, in order to avoid that
72 | # Redis instances left open on the internet are accessed and exploited.
73 | #
74 | # When protected mode is on and if:
75 | #
76 | # 1) The server is not binding explicitly to a set of addresses using the
77 | # "bind" directive.
78 | # 2) No password is configured.
79 | #
80 | # The server only accepts connections from clients connecting from the
81 | # IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain
82 | # sockets.
83 | #
84 | # By default protected mode is enabled. You should disable it only if
85 | # you are sure you want clients from other hosts to connect to Redis
86 | # even if no authentication is configured, nor a specific set of interfaces
87 | # are explicitly listed using the "bind" directive.
88 | protected-mode yes
89 |
90 | # Accept connections on the specified port, default is 6379 (IANA #815344).
91 | # If port 0 is specified Redis will not listen on a TCP socket.
92 | port 6379
93 |
94 | # TCP listen() backlog.
95 | #
96 | # In high requests-per-second environments you need an high backlog in order
97 | # to avoid slow clients connections issues. Note that the Linux kernel
98 | # will silently truncate it to the value of /proc/sys/net/core/somaxconn so
99 | # make sure to raise both the value of somaxconn and tcp_max_syn_backlog
100 | # in order to get the desired effect.
101 | tcp-backlog 511
102 |
103 | # Unix socket.
104 | #
105 | # Specify the path for the Unix socket that will be used to listen for
106 | # incoming connections. There is no default, so Redis will not listen
107 | # on a unix socket when not specified.
108 | #
109 | # unixsocket /tmp/redis.sock
110 | # unixsocketperm 700
111 |
112 | # Close the connection after a client is idle for N seconds (0 to disable)
113 | timeout 0
114 |
115 | # TCP keepalive.
116 | #
117 | # If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence
118 | # of communication. This is useful for two reasons:
119 | #
120 | # 1) Detect dead peers.
121 | # 2) Take the connection alive from the point of view of network
122 | # equipment in the middle.
123 | #
124 | # On Linux, the specified value (in seconds) is the period used to send ACKs.
125 | # Note that to close the connection the double of the time is needed.
126 | # On other kernels the period depends on the kernel configuration.
127 | #
128 | # A reasonable value for this option is 300 seconds, which is the new
129 | # Redis default starting with Redis 3.2.1.
130 | tcp-keepalive 300
131 |
132 | ################################# GENERAL #####################################
133 |
134 | # By default Redis does not run as a daemon. Use 'yes' if you need it.
135 | # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
136 | daemonize no
137 |
138 | # If you run Redis from upstart or systemd, Redis can interact with your
139 | # supervision tree. Options:
140 | # supervised no - no supervision interaction
141 | # supervised upstart - signal upstart by putting Redis into SIGSTOP mode
142 | # supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET
143 | # supervised auto - detect upstart or systemd method based on
144 | # UPSTART_JOB or NOTIFY_SOCKET environment variables
145 | # Note: these supervision methods only signal "process is ready."
146 | # They do not enable continuous liveness pings back to your supervisor.
147 | supervised no
148 |
149 | # If a pid file is specified, Redis writes it where specified at startup
150 | # and removes it at exit.
151 | #
152 | # When the server runs non daemonized, no pid file is created if none is
153 | # specified in the configuration. When the server is daemonized, the pid file
154 | # is used even if not specified, defaulting to "/var/run/redis.pid".
155 | #
156 | # Creating a pid file is best effort: if Redis is not able to create it
157 | # nothing bad happens, the server will start and run normally.
158 | pidfile /var/run/redis_6379.pid
159 |
160 | # Specify the server verbosity level.
161 | # This can be one of:
162 | # debug (a lot of information, useful for development/testing)
163 | # verbose (many rarely useful info, but not a mess like the debug level)
164 | # notice (moderately verbose, what you want in production probably)
165 | # warning (only very important / critical messages are logged)
166 | loglevel notice
167 |
168 | # Specify the log file name. Also the empty string can be used to force
169 | # Redis to log on the standard output. Note that if you use standard
170 | # output for logging but daemonize, logs will be sent to /dev/null
171 | logfile ""
172 |
173 | # To enable logging to the system logger, just set 'syslog-enabled' to yes,
174 | # and optionally update the other syslog parameters to suit your needs.
175 | # syslog-enabled no
176 |
177 | # Specify the syslog identity.
178 | # syslog-ident redis
179 |
180 | # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
181 | # syslog-facility local0
182 |
183 | # Set the number of databases. The default database is DB 0, you can select
184 | # a different one on a per-connection basis using SELECT <dbid> where
185 | # dbid is a number between 0 and 'databases'-1
186 | databases 16
187 |
188 | # By default Redis shows an ASCII art logo only when started to log to the
189 | # standard output and if the standard output is a TTY. Basically this means
190 | # that normally a logo is displayed only in interactive sessions.
191 | #
192 | # However it is possible to force the pre-4.0 behavior and always show a
193 | # ASCII art logo in startup logs by setting the following option to yes.
194 | always-show-logo yes
195 |
196 | ################################ SNAPSHOTTING ################################
197 | #
198 | # Save the DB on disk:
199 | #
200 | # save <seconds> <changes>
201 | #
202 | # Will save the DB if both the given number of seconds and the given
203 | # number of write operations against the DB occurred.
204 | #
205 | # In the example below the behaviour will be to save:
206 | # after 900 sec (15 min) if at least 1 key changed
207 | # after 300 sec (5 min) if at least 10 keys changed
208 | # after 60 sec if at least 10000 keys changed
209 | #
210 | # Note: you can disable saving completely by commenting out all "save" lines.
211 | #
212 | # It is also possible to remove all the previously configured save
213 | # points by adding a save directive with a single empty string argument
214 | # like in the following example:
215 | #
216 | # save ""
217 |
218 | save 900 1
219 | save 300 10
220 | save 60 10000
221 |
222 | # By default Redis will stop accepting writes if RDB snapshots are enabled
223 | # (at least one save point) and the latest background save failed.
224 | # This will make the user aware (in a hard way) that data is not persisting
225 | # on disk properly, otherwise chances are that no one will notice and some
226 | # disaster will happen.
227 | #
228 | # If the background saving process will start working again Redis will
229 | # automatically allow writes again.
230 | #
231 | # However if you have setup your proper monitoring of the Redis server
232 | # and persistence, you may want to disable this feature so that Redis will
233 | # continue to work as usual even if there are problems with disk,
234 | # permissions, and so forth.
235 | stop-writes-on-bgsave-error yes
236 |
237 | # Compress string objects using LZF when dump .rdb databases?
238 | # For default that's set to 'yes' as it's almost always a win.
239 | # If you want to save some CPU in the saving child set it to 'no' but
240 | # the dataset will likely be bigger if you have compressible values or keys.
241 | rdbcompression yes
242 |
243 | # Since version 5 of RDB a CRC64 checksum is placed at the end of the file.
244 | # This makes the format more resistant to corruption but there is a performance
245 | # hit to pay (around 10%) when saving and loading RDB files, so you can disable it
246 | # for maximum performances.
247 | #
248 | # RDB files created with checksum disabled have a checksum of zero that will
249 | # tell the loading code to skip the check.
250 | rdbchecksum yes
251 |
252 | # The filename where to dump the DB
253 | dbfilename dump.rdb
254 |
255 | # The working directory.
256 | #
257 | # The DB will be written inside this directory, with the filename specified
258 | # above using the 'dbfilename' configuration directive.
259 | #
260 | # The Append Only File will also be created inside this directory.
261 | #
262 | # Note that you must specify a directory here, not a file name.
263 | dir ./
264 |
265 | ################################# REPLICATION #################################
266 |
267 | # Master-Slave replication. Use slaveof to make a Redis instance a copy of
268 | # another Redis server. A few things to understand ASAP about Redis replication.
269 | #
270 | # 1) Redis replication is asynchronous, but you can configure a master to
271 | # stop accepting writes if it appears to be not connected with at least
272 | # a given number of slaves.
273 | # 2) Redis slaves are able to perform a partial resynchronization with the
274 | # master if the replication link is lost for a relatively small amount of
275 | # time. You may want to configure the replication backlog size (see the next
276 | # sections of this file) with a sensible value depending on your needs.
277 | # 3) Replication is automatic and does not need user intervention. After a
278 | # network partition slaves automatically try to reconnect to masters
279 | # and resynchronize with them.
280 | #
281 | # slaveof <masterip> <masterport>
282 |
283 | # If the master is password protected (using the "requirepass" configuration
284 | # directive below) it is possible to tell the slave to authenticate before
285 | # starting the replication synchronization process, otherwise the master will
286 | # refuse the slave request.
287 | #
288 | # masterauth <master-password>
289 |
290 | # When a slave loses its connection with the master, or when the replication
291 | # is still in progress, the slave can act in two different ways:
292 | #
293 | # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
294 | # still reply to client requests, possibly with out of date data, or the
295 | # data set may just be empty if this is the first synchronization.
296 | #
297 | # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
298 | # an error "SYNC with master in progress" to all the kind of commands
299 | # but to INFO and SLAVEOF.
300 | #
301 | slave-serve-stale-data yes
302 |
303 | # You can configure a slave instance to accept writes or not. Writing against
304 | # a slave instance may be useful to store some ephemeral data (because data
305 | # written on a slave will be easily deleted after resync with the master) but
306 | # may also cause problems if clients are writing to it because of a
307 | # misconfiguration.
308 | #
309 | # Since Redis 2.6 by default slaves are read-only.
310 | #
311 | # Note: read only slaves are not designed to be exposed to untrusted clients
312 | # on the internet. It's just a protection layer against misuse of the instance.
313 | # Still a read only slave exports by default all the administrative commands
314 | # such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
315 | # security of read only slaves using 'rename-command' to shadow all the
316 | # administrative / dangerous commands.
317 | slave-read-only yes
318 |
319 | # Replication SYNC strategy: disk or socket.
320 | #
321 | # -------------------------------------------------------
322 | # WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY
323 | # -------------------------------------------------------
324 | #
325 | # New slaves and reconnecting slaves that are not able to continue the replication
326 | # process just receiving differences, need to do what is called a "full
327 | # synchronization". An RDB file is transmitted from the master to the slaves.
328 | # The transmission can happen in two different ways:
329 | #
330 | # 1) Disk-backed: The Redis master creates a new process that writes the RDB
331 | # file on disk. Later the file is transferred by the parent
332 | # process to the slaves incrementally.
333 | # 2) Diskless: The Redis master creates a new process that directly writes the
334 | # RDB file to slave sockets, without touching the disk at all.
335 | #
336 | # With disk-backed replication, while the RDB file is generated, more slaves
337 | # can be queued and served with the RDB file as soon as the current child producing
338 | # the RDB file finishes its work. With diskless replication instead once
339 | # the transfer starts, new slaves arriving will be queued and a new transfer
340 | # will start when the current one terminates.
341 | #
342 | # When diskless replication is used, the master waits a configurable amount of
343 | # time (in seconds) before starting the transfer in the hope that multiple slaves
344 | # will arrive and the transfer can be parallelized.
345 | #
346 | # With slow disks and fast (large bandwidth) networks, diskless replication
347 | # works better.
348 | repl-diskless-sync no
349 |
350 | # When diskless replication is enabled, it is possible to configure the delay
351 | # the server waits in order to spawn the child that transfers the RDB via socket
352 | # to the slaves.
353 | #
354 | # This is important since once the transfer starts, it is not possible to serve
355 | # new slaves arriving, that will be queued for the next RDB transfer, so the server
356 | # waits a delay in order to let more slaves arrive.
357 | #
358 | # The delay is specified in seconds, and by default is 5 seconds. To disable
359 | # it entirely just set it to 0 seconds and the transfer will start ASAP.
360 | repl-diskless-sync-delay 5
361 |
362 | # Slaves send PINGs to server in a predefined interval. It's possible to change
363 | # this interval with the repl_ping_slave_period option. The default value is 10
364 | # seconds.
365 | #
366 | # repl-ping-slave-period 10
367 |
368 | # The following option sets the replication timeout for:
369 | #
370 | # 1) Bulk transfer I/O during SYNC, from the point of view of slave.
371 | # 2) Master timeout from the point of view of slaves (data, pings).
372 | # 3) Slave timeout from the point of view of masters (REPLCONF ACK pings).
373 | #
374 | # It is important to make sure that this value is greater than the value
375 | # specified for repl-ping-slave-period otherwise a timeout will be detected
376 | # every time there is low traffic between the master and the slave.
377 | #
378 | # repl-timeout 60
379 |
380 | # Disable TCP_NODELAY on the slave socket after SYNC?
381 | #
382 | # If you select "yes" Redis will use a smaller number of TCP packets and
383 | # less bandwidth to send data to slaves. But this can add a delay for
384 | # the data to appear on the slave side, up to 40 milliseconds with
385 | # Linux kernels using a default configuration.
386 | #
387 | # If you select "no" the delay for data to appear on the slave side will
388 | # be reduced but more bandwidth will be used for replication.
389 | #
390 | # By default we optimize for low latency, but in very high traffic conditions
391 | # or when the master and slaves are many hops away, turning this to "yes" may
392 | # be a good idea.
393 | repl-disable-tcp-nodelay no
394 |
395 | # Set the replication backlog size. The backlog is a buffer that accumulates
396 | # slave data when slaves are disconnected for some time, so that when a slave
397 | # wants to reconnect again, often a full resync is not needed, but a partial
398 | # resync is enough, just passing the portion of data the slave missed while
399 | # disconnected.
400 | #
401 | # The bigger the replication backlog, the longer the time the slave can be
402 | # disconnected and later be able to perform a partial resynchronization.
403 | #
404 | # The backlog is only allocated once there is at least a slave connected.
405 | #
406 | # repl-backlog-size 1mb
407 |
408 | # After a master has no longer connected slaves for some time, the backlog
409 | # will be freed. The following option configures the amount of seconds that
410 | # need to elapse, starting from the time the last slave disconnected, for
411 | # the backlog buffer to be freed.
412 | #
413 | # Note that slaves never free the backlog for timeout, since they may be
414 | # promoted to masters later, and should be able to correctly "partially
415 | # resynchronize" with the slaves: hence they should always accumulate backlog.
416 | #
417 | # A value of 0 means to never release the backlog.
418 | #
419 | # repl-backlog-ttl 3600
420 |
421 | # The slave priority is an integer number published by Redis in the INFO output.
422 | # It is used by Redis Sentinel in order to select a slave to promote into a
423 | # master if the master is no longer working correctly.
424 | #
425 | # A slave with a low priority number is considered better for promotion, so
426 | # for instance if there are three slaves with priority 10, 100, 25 Sentinel will
427 | # pick the one with priority 10, that is the lowest.
428 | #
429 | # However a special priority of 0 marks the slave as not able to perform the
430 | # role of master, so a slave with priority of 0 will never be selected by
431 | # Redis Sentinel for promotion.
432 | #
433 | # By default the priority is 100.
434 | slave-priority 100
435 |
436 | # It is possible for a master to stop accepting writes if there are less than
437 | # N slaves connected, having a lag less or equal than M seconds.
438 | #
439 | # The N slaves need to be in "online" state.
440 | #
441 | # The lag in seconds, that must be <= the specified value, is calculated from
442 | # the last ping received from the slave, that is usually sent every second.
443 | #
444 | # This option does not GUARANTEE that N replicas will accept the write, but
445 | # will limit the window of exposure for lost writes in case not enough slaves
446 | # are available, to the specified number of seconds.
447 | #
448 | # For example to require at least 3 slaves with a lag <= 10 seconds use:
449 | #
450 | # min-slaves-to-write 3
451 | # min-slaves-max-lag 10
452 | #
453 | # Setting one or the other to 0 disables the feature.
454 | #
455 | # By default min-slaves-to-write is set to 0 (feature disabled) and
456 | # min-slaves-max-lag is set to 10.
457 |
458 | # A Redis master is able to list the address and port of the attached
459 | # slaves in different ways. For example the "INFO replication" section
460 | # offers this information, which is used, among other tools, by
461 | # Redis Sentinel in order to discover slave instances.
462 | # Another place where this info is available is in the output of the
463 | # "ROLE" command of a master.
464 | #
465 | # The listed IP and address normally reported by a slave is obtained
466 | # in the following way:
467 | #
468 | # IP: The address is auto detected by checking the peer address
469 | # of the socket used by the slave to connect with the master.
470 | #
471 | # Port: The port is communicated by the slave during the replication
472 | # handshake, and is normally the port that the slave is using to
473 | # list for connections.
474 | #
475 | # However when port forwarding or Network Address Translation (NAT) is
476 | # used, the slave may be actually reachable via different IP and port
477 | # pairs. The following two options can be used by a slave in order to
478 | # report to its master a specific set of IP and port, so that both INFO
479 | # and ROLE will report those values.
480 | #
481 | # There is no need to use both the options if you need to override just
482 | # the port or the IP address.
483 | #
484 | # slave-announce-ip 5.5.5.5
485 | # slave-announce-port 1234
486 |
487 | ################################## SECURITY ###################################
488 |
489 | # Require clients to issue AUTH before processing any other
490 | # commands. This might be useful in environments in which you do not trust
491 | # others with access to the host running redis-server.
492 | #
493 | # This should stay commented out for backward compatibility and because most
494 | # people do not need auth (e.g. they run their own servers).
495 | #
496 | # Warning: since Redis is pretty fast an outside user can try up to
497 | # 150k passwords per second against a good box. This means that you should
498 | # use a very strong password otherwise it will be very easy to break.
499 | #
500 | # requirepass foobared
501 |
502 | # Command renaming.
503 | #
504 | # It is possible to change the name of dangerous commands in a shared
505 | # environment. For instance the CONFIG command may be renamed into something
506 | # hard to guess so that it will still be available for internal-use tools
507 | # but not available for general clients.
508 | #
509 | # Example:
510 | #
511 | # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
512 | #
513 | # It is also possible to completely kill a command by renaming it into
514 | # an empty string:
515 | #
516 | # rename-command CONFIG ""
517 | #
518 | # Please note that changing the name of commands that are logged into the
519 | # AOF file or transmitted to slaves may cause problems.
520 |
521 | ################################### CLIENTS ####################################
522 |
523 | # Set the max number of connected clients at the same time. By default
524 | # this limit is set to 10000 clients, however if the Redis server is not
525 | # able to configure the process file limit to allow for the specified limit
526 | # the max number of allowed clients is set to the current file limit
527 | # minus 32 (as Redis reserves a few file descriptors for internal uses).
528 | #
529 | # Once the limit is reached Redis will close all the new connections sending
530 | # an error 'max number of clients reached'.
531 | #
532 | # maxclients 10000
533 |
534 | ############################## MEMORY MANAGEMENT ################################
535 |
536 | # Set a memory usage limit to the specified amount of bytes.
537 | # When the memory limit is reached Redis will try to remove keys
538 | # according to the eviction policy selected (see maxmemory-policy).
539 | #
540 | # If Redis can't remove keys according to the policy, or if the policy is
541 | # set to 'noeviction', Redis will start to reply with errors to commands
542 | # that would use more memory, like SET, LPUSH, and so on, and will continue
543 | # to reply to read-only commands like GET.
544 | #
545 | # This option is usually useful when using Redis as an LRU or LFU cache, or to
546 | # set a hard memory limit for an instance (using the 'noeviction' policy).
547 | #
548 | # WARNING: If you have slaves attached to an instance with maxmemory on,
549 | # the size of the output buffers needed to feed the slaves are subtracted
550 | # from the used memory count, so that network problems / resyncs will
551 | # not trigger a loop where keys are evicted, and in turn the output
552 | # buffer of slaves is full with DELs of keys evicted triggering the deletion
553 | # of more keys, and so forth until the database is completely emptied.
554 | #
555 | # In short... if you have slaves attached it is suggested that you set a lower
556 | # limit for maxmemory so that there is some free RAM on the system for slave
557 | # output buffers (but this is not needed if the policy is 'noeviction').
558 | #
559 | # maxmemory <bytes>
560 |
561 | # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
562 | # is reached. You can select among five behaviors:
563 | #
564 | # volatile-lru -> Evict using approximated LRU among the keys with an expire set.
565 | # allkeys-lru -> Evict any key using approximated LRU.
566 | # volatile-lfu -> Evict using approximated LFU among the keys with an expire set.
567 | # allkeys-lfu -> Evict any key using approximated LFU.
568 | # volatile-random -> Remove a random key among the ones with an expire set.
569 | # allkeys-random -> Remove a random key, any key.
570 | # volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
571 | # noeviction -> Don't evict anything, just return an error on write operations.
572 | #
573 | # LRU means Least Recently Used
574 | # LFU means Least Frequently Used
575 | #
576 | # Both LRU, LFU and volatile-ttl are implemented using approximated
577 | # randomized algorithms.
578 | #
579 | # Note: with any of the above policies, Redis will return an error on write
580 | # operations, when there are no suitable keys for eviction.
581 | #
582 | # At the date of writing these commands are: set setnx setex append
583 | # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
584 | # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
585 | # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
586 | # getset mset msetnx exec sort
587 | #
588 | # The default is:
589 | #
590 | # maxmemory-policy noeviction
591 |
592 | # LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated
593 | # algorithms (in order to save memory), so you can tune it for speed or
594 | # accuracy. For default Redis will check five keys and pick the one that was
595 | # used less recently, you can change the sample size using the following
596 | # configuration directive.
597 | #
598 | # The default of 5 produces good enough results. 10 Approximates very closely
599 | # true LRU but costs more CPU. 3 is faster but not very accurate.
600 | #
601 | # maxmemory-samples 5
602 |
603 | ############################# LAZY FREEING ####################################
604 |
605 | # Redis has two primitives to delete keys. One is called DEL and is a blocking
606 | # deletion of the object. It means that the server stops processing new commands
607 | # in order to reclaim all the memory associated with an object in a synchronous
608 | # way. If the key deleted is associated with a small object, the time needed
609 | # in order to execute the DEL command is very small and comparable to most other
610 | # O(1) or O(log_N) commands in Redis. However if the key is associated with an
611 | # aggregated value containing millions of elements, the server can block for
612 | # a long time (even seconds) in order to complete the operation.
613 | #
614 | # For the above reasons Redis also offers non blocking deletion primitives
615 | # such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
616 | # FLUSHDB commands, in order to reclaim memory in background. Those commands
617 | # are executed in constant time. Another thread will incrementally free the
618 | # object in the background as fast as possible.
619 | #
620 | # DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
621 | # It's up to the design of the application to understand when it is a good
622 | # idea to use one or the other. However the Redis server sometimes has to
623 | # delete keys or flush the whole database as a side effect of other operations.
624 | # Specifically Redis deletes objects independently of a user call in the
625 | # following scenarios:
626 | #
627 | # 1) On eviction, because of the maxmemory and maxmemory policy configurations,
628 | # in order to make room for new data, without going over the specified
629 | # memory limit.
630 | # 2) Because of expire: when a key with an associated time to live (see the
631 | # EXPIRE command) must be deleted from memory.
632 | # 3) Because of a side effect of a command that stores data on a key that may
633 | # already exist. For example the RENAME command may delete the old key
634 | # content when it is replaced with another one. Similarly SUNIONSTORE
635 | # or SORT with STORE option may delete existing keys. The SET command
636 | # itself removes any old content of the specified key in order to replace
637 | # it with the specified string.
638 | # 4) During replication, when a slave performs a full resynchronization with
639 | # its master, the content of the whole database is removed in order to
640 | # load the RDB file just transfered.
641 | #
642 | # In all the above cases the default is to delete objects in a blocking way,
643 | # like if DEL was called. However you can configure each case specifically
644 | # in order to instead release memory in a non-blocking way like if UNLINK
645 | # was called, using the following configuration directives:
646 |
647 | lazyfree-lazy-eviction no
648 | lazyfree-lazy-expire no
649 | lazyfree-lazy-server-del no
650 | slave-lazy-flush no
651 |
652 | ############################## APPEND ONLY MODE ###############################
653 |
654 | # By default Redis asynchronously dumps the dataset on disk. This mode is
655 | # good enough in many applications, but an issue with the Redis process or
656 | # a power outage may result into a few minutes of writes lost (depending on
657 | # the configured save points).
658 | #
659 | # The Append Only File is an alternative persistence mode that provides
660 | # much better durability. For instance using the default data fsync policy
661 | # (see later in the config file) Redis can lose just one second of writes in a
662 | # dramatic event like a server power outage, or a single write if something
663 | # wrong with the Redis process itself happens, but the operating system is
664 | # still running correctly.
665 | #
666 | # AOF and RDB persistence can be enabled at the same time without problems.
667 | # If the AOF is enabled on startup Redis will load the AOF, that is the file
668 | # with the better durability guarantees.
669 | #
670 | # Please check http://redis.io/topics/persistence for more information.
671 |
672 | appendonly no
673 |
674 | # The name of the append only file (default: "appendonly.aof")
675 |
676 | appendfilename "appendonly.aof"
677 |
678 | # The fsync() call tells the Operating System to actually write data on disk
679 | # instead of waiting for more data in the output buffer. Some OS will really flush
680 | # data on disk, some other OS will just try to do it ASAP.
681 | #
682 | # Redis supports three different modes:
683 | #
684 | # no: don't fsync, just let the OS flush the data when it wants. Faster.
685 | # always: fsync after every write to the append only log. Slow, Safest.
686 | # everysec: fsync only one time every second. Compromise.
687 | #
688 | # The default is "everysec", as that's usually the right compromise between
689 | # speed and data safety. It's up to you to understand if you can relax this to
690 | # "no" that will let the operating system flush the output buffer when
691 | # it wants, for better performances (but if you can live with the idea of
692 | # some data loss consider the default persistence mode that's snapshotting),
693 | # or on the contrary, use "always" that's very slow but a bit safer than
694 | # everysec.
695 | #
696 | # More details please check the following article:
697 | # http://antirez.com/post/redis-persistence-demystified.html
698 | #
699 | # If unsure, use "everysec".
700 |
701 | # appendfsync always
702 | appendfsync everysec
703 | # appendfsync no
704 |
705 | # When the AOF fsync policy is set to always or everysec, and a background
706 | # saving process (a background save or AOF log background rewriting) is
707 | # performing a lot of I/O against the disk, in some Linux configurations
708 | # Redis may block too long on the fsync() call. Note that there is no fix for
709 | # this currently, as even performing fsync in a different thread will block
710 | # our synchronous write(2) call.
711 | #
712 | # In order to mitigate this problem it's possible to use the following option
713 | # that will prevent fsync() from being called in the main process while a
714 | # BGSAVE or BGREWRITEAOF is in progress.
715 | #
716 | # This means that while another child is saving, the durability of Redis is
717 | # the same as "appendfsync none". In practical terms, this means that it is
718 | # possible to lose up to 30 seconds of log in the worst scenario (with the
719 | # default Linux settings).
720 | #
721 | # If you have latency problems turn this to "yes". Otherwise leave it as
722 | # "no" that is the safest pick from the point of view of durability.
723 |
724 | no-appendfsync-on-rewrite no
725 |
726 | # Automatic rewrite of the append only file.
727 | # Redis is able to automatically rewrite the log file implicitly calling
728 | # BGREWRITEAOF when the AOF log size grows by the specified percentage.
729 | #
730 | # This is how it works: Redis remembers the size of the AOF file after the
731 | # latest rewrite (if no rewrite has happened since the restart, the size of
732 | # the AOF at startup is used).
733 | #
734 | # This base size is compared to the current size. If the current size is
735 | # bigger than the specified percentage, the rewrite is triggered. Also
736 | # you need to specify a minimal size for the AOF file to be rewritten, this
737 | # is useful to avoid rewriting the AOF file even if the percentage increase
738 | # is reached but it is still pretty small.
739 | #
740 | # Specify a percentage of zero in order to disable the automatic AOF
741 | # rewrite feature.
742 |
743 | auto-aof-rewrite-percentage 100
744 | auto-aof-rewrite-min-size 64mb
745 |
746 | # An AOF file may be found to be truncated at the end during the Redis
747 | # startup process, when the AOF data gets loaded back into memory.
748 | # This may happen when the system where Redis is running
749 | # crashes, especially when an ext4 filesystem is mounted without the
750 | # data=ordered option (however this can't happen when Redis itself
751 | # crashes or aborts but the operating system still works correctly).
752 | #
753 | # Redis can either exit with an error when this happens, or load as much
754 | # data as possible (the default now) and start if the AOF file is found
755 | # to be truncated at the end. The following option controls this behavior.
756 | #
757 | # If aof-load-truncated is set to yes, a truncated AOF file is loaded and
758 | # the Redis server starts emitting a log to inform the user of the event.
759 | # Otherwise if the option is set to no, the server aborts with an error
760 | # and refuses to start. When the option is set to no, the user requires
761 | # to fix the AOF file using the "redis-check-aof" utility before to restart
762 | # the server.
763 | #
764 | # Note that if the AOF file will be found to be corrupted in the middle
765 | # the server will still exit with an error. This option only applies when
766 | # Redis will try to read more data from the AOF file but not enough bytes
767 | # will be found.
768 | aof-load-truncated yes
769 |
770 | # When rewriting the AOF file, Redis is able to use an RDB preamble in the
771 | # AOF file for faster rewrites and recoveries. When this option is turned
772 | # on the rewritten AOF file is composed of two different stanzas:
773 | #
774 | # [RDB file][AOF tail]
775 | #
776 | # When loading Redis recognizes that the AOF file starts with the "REDIS"
777 | # string and loads the prefixed RDB file, and continues loading the AOF
778 | # tail.
779 | #
780 | # This is currently turned off by default in order to avoid the surprise
781 | # of a format change, but will at some point be used as the default.
782 | aof-use-rdb-preamble no
783 |
784 | ################################ LUA SCRIPTING ###############################
785 |
786 | # Max execution time of a Lua script in milliseconds.
787 | #
788 | # If the maximum execution time is reached Redis will log that a script is
789 | # still in execution after the maximum allowed time and will start to
790 | # reply to queries with an error.
791 | #
792 | # When a long running script exceeds the maximum execution time only the
793 | # SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
794 | # used to stop a script that did not yet called write commands. The second
795 | # is the only way to shut down the server in the case a write command was
796 | # already issued by the script but the user doesn't want to wait for the natural
797 | # termination of the script.
798 | #
799 | # Set it to 0 or a negative value for unlimited execution without warnings.
800 | lua-time-limit 5000
801 |
802 | ################################ REDIS CLUSTER ###############################
803 | #
804 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
805 | # WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however
806 | # in order to mark it as "mature" we need to wait for a non trivial percentage
807 | # of users to deploy it in production.
808 | # ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
809 | #
810 | # Normal Redis instances can't be part of a Redis Cluster; only nodes that are
811 | # started as cluster nodes can. In order to start a Redis instance as a
812 | # cluster node enable the cluster support uncommenting the following:
813 | #
814 | # cluster-enabled yes
815 |
816 | # Every cluster node has a cluster configuration file. This file is not
817 | # intended to be edited by hand. It is created and updated by Redis nodes.
818 | # Every Redis Cluster node requires a different cluster configuration file.
819 | # Make sure that instances running in the same system do not have
820 | # overlapping cluster configuration file names.
821 | #
822 | # cluster-config-file nodes-6379.conf
823 |
824 | # Cluster node timeout is the amount of milliseconds a node must be unreachable
825 | # for it to be considered in failure state.
826 | # Most other internal time limits are multiple of the node timeout.
827 | #
828 | # cluster-node-timeout 15000
829 |
830 | # A slave of a failing master will avoid to start a failover if its data
831 | # looks too old.
832 | #
833 | # There is no simple way for a slave to actually have an exact measure of
834 | # its "data age", so the following two checks are performed:
835 | #
836 | # 1) If there are multiple slaves able to failover, they exchange messages
837 | # in order to try to give an advantage to the slave with the best
838 | # replication offset (more data from the master processed).
839 | # Slaves will try to get their rank by offset, and apply to the start
840 | # of the failover a delay proportional to their rank.
841 | #
842 | # 2) Every single slave computes the time of the last interaction with
843 | # its master. This can be the last ping or command received (if the master
844 | # is still in the "connected" state), or the time that elapsed since the
845 | # disconnection with the master (if the replication link is currently down).
846 | # If the last interaction is too old, the slave will not try to failover
847 | # at all.
848 | #
849 | # The point "2" can be tuned by user. Specifically a slave will not perform
850 | # the failover if, since the last interaction with the master, the time
851 | # elapsed is greater than:
852 | #
853 | # (node-timeout * slave-validity-factor) + repl-ping-slave-period
854 | #
855 | # So for example if node-timeout is 30 seconds, and the slave-validity-factor
856 | # is 10, and assuming a default repl-ping-slave-period of 10 seconds, the
857 | # slave will not try to failover if it was not able to talk with the master
858 | # for longer than 310 seconds.
859 | #
860 | # A large slave-validity-factor may allow slaves with too old data to failover
861 | # a master, while a too small value may prevent the cluster from being able to
862 | # elect a slave at all.
863 | #
864 | # For maximum availability, it is possible to set the slave-validity-factor
865 | # to a value of 0, which means, that slaves will always try to failover the
866 | # master regardless of the last time they interacted with the master.
867 | # (However they'll always try to apply a delay proportional to their
868 | # offset rank).
869 | #
870 | # Zero is the only value able to guarantee that when all the partitions heal
871 | # the cluster will always be able to continue.
872 | #
873 | # cluster-slave-validity-factor 10
874 |
875 | # Cluster slaves are able to migrate to orphaned masters, that are masters
876 | # that are left without working slaves. This improves the cluster ability
877 | # to resist to failures as otherwise an orphaned master can't be failed over
878 | # in case of failure if it has no working slaves.
879 | #
880 | # Slaves migrate to orphaned masters only if there are still at least a
881 | # given number of other working slaves for their old master. This number
882 | # is the "migration barrier". A migration barrier of 1 means that a slave
883 | # will migrate only if there is at least 1 other working slave for its master
884 | # and so forth. It usually reflects the number of slaves you want for every
885 | # master in your cluster.
886 | #
887 | # Default is 1 (slaves migrate only if their masters remain with at least
888 | # one slave). To disable migration just set it to a very large value.
889 | # A value of 0 can be set but is useful only for debugging and dangerous
890 | # in production.
891 | #
892 | # cluster-migration-barrier 1
893 |
894 | # By default Redis Cluster nodes stop accepting queries if they detect there
895 | # is at least an hash slot uncovered (no available node is serving it).
896 | # This way if the cluster is partially down (for example a range of hash slots
897 | # are no longer covered) all the cluster becomes, eventually, unavailable.
898 | # It automatically returns available as soon as all the slots are covered again.
899 | #
900 | # However sometimes you want the subset of the cluster which is working,
901 | # to continue to accept queries for the part of the key space that is still
902 | # covered. In order to do so, just set the cluster-require-full-coverage
903 | # option to no.
904 | #
905 | # cluster-require-full-coverage yes
906 |
907 | # This option, when set to yes, prevents slaves from trying to failover its
908 | # master during master failures. However the master can still perform a
909 | # manual failover, if forced to do so.
910 | #
911 | # This is useful in different scenarios, especially in the case of multiple
912 | # data center operations, where we want one side to never be promoted if not
913 | # in the case of a total DC failure.
914 | #
915 | # cluster-slave-no-failover no
916 |
917 | # In order to setup your cluster make sure to read the documentation
918 | # available at http://redis.io web site.
919 |
920 | ########################## CLUSTER DOCKER/NAT support ########################
921 |
922 | # In certain deployments, Redis Cluster nodes address discovery fails, because
923 | # addresses are NAT-ted or because ports are forwarded (the typical case is
924 | # Docker and other containers).
925 | #
926 | # In order to make Redis Cluster working in such environments, a static
927 | # configuration where each node knows its public address is needed. The
928 | # following two options are used for this scope, and are:
929 | #
930 | # * cluster-announce-ip
931 | # * cluster-announce-port
932 | # * cluster-announce-bus-port
933 | #
934 | # Each instruct the node about its address, client port, and cluster message
935 | # bus port. The information is then published in the header of the bus packets
936 | # so that other nodes will be able to correctly map the address of the node
937 | # publishing the information.
938 | #
939 | # If the above options are not used, the normal Redis Cluster auto-detection
940 | # will be used instead.
941 | #
942 | # Note that when remapped, the bus port may not be at the fixed offset of
943 | # clients port + 10000, so you can specify any port and bus-port depending
944 | # on how they get remapped. If the bus-port is not set, a fixed offset of
945 | # 10000 will be used as usually.
946 | #
947 | # Example:
948 | #
949 | # cluster-announce-ip 10.1.1.5
950 | # cluster-announce-port 6379
951 | # cluster-announce-bus-port 6380
952 |
953 | ################################## SLOW LOG ###################################
954 |
955 | # The Redis Slow Log is a system to log queries that exceeded a specified
956 | # execution time. The execution time does not include the I/O operations
957 | # like talking with the client, sending the reply and so forth,
958 | # but just the time needed to actually execute the command (this is the only
959 | # stage of command execution where the thread is blocked and can not serve
960 | # other requests in the meantime).
961 | #
962 | # You can configure the slow log with two parameters: one tells Redis
963 | # what is the execution time, in microseconds, to exceed in order for the
964 | # command to get logged, and the other parameter is the length of the
965 | # slow log. When a new command is logged the oldest one is removed from the
966 | # queue of logged commands.
967 |
968 | # The following time is expressed in microseconds, so 1000000 is equivalent
969 | # to one second. Note that a negative number disables the slow log, while
970 | # a value of zero forces the logging of every command.
971 | slowlog-log-slower-than 10000
972 |
973 | # There is no limit to this length. Just be aware that it will consume memory.
974 | # You can reclaim memory used by the slow log with SLOWLOG RESET.
975 | slowlog-max-len 128
976 |
977 | ################################ LATENCY MONITOR ##############################
978 |
979 | # The Redis latency monitoring subsystem samples different operations
980 | # at runtime in order to collect data related to possible sources of
981 | # latency of a Redis instance.
982 | #
983 | # Via the LATENCY command this information is available to the user that can
984 | # print graphs and obtain reports.
985 | #
986 | # The system only logs operations that were performed in a time equal or
987 | # greater than the amount of milliseconds specified via the
988 | # latency-monitor-threshold configuration directive. When its value is set
989 | # to zero, the latency monitor is turned off.
990 | #
991 | # By default latency monitoring is disabled since it is mostly not needed
992 | # if you don't have latency issues, and collecting data has a performance
993 | # impact, that while very small, can be measured under big load. Latency
994 | # monitoring can easily be enabled at runtime using the command
995 | # "CONFIG SET latency-monitor-threshold " if needed.
996 | latency-monitor-threshold 0
997 |
998 | ############################# EVENT NOTIFICATION ##############################
999 |
1000 | # Redis can notify Pub/Sub clients about events happening in the key space.
1001 | # This feature is documented at http://redis.io/topics/notifications
1002 | #
1003 | # For instance if keyspace events notification is enabled, and a client
1004 | # performs a DEL operation on key "foo" stored in the Database 0, two
1005 | # messages will be published via Pub/Sub:
1006 | #
1007 | # PUBLISH __keyspace@0__:foo del
1008 | # PUBLISH __keyevent@0__:del foo
1009 | #
1010 | # It is possible to select the events that Redis will notify among a set
1011 | # of classes. Every class is identified by a single character:
1012 | #
1013 | # K Keyspace events, published with __keyspace@<db>__ prefix.
1014 | # E Keyevent events, published with __keyevent@<db>__ prefix.
1015 | # g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
1016 | # $ String commands
1017 | # l List commands
1018 | # s Set commands
1019 | # h Hash commands
1020 | # z Sorted set commands
1021 | # x Expired events (events generated every time a key expires)
1022 | # e Evicted events (events generated when a key is evicted for maxmemory)
1023 | # A Alias for g$lshzxe, so that the "AKE" string means all the events.
1024 | #
1025 | # The "notify-keyspace-events" takes as argument a string that is composed
1026 | # of zero or multiple characters. The empty string means that notifications
1027 | # are disabled.
1028 | #
1029 | # Example: to enable list and generic events, from the point of view of the
1030 | # event name, use:
1031 | #
1032 | # notify-keyspace-events Elg
1033 | #
1034 | # Example 2: to get the stream of the expired keys subscribing to channel
1035 | # name __keyevent@0__:expired use:
1036 | #
1037 | # notify-keyspace-events Ex
1038 | #
1039 | # By default all notifications are disabled because most users don't need
1040 | # this feature and the feature has some overhead. Note that if you don't
1041 | # specify at least one of K or E, no events will be delivered.
1042 | notify-keyspace-events ""
1043 |
1044 | ############################### ADVANCED CONFIG ###############################
1045 |
1046 | # Hashes are encoded using a memory efficient data structure when they have a
1047 | # small number of entries, and the biggest entry does not exceed a given
1048 | # threshold. These thresholds can be configured using the following directives.
1049 | hash-max-ziplist-entries 512
1050 | hash-max-ziplist-value 64
1051 |
1052 | # Lists are also encoded in a special way to save a lot of space.
1053 | # The number of entries allowed per internal list node can be specified
1054 | # as a fixed maximum size or a maximum number of elements.
1055 | # For a fixed maximum size, use -5 through -1, meaning:
1056 | # -5: max size: 64 Kb <-- not recommended for normal workloads
1057 | # -4: max size: 32 Kb <-- not recommended
1058 | # -3: max size: 16 Kb <-- probably not recommended
1059 | # -2: max size: 8 Kb <-- good
1060 | # -1: max size: 4 Kb <-- good
1061 | # Positive numbers mean store up to _exactly_ that number of elements
1062 | # per list node.
1063 | # The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size),
1064 | # but if your use case is unique, adjust the settings as necessary.
1065 | list-max-ziplist-size -2
1066 |
1067 | # Lists may also be compressed.
1068 | # Compress depth is the number of quicklist ziplist nodes from *each* side of
1069 | # the list to *exclude* from compression. The head and tail of the list
1070 | # are always uncompressed for fast push/pop operations. Settings are:
1071 | # 0: disable all list compression
1072 | # 1: depth 1 means "don't start compressing until after 1 node into the list,
1073 | # going from either the head or tail"
1074 | # So: [head]->node->node->...->node->[tail]
1075 | # [head], [tail] will always be uncompressed; inner nodes will compress.
1076 | # 2: [head]->[next]->node->node->...->node->[prev]->[tail]
1077 | # 2 here means: don't compress head or head->next or tail->prev or tail,
1078 | # but compress all nodes between them.
1079 | # 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail]
1080 | # etc.
1081 | list-compress-depth 0
1082 |
1083 | # Sets have a special encoding in just one case: when a set is composed
1084 | # of just strings that happen to be integers in radix 10 in the range
1085 | # of 64 bit signed integers.
1086 | # The following configuration setting sets the limit on the size of the
1087 | # set for which this special memory-saving encoding is used.
1088 | set-max-intset-entries 512
1089 |
1090 | # Similarly to hashes and lists, sorted sets are also specially encoded in
1091 | # order to save a lot of space. This encoding is only used when the length and
1092 | # elements of a sorted set are below the following limits:
1093 | zset-max-ziplist-entries 128
1094 | zset-max-ziplist-value 64
1095 |
1096 | # HyperLogLog sparse representation bytes limit. The limit includes the
1097 | # 16 byte header. When a HyperLogLog using the sparse representation crosses
1098 | # this limit, it is converted into the dense representation.
1099 | #
1100 | # A value greater than 16000 is totally useless, since at that point the
1101 | # dense representation is more memory efficient.
1102 | #
1103 | # The suggested value is ~ 3000 in order to have the benefits of
1104 | # the space-efficient encoding without slowing down PFADD too much,
1105 | # which is O(N) with the sparse encoding. The value can be raised to
1106 | # ~ 10000 when CPU is not a concern, but space is, and the data set is
1107 | # composed of many HyperLogLogs with cardinality in the 0 - 15000 range.
1108 | hll-sparse-max-bytes 3000
1109 |
1110 | # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
1111 | # order to help rehashing the main Redis hash table (the one mapping top-level
1112 | # keys to values). The hash table implementation Redis uses (see dict.c)
1113 | # performs lazy rehashing: the more operations you run against a hash table
1114 | # that is rehashing, the more rehashing "steps" are performed, so if the
1115 | # server is idle the rehashing is never complete and some more memory is used
1116 | # by the hash table.
1117 | #
1118 | # The default is to use this millisecond 10 times every second in order to
1119 | # actively rehash the main dictionaries, freeing memory when possible.
1120 | #
1121 | # If unsure:
1122 | # use "activerehashing no" if you have hard latency requirements and it is
1123 | # not a good thing in your environment that Redis can reply from time to time
1124 | # to queries with 2 milliseconds delay.
1125 | #
1126 | # use "activerehashing yes" if you don't have such hard requirements but
1127 | # want to free memory asap when possible.
1128 | activerehashing yes
1129 |
1130 | # The client output buffer limits can be used to force disconnection of clients
1131 | # that are not reading data from the server fast enough for some reason (a
1132 | # common reason is that a Pub/Sub client can't consume messages as fast as the
1133 | # publisher can produce them).
1134 | #
1135 | # The limit can be set differently for the three different classes of clients:
1136 | #
1137 | # normal -> normal clients including MONITOR clients
1138 | # slave -> slave clients
1139 | # pubsub -> clients subscribed to at least one pubsub channel or pattern
1140 | #
1141 | # The syntax of every client-output-buffer-limit directive is the following:
1142 | #
1143 | # client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
1144 | #
1145 | # A client is immediately disconnected once the hard limit is reached, or if
1146 | # the soft limit is reached and remains reached for the specified number of
1147 | # seconds (continuously).
1148 | # So for instance if the hard limit is 32 megabytes and the soft limit is
1149 | # 16 megabytes / 10 seconds, the client will get disconnected immediately
1150 | # if the size of the output buffers reach 32 megabytes, but will also get
1151 | # disconnected if the client reaches 16 megabytes and continuously overcomes
1152 | # the limit for 10 seconds.
1153 | #
1154 | # By default normal clients are not limited because they don't receive data
1155 | # without asking (in a push way), but just after a request, so only
1156 | # asynchronous clients may create a scenario where data is requested faster
1157 | # than they can read it.
1158 | #
1159 | # Instead there is a default limit for pubsub and slave clients, since
1160 | # subscribers and slaves receive data in a push fashion.
1161 | #
1162 | # Both the hard or the soft limit can be disabled by setting them to zero.
1163 | client-output-buffer-limit normal 0 0 0
1164 | client-output-buffer-limit slave 256mb 64mb 60
1165 | client-output-buffer-limit pubsub 32mb 8mb 60
1166 |
1167 | # Client query buffers accumulate new commands. They are limited to a fixed
1168 | # amount by default in order to avoid that a protocol desynchronization (for
1169 | # instance due to a bug in the client) will lead to unbound memory usage in
1170 | # the query buffer. However you can configure it here if you have very special
1171 | # needs, such as huge multi/exec requests or the like.
1172 | #
1173 | # client-query-buffer-limit 1gb
1174 |
1175 | # In the Redis protocol, bulk requests, that is, elements representing single
1176 | # strings, are normally limited to 512 mb. However you can change this limit
1177 | # here.
1178 | #
1179 | # proto-max-bulk-len 512mb
1180 |
1181 | # Redis calls an internal function to perform many background tasks, like
1182 | # closing connections of clients in timeout, purging expired keys that are
1183 | # never requested, and so forth.
1184 | #
1185 | # Not all tasks are performed with the same frequency, but Redis checks for
1186 | # tasks to perform according to the specified "hz" value.
1187 | #
1188 | # By default "hz" is set to 10. Raising the value will use more CPU when
1189 | # Redis is idle, but at the same time will make Redis more responsive when
1190 | # there are many keys expiring at the same time, and timeouts may be
1191 | # handled with more precision.
1192 | #
1193 | # The range is between 1 and 500, however a value over 100 is usually not
1194 | # a good idea. Most users should use the default of 10 and raise this up to
1195 | # 100 only in environments where very low latency is required.
1196 | hz 10
1197 |
1198 | # When a child rewrites the AOF file, if the following option is enabled
1199 | # the file will be fsync-ed every 32 MB of data generated. This is useful
1200 | # in order to commit the file to the disk more incrementally and avoid
1201 | # big latency spikes.
1202 | aof-rewrite-incremental-fsync yes
1203 |
1204 | # Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good
1205 | # idea to start with the default settings and only change them after investigating
1206 | # how to improve the performances and how the keys LFU change over time, which
1207 | # is possible to inspect via the OBJECT FREQ command.
1208 | #
1209 | # There are two tunable parameters in the Redis LFU implementation: the
1210 | # counter logarithm factor and the counter decay time. It is important to
1211 | # understand what the two parameters mean before changing them.
1212 | #
1213 | # The LFU counter is just 8 bits per key; its maximum value is 255, so Redis
1214 | # uses a probabilistic increment with logarithmic behavior. Given the value
1215 | # of the old counter, when a key is accessed, the counter is incremented in
1216 | # this way:
1217 | #
1218 | # 1. A random number R between 0 and 1 is extracted.
1219 | # 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
1220 | # 3. The counter is incremented only if R < P.
1221 | #
1222 | # The default lfu-log-factor is 10. This is a table of how the frequency
1223 | # counter changes with a different number of accesses with different
1224 | # logarithmic factors:
1225 | #
1226 | # +--------+------------+------------+------------+------------+------------+
1227 | # | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits |
1228 | # +--------+------------+------------+------------+------------+------------+
1229 | # | 0 | 104 | 255 | 255 | 255 | 255 |
1230 | # +--------+------------+------------+------------+------------+------------+
1231 | # | 1 | 18 | 49 | 255 | 255 | 255 |
1232 | # +--------+------------+------------+------------+------------+------------+
1233 | # | 10 | 10 | 18 | 142 | 255 | 255 |
1234 | # +--------+------------+------------+------------+------------+------------+
1235 | # | 100 | 8 | 11 | 49 | 143 | 255 |
1236 | # +--------+------------+------------+------------+------------+------------+
1237 | #
1238 | # NOTE: The above table was obtained by running the following commands:
1239 | #
1240 | # redis-benchmark -n 1000000 incr foo
1241 | # redis-cli object freq foo
1242 | #
1243 | # NOTE 2: The counter initial value is 5 in order to give new objects a chance
1244 | # to accumulate hits.
1245 | #
1246 | # The counter decay time is the time, in minutes, that must elapse in order
1247 | # for the key counter to be divided by two (or decremented if it has a value
1248 | # less than or equal to 10).
1249 | #
1250 | # The default value for the lfu-decay-time is 1. A special value of 0 means to
1251 | # decay the counter every time it happens to be scanned.
1252 | #
1253 | # lfu-log-factor 10
1254 | # lfu-decay-time 1
1255 |
1256 | ########################### ACTIVE DEFRAGMENTATION #######################
1257 | #
1258 | # WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested
1259 | # even in production and manually tested by multiple engineers for some
1260 | # time.
1261 | #
1262 | # What is active defragmentation?
1263 | # -------------------------------
1264 | #
1265 | # Active (online) defragmentation allows a Redis server to compact the
1266 | # spaces left between small allocations and deallocations of data in memory,
1267 | # thus allowing it to reclaim memory.
1268 | #
1269 | # Fragmentation is a natural process that happens with every allocator (but
1270 | # less so with Jemalloc, fortunately) and certain workloads. Normally a server
1271 | # restart is needed in order to lower the fragmentation, or at least to flush
1272 | # away all the data and create it again. However thanks to this feature
1273 | # implemented by Oran Agra for Redis 4.0 this process can happen at runtime
1274 | # in an "hot" way, while the server is running.
1275 | #
1276 | # Basically when the fragmentation is over a certain level (see the
1277 | # configuration options below) Redis will start to create new copies of the
1278 | # values in contiguous memory regions by exploiting certain specific Jemalloc
1279 | # features (in order to understand if an allocation is causing fragmentation
1280 | # and to allocate it in a better place), and at the same time, will release the
1281 | # old copies of the data. This process, repeated incrementally for all the keys
1282 | # will cause the fragmentation to drop back to normal values.
1283 | #
1284 | # Important things to understand:
1285 | #
1286 | # 1. This feature is disabled by default, and only works if you compiled Redis
1287 | # to use the copy of Jemalloc we ship with the source code of Redis.
1288 | # This is the default with Linux builds.
1289 | #
1290 | # 2. You never need to enable this feature if you don't have fragmentation
1291 | # issues.
1292 | #
1293 | # 3. Once you experience fragmentation, you can enable this feature when
1294 | # needed with the command "CONFIG SET activedefrag yes".
1295 | #
1296 | # The configuration parameters are able to fine tune the behavior of the
1297 | # defragmentation process. If you are not sure about what they mean it is
1298 | # a good idea to leave the defaults untouched.
1299 |
1300 | # Enable active defragmentation
1301 | # activedefrag yes
1302 |
1303 | # Minimum amount of fragmentation waste to start active defrag
1304 | # active-defrag-ignore-bytes 100mb
1305 |
1306 | # Minimum percentage of fragmentation to start active defrag
1307 | # active-defrag-threshold-lower 10
1308 |
1309 | # Maximum percentage of fragmentation at which we use maximum effort
1310 | # active-defrag-threshold-upper 100
1311 |
1312 | # Minimal effort for defrag in CPU percentage
1313 | # active-defrag-cycle-min 25
1314 |
1315 | # Maximal effort for defrag in CPU percentage
1316 | # active-defrag-cycle-max 75
1317 |
1318 |
--------------------------------------------------------------------------------
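Aside on the LFU settings above: the probabilistic counter increment described in the redis.conf comments (P = 1/(old_value*lfu_log_factor+1)) can be illustrated with a small, self-contained JavaScript sketch. This is not part of the repository and only mirrors the behaviour the comments describe; the real logic lives in the Redis C source, and the `lfuIncrement` name is made up for the example.

    // Sketch of the probabilistic LFU increment described in redis/redis.conf.
    // Purely illustrative; Redis implements this internally in C.
    function lfuIncrement(counter, lfuLogFactor = 10) {
      if (counter >= 255) return 255;               // the 8-bit counter saturates at 255
      const r = Math.random();                      // 1. random number R between 0 and 1
      const p = 1 / (counter * lfuLogFactor + 1);   // 2. P = 1/(old_value*lfu_log_factor+1)
      return r < p ? counter + 1 : counter;         // 3. increment only if R < P
    }

    // Simulate repeated accesses to one key: higher factors slow the counter's
    // growth, in the same spirit as the factor table in the config comments.
    let counter = 5;                                // new keys start with a counter of 5
    for (let i = 0; i < 1000; i++) counter = lfuIncrement(counter);
    console.log('counter after 1000 hits:', counter);
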
/routes/index.js:
--------------------------------------------------------------------------------
1 | // @format
2 | const {spawn} = require('child_process');
3 | const ipfsAPI = require('ipfs-api');
4 | const https = require('https');
5 | const fs = require('fs');
6 | const PouchDB = require('pouchdb-node');
7 | const rimraf = require('rimraf');
8 | const util = require('util');
9 | const Bull = require('bull');
10 | let mkdirp = require('mkdirp');
11 |
12 | mkdirp = util.promisify(mkdirp);
13 |
14 | let errors = {};
15 | const queue = new Bull('video processing', {
16 | redis: {port: 6379, host: 'redis', password: process.env.REDIS_PSWD},
17 | });
18 |
19 | queue.process(async (job, done) => {
20 | var db = new PouchDB('mydb');
21 | const {ipfsHash, random} = job.data;
22 |
23 | try {
24 | await download(
25 | 'https://ipfs.infura.io/ipfs/' + ipfsHash,
26 | process.env.PWD,
27 | ipfsHash,
28 | );
29 | } catch (err) {
30 | const doc = await db.get(ipfsHash);
31 | db.put({
32 | _id: ipfsHash,
33 | _rev: doc._rev,
34 | status: 'error',
35 | error: 'Download failed',
36 | duration: doc.duration,
37 | percentage: doc.percentage,
38 | progress: doc.progress,
39 | });
40 | return;
41 | }
42 | let duration;
43 | try {
44 | duration = await getDuration(ipfsHash);
45 | } catch (err) {
46 | const doc = await db.get(ipfsHash);
47 | db.put({
48 | _id: ipfsHash,
49 | _rev: doc._rev,
50 | status: 'error',
51 | error: err.message,
52 | });
53 |     deleteFile(ipfsHash, random); return; // abort the job, the duration is unknown
54 | }
55 | const doc = await db.get(ipfsHash);
56 | db.put({
57 | _id: ipfsHash,
58 | _rev: doc._rev,
59 | duration: duration,
60 | status: 'processing',
61 | });
62 |
63 | // https://github.com/ipfs/js-ipfs/tree/master/examples/browser-video-streaming
64 | const options = [
65 | '-i',
66 | ipfsHash,
67 | '-profile:v',
68 | 'baseline',
69 | '-level',
70 | '3.0',
71 | '-start_number',
72 | '0',
73 | '-hls_time',
74 | '5',
75 | //'-t',
76 | //'2',
77 | '-hls_list_size',
78 | '0',
79 | '-strict',
80 | '-2',
81 | '-f',
82 | 'hls',
83 | './' + ipfsHash + random + '/master.m3u8',
84 | ];
85 | console.log(options);
86 | const child = spawn('ffmpeg', options);
87 |
88 | child.stdout.on('data', data => {
89 | var textChunk = data.toString('utf8');
90 | console.log(textChunk);
91 | });
92 |
93 | child.stderr.on('data', async data => {
94 | var textChunk = data.toString('utf8');
95 | console.log(textChunk);
96 |
97 | const timeStart = textChunk.search('time=');
98 | if (timeStart !== -1) {
99 | const progress = textChunk.slice(timeStart + 5, timeStart + 16);
100 | const doc = await db.get(ipfsHash);
101 | db.put({
102 | _id: ipfsHash,
103 | _rev: doc._rev,
104 | progress: progress,
105 | duration: doc.duration,
106 | status: 'processing',
107 | percentage: getPercentage(doc.duration, progress),
108 | });
109 |       job.progress(getPercentage(doc.duration, progress)); // report progress to Bull
110 | }
111 | });
112 |
113 | child.on('exit', async err => {
114 | if (err === 1) {
115 | const doc = await db.get(ipfsHash);
116 | db.put({
117 | _id: ipfsHash,
118 | _rev: doc._rev,
119 | status: 'error',
120 | error: 'FFMPEG failed',
121 | duration: doc.duration,
122 | percentage: doc.percentage,
123 | progress: doc.progress,
124 | });
125 | deleteFile(ipfsHash, random);
126 | return;
127 | }
128 |
129 | console.log('Uploading to local ipfs node');
130 | const ipfs = ipfsAPI(process.env.IPFS_HOST, '5001', {
131 | protocol: 'http',
132 | });
133 |
134 | const addFromFs = util.promisify(ipfs.util.addFromFs.bind(ipfs));
135 | let result;
136 | try {
137 | result = await addFromFs('./' + ipfsHash + random + '/', {
138 | recursive: true,
139 | });
140 | } catch (err) {
141 | const doc = await db.get(ipfsHash);
142 | console.log('Updating database');
143 | db.put({
144 | _id: ipfsHash,
145 | _rev: doc._rev,
146 |         error: 'Adding files to IPFS failed',
147 | status: 'error',
148 | progress: doc.progress,
149 | duration: doc.duration,
150 | percentage: doc.percentage,
151 | });
152 |       deleteFile(ipfsHash, random); return; // don't fall through to the success update
153 | }
154 |
155 | const doc = await db.get(ipfsHash);
156 | console.log('Updating database');
157 | db.put({
158 | _id: ipfsHash,
159 | _rev: doc._rev,
160 | files: result,
161 | status: 'finished',
162 | progress: doc.progress,
163 | duration: doc.duration,
164 | percentage: doc.percentage,
165 | });
166 | deleteFile(ipfsHash, random);
167 | done();
168 | });
169 | });
170 |
171 | var ipfsHashes = async function(req, res, next) {
172 | var db = new PouchDB('mydb');
173 | try {
174 | const doc = await db.get(req.params.ipfsHash);
175 | if (doc.status === 'error' && errors[req.params.ipfsHash]) {
176 | errors[req.params.ipfsHash] = undefined;
177 | throw new Error('Continue with encoding');
178 | } else if (doc.status === 'error' && !errors[req.params.ipfsHash]) {
179 | errors[req.params.ipfsHash] = true;
180 |       res.status(200).json(doc);
181 | } else {
182 |       res.status(200).json(doc);
183 | }
184 | } catch (err) {
185 | db.put({
186 | _id: req.params.ipfsHash,
187 | status: 'downloading',
188 | });
189 |
190 |     res.status(200).json({
191 |       status: 'File is being downloaded and processed, check back later',
192 | });
193 | const random = randomString(10);
194 |
195 | try {
196 | await mkdirp(req.params.ipfsHash + random);
197 | } catch (err) {
198 | const doc = await db.get(req.params.ipfsHash);
199 | db.put({
200 | _id: req.params.ipfsHash,
201 | _rev: doc._rev,
202 | status: 'error',
203 | error: "Couldn't create directory",
204 | duration: doc.duration,
205 | percentage: doc.percentage,
206 | progress: doc.progress,
207 | });
208 | deleteFile(req.params.ipfsHash, random);
209 | }
210 |
211 | queue.add({
212 | ipfsHash: req.params.ipfsHash,
213 | random: random,
214 | });
215 | queue.on('completed', job => {
216 | console.log(`Job with id ${job.id} has been completed`);
217 | });
218 | queue.on('error', function(error) {
219 | console.log(error);
220 | });
221 | queue.on('failed', function(job, err) {
222 | console.log(err);
223 | });
224 | }
225 | };
226 |
227 | function getDuration(name) {
228 | return new Promise((resolve, reject) => {
229 | const options = [
230 | '-v',
231 | 'error',
232 | '-show_entries',
233 | 'format=duration',
234 | '-of',
235 | 'default=noprint_wrappers=1:nokey=1',
236 | name,
237 | ];
238 | console.log(options);
239 | const child = spawn('ffprobe', options);
240 | child.stdout.on('data', data => {
241 | var textChunk = data.toString('utf8');
242 | resolve(parseFloat(textChunk));
243 | });
244 | child.stderr.on('data', data => {
245 | var textChunk = data.toString('utf8');
246 | console.log(textChunk);
247 | });
248 | child.on('exit', err => {
249 | if (err === 1) {
250 | reject(new Error('Fatal error reading duration'));
251 | }
252 | });
253 | });
254 | }
255 |
256 | function deleteFile(name, random) {
257 | rimraf(name + random, function() {
258 | console.log('deleting folder');
259 | });
260 | console.log('deleting file');
261 | fs.unlinkSync(name);
262 | }
263 |
264 | function getPercentage(duration, progress) {
265 | const progressSplit = progress.split(':');
266 | const progressHours = progressSplit[0];
267 | const progressMinutes = progressSplit[1];
268 | const progressSeconds = progressSplit[2];
269 |
270 | const progressTotalSeconds =
271 | parseInt(progressHours) * 3600 +
272 | parseInt(progressMinutes) * 60 +
273 | parseInt(progressSeconds);
274 |
275 | return Math.round((progressTotalSeconds / duration) * 100);
276 | }
277 |
278 | const download = function(url, dest, filename) {
279 | return new Promise((resolve, reject) => {
280 | console.log('Starting download');
281 | const file = fs.createWriteStream(dest + '/' + filename);
282 | const request = https.get(url, function(response) {
283 | const {statusCode} = response;
284 | response.pipe(file);
285 | file.on('finish', function() {
286 | if (statusCode === 504) {
287 |           // a 504 from the gateway means the download did not complete
288 | reject(new Error('Download failed'));
289 | } else {
290 | file.close(resolve);
291 | }
292 | });
293 | });
294 | });
295 | };
296 |
297 | function randomString(len, charSet) {
298 | charSet =
299 | charSet || 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
300 | var randomString = '';
301 | for (var i = 0; i < len; i++) {
302 | var randomPoz = Math.floor(Math.random() * charSet.length);
303 | randomString += charSet.substring(randomPoz, randomPoz + 1);
304 | }
305 | return randomString;
306 | }
307 |
308 | module.exports = {
309 | ipfsHashes: ipfsHashes,
310 | };
311 |
--------------------------------------------------------------------------------
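For reference, the `ipfsHashes` handler above replies to every request with the current PouchDB document for the requested hash (status `downloading`, `processing`, `error` or `finished`, plus `progress`, `percentage` and eventually `files`); an `error` document is returned once, and the next request for the same hash kicks off the encoding again. Below is a minimal client-side polling sketch. It assumes the handler is mounted so that a `GET /<ipfsHash>` request reaches it and that `fetch` is available (modern browsers or Node 18+); the `waitForConversion` helper is hypothetical and not part of the repository.

    // Hypothetical polling helper; the URL prefix is an assumption, adjust it to
    // wherever routes/index.js is mounted in the Express app.
    async function waitForConversion(ipfsHash, intervalMs = 5000) {
      for (;;) {
        const res = await fetch('/' + ipfsHash);
        const doc = await res.json();
        console.log(doc.status, doc.percentage !== undefined ? doc.percentage + '%' : '');
        if (doc.status === 'finished') return doc.files; // IPFS hashes of the HLS output
        if (doc.status === 'error') throw new Error(doc.error);
        await new Promise(resolve => setTimeout(resolve, intervalMs));
      }
    }

    // Usage: waitForConversion(someIpfsHash).then(files => console.log(files));
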
/webpack.config.js:
--------------------------------------------------------------------------------
1 | // @format
2 | const path = require('path');
3 |
4 | var HtmlWebpackPlugin = require('html-webpack-plugin');
5 | var HTMLWebpackPluginConfig = new HtmlWebpackPlugin({
6 | template: __dirname + '/client/index.html',
7 | filename: 'index.html',
8 | inject: 'body',
9 | });
10 | module.exports = {
11 | mode: 'development',
12 | watch: true,
13 | watchOptions: {ignored: /node_modules/, aggregateTimeout: 300, poll: 500},
14 | entry: {
15 | javascript: './client/index.js',
16 | },
17 | devtool: 'eval-source-map',
18 | module: {
19 | rules: [
20 | {
21 | exclude: /(node_modules|bower_components)/,
22 | test: /\.(js|jsx)$/,
23 | use: {
24 | loader: 'babel-loader',
25 | options: {
26 | presets: ['@babel/preset-env'],
27 | },
28 | },
29 | },
30 | {
31 | test: /\.css$/,
32 |         use: ['style-loader', 'css-loader'],
33 |       },
34 | ],
35 | },
36 | plugins: [HTMLWebpackPluginConfig],
37 | output: {
38 | path: path.resolve(__dirname, './public'),
39 | filename: 'index.js',
40 | },
41 | };
42 |
--------------------------------------------------------------------------------