├── README.md
├── RequestQueue.js
├── haproxy_example.cfg
├── index.js
├── logs
│   └── .gitignore
├── nginx.conf
└── package.json

/README.md:
--------------------------------------------------------------------------------
# Example nginx, haproxy, pool-hall, node application

## Running

This is designed to work on Linux systems where you have node, npm, haproxy,
and nginx installed, ideally from your distro's package manager of choice.

Make sure ports 9000-9005 (plus 8999 for the haproxy stats page) are free.

### nginx

```
nginx -c nginx.conf -p ./
```

### haproxy

```
haproxy -f haproxy_example.cfg
```

### node example_app

```
npm install
npm start
```

With this you should be able to make requests to http://localhost:9000/health
and see 'OK'.
--------------------------------------------------------------------------------
/RequestQueue.js:
--------------------------------------------------------------------------------
import EventEmitter from 'events';
import onFinished from 'on-finished';

/*
 * RequestQueue ensures that only a single request executes at a time.
 *
 * This middleware intercepts requests as they come in by delaying execution
 * of next() until previous requests finish processing. This complements
 * external server configuration (via haproxy or similar) that restricts
 * concurrent requests. This per-process queue provides an application-level
 * guarantee of mutual exclusion between requests. That guarantee can then be
 * depended upon, allowing for safe (but careful) use of global state.
 * Additionally, it allows lifecycle hooks to be added for the periods when no
 * request is executing, before or after a request has run. These are ideal
 * points to install behavior that resets global state or performs actions
 * against the server at a "clean state" point in time.
 */
export default class RequestQueue extends EventEmitter {
  constructor() {
    super();
    this.queue = [];
    this.current = null;

    this.outerMiddleware = this.outerMiddleware.bind(this);
    this.innerMiddleware = this.innerMiddleware.bind(this);
    this.finishCurrent = this.finishCurrent.bind(this);
  }

  process() {
    if (!this.current) {
      // nothing in flight: promote the next queued request, if any
      this.current = this.queue.shift();
      this.emit('queueLength', this.queue.length);

      if (this.current) {
        this.emit('beforeRequest');
        this.current.start();
      }
    } else {
      this.emit('queueLength', this.queue.length);
    }
  }

  /*
   * Outer middleware must be the very first middleware installed on the app.
   * This intercepts the request and begins queueing it.
   */
  outerMiddleware(req, res, next) {
    const job = { req, res, start: next };

    this.push(job);
  }

  /*
   * Inner middleware must be the last middleware installed before endpoints.
   * This is only necessary because on-finished executes its callbacks in the
   * order in which they were installed. We need this to be innermost so that
   * we advance the queue only after the request and all other on-finished
   * callbacks complete.
   *
   * Not adding this middleware will result in the queue never being drained.
   */
  innerMiddleware(req, res, next) {
    onFinished(res, this.finishCurrent);
    next();
  }

  push(job) {
    this.queue.push(job);
    this.process();
  }

  finishCurrent() {
    this.current = null;
    this.emit('afterRequest');
    this.process();
  }
}
--------------------------------------------------------------------------------
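RequestQueue isn't wired into index.js in this repo, so here is a minimal
usage sketch (not from the repo) of how the middleware pair and lifecycle
events are meant to be installed on an Express app, following the ordering
rules in the comments above. The `/work` route and port are hypothetical.

```
// Usage sketch (not part of this repo): wiring RequestQueue into Express.
import express from 'express';
import RequestQueue from './RequestQueue';

const app = express();
const requestQueue = new RequestQueue();

// must be first, so every request is intercepted and queued
app.use(requestQueue.outerMiddleware);

// ...any other middleware goes here...

// must be last before the endpoints, so the queue advances only after the
// response (and all other on-finished callbacks) completes
app.use(requestQueue.innerMiddleware);

// hooks fire at the "clean state" points, with no request in flight
requestQueue.on('beforeRequest', () => { /* e.g. reset global state */ });
requestQueue.on('queueLength', len => console.log(`queue depth: ${len}`));

app.get('/work', (req, res) => res.type('text').send('done\n'));

app.listen(3000);
```

Because outerMiddleware delays next() rather than rejecting, queued requests
are invisible to the handlers; the haproxy `maxconn 1` settings below are what
keep this in-process queue short.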
/haproxy_example.cfg:
--------------------------------------------------------------------------------
# Sample haproxy config for the example app running 4 workers (on 9002-9005).
# Run with haproxy -f haproxy_example.cfg
global
    maxconn 2000
    nbproc 1
    cpu-map 1 0
    log 127.0.0.1 local0
    # by default huge buffers are allocated per backend for storing health
    # check results. Reduce to save memory, especially when not doing string
    # matching
    tune.chksize 128

defaults
    log global
    option dontlognull
    option log-separate-errors
    maxconn 2000
    # connect timeout on localhost doesn't matter
    timeout connect 5s

    # adjust this based on how long you can tolerate a request queueing. Kept
    # high for the example to demonstrate the impact of queueing.
    timeout queue 5s

    timeout check 5s

    # client and server timeouts should be kept high, as haproxy will
    # decrement the sessions for a backend when the timeout expires. The
    # backend will probably still be processing the request at the timeout,
    # so sending a new request its way would be bad. This should possibly be
    # infinite, but 60s should be safe enough with timeouts on the client,
    # especially when stronger process supervision is added.
    timeout client 60s
    timeout server 60s

    # enables trying new backends on connection failure
    option redispatch
    retries 3

    balance static-rr
    option http-keep-alive

listen stats_nbproc1 :8999
    mode http
    stats enable
    stats uri /
    bind-process 1

# example app frontend
frontend example
    mode http
    option httplog
    bind :9001
    default_backend example

backend example
    mode http
    option httplog

    # run 4 workers, each limited to one in-flight request
    server worker1 127.0.0.1:9002 maxconn 1
    server worker2 127.0.0.1:9003 maxconn 1
    server worker3 127.0.0.1:9004 maxconn 1
    server worker4 127.0.0.1:9005 maxconn 1
--------------------------------------------------------------------------------
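The `maxconn 1` lines are what push per-worker concurrency down to one
request; excess requests wait in haproxy's queue for up to `timeout queue 5s`.
A rough way to observe this from Node, assuming the full stack is running
(the /render endpoint comes from index.js, next):

```
// Load sketch (not part of this repo): fire 8 concurrent requests at the
// haproxy frontend. With 4 workers at maxconn 1 and a ~200ms /render, they
// should finish in roughly two waves (~200ms and ~400ms latencies).
const http = require('http');

function timedGet(path) {
  const start = Date.now();
  return new Promise((resolve, reject) => {
    http.get({ host: 'localhost', port: 9001, path }, (res) => {
      res.resume(); // drain the body so 'end' fires
      res.on('end', () => resolve(Date.now() - start));
    }).on('error', reject);
  });
}

Promise.all(Array.from({ length: 8 }, () => timedGet('/render')))
  .then(times => console.log('latencies (ms):', times.sort((a, b) => a - b)));
```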
/index.js:
--------------------------------------------------------------------------------
const express = require('express');
const process = require('process');
const { poolHall, startPoolHall } = require('pool-hall');

// convert a process.hrtime() [seconds, nanoseconds] tuple to milliseconds
function timeToMs(tuple) {
  return (tuple[0] * 1000) + (tuple[1] / 1000000);
}

// busy-loop for ms milliseconds, holding the CPU the whole time
function spinWait(ms) {
  const start = process.hrtime();

  while (timeToMs(process.hrtime(start)) < ms) {
  }
}

startPoolHall(
  {
    workerCount: 4,
    // never go unhealthy because of dead processes
    minWorkerCount: 0,
    workerEnv: id => ({ PORT: 9001 + (+id) })
  },
  // supervisor
  () => {
    process.title = 'node example_app supervisor';

    process.on('SIGTERM', () => {
      console.log('Got SIGTERM. Going down.');
      poolHall.stop().then(() => process.exit(0), () => process.exit(1));
    });

    process.on('SIGINT', () => {
      console.log('Got SIGINT. Going down.');
      poolHall.stop().then(() => process.exit(0), () => process.exit(1));
    });

    poolHall.on('workerUp', (id) => {
      console.log(`Worker ${id} is up`);
    });

    poolHall.on('workerDown', (id, info) => {
      console.log(`Worker ${id} is down with code ${info.signalCode || info.exitCode}`);
    });
  },
  // worker
  (ready) => {
    const workerId = poolHall.worker.id;

    process.title = `node example_app worker[${workerId}]`;

    // simulate server boot for 20s
    spinWait(20000);

    const app = express();

    let healthy = false;
    let booted = false;

    poolHall.worker.on('healthy', () => {
      if (!healthy) {
        console.log(`Worker ${workerId} is healthy`);
      }
      healthy = true;
      booted = true;
    });

    poolHall.worker.on('unhealthy', () => {
      if (healthy && booted) {
        console.log(`Worker ${workerId} is unhealthy`);
      }
      healthy = false;
    });

    app.get('/health', (req, res) => {
      if (healthy) {
        res.type('text').send('OK\n');
      } else {
        res.status(503).send('NOPE\n');
      }
    });

    // hangs forever, tying up the worker; useful for exercising timeouts
    app.get('/infinite', (req, res) => {
      while (true) {
      }
      // never reached
      res.type('text').send('This is awkward\n');
    });

    // simulate a 200ms CPU-bound render
    app.get('/render', (req, res) => {
      spinWait(200);
      res.type('text').send('DONE\n');
    });

    const server = app.listen(process.env.PORT, 'localhost', ready);

    poolHall.worker.onShutdown = () => server.close(() => process.exit(0));
  }
);
--------------------------------------------------------------------------------
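Because each worker spinWaits for 20 seconds at boot, /health returns 503
until pool-hall marks the worker healthy. A small polling sketch (not from
the repo) makes that boot window visible, assuming nginx from the config
below is fronting the stack on :9000:

```
// Boot-watch sketch (not part of this repo): poll /health once a second and
// log status codes; expect ~20s of 503s/connection errors, then 200 'OK'.
const http = require('http');

function poll() {
  http.get({ host: 'localhost', port: 9000, path: '/health' }, (res) => {
    console.log(new Date().toISOString(), res.statusCode);
    res.resume();
    if (res.statusCode !== 200) setTimeout(poll, 1000);
  }).on('error', (err) => {
    console.log(new Date().toISOString(), err.code);
    setTimeout(poll, 1000);
  });
}

poll();
```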
/logs/.gitignore:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/schleyfox/example-node-ops/a96c85dcb0f54c850eb45826febbac68630f629c/logs/.gitignore
--------------------------------------------------------------------------------
/nginx.conf:
--------------------------------------------------------------------------------
# Sample nginx config to run a server on localhost:9000 that proxies to an
# haproxy on localhost:9001. This is a heavily simplified version of what we
# run.
#
# Run with nginx -c nginx.conf -p ./

# Run a single worker process, as that is sufficient to saturate our backend
worker_processes 1;

worker_rlimit_nofile 1048576;

pid logs/nginx.pid;

events {
    worker_connections 768;
}

# settings necessary to run unprivileged in foreground
daemon off;
error_log logs/error.log;

http {
    client_max_body_size 5M;

    # settings necessary to run unprivileged in foreground
    proxy_temp_path tmp/proxy;
    client_body_temp_path tmp/body;
    fastcgi_temp_path tmp/fastcgi;
    uwsgi_temp_path tmp/uwsgi;
    scgi_temp_path tmp/scgi;
    access_log logs/access.log;
    error_log logs/error.log;

    proxy_read_timeout 600s;

    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    server_tokens off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    gzip on;
    gzip_disable "msie6";

    # relevant part of config

    upstream example_app {
        # our app listens on port 9001
        server 127.0.0.1:9001 fail_timeout=0;
    }

    server {
        # nginx listens on 9000
        listen 9000 default_server;

        # size of buffer for request body before spilling to disk.
        # We've set this high as our renderer processes large requests.
        client_body_buffer_size 1m;

        # absolute limit for client body size, analogous to body-parser's limit
        client_max_body_size 5m;

        # response headers buffer size
        proxy_buffer_size 8k;
        # response body buffers (8 * 128k = 1m total)
        proxy_buffers 8 128k;

        # default route
        location / {
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            proxy_set_header Host $http_host;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_redirect off;
            proxy_pass http://example_app;
        }

        # special case for /health that proxies the request with smaller buffers
        location = /health {
            proxy_http_version 1.1;
            proxy_set_header Connection "";
            proxy_set_header Host $http_host;
            proxy_set_header X-Forwarded-Proto $scheme;

            # set smaller buffers for /health
            client_body_buffer_size 8k;
            proxy_buffers 8 8k;
            proxy_redirect off;
            proxy_pass http://example_app;
        }

        # offload /ping connectivity check so that it never hits the node app
        location = /ping {
            return 200 'PONG
';
        }
    }
}
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "example-node-ops",
  "version": "0.0.1",
  "description": "example of a node server with nginx, haproxy, pool-hall",
  "main": "index.js",
  "scripts": {
    "start": "node index.js"
  },
  "dependencies": {
    "express": "^4.16.3",
    "on-finished": "^2.3.0",
    "pool-hall": "^0.0.2"
  }
}
--------------------------------------------------------------------------------
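Once all three processes are up, a quick end-to-end smoke test (not part of
the repo) can confirm the routing: /ping is answered by nginx itself, while
/health travels nginx -> haproxy -> worker:

```
// Smoke-test sketch (not part of this repo): /ping never leaves nginx,
// /health is proxied through haproxy to a worker.
const http = require('http');

['/ping', '/health'].forEach((path) => {
  http.get({ host: 'localhost', port: 9000, path }, (res) => {
    let body = '';
    res.on('data', (chunk) => { body += chunk; });
    res.on('end', () => console.log(`${path} -> ${res.statusCode} ${body.trim()}`));
  }).on('error', (err) => console.error(`${path} failed: ${err.message}`));
});
```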