├── .github
│   ├── FUNDING.yml
│   └── README.md
├── .gitignore
├── cluster.md
├── maintenance
│   ├── package.json
│   └── index.html
├── nginx
│   ├── setup_letsencrypt
│   ├── dumps.inventaire.io.nginx
│   ├── ssl.conf
│   ├── security_headers.conf
│   ├── setup
│   └── inventaire.original.nginx
├── couchdb
│   ├── custom.ini
│   └── setup
├── jobs.cron
├── install_node
├── setup_firewall
├── systemd
│   └── inventaire.service.original
├── install_inventaire_services
├── install_inventaire
├── README.md
├── .zshrc
├── free_disk_space
├── media_storage
│   └── README.md
├── install_elasticsearch
├── install
├── .vimrc
└── aliases
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | liberapay: Association_Inventaire
2 |
--------------------------------------------------------------------------------
/.github/README.md:
--------------------------------------------------------------------------------
1 | ## ⚠️ We moved to Codeberg 👉 https://codeberg.org/inventaire/inventaire-deploy
2 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.custom.*
2 | *.crt
3 | *.key
4 | systemd/inventaire.service
5 | systemd/inventaire.service-local
6 | couchdb/local.custom.ini
7 | backyard
8 | node_modules
9 |
10 |
--------------------------------------------------------------------------------
/cluster.md:
--------------------------------------------------------------------------------
1 | # Cluster
2 |
3 | WIP: centralizing information on running several instances of the Inventaire server
4 |
5 | * Make sure that only one instance sends activity summaries: set `CONFIG.activitySummary.disabled` to `true` on all additional instances, as shown in the sketch below
6 |
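7 | A minimal sketch of such an override, assuming a [node-config](https://github.com/lorenwest/node-config/wiki/Multiple-Node-Instances) style local config file (the filename is hypothetical and depends on your `NODE_APP_INSTANCE`):
8 |
9 | ```sh
10 | # Create a local config override for an additional instance (here NODE_APP_INSTANCE=alt)
11 | cat << 'EOF' > config/local-alt.cjs
12 | module.exports = {
13 |   activitySummary: {
14 |     disabled: true,
15 |   },
16 | }
17 | EOF
18 | ```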
--------------------------------------------------------------------------------
/maintenance/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "maintenance",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "start": "live-server --port=3007"
8 | },
9 | "author": "",
10 | "license": "AGPL-3.0",
11 | "dependencies": {
12 | "live-server": "^1.2.0"
13 | }
14 | }
15 |
--------------------------------------------------------------------------------
/nginx/setup_letsencrypt:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | # Source: https://certbot.eff.org/lets-encrypt/ubuntufocal-nginx
4 | sudo snap install core
5 | sudo snap refresh core
6 | sudo snap install --classic certbot
7 | sudo certbot --authenticator webroot --installer nginx -w /var/www/html
8 | # When prompted for domain names:
9 | # 1 => Pick both 'inventaire.io' and 'www.inventaire.io'
10 |
--------------------------------------------------------------------------------
/couchdb/custom.ini:
--------------------------------------------------------------------------------
1 | [log]
2 | writer = journald
3 |
4 | [couch_httpd_auth]
5 | require_valid_user = true
6 |
7 | [ssl]
8 | ; Required for replication from a remote server
9 | enable = true
10 | cert_file = /opt/cert/couchdb.pem
11 | key_file = /opt/cert/privkey.pem
12 |
13 | [chttpd]
14 | ; Required for replication from a remote server
15 | bind_address = 0.0.0.0
16 |
--------------------------------------------------------------------------------
/jobs.cron:
--------------------------------------------------------------------------------
1 | # Set up cron by running `cat jobs.cron | crontab -`
2 |
3 | # m h day-of-month month day-of-week command
4 |
5 | NODE_ENV=production
6 | NODE_APP_INSTANCE=primary
7 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/home/admin/.nvm/versions/node/v18.16.1/bin
8 |
9 | 0 3 18 * * cd /home/admin/inventaire && npm run create-entities-dumps > "/home/admin/inventaire/logs/dumps-$(date -I).log" 2>&1
10 |
--------------------------------------------------------------------------------
/maintenance/index.html:
--------------------------------------------------------------------------------
1 | <!DOCTYPE html>
2 | <html>
3 | <head>
4 |   <meta charset="utf-8">
5 |   <title>inventaire.io is in maintenance</title>
6 | </head>
7 | <body>
8 |   <h1>inventaire.io is in maintenance</h1>
9 |   <p>We will be back in an hour or so, all our apologies for the trouble!</p>
10 | </body>
11 | </html>
--------------------------------------------------------------------------------
/nginx/dumps.inventaire.io.nginx:
--------------------------------------------------------------------------------
1 | server {
2 | listen 443 ssl http2;
3 | listen [::]:443 ssl http2;
4 | server_name dumps.inventaire.io;
5 |
6 | include /etc/nginx/snippets/ssl_with_stapling.conf;
7 |
8 | root /home/admin/admin/dumps;
9 | location = /favicon.ico {
10 | include /etc/nginx/snippets/security_headers.conf;
11 | return 302 https://wiki.inventaire.io/favicon.ico;
12 | }
13 |
14 | location / {
15 | include /etc/nginx/snippets/security_headers.conf;
16 | autoindex on;
17 | autoindex_exact_size off;
18 | }
19 | }
20 |
--------------------------------------------------------------------------------
/install_node:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eo pipefail
4 |
5 | tmpdir=$(mktemp -d)
6 |
7 | # Install NVM, the NodeJS version manager https://github.com/nvm-sh/nvm
8 | curl -fsSL https://raw.githubusercontent.com/nvm-sh/nvm/v0.40.1/install.sh -o "${tmpdir}/install.sh"
9 |
10 | # Check that it is the right file before executing it (sha256sum expects two spaces between the checksum and the file name)
11 | echo "0b882590028d326ce5d07a198c1061d2ab2ba00e3631e8ed96f65a60d06a8619  ${tmpdir}/install.sh" | sha256sum --check --strict
12 | bash "${tmpdir}/install.sh"
13 | source ~/.nvm/nvm.sh
14 |
15 | nvm install 20
16 | npm install -g --production lev2 couchdb-bulk2 add-to-systemd
17 |
--------------------------------------------------------------------------------
/nginx/ssl.conf:
--------------------------------------------------------------------------------
1 | # Recommended by https://ssl-config.mozilla.org/#server=nginx&version=1.18.0&config=intermediate&openssl=1.1.1f&guideline=5.6
2 |
3 | ssl_session_timeout 1d;
4 | ssl_session_cache shared:SSL:50m;
5 | ssl_session_tickets off;
6 | ssl_protocols TLSv1.2 TLSv1.3;
7 | ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384;
8 | ssl_prefer_server_ciphers on;
9 | ssl_stapling on;
10 | ssl_stapling_verify on;
11 | ssl_dhparam /etc/nginx/dhparam.pem;
12 |
--------------------------------------------------------------------------------
/setup_firewall:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | sudo ufw default deny incoming
3 | sudo ufw default allow outgoing
4 | sudo ufw allow ssh
5 | sudo ufw allow http
6 | sudo ufw allow https
7 |
8 | read -p "Enter the IP of the machine where your backup CouchDB is running (leave empty to skip) " BACKUPIP
9 | [ -n "$BACKUPIP" ] && sudo ufw allow from "$BACKUPIP" to any port 6984
10 |
11 | sudo ufw enable
12 |
13 | # This actually seems not to be enough, so while waiting for a more effective solution,
14 | # make sure to only bind to localhost (ex: 127.0.0.1:9999:8080) when exposing ports
15 | [[ -f /etc/default/docker ]] && {
16 | echo '
17 | # Prevent Docker modifying iptables
18 | # See https://www.techrepublic.com/article/how-to-fix-the-docker-and-ufw-security-flaw/
19 | DOCKER_OPTS="--iptables=false"
20 | ' | sudo tee -a /etc/default/docker
21 | }
22 |
--------------------------------------------------------------------------------
/systemd/inventaire.service.original:
--------------------------------------------------------------------------------
1 | # Documentation:
2 | # https://www.freedesktop.org/software/systemd/man/systemd.exec.html
3 | # https://rocketeer.be/articles/deploying-node-js-with-systemd/
4 | # https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/System_Administrators_Guide/sect-Managing_Services_with_systemd-Unit_Files.html
5 |
6 | [Unit]
7 | Description=inventaire
8 |
9 | [Service]
10 | ExecStart=HOMEFOLDER/.nvm/versions/node/NODEVERSION/bin/coffee INVFOLDER/server.coffee
11 | WorkingDirectory=INVFOLDER/
12 | Environment=PATH=HOMEFOLDER/.nvm/versions/node/NODEVERSION/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
13 | Environment=NODE_ENV=production
14 | Environment=FORCE_COLOR=true
15 | User=USERNAME
16 | Group=USERNAME
17 | Restart=always
18 | StandardOutput=journal
19 |
20 | [Install]
21 | WantedBy=multi-user.target
22 |
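23 | # A hypothetical substitution, for illustration only (actual values depend on the machine):
24 | # ExecStart=/home/admin/.nvm/versions/node/v20.11.1/bin/coffee /home/admin/inventaire/server.coffee
25 | # WorkingDirectory=/home/admin/inventaire/
26 | # User=admin
27 | # Group=admin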
--------------------------------------------------------------------------------
/nginx/security_headers.conf:
--------------------------------------------------------------------------------
1 | # add_header directives from parent blocks are ignored when the current block also calls add_header
2 | # Thus the need for this snippet, which redefines the same headers in many blocks
3 | # See http://nginx.org/en/docs/http/ngx_http_headers_module.html#add_header
4 | add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload" always;
5 | # Opt out of Google FLoC, see: https://plausible.io/blog/google-floc#how-to-opt-out-of-floc-as-a-web-developer-set-a-permissions-policy
6 | add_header Permissions-Policy "interest-cohort=()" always;
7 | # source: https://gist.github.com/plentz/6737338
8 | add_header X-Frame-Options "SAMEORIGIN" always;
9 | # source: https://scotthelme.co.uk/hardening-your-http-response-headers/#x-content-type-options
10 | add_header X-Content-Type-Options 'nosniff' always;
11 | # source: https://scotthelme.co.uk/a-new-security-header-referrer-policy/
12 | add_header Referrer-Policy 'strict-origin' always;
13 |
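14 | # For illustration of the add_header inheritance note above (hypothetical header names):
15 | # server {
16 | #   add_header X-Parent 1;
17 | #   location / {
18 | #     add_header X-Child 1;   # responses from / carry X-Child but NOT X-Parent
19 | #   }
20 | # }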
--------------------------------------------------------------------------------
/install_inventaire_services:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eu
4 |
5 | cmd="$(which node) $(pwd)/server/server.js"
6 | echo "cmd: $cmd"
7 |
8 | # env "PATH=$PATH" makes add-to-systemd accessible in sudo mode
9 | sudo env "PATH=$PATH" ./node_modules/.bin/add-to-systemd inventaire \
10 | --env FORCE_COLOR=true \
11 | --env NODE_ENV=production \
12 | --env PATH="$PATH" \
13 | --option LimitNOFILE=65536 \
14 | --user "$USERNAME" -- "$cmd"
15 |
16 | # NODE_APP_INSTANCE: use a different local config
17 | # see https://github.com/lorenwest/node-config/wiki/Multiple-Node-Instances
18 | # A MemoryMax could also be set manually on the unit (ex: MemoryMax=20%)
19 | # see https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html
20 | sudo env "PATH=$PATH" ./node_modules/.bin/add-to-systemd inventaire-alt \
21 | --env FORCE_COLOR=true \
22 | --env PATH="$PATH" \
23 | --env NODE_ENV=production \
24 | --env NODE_APP_INSTANCE=alt \
25 | --option LimitNOFILE=65536 \
26 | --nice 10 \
27 | --user "$USERNAME" -- "$cmd"
28 |
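29 | # A hypothetical follow-up (sketch): cap the alt instance memory with a systemd drop-in
30 | # sudo systemctl edit inventaire-alt
31 | # then, in the editor, add:
32 | # [Service]
33 | # MemoryMax=20%
34 | # and apply with: sudo systemctl restart inventaire-alt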
--------------------------------------------------------------------------------
/install_inventaire:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env zsh
2 |
3 | git clone https://github.com/inventaire/inventaire
4 | cd inventaire
5 | npm install --production
6 | npm install add-to-systemd --production
7 |
8 | ./install_inventaire_services
9 |
10 | # Make logs persistent
11 | # https://doc.opensuse.org/documentation/leap/reference/html/book.opensuse.reference/cha.journalctl.html#journalctl.persistent
12 | sudo sed -i 's/#Storage=auto/Storage=persistent/' /etc/systemd/journald.conf
13 | # Increasing from 15% to 30%
14 | sudo sed -i 's/#SystemMaxUse=/SystemMaxUse=30%/' /etc/systemd/journald.conf
15 | # But make sure it never brings us below the 5G available-disk threshold
16 | sudo sed -i 's/#SystemKeepFree=/SystemKeepFree=5G/' /etc/systemd/journald.conf
17 | sudo systemctl restart systemd-journald
18 |
19 | # Firewall setup
20 | # - make sure SMTP isn't blocked
21 | # Scaleway: https://community.online.net/t/solved-smtp-connection-blocked/2262/3
22 |
23 | sudo systemctl start inventaire
24 | sudo systemctl start inventaire-alt
25 |
26 | # If you have existing CouchDB databases, you would now need to
27 | # * restore them, see https://git.inventaire.io/inventaire/blob/main/docs/couchdb_backup_and_restore.md#restore
28 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # inventaire.io in production
2 |
3 | Tools and scripts to document the setup of [Inventaire](https://github.com/inventaire/inventaire) in production at https://inventaire.io.
4 |
5 | This is a reference implementation, informed by the preferences, choices, and experience of running Inventaire in production, but other implementations are possible: for less opinionated, more general documentation, see the [general technical Inventaire documentation](https://github.com/inventaire/inventaire/tree/main/docs). See also [docker-inventaire](https://github.com/inventaire/docker-inventaire).
6 |
7 | ## Stack Map
8 | This repo corresponds to the "deployment scripts" in the [stack map](https://inventaire.github.io/stack/)
9 |
10 | ## Installation
11 | ### Debian/Ubuntu
12 | ```sh
13 | # if git isn't installed already
14 | sudo apt-get install git
15 | # cloning the deployment tools in the local directory
16 | git clone https://git.inventaire.io/inventaire-deploy
17 | ```
18 | Now you 'just have' to install everything: see `./install` for a realistic preview of what that could look like (but don't execute those scripts blindly: they are meant more as documentation with shell formatting)
19 |
20 | You might also want to configure your firewall: see `./setup_firewall`
21 |
22 | ## inventaire.io specific services
23 |
24 | * [Prerender](https://github.com/inventaire/prerender)
25 |
--------------------------------------------------------------------------------
/.zshrc:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env zsh
2 |
3 | export LC_ALL=en_US.UTF-8
4 | export LANG=en_US.UTF-8
5 |
6 | # source : http://doc.ubuntu-fr.org/zsh
7 | zstyle ':completion:*:descriptions' format '%U%B%d%b%u'
8 | zstyle ':completion:*:warnings' format '%BSorry, no matches for: %d%b'
9 | zstyle ':completion:*:sudo:*' command-path /usr/local/sbin /usr/local/bin \
10 | /usr/sbin /usr/bin /sbin /bin /usr/X11R6/bin
11 | # Create a cache of possible completions,
12 | # very useful for completions that take a long time,
13 | # such as searching for a package: aptitude install moz
14 | zstyle ':completion:*' use-cache on
15 | zstyle ':completion:*' cache-path ~/.zsh_cache
16 |
17 | autoload -U promptinit
18 | promptinit
19 | prompt adam1
20 |
21 | HISTSIZE=10000
22 | SAVEHIST=10000
23 | HISTFILE=~/.history
24 |
25 | bindkey "ù" up-line-or-search
26 |
27 | # To save every command before it is executed (this is different from bash's history -a solution):
28 | setopt inc_append_history
29 | # To retrieve the history file every time history is called upon.
30 | setopt share_history
31 |
32 | setopt BRACE_CCL # source: http://stackoverflow.com/questions/2394728/zsh-brace-expansion-seq-for-character-lists-how
33 |
34 | LH=http://localhost
35 |
36 | source ~/.aliases
37 |
38 | export EDITOR=/usr/bin/vim
39 | export GOPATH=$HOME/go
40 | export PATH=$PATH:/snap/bin/
41 |
--------------------------------------------------------------------------------
/free_disk_space:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 | # Things that can be done to recover disk space
3 | # Use `ncdu` to identify what is taking so much space
4 |
5 | # After a journal backup
6 | sudo journalctl --vacuum-size=10M
7 |
8 | # Remove linux headers among other things
9 | # Remove all cached packages stored in /var/cache (without uninstalling)
10 | sudo apt-get autoremove && sudo apt-get clean
11 |
12 | # References:
13 | # https://doc.ubuntu-fr.org/nettoyer_ubuntu
14 |
15 | # The client's node_modules aren't needed, as the client is built on dev machines and rsynced
16 | rm -rf ~/inventaire/client/node_modules
17 |
18 | # Database compaction should be done automatically by CouchDB since 2.1.1
19 | # http://docs.couchdb.org/en/latest/maintenance/compaction.html
20 | # curl -H "Content-Type: application/json" -X POST $(LHCOUCHAUTH):3456/dbname/_compact
21 | # curl -H "Content-Type: application/json" -X POST $(LHCOUCHAUTH):3456/dbname/_compact/designdocname
22 |
23 | # View cleanup is not done automatically, but should now be handled by the server 'couchdb:cleanup-after-design-docs-changes' script
24 | # curl -H "Content-Type: application/json" -X POST $(LHCOUCHAUTH):3456/entities-prod/_view_cleanup
25 | # curl -H "Content-Type: application/json" -X POST $(LHCOUCHAUTH):3456/patches-prod/_view_cleanup
26 | # curl -H "Content-Type: application/json" -X POST $(LHCOUCHAUTH):3456/tasks-prod/_view_cleanup
27 |
--------------------------------------------------------------------------------
/media_storage/README.md:
--------------------------------------------------------------------------------
1 | # media storage
2 |
3 | For media storage, we use an OpenStack Swift provider, currently OVH, with 3 containers (`assets`, `entities`, `users`) replicated between 2 regions ([learning from previous mistakes](https://twitter.com/olesovhcom/status/1369478732247932929)).
4 |
5 | ## OpenStack Swift
6 |
7 | The best way to manipulate objects in the containers is to use the Python Swift client:
8 | ```sh
9 | sudo apt-get install python-swiftclient -y
10 | ```
11 |
12 | Load the environment from the config file which can be downloaded from the provider, see:
13 | - https://docs.ovh.com/fr/public-cloud/charger-les-variables-denvironnement-openstack/
14 | - https://www.ovh.com/manager/public-cloud/index.html#/pci/projects/5897bfd533bf4d3ca0a0522046f9f535/users
15 |
16 | ```sh
17 | source ./openrc.sh
18 | # List containers in the region set by the loaded environment
19 | swift list
20 | # Create a container with the content of the current folder
21 | swift upload testcontainer .
22 | # Make that new container public
23 | swift post --read-acl '.r:*,.rlistings' testcontainer
24 | ```
25 |
26 | ### Replication
27 | To create the replicated containers:
28 | * load the config for the other region
29 | ```sh
30 | source ./openrc_backup.sh
31 | ```
32 | * re-create the empty containers on the secondary location
33 | ```sh
34 | swift upload testcontainer .
35 | swift post --read-acl '.r:*,.rlistings' testcontainer
36 | ```
37 | * Follow this [tutorial](https://docs.ovh.com/gb/en/public-cloud/sync-object-containers/) to set the sync key on those containers on both regions, and start synchronizing.
38 |
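39 | A hypothetical sketch of that sync-key step (all values are placeholders; get the real realm and cluster names from the tutorial above):
40 | ```sh
41 | # Run on both regions, with the same secret key, each pointing to the other region's container
42 | swift post -t '//REALM/CLUSTER_B/AUTH_tenant/testcontainer' -k 'SECRET_KEY' testcontainer
43 | ```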
--------------------------------------------------------------------------------
/install_elasticsearch:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | # Following https://www.elastic.co/guide/en/elasticsearch/reference/7.10/deb.html#deb-repo
4 | wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo apt-key add -
5 | sudo apt-get install apt-transport-https
6 | echo "deb https://artifacts.elastic.co/packages/7.x/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-7.x.list
7 | sudo apt-get update
8 | sudo apt-get install elasticsearch
9 |
10 | # Customize ElasticSearch Unit to restart on failure
11 | # see https://github.com/elastic/elasticsearch/issues/25425
12 | # https://github.com/elastic/puppet-elasticsearch/pull/870
13 | # https://www.digitalocean.com/community/tutorials/understanding-systemd-units-and-unit-files
14 | sudo mkdir -p /etc/systemd/system/elasticsearch.service.d
15 | echo '
16 | [Unit]
17 | StartLimitInterval=200
18 | StartLimitBurst=5
19 |
20 | [Service]
21 | Restart=always
22 | RestartSec=3
23 | Environment=ES_JAVA_OPTS="-Xms2g -Xmx2g"
24 | ' | sudo tee /etc/systemd/system/elasticsearch.service.d/override.conf
25 |
26 | # Start ElasticSearch at startup
27 | sudo systemctl daemon-reload
28 | sudo systemctl enable elasticsearch
29 | sudo systemctl start elasticsearch
30 |
31 |
32 | # Disabling GeoIP
33 | # see https://www.elastic.co/guide/en/elasticsearch/reference/current/geoip-processor.html
34 | # https://www.elastic.co/guide/en/elasticsearch/reference/current/cluster-update-settings.html
35 | curl -H 'content-type: application/json' -XPUT "http://localhost:9200/_cluster/settings" -d '{
36 | "persistent" : {
37 | "ingest.geoip.downloader.enabled" : false
38 | }
39 | }'
40 |
--------------------------------------------------------------------------------
/install:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env sh
2 |
3 | export PROJECT_ROOT=$(pwd)
4 | export NGINX_FOLDER=$PROJECT_ROOT/nginx
5 | export COUCHDB_FOLDER=$PROJECT_ROOT/couchdb
6 | export SERVER_FOLDER=$PROJECT_ROOT/inventaire
7 | export LOGS_FOLDER=$PROJECT_ROOT/inventaire/logs
8 |
9 | sudo apt-get update -y
10 |
11 | # - software-properties-common: installs add-apt-repository
12 | sudo apt-get install curl zsh git nginx graphicsmagick openssl inotify-tools software-properties-common -y
13 |
14 | # Optional:
15 | # - build-essential: installs `make`, which is required by npm:heapdump
16 | sudo apt-get install -y fail2ban build-essential
17 |
18 | echo "
19 | [sshd]
20 | enabled = true
21 | port = ssh
22 | filter = sshd
23 | maxretry = 3
24 | bantime = 31536000
25 | findtime = 864000
26 | " | sudo tee /etc/fail2ban/jail.d/sshd.conf
27 |
28 | sudo systemctl restart fail2ban
29 |
30 | # NOTES
31 | # - fail2ban: should work out of the box for ssh
32 | # https://github.com/fail2ban/fail2ban
33 | # https://www.digitalocean.com/community/tutorials/how-to-protect-ssh-with-fail2ban-on-ubuntu-14-04
34 |
35 | cp aliases ~/.aliases
36 | echo "source ~/.aliases" >> ~/.profile
37 |
38 | # Increase the limit of opened files to lower risks of LevelDB throwing "IO error: Too many open files"
39 | # https://singztechmusings.wordpress.com/2011/07/11/ulimit-how-to-permanently-set-kernel-limits-in-linux/
40 | # https://stackoverflow.com/questions/21591535/is-there-any-downside-to-setting-ulimit-really-high
41 | echo "
42 | * soft nofile 16384
43 | * hard nofile 16384
44 | " | sudo tee -a /etc/security/limits.conf
45 |
46 | ./install_node
47 | cd $NGINX_FOLDER && ./setup
48 | cd $COUCHDB_FOLDER && ./setup
49 | cd $PROJECT_ROOT && ./install_elasticsearch
50 | cd $PROJECT_ROOT && ./install_inventaire
51 |
--------------------------------------------------------------------------------
/.vimrc:
--------------------------------------------------------------------------------
1 | set number
2 | syntax on
3 | set syntax=sh
4 |
5 | set history=100
6 |
7 | " read/write a .viminfo file, don't store more
8 | " than 50 lines of registers
9 | set viminfo='20,\"50
10 |
11 | " show the cursor position all the time
12 | set ruler
13 |
14 | " switch on highlighting the last used search pattern
15 | set hlsearch
16 |
17 |
18 | " Autocommands structure
19 | " source: http://learnvimscriptthehardway.stevelosh.com/chapters/12.html
20 | "
21 | " :autocmd BufNewFile *.txt :write
22 | " ^ ^ ^
23 | " | | |
24 | " | | The command to run.
25 | " | |
26 | " | A "pattern" to filter the event.
27 | " |
28 | " The "event" to watch for.
29 |
30 |
31 | " Only do this part when compiled with support for autocommands
32 | if has("autocmd")
33 |
34 | " Autocommand Groups http://learnvimscriptthehardway.stevelosh.com/chapters/14.html
35 | augroup maxlath
36 |
37 | " Remove ALL autocommands for the current group.
38 | autocmd!
39 |
40 | " In text files, always limit the width of text to 78 characters
41 | " autocmd BufRead *.txt set tw=78
42 | " When editing a file, always jump to the last cursor position
43 | autocmd BufReadPost *
44 | \ if line("'\"") > 0 && line ("'\"") <= line("$") |
45 | \ exe "normal! g'\"" |
46 | \ endif
47 |
48 | autocmd BufReadPost COMMIT_EDITMSG
49 | \ exe "normal! gg"
50 |
51 | " don't write swapfile on most commonly used directories for NFS mounts or USB sticks
52 | " autocmd BufNewFile,BufReadPre /media/*,/run/media/*,/mnt/* set directory=~/tmp,/var/tmp,/tmp
53 |
54 | " start with spec file template
55 | " autocmd BufNewFile *.spec 0r /usr/share/vim/vimfiles/template.spec
56 |
57 | augroup END
58 |
59 | endif
60 |
61 |
--------------------------------------------------------------------------------
/couchdb/setup:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eu
4 |
5 | # Following https://docs.couchdb.org/en/stable/install/unix.html for Ubuntu 20.04
6 | sudo apt update && sudo apt install -y curl apt-transport-https gnupg
7 | curl https://couchdb.apache.org/repo/keys.asc | gpg --dearmor | sudo tee /usr/share/keyrings/couchdb-archive-keyring.gpg >/dev/null 2>&1
8 | source /etc/os-release
9 | echo "deb [signed-by=/usr/share/keyrings/couchdb-archive-keyring.gpg] https://apache.jfrog.io/artifactory/couchdb-deb/ ${VERSION_CODENAME} main" \
10 | | sudo tee /etc/apt/sources.list.d/couchdb.list >/dev/null
11 | sudo apt update
12 |
13 | # You will be prompted for some setup choices:
14 | # - cluster or standalone: standalone should be fine
15 | # - set an 'admin' user password
16 | # - host ip: set to 0.0.0.0 if you want to setup replication from a remote server
17 | sudo apt install couchdb
18 |
19 | # SSL setup based on https://docs.couchdb.org/en/stable/config/http.html?highlight=ssl#https-ssl-tls-options
20 | sudo mkdir -p /opt/couchdb/etc/cert
21 | cd /opt/couchdb/etc/cert
22 | sudo openssl genrsa -out privkey.pem
23 | sudo openssl req -new -x509 -key privkey.pem -out couchdb.pem -days 1095 -subj "/C=/ST=/L=/O=/OU=/CN=."
24 | sudo chmod 600 privkey.pem couchdb.pem
25 | sudo chown couchdb privkey.pem couchdb.pem
26 |
27 | # Add custom settings
28 | sudo cp custom.ini /opt/couchdb/etc/local.d/custom.ini
29 |
30 | # Increase Query Servers max memory
31 | # See https://docs.couchdb.org/en/stable/config/query-servers.html
32 | sudo mkdir -p /etc/systemd/system/couchdb.service.d/
33 | echo '
34 | [Service]
35 | Environment=COUCHDB_QUERY_SERVER_JAVASCRIPT="/opt/couchdb/bin/couchjs -S 536870912 /opt/couchdb/share/server/main.js"
36 | # See https://docs.couchdb.org/en/stable/maintenance/performance.html#disk-and-file-system-performance
37 | Environment=ERL_FLAGS="+A 4"
38 | ' | sudo tee /etc/systemd/system/couchdb.service.d/override.conf
39 |
40 | sudo systemctl daemon-reload
41 | sudo systemctl restart couchdb
42 |
43 | # Databases will then be created by the nodejs server at first startup
44 | # You still need to set up database replication on the remote server
45 |
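46 | # A hypothetical sketch, to be run on the remote backup server (names and credentials are placeholders):
47 | # curl -X POST https://admin:PASSWORD@localhost:6984/_replicator \
48 | #   -H 'Content-Type: application/json' \
49 | #   -d '{"source": "https://admin:PASSWORD@primary.example.org:6984/entities-prod", "target": "https://admin:PASSWORD@localhost:6984/entities-prod", "continuous": true}'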
--------------------------------------------------------------------------------
/nginx/setup:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -eu
4 |
5 | read -p "Enter a domain name (defaults to inventaire.io) " DOMAIN_NAME
6 | # default via parameter expansion (`[ -z ... ] && ...` would trip `set -e` when the test fails)
7 | DOMAIN_NAME="${DOMAIN_NAME:-inventaire.io}"
8 |
9 | read -p "Enter the URL of your Prerender (https://git.inventaire.io/prerender) instance (defaults to http://localhost:3000) " PRERENDER_INSTANCE
10 | PRERENDER_INSTANCE="${PRERENDER_INSTANCE:-http://localhost:3000}"
11 |
12 | read -p "Enter the IP of your Prerender instance (defaults to 0.0.0.0) " PRERENDER_IP
13 | PRERENDER_IP="${PRERENDER_IP:-0.0.0.0}"
14 |
15 | echo 'PROJECT_ROOT' $PROJECT_ROOT
16 | echo 'PRERENDER_INSTANCE' $PRERENDER_INSTANCE
17 | echo 'DOMAIN_NAME' $DOMAIN_NAME
18 |
19 | # Customizing the Nginx config to the local needs.
20 | # Using '@' as delimiters in sed instead of '/' to avoid confusion with '/' in paths
21 | # cf http://stackoverflow.com/a/9366940/3324977
22 | cat inventaire.original.nginx |
23 | sed "s@PROJECT_ROOT@$PROJECT_ROOT@g" |
24 | sed "s@DOMAIN_NAME@$DOMAIN_NAME@g" |
25 | sed "s@PRERENDER_INSTANCE@$PRERENDER_INSTANCE@g" |
26 | sed "s@PRERENDER_IP@$PRERENDER_IP@g" > inventaire.custom.nginx
27 |
28 | sudo cp ssl.conf security_headers.conf /etc/nginx/snippets
29 | sudo cp inventaire.custom.nginx /etc/nginx/sites-available/default
30 |
31 | # generate dhparam.pem file
32 | sudo openssl dhparam -out /etc/nginx/dhparam.pem 2048
33 |
34 | sudo mkdir -p /etc/systemd/system/nginx.service.d
35 |
36 | # The "+" is required to prevent `mkdir() "/tmp/nginx/tmp" failed (2: No such file or directory)` errors
37 | # See https://unix.stackexchange.com/questions/207469/systemd-permission-issue-with-mkdir-execstartpre#comment964581_207493
38 | # and https://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
39 | echo '
40 | [Service]
41 | ExecStartPre=+/bin/mkdir -p /tmp/nginx/tmp /tmp/nginx/resize/img/users /tmp/nginx/resize/img/groups /tmp/nginx/resize/img/entities /tmp/nginx/resize/img/remote /tmp/nginx/resize/img/assets
42 | ExecStartPre=+/bin/chown -R www-data:www-data /tmp/nginx
43 | ' | sudo tee /etc/systemd/system/nginx.service.d/override.conf
44 |
45 | # setup nginx systemd service
46 | sudo systemctl daemon-reload
47 | sudo systemctl enable nginx
48 | sudo systemctl start nginx
49 |
50 | ./setup_letsencrypt
51 |
52 | # reload nginx configuration
53 | sudo nginx -s reload
54 |
55 | echo "nginx setup done"
56 |
--------------------------------------------------------------------------------
/aliases:
--------------------------------------------------------------------------------
1 | alias agi="sudo apt-get -y install"
2 | alias apti="sudo apt-get -y install"
3 | alias aptud="echo UPDATE && sudo apt-get update -y && echo UPGRADE && sudo apt-get upgrade && echo AUTOREMOVE && sudo apt-get autoremove && sudo apt clean"
4 | alias agud="sudo apt-get -y update"
5 | alias agug="sudo apt-get -y upgrade"
6 | alias agar="sudo apt-get -y autoremove"
7 | alias agu="agud && agug && agar"
8 | alias neteth="sudo nethogs eth0"
9 | alias real="source ~/.aliases"
10 | alias edal="vi +$ ~/.aliases"
11 | alias ww='weightwatcher'
12 | alias weightwatcher='du -s -h'
13 | alias wh=which
14 | alias cdp="cd .."
15 | alias cdpp="cd ../.."
16 | alias ls="ls --color=auto"
17 | alias l="ls -lha"
18 | alias ll="ls -lha"
19 | alias heaviest="du -sk * | sort -rn| head -n 20"
20 | alias chmox="chmod +x"
21 |
22 | alias npmi="echo 'npm install --production' && npm install --production"
23 | alias npmig="echo 'npm install -g --production' && npm install -g --production"
24 | alias npmug='npm uninstall -g'
25 | alias npms="npm start"
26 | alias npmt="npm test"
27 | alias npmtw="npm run test-watch"
28 | alias npmu="npm uninstall"
29 | alias npmis="echo 'npm install --production' && npm install --production"
30 | alias npmisd="echo 'npm install --save-dev --production' && npm install --save-dev --production"
31 | alias npmusd="echo 'npm uninstall --save-dev --production' && npm uninstall --save-dev --production"
32 | alias npmbw="npm run build-watch"
33 | alias npmd="npm run dist"
34 | alias npmw="npm run watch"
35 | #alias npmd="npm run debug"
36 | alias npmus="npm uninstall --save"
37 | alias ni="npmi"
38 | alias nig="npmig"
39 | alias nl="npm run lint"
40 | alias nb="npm run build"
41 |
42 | alias gl="git log --graph --abbrev-commit --decorate --date=relative --format=format:'%C(bold blue)%h%C(reset) - %C(bold green)(%ar)%C(reset) %C(white)%s%C(reset) %C(dim white)- %an%C(reset)%C(bold yellow)%d%C(reset)' --all"
43 | alias glp="git log --patch"
44 | alias ga="git add"
45 | alias gr="git reset"
46 | gs () {
47 | if [ -z "$1" ]
48 | then
49 | git status && git stash list
50 | else
51 | git show $1
52 | fi
53 | }
54 |
55 | gss(){
56 | [[ -z "$1" ]] && 1=0
57 | git show -p "stash@{${1}}"
58 | }
59 |
60 | gsa(){
61 | [[ -z "$1" ]] && 1=0
62 | git stash apply "stash@{${1}}"
63 | }
64 |
65 | gsd(){
66 | [[ -z "$1" ]] && 1=0
67 | git stash drop "stash@{${1}}"
68 | }
69 |
70 | gsp(){
71 | [[ -z "$1" ]] && 1=0
72 | gsa "$1" && gsd "$1"
73 | }
74 |
75 | alias ungc="git reset --soft HEAD^"
76 | alias gd="git diff"
77 | alias gdc="git diff --cached"
78 | alias gco="git checkout"
79 | alias gj="git checkout -B"
80 | gjo () {
81 | git checkout origin/$1 && git checkout -B $1
82 | }
83 | gcom () {
84 | gco main
85 | }
86 | gjoc () {
87 | branch=$(git symbolic-ref --short HEAD)
88 | git checkout -q origin/$branch
89 | git checkout -q -B $branch
90 | }
91 | alias gfj='git fetch --all && gjoc'
92 | alias gjom="gjo main"
93 | alias gf="git fetch"
94 | alias gfo="git fetch origin"
95 | alias gfom="git fetch origin main"
96 | alias gcop="git checkout -p"
97 | alias gap="git add --patch"
98 | alias gslast="git show HEAD"
99 |
100 | # git branch delete local all
101 | gbdla(){
102 | git branch -D "$1"
103 | git branch -Dr "origin/$1"
104 | }
105 |
106 | srm(){ mv $@ /tmp }
107 |
108 | export LH=http://localhost
109 | curljson () { /usr/bin/curl -s -H "Content-Type: application/json" -H 'Accept: application/json' $@ }
110 |
111 | alias filepath="readlink -f"
112 |
113 | find_container(){
114 | sudo docker ps | grep "$@" | awk '{print $1}'
115 | }
116 |
117 | alias d="sudo docker"
118 | alias dc="sudo docker-compose"
119 | dexec(){
120 | sudo docker exec -it $( find_container $2 ) $1
121 | # docker exec -it $2 $1
122 | }
123 | alias dbash="dexec /bin/bash"
124 | alias dsh="dexec /bin/sh"
125 | alias dzsh="dexec /bin/zsh"
126 | alias dk="sudo docker kill"
127 | dka(){ find_container $@ | xargs sudo docker kill }
128 | dlogs(){ find_container $@ | xargs sudo docker logs }
129 | alias dps="sudo docker ps"
130 |
131 | echo_blue(){ echo -e "\e[0;34m$@\e[0;0m" }
132 | echo_grey(){ echo -e "\e[0;30m$@\e[0;0m" }
133 |
134 | # Source: https://www.shellhacks.com/disk-speed-test-read-write-hdd-ssd-perfomance-linux/
135 | disk_benchmark(){
136 | echo_blue "WRITE speed"
137 | sync; dd if=/dev/zero of=tempfile bs=1M count=1024; sync
138 | echo_blue "Clear disk cache"
139 | sudo /sbin/sysctl -w vm.drop_caches=3 > /dev/null
140 | echo_blue "READ speed"
141 | dd if=tempfile of=/dev/null bs=1M count=1024
142 | rm tempfile
143 | echo_grey "(use hdparm to test cache speed. Ex: sudo hdparm -Tt /dev/sda)"
144 | }
145 |
146 | alias disk_space="df -h"
147 | alias firewall_rules="sudo iptables -L"
148 | alias ufws="sudo ufw status verbose"
149 | alias bat="batcat"
150 |
151 | find_container_using_volume(){
152 | volume_id=$1
153 | sudo docker ps -a --filter volume=$volume_id
154 | }
155 |
156 | alias ds="sudo docker stats"
157 | alias di="sudo docker image"
158 | alias dis="sudo docker images"
159 |
160 | alias c="clear"
161 | alias cc='c && cd'
162 | alias ..='cd ..'
163 | alias ...='cd ../../'
164 | alias ....='cd ../../../'
165 | alias .....='cd ../../../../'
166 |
--------------------------------------------------------------------------------
/nginx/inventaire.original.nginx:
--------------------------------------------------------------------------------
1 | # PROJECT_ROOT, DOMAIN_NAME, PRERENDER_INSTANCE and PRERENDER_IP are set by the nginx setup script
2 |
3 | server_tokens off;
4 |
5 | # Defining a log format arbitrarily named "custom"
6 | # doc: http://nginx.org/en/docs/http/ngx_http_log_module.html#log_format
7 | log_format custom '$remote_addr - $remote_user "$request" $status'
8 | ' | time: $request_time'
9 | ' | length: req $request_length / res $body_bytes_sent'
10 | ' | agent: $http_user_agent'
11 | ' | referer: $http_referer';
12 |
13 | # Send access and error logs to syslog, readable via journalctl
14 | access_log syslog:server=unix:/dev/log custom;
15 | error_log syslog:server=unix:/dev/log;
16 |
17 | upstream inventaire {
18 | server 127.0.0.1:3006 fail_timeout=5s;
19 | server 127.0.0.1:3007 backup;
20 | }
21 |
22 | # Using error_page as a way to have a named location that can
23 | # then be shared between several locations, see:
24 | # https://serverfault.com/questions/908086/nginx-directly-send-from-location-to-another-named-location
25 | # https://www.nginx.com/resources/wiki/start/topics/depth/ifisevil/#what-to-do-instead
26 | # Contrary to what the documentation says, the HTTP verbs aren't all converted to GET
27 | # http://nginx.org/en/docs/http/ngx_http_core_module.html#error_page
28 | error_page 543 = @invserver;
29 |
30 | server {
31 | listen 80;
32 | listen [::]:80;
33 |
34 | # Required to be able to run `certbot -w /var/www/html/`
35 | location /.well-known/ {
36 | root /var/www/html/;
37 | }
38 |
39 | location / {
40 | return 301 https://$host$request_uri;
41 | }
42 | }
43 |
44 | server {
45 | listen 443 ssl http2;
46 | listen [::]:443 ssl http2;
47 | server_name www.DOMAIN_NAME;
48 | include /etc/nginx/snippets/ssl.conf;
49 | location / {
50 | include /etc/nginx/snippets/security_headers.conf;
51 | return 301 https://DOMAIN_NAME$request_uri;
52 | }
53 | }
54 |
55 | server {
56 | listen 443 ssl http2;
57 | listen [::]:443 ssl http2;
58 | server_name DOMAIN_NAME;
59 |
60 | # 'ssl_certificate' and 'ssl_certificate_key' will be added
61 | # by LetsEncrypt certbot
62 |
63 | include /etc/nginx/snippets/ssl.conf;
64 |
65 | client_max_body_size 25M;
66 |
67 | # Disabling compression to mitigate BREACH exploit
68 | # https://en.wikipedia.org/wiki/BREACH_(security_exploit)#Mitigation
69 | # http://security.stackexchange.com/questions/39925/breach-a-new-attack-against-http-what-can-be-done
70 | # until we can confidently say that HTTP/2 solves the issue? https://blog.cloudflare.com/hpack-the-silent-killer-feature-of-http-2
71 | gzip off;
72 |
73 | # On-The-Fly Image Resizer
74 |
75 | # URLs look like /img/users/300x1200/8185d4e039f52b4faa06a1c277133e9a8232551b
76 | # for locally hosted images
77 | # or /img/remote/300x1200/630022006?href=http%3A%2F%2Fescaped.url
78 | # for remote images, with 630022006 being the hash of the passed href
79 | # generated by [hashCode](https://git.inventaire.io/inventaire/blob/35b1e63/server/lib/utils/base.js#L69-L80)
80 |
81 | # The hack: I couldn't make the proxy_store work: it never hits the cache, but
82 | # it does put the resized images in /tmp/nginx/resize, so using a try_files
83 | # directive instead
84 |
85 | # Sometimes, for some unidentified reason, the cached files end up empty, so it can be useful to add a root cron to remove those files:
86 | # 0 4 * * * /usr/bin/find /tmp/nginx -type f -size 0 -delete
87 |
88 | # Do not remove the (.*) capture group as it seems to be required by the try_files
89 | location ~ ^/img/(groups|users|entities|assets)/(.*) {
90 | include /etc/nginx/snippets/security_headers.conf;
91 | root /tmp/nginx/resize;
92 | default_type "image/jpeg";
93 | add_header Cache-Control "public, max-age=31536000, immutable";
94 | add_header X-File-Cache "hit";
95 | add_header Content-Security-Policy "sandbox";
96 | try_files $uri @invimg;
97 | limit_except GET {
98 | deny all;
99 | }
100 | }
101 |
102 | # Same as above, but without the immutable
103 | location ~ ^/img/remote/(.*) {
104 | include /etc/nginx/snippets/security_headers.conf;
105 | root /tmp/nginx/resize;
106 | default_type "image/jpeg";
107 | add_header X-File-Cache "hit";
108 | add_header Content-Security-Policy "sandbox";
109 | try_files $uri @invimg;
110 | limit_except GET {
111 | deny all;
112 | }
113 | }
114 |
115 | location ~ ^/img/(\d+)x(\d+)/(.*) {
116 | include /etc/nginx/snippets/security_headers.conf;
117 | return 404;
118 | }
119 |
120 | location @invimg {
121 | include /etc/nginx/snippets/security_headers.conf;
122 | default_type "image/jpeg";
123 | add_header X-File-Cache "miss";
124 | add_header Content-Security-Policy "sandbox";
125 | proxy_temp_path /tmp/nginx/tmp;
126 | proxy_store /tmp/nginx/resize/$uri;
127 | proxy_store_access user:rw group:rw all:r;
128 | proxy_http_version 1.1;
129 | proxy_pass http://inventaire;
130 | }
131 |
132 | # The following aliases are made in order to respect the URL structure
133 | # the server alone would follow: especially, mounting /static on /public
134 | root PROJECT_ROOT/inventaire/client;
135 | location /public/ {
136 | include /etc/nginx/snippets/security_headers.conf;
137 | limit_except GET {
138 | deny all;
139 | }
140 | gzip_static on;
141 | # Leave resources that can't be cache busted
142 | # - such as opensearch.xml or robots.txt -
143 | # out of this caching policy
144 | if ($uri ~ "^/public/(dist|fonts)/" ) {
145 | include /etc/nginx/snippets/security_headers.conf;
146 | add_header Cache-Control "public, max-age=31536000, immutable";
147 | # Headers added in outer blocks won't be taken into account in this block,
148 | # thus the need to have CORS headers here too
149 | add_header 'Access-Control-Allow-Origin' '*' always;
150 | add_header 'Access-Control-Allow-Methods' 'GET' always;
151 | }
152 | }
153 |
154 | # Pass the request to the node.js server
155 | # with some correct headers for proxy-awareness
156 | location /api {
157 | return 543;
158 | }
159 |
160 | location /.well-known/webfinger {
161 | return 543;
162 | }
163 |
164 | # Let the API server handle all but /public JSON and RSS requests
165 | location ~ "^/[^p].*\.(json|rss)$" {
166 | limit_except GET {
167 | deny all;
168 | }
169 | return 543;
170 | }
171 |
172 | location @invserver {
173 | include /etc/nginx/snippets/security_headers.conf;
174 | # Let the server decide when CORS headers should be added
175 | # $http_host preserves a possible port in the Host header ($host would strip it)
176 | proxy_set_header Host $http_host;
177 | proxy_set_header X-Forwarded-Proto https;
178 |
179 | # Set a large value to let the API determine the appropriate
180 | # timeout per endpoint
181 | # http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_read_timeout
182 | proxy_read_timeout 3600;
183 | proxy_redirect off;
184 | proxy_http_version 1.1;
185 | # Redirect Prerender API requests to the alt server
186 | if ($remote_addr = PRERENDER_IP) {
187 | proxy_pass http://127.0.0.1:3007;
188 | break;
189 | }
190 | # Redirect bots and crawlers that were excluded from prerendering to the alt server too
191 | if ($http_user_agent ~* "bot|index|spider|crawl") {
192 | proxy_pass http://127.0.0.1:3007;
193 | break;
194 | }
195 | proxy_pass http://inventaire;
196 | }
197 |
198 | location = /favicon.ico {
199 | include /etc/nginx/snippets/security_headers.conf;
200 | try_files /public/$uri /public/images/$uri;
201 | expires 30d;
202 | add_header Cache-Control "public";
203 | }
204 |
205 | location = /robots.txt {
206 | include /etc/nginx/snippets/security_headers.conf;
207 | gzip_static on;
208 | try_files /public/$uri /$uri;
209 | expires 1d;
210 | add_header Cache-Control "public";
211 | }
212 |
213 | # Prevent exposing git folders such as /public/i18n/.git
214 | # For why this rule takes precedence over location /public/
215 | # see http://stackoverflow.com/a/34262192/3324977
216 | location ~ /\.git {
217 | deny all;
218 | }
219 |
220 | # Required to be able to run `certbot -w /var/www/html/`
221 | location /.well-known/ {
222 | include /etc/nginx/snippets/security_headers.conf;
223 | root /var/www/html/;
224 | }
225 |
226 | location / {
227 | include /etc/nginx/snippets/security_headers.conf;
228 | gzip_static on;
229 | try_files $uri @prerender;
230 | limit_except GET {
231 | deny all;
232 | }
233 | }
234 |
235 | # Dispatching requests between the JS-rendered client (for browsers)
236 | # or the prerendered version (for bots, curl, and the like)
237 | # To setup a Prerender server, see https://git.inventaire.io/prerender
238 |
239 | # Adapted from https://gist.github.com/thoop/8165802
240 | location @prerender {
241 | #proxy_set_header X-Prerender-Token YOUR_TOKEN;
242 |
243 | set $prerender 0;
244 | # ~* is case insensitive http://nginx.org/en/docs/http/ngx_http_rewrite_module.html#if:~:text=case-insensitive%20matching
245 | if ($http_user_agent ~* "bot|index|spider|crawl|facebook|embedly|quora|outbrain|pinterest|vkShare|W3C_Validator|curl|wget|slurp|Discourse|Iframely") {
246 | set $prerender 1;
247 | }
248 | if ($args ~ "_escaped_fragment_") {
249 | set $prerender 1;
250 | }
251 | if ($args ~ "__nojs") {
252 | set $prerender 1;
253 | }
254 | # - Bots that do their own prerendering: Googlebot, bingbot, Yandex, Applebot
255 | # To identify bots that do their own prerendering:
256 | # 1/ add them to this list
257 | # 2/ see if they make requests on /api by themselves, with the following command:
258 | # journalctl -u nginx --since today --grep BotName | grep '/api'
259 | # - SEO/spammer bots that should do their own prerendering
260 | # Currently allowed to use the prerender service, but should eventually be added to this list: GPTBot
261 | if ($http_user_agent ~* "bingbot|Yandex|Googlebot|SemrushBot|Applebot|PetalBot|DotBot|Barkrowler|Bytespider|AhrefsBot|seo") {
262 | set $prerender 0;
263 | }
264 | if ($http_user_agent ~ "Prerender") {
265 | set $prerender 0;
266 | }
267 | if ($uri ~ "\.(js|css|xml|less|png|jpg|jpeg|gif|pdf|doc|txt|ico|rss|zip|mp3|rar|exe|wmv|doc|avi|ppt|mpg|mpeg|tif|wav|mov|psd|ai|xls|mp4|m4a|swf|dat|dmg|iso|flv|m4v|torrent|ttf|woff)") {
268 | set $prerender 0;
269 | }
270 |
271 | proxy_http_version 1.1;
272 | # Allows Firefox Headless Prerender to send 429 errors in case of abuse
273 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
274 |
275 | if ($prerender = 1) {
276 | # Setting prerender as a variable forces DNS resolution, since nginx caches IPs and doesn't play well with load balancing
277 | # set $prerender "92.222.76.42:3000";
278 | rewrite .* /$scheme://$host$request_uri? break;
279 | proxy_pass PRERENDER_INSTANCE;
280 | }
281 | if ($prerender = 0) {
282 | include /etc/nginx/snippets/security_headers.conf;
283 |
284 | add_header Report-To '{"max_age":10886400,"endpoints":[{"url":"https://inventaire.io/api/reports?action=csp-report"}]}';
285 | add_header Content-Security-Policy-Report-Only "default-src 'self' www.wikidata.org; child-src 'self' blob:; script-src 'self' 'unsafe-inline'; style-src 'self' 'unsafe-inline'; font-src 'self'; img-src 'self' commons.wikimedia.org api.tiles.mapbox.com piwik.allmende.io data:; report-uri /api/reports?action=csp-report; report-to default";
286 |
287 | # index.html should always be fresh out of the server
288 | # time is negative => “Cache-Control: no-cache”
289 | # http://nginx.org/en/docs/http/ngx_http_headers_module.html
290 | # Those headers should be set here and not at "location /" as they would be ignored (cf http://serverfault.com/a/786248)
291 | expires -1;
292 |
293 | rewrite .* /public/index.html break;
294 | }
295 | }
296 | }
297 |
--------------------------------------------------------------------------------