├── changelog
├── items
│   ├── other
│   │   └── .init
│   ├── bugs-fixed
│   │   ├── .init
│   │   ├── fix-missing-logs-dir.md
│   │   └── fix-context.md
│   └── key-updates
│   │   ├── .init
│   │   └── pinner.md
├── .changelogignore
├── README.md
├── changelog-head.md
├── generate-changelog.sh
└── changelog-tail.md
├── .github
├── CODEOWNERS
├── dependabot.yml
└── workflows
│   └── lint-python.yml
├── .prettierignore
├── scripts
├── portal-up.sh
├── wait_to_start.sh
├── portal-down.sh
├── README.md
├── db_restore.sh
├── blocklist-skylink.sh
├── db_backup.sh
└── backup-aws-s3.sh
├── logs
└── .gitkeep
├── renovate.json
├── setup-scripts
├── serverload.service
├── logrotate.d
│   ├── skynet-webportal-pinner
│   ├── skynet-webportal-skyd
│   └── skynet-webportal-nginx
├── support
│   ├── sia.env
│   ├── tmux.conf
│   ├── crontab
│   ├── authorized_keys
│   ├── ssh_config
│   ├── limits.conf
│   └── bashrc
├── disk-usage-dump.sh
├── serverload.sh
├── log-checker.py
├── funds-checker.py
├── bot_utils.py
└── health-checker.py
├── README.md
├── docker-compose.pinner.yml
├── docker-compose.blocker.yml
├── docker-compose.mongodb.yml
├── docker-compose.malware-scanner.yml
├── docker-compose.abuse-scanner.yml
├── .gitignore
├── docker-compose.accounts.yml
├── dc
├── docker
├── certbot
│   └── entrypoint.sh
└── clamav
│   └── clamd.conf
├── CHANGELOG.md
├── docker-compose.jaeger.yml
├── LICENSE.md
└── docker-compose.yml
/changelog/items/other/.init:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/changelog/items/bugs-fixed/.init:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/changelog/items/key-updates/.init:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/.github/CODEOWNERS:
--------------------------------------------------------------------------------
* @kwypchlo @meeh0w
--------------------------------------------------------------------------------
/changelog/.changelogignore:
--------------------------------------------------------------------------------
.init
.DS_Store
--------------------------------------------------------------------------------
/.prettierignore:
--------------------------------------------------------------------------------
/package.json
/package-lock.json
--------------------------------------------------------------------------------
/changelog/items/bugs-fixed/fix-missing-logs-dir.md:
--------------------------------------------------------------------------------
- Fix missing `logs` dir that is required for backup scripts (cron jobs).
--------------------------------------------------------------------------------
/changelog/items/key-updates/pinner.md:
--------------------------------------------------------------------------------
- Add Pinner service to the portal stack. Activate it by selecting the 'p' module.
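  (A minimal activation sketch, assuming the `dc` wrapper and the
  `PORTAL_MODULES` setting described elsewhere in this repo: add `p` to the
  `PORTAL_MODULES` line in `.env`, then run `./dc up -d`.)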

--------------------------------------------------------------------------------
/scripts/portal-up.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e # exit on first error

# start the health-checks service
docker exec health-check cli enable
--------------------------------------------------------------------------------
/.github/dependabot.yml:
--------------------------------------------------------------------------------
version: 2
updates:
  - package-ecosystem: docker
    directory: "/docker/sia"
    schedule:
      interval: monthly
--------------------------------------------------------------------------------
/logs/.gitkeep:
--------------------------------------------------------------------------------
The purpose of this file is that the `logs` dir can be committed to git and
will be present on portal servers. The rest of the files in the `logs` dir are
git ignored.
--------------------------------------------------------------------------------
/changelog/items/bugs-fixed/fix-context.md:
--------------------------------------------------------------------------------
- Fix `dashboard-v2` Dockerfile context in `docker-compose.accounts.yml` to
  avoid Ansible deploy (docker compose build) `permission denied` issues.
--------------------------------------------------------------------------------
/changelog/README.md:
--------------------------------------------------------------------------------
# Changelog Generator

For usage of the changelog generator, please see the
[readme](https://gitlab.com/NebulousLabs/changelog-generator/-/blob/master/README.md)
in the Changelog Generator repository.
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
{
  "extends": ["config:base", ":prHourlyLimitNone"],
  "enabledManagers": ["docker-compose"],
  "packageRules": [
    { "groupName": "jaegertracing", "matchPackagePatterns": ["jaegertracing"] }
  ]
}
--------------------------------------------------------------------------------
/setup-scripts/serverload.service:
--------------------------------------------------------------------------------
[Unit]
Description=Ensure serverload script is running to provide serverload stats.
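# Installation sketch, following the comments in setup-scripts/serverload.sh
# (adjust the ExecStart path below if serverload.sh lives elsewhere on your
# server):
#   sudo cp serverload.service /etc/systemd/system/serverload.service
#   sudo systemctl start serverload.service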

[Service]
ExecStart=/bin/bash /home/user/skynet-webportal/serverload.sh

[Install]
WantedBy=multi-user.target
--------------------------------------------------------------------------------
/setup-scripts/logrotate.d/skynet-webportal-pinner:
--------------------------------------------------------------------------------
/home/user/skynet-webportal/docker/data/pinner/*.log {
    daily
    rotate 10
    minsize 100M
    copytruncate
    notifempty
    dateext
    missingok
    compress
    compressoptions --best
}
--------------------------------------------------------------------------------
/setup-scripts/logrotate.d/skynet-webportal-skyd:
--------------------------------------------------------------------------------
/home/user/skynet-webportal/docker/data/sia/*.log
/home/user/skynet-webportal/docker/data/sia/*/*.log {
    daily
    rotate 10
    minsize 100M
    copytruncate
    notifempty
    dateext
    missingok
    compress
    compressoptions --best
}
--------------------------------------------------------------------------------
/setup-scripts/support/sia.env:
--------------------------------------------------------------------------------
# siad environment variables
SIA_API_PASSWORD=""
SIA_DATA_DIR="/home/user/.sia"
SIAD_DATA_DIR="/home/user/siad"
SIA_WALLET_PASSWORD=""

# portal specific environment variables
API_PORT="9980"

# discord integration
DISCORD_WEBHOOK_URL=""
DISCORD_MENTION_USER_ID=""
DISCORD_MENTION_ROLE_ID=""
--------------------------------------------------------------------------------
/changelog/changelog-head.md:
--------------------------------------------------------------------------------
Version Scheme
--------------
Skynet Webportal uses the following versioning scheme, vX.X.X
- First Digit signifies a major (compatibility breaking) release
- Second Digit signifies a major (non-compatibility breaking) release
- Third Digit signifies a minor or patch release

Version History
---------------

Latest:
--------------------------------------------------------------------------------
/setup-scripts/logrotate.d/skynet-webportal-nginx:
--------------------------------------------------------------------------------
/home/user/skynet-webportal/docker/data/nginx/logs/*.log {
    daily
    rotate 3650
    minsize 500M
    create 644 root root
    notifempty
    dateext
    missingok
    compress
    compressoptions --best
    delaycompress
    sharedscripts
    postrotate
        docker exec nginx nginx -s reopen
    endscript
}
--------------------------------------------------------------------------------
/scripts/wait_to_start.sh:
--------------------------------------------------------------------------------
#!/bin/sh

echo $WAIT_COMMAND
echo $WAIT_START_CMD

is_ready() {
    eval "$WAIT_COMMAND"
}

# wait until ready
i=0
while ! is_ready; do
    i=`expr $i + 1`
    if [ $i -ge $WAIT_LOOPS ]; then
        echo "$(date) - still not ready, giving up"
        exit 1
    fi
    echo "$(date) - waiting to be ready"
    sleep $WAIT_SLEEP
done

# start the script
exec $WAIT_START_CMD
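
# Example invocation (all values are illustrative; the WAIT_* variables are
# normally provided by the container environment):
#   WAIT_COMMAND="nc -z mongo 27017" WAIT_LOOPS=10 WAIT_SLEEP=5 \
#     WAIT_START_CMD="node server.js" ./wait_to_start.sh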
--------------------------------------------------------------------------------
/setup-scripts/support/tmux.conf:
--------------------------------------------------------------------------------
# remap prefix from 'C-b' to 'C-a'
unbind C-b
set-option -g prefix C-a
bind-key C-a send-prefix

# split panes using | and -
bind | split-window -h
bind - split-window -v
unbind '"'
unbind %

# reload config file (change file location to the tmux.conf you want to use)
bind r source-file /home/user/.tmux.conf

set -g visual-activity off
set -g mouse on
# This copies highlighted text.
set -g mouse-select-window on
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Skynet Portal

## Latest Setup Documentation

The latest Skynet Webportal setup documentation, covering the setup process
Skynet Labs supports, is located at https://portal-docs.skynetlabs.com/.

Some scripts and setup documentation contained in this repository
(`skynet-webportal`) may be outdated and generally should not be used.

## License

Skynet uses a custom [License](./LICENSE.md). The Skynet License is a source
code license that allows you to use, modify and distribute the software, but
you must preserve the payment mechanism in the software.

For the purposes of complying with our code license, you can use the following
Siacoin address:

`fb6c9320bc7e01fbb9cd8d8c3caaa371386928793c736837832e634aaaa484650a3177d6714a`
--------------------------------------------------------------------------------
/docker-compose.pinner.yml:
--------------------------------------------------------------------------------
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  pinner:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/pinner.git#main
    image: skynetlabs/pinner:0.7.8
    container_name: pinner
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    volumes:
      - ./docker/data/pinner/logs:/logs
    environment:
      - PINNER_LOG_LEVEL=${PINNER_LOG_LEVEL:-info}
    expose:
      - 4000
    networks:
      shared:
        ipv4_address: 10.10.10.130
    depends_on:
      - mongo
      - sia
--------------------------------------------------------------------------------
/setup-scripts/support/crontab:
--------------------------------------------------------------------------------
0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/funds-checker.py /home/user/skynet-webportal/.env
0 0,8,16 * * * /home/user/skynet-webportal/setup-scripts/log-checker.py /home/user/skynet-webportal/.env sia 8
0 * * * * /home/user/skynet-webportal/setup-scripts/health-checker.py /home/user/skynet-webportal/.env sia 1
44 5 * * * /home/user/skynet-webportal/scripts/backup-aws-s3.sh 1>>/home/user/skynet-webportal/logs/backup-aws-s3.log 2>>/home/user/skynet-webportal/logs/backup-aws-s3.log
6 13 * * * /home/user/skynet-webportal/scripts/db_backup.sh 1>>/home/user/skynet-webportal/logs/db_backup.log 2>>/home/user/skynet-webportal/logs/db_backup.log
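
# Cron field order is: minute hour day-of-month month day-of-week command.
# For example, the "44 5 * * *" entry above runs the S3 backup once a day at
# 05:44 server time.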
0 5 * * * /usr/bin/docker run --rm --net=host -e ROLLOVER=true jaegertracing/jaeger-es-index-cleaner:latest 1 http://localhost:9200
--------------------------------------------------------------------------------
/docker-compose.blocker.yml:
--------------------------------------------------------------------------------
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  health-check:
    environment:
      - BLOCKER_HOST=10.10.10.110
      - BLOCKER_PORT=4000

  blocker:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/blocker.git#main
    image: skynetlabs/blocker:0.1.2
    container_name: blocker
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    volumes:
      - ./docker/data/nginx/blocker:/data/nginx/blocker
    expose:
      - 4000
    networks:
      shared:
        ipv4_address: 10.10.10.110
    depends_on:
      - mongo
      - sia
--------------------------------------------------------------------------------
/docker-compose.mongodb.yml:
--------------------------------------------------------------------------------
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  sia:
    environment:
      - MONGODB_URI=mongodb://${SKYNET_DB_HOST}:${SKYNET_DB_PORT}
      - MONGODB_USER=${SKYNET_DB_USER}
      - MONGODB_PASSWORD=${SKYNET_DB_PASS}

  mongo:
    image: mongo:4.4.17
    command: --keyFile=/data/mgkey --replSet=${SKYNET_DB_REPLICASET:-skynet} --setParameter ShardingTaskExecutorPoolMinSize=10
    container_name: mongo
    restart: unless-stopped
    logging: *default-logging
    volumes:
      - ./docker/data/mongo/db:/data/db
      - ./docker/data/mongo/mgkey:/data/mgkey:rw
    networks:
      shared:
        ipv4_address: 10.10.10.71
    ports:
      - "${SKYNET_DB_PORT}:27017"
--------------------------------------------------------------------------------
/scripts/portal-down.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -e # exit on first error

while getopts d:t:r: flag
do
    case "${flag}" in
        d) delay=${OPTARG};;
        t) timeout=${OPTARG};;
        r) reason=${OPTARG};;
    esac
done
delay=${delay:-0} # default to no delay
timeout=${timeout:-300} # default timeout is 300s

if [[ -z $reason ]]; then
    echo "Please provide a reason for disabling the portal (use '-r <reason>')."
    exit 1
fi
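
# Example (values are illustrative): wait 60 seconds, then take the portal out
# of the load balancer with the default 300s propagation timeout:
#   ./portal-down.sh -d 60 -r "scheduled maintenance"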

countdown() {
    local secs=$1
    while [ $secs -gt 0 ]; do
        echo -ne "Waiting $secs\033[0K\r"
        sleep 1
        : $((secs--))
    done
}

# delay disabling the portal
countdown $delay

# stop health-check so the server is taken out of the load balancer
docker exec health-check cli disable $reason

# then wait 5 minutes for the load balancer to propagate the dns records
countdown $timeout
--------------------------------------------------------------------------------
/setup-scripts/support/authorized_keys:
--------------------------------------------------------------------------------
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIDgiq1etF0aD94rG/UVmYEt4ij5K8MvHZwb4wIUi6Ihr david@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAyIT2HqzDhQs6jS89ZsnY6+GJEklVMqF6fXe/i5s8d7 chris@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFxLuZzjmFN9CgVOI5vaiVhQgMwG9dLQJ688wrsbpHH/ ivaylo@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINbAhwjJNAud7YIJvLth2bmeUg3kO20xl7ZfqBTvoXn8 fil@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIG67M3zC4eDJEjma0iKKksGclteKbB86ONQtBaWY93M6 matt@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIF+XC8f0dumhzDE93i9IIMsMp7/MJPwGH+Uc9JFKOvyw karol@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIPM43lzbKjFLChe5rKETxDpWpNlqXCGTBPiWlDN2vlLD pj@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIN6Kcx8yetova4/ALUQHigo/PBMJO33ZTKOsg2jxSO2a user@deploy.siasky.dev
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB7prtVOTwtcSN9HkXum107RwcW5H8Vggx6Qv7T57ItT daniel@siasky.net
ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHptEpqs57lhnHkfa+0SQgXQ4A63/YGV2cNTcGMQW+Jt david@skynetlabs.com
--------------------------------------------------------------------------------
/.github/workflows/lint-python.yml:
--------------------------------------------------------------------------------
name: Lint - Python Scripts

on:
  push:
    paths:
      - "**.py"

jobs:
  black:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
          architecture: x64

      - run: pip install black
      - run: black --check .
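
      # To reproduce this check locally (assumes a Python 3 environment):
      #   pip install black && black --check .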

  flake8:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: "3.x"
          architecture: x64

      - run: pip install flake8

      # E203: https://www.flake8rules.com/rules/E203.html - Whitespace before ':'
      # E501: https://www.flake8rules.com/rules/E501.html - Line too long
      # W503: https://www.flake8rules.com/rules/W503.html - Line break occurred before a binary operator
      # W605: https://www.flake8rules.com/rules/W605.html - Invalid escape sequence
      # E722: https://www.flake8rules.com/rules/E722.html - Do not use bare except, specify exception instead
      - run: flake8 --max-line-length 88 --ignore E203,E501,W503,W605,E722
--------------------------------------------------------------------------------
/changelog/generate-changelog.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
set -e

# Generate CHANGELOG.md from changelog directory
# Requires:
# - curl installed

# Config

main_version='v1.0.1'
export main_filename='generate-changelog-main.sh'
export main_url="https://gitlab.com/NebulousLabs/changelog-generator/-/raw/${main_version}/${main_filename}"
export temp_dir="${HOME}/.nebulous/changelog-generator"
export main_path=${temp_dir}/${main_filename}

# Set working dir to script location
pushd $(dirname "$0") > /dev/null

# If executed in the 'changelog-generator' repo, do not use the older released
# version, use the latest local version
repo_dir="$(basename ${PWD%/*})"
if [[ "${repo_dir}" == "changelog-generator" ]]
then
    # Call the latest local main script
    echo "Executing the latest local version of the main script"
    export local_execution=true
    chmod +x ../${main_filename}
    ../${main_filename} "$@"
    exit 0
fi

# Download main script to temp dir
mkdir -p ${temp_dir}
curl --show-error --fail -o ${main_path} ${main_url}

# Execute downloaded main script passing arguments to the main script
chmod +x ${main_path}
${main_path} "$@"

popd > /dev/null
--------------------------------------------------------------------------------
/docker-compose.malware-scanner.yml:
--------------------------------------------------------------------------------
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  clamav:
    image: clamav/clamav:stable_base
    container_name: clamav
    restart: on-failure
    logging: *default-logging
    volumes:
      - ./docker/data/clamav/clamav/defs:/var/lib/clamav
      - ./docker/clamav/clamd.conf:/etc/clamav/clamd.conf:ro
    expose:
      - 3310 # NEVER expose this outside of the local network!
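      # (In compose, "expose" only opens the port on the internal compose
      # network; unlike "ports", nothing is published on the host.)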
    networks:
      shared:
        ipv4_address: 10.10.10.100

  malware-scanner:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/malware-scanner.git#main
    image: skynetlabs/malware-scanner:0.1.0
    container_name: malware-scanner
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    environment:
      - CLAMAV_IP=10.10.10.100
      - CLAMAV_PORT=3310
      - BLOCKER_IP=10.10.10.110
      - BLOCKER_PORT=4000
    expose:
      - 4000
    networks:
      shared:
        ipv4_address: 10.10.10.101
    depends_on:
      - mongo
      - clamav
      - blocker
--------------------------------------------------------------------------------
/docker-compose.abuse-scanner.yml:
--------------------------------------------------------------------------------
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  abuse-scanner:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/abuse-scanner.git#main
    image: skynetlabs/abuse-scanner:0.4.0
    container_name: abuse-scanner
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    environment:
      - ABUSE_LOG_LEVEL=${ABUSE_LOG_LEVEL}
      - ABUSE_MAILADDRESS=${ABUSE_MAILADDRESS}
      - ABUSE_MAILBOX=${ABUSE_MAILBOX}
      - ABUSE_SPONSOR=${ABUSE_SPONSOR}
      - BLOCKER_HOST=10.10.10.110
      - BLOCKER_PORT=4000
      - EMAIL_SERVER=${EMAIL_SERVER}
      - EMAIL_USERNAME=${EMAIL_USERNAME}
      - EMAIL_PASSWORD=${EMAIL_PASSWORD}
      - SKYNET_DB_HOST=${SKYNET_DB_HOST}
      - SKYNET_DB_PORT=${SKYNET_DB_PORT}
      - SKYNET_DB_USER=${SKYNET_DB_USER}
      - SKYNET_DB_PASS=${SKYNET_DB_PASS}
    networks:
      shared:
        ipv4_address: 10.10.10.120
    depends_on:
      - mongo
      - blocker
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
      - /tmp:/tmp
--------------------------------------------------------------------------------
/scripts/README.md:
--------------------------------------------------------------------------------
# Skynet Webportal Scripts

This package contains useful scripts for managing a Skynet Webportal.

## Available Scripts

**blocklist-skylink.sh**\
The `blocklist-skylink.sh` script adds a skylink to the blocklist on all
servers.

**maintenance-upgrade.sh**\
The `maintenance-upgrade.sh` script upgrades the docker images for nodes on
a maintenance server.

**portal-down.sh**\
The `portal-down.sh` script takes a portal out of the load balancer by disabling
the health check.

**portal-restart.sh**\
The `portal-restart.sh` script restarts a portal by taking it out of the load
balancer, restarting the docker containers, and adding the portal back to the
load balancer.

**portal-up.sh**\
The `portal-up.sh` script puts a portal back into the load balancer by enabling
the health check.

**portal-upgrade.sh**\
The `portal-upgrade.sh` script upgrades the docker images for a portal and
clears any leftover images.

## Webportal Upgrade Procedures

TODO... (a single-server example is sketched below)

1. 1 server upgraded at a time
1. Clusters of servers upgraded at a time
1. How to safely revert to previous stable version. Document what those
   versions were.
1. Upgrading single subsystem
1. Upgrading multiple subsystems
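
A minimal single-server pass, using only the scripts documented above (the
reason text is an example):

```sh
./portal-down.sh -r "portal upgrade"   # take the portal out of the load balancer
./portal-upgrade.sh                    # upgrade docker images, clear leftovers
./portal-up.sh                         # re-enable the health check
```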
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage

# nyc test coverage
.nyc_output

# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (http://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/
jspm_packages/

# Typescript v1 declaration files
typings/

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# dotenv environment variable files
.env*

# Mac files
.DS_Store

# vscode
.vscode

# Yarn
yarn-error.log
.pnp/
.pnp.js
# Yarn Integrity file
.yarn-integrity

# Docker data
docker/data

# Docker override
docker-compose.override.yml

# Nginx custom server overrides
docker/nginx/conf.d/server-override/*
!docker/nginx/conf.d/server-override/example

# Cache files
__pycache__
/.idea/
/venv*

# Luacov file
luacov.stats.out
luacov.report.out

# Setup-script log files
setup-scripts/serverload.log
setup-scripts/serverload.json
--------------------------------------------------------------------------------
/setup-scripts/support/ssh_config:
--------------------------------------------------------------------------------

# This is the ssh client system-wide configuration file. See
# ssh_config(5) for more information. This file provides defaults for
# users, and the values can be changed in per-user configuration files
# or on the command line.

# Configuration data is parsed as follows:
#   1. command line options
#   2. user-specific file
#   3. system-wide file
# Any configuration value is only changed the first time it is set.
# Thus, host-specific definitions should be at the beginning of the
# configuration file, and defaults at the end.

# Site-wide defaults for some commonly used options. For a comprehensive
# list of available options, their meanings and defaults, please see the
# ssh_config(5) man page.

Host *
#   ForwardAgent no
#   ForwardX11 no
#   ForwardX11Trusted yes
    PasswordAuthentication no
#   HostbasedAuthentication no
#   GSSAPIAuthentication no
#   GSSAPIDelegateCredentials no
#   GSSAPIKeyExchange no
#   GSSAPITrustDNS no
#   BatchMode no
#   CheckHostIP yes
#   AddressFamily any
#   ConnectTimeout 0
#   StrictHostKeyChecking ask
#   IdentityFile ~/.ssh/id_rsa
#   IdentityFile ~/.ssh/id_dsa
#   IdentityFile ~/.ssh/id_ecdsa
#   IdentityFile ~/.ssh/id_ed25519
#   Port 22
#   Protocol 2
#   Ciphers aes128-ctr,aes192-ctr,aes256-ctr,aes128-cbc,3des-cbc
#   MACs hmac-md5,hmac-sha1,umac-64@openssh.com
#   EscapeChar ~
#   Tunnel no
#   TunnelDevice any:any
#   PermitLocalCommand no
#   VisualHostKey no
#   ProxyCommand ssh -q -W %h:%p gateway.example.com
#   RekeyLimit 1G 1h
    SendEnv LANG LC_*
    HashKnownHosts no
    GSSAPIAuthentication yes
--------------------------------------------------------------------------------
/setup-scripts/disk-usage-dump.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Dumps disk usage to stdout or to a file
#
# Parameters:
# - $1 (optional): Filename to append the output to.
#
# Usage:
# - Dump disk usage to stdout:
#   ./disk-usage-dump.sh
#
# - Dump disk usage, appending to the file:
#   ./disk-usage-dump.sh my-log-file.log
#
# Uses a docker container to get root (the script can be run under a regular
# user, no need for sudo)

dump () {
    echo
    echo "### Disk usage dump at $(date) ###"

    # Free disk space
    echo
    df -h /home/user

    # Root dirs
    echo
    echo "Root dirs:"
    docker run -v /:/host-root alpine:3.15.0 sh -c 'du -hs /host-root/*' | sed 's#/host-root##'

    # Home dirs
    echo
    echo "Home dirs:"
    docker run -v /home/user:/home/user alpine:3.15.0 du -hs /home/user/*

    # Skynet webportal dirs
    echo
    echo "skynet-webportal dirs:"
    docker run -v /home/user:/home/user alpine:3.15.0 du -hs /home/user/skynet-webportal/*

    # Docker data dirs
    echo
    echo "Docker data dirs:"
    docker run -v /home/user:/home/user alpine:3.15.0 du -hs /home/user/skynet-webportal/docker/data/*

    # Largest dirs/files
    echo
    echo "Dirs or files over 1GB (first 100):"
    docker run -v /home/user:/home/user alpine:3.15.0 du -h /home/user | grep -E "^[0-9]+\.?[0-9]*G" | sort -r -n | head -100
}

# Check argument is present
if [ -z "$1" ]; then
    # Dump to stdout
    dump
else
    # Handle log paths
    filename=$(basename "$1")
    dirname=$(dirname "$1")
    abs_dirname=$(realpath "$dirname")

    # Make sure log dir exists
    mkdir -p "$abs_dirname"

    # Append to file
    {
        dump
    } >> "$abs_dirname/$filename" 2>&1
fi
--------------------------------------------------------------------------------
/scripts/db_restore.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# First of all, let's pamper awscli because Python is so special:
pip3 install --upgrade awscli

BACKUP=$1
if [[ $BACKUP == "" ]]; then
    echo "No backup name given. It should look like '2020-01-29'."
    exit 1
fi
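
# Usage sketch (the argument is the backup date, as checked above):
#   ./db_restore.sh 2020-01-29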

# Get current script directory (pwd doesn't cut it)
csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment:
set -o allexport
cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
source .tmpenv
rm .tmpenv
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
    echo "Missing AWS credentials!"
    exit 1
fi
# Check for backup path:
if [[ $S3_BACKUP_PATH == "" ]]; then
    echo "Missing S3_BACKUP_PATH!"
    exit 1
fi

### MONGO DB ###
# Check if the backup exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$BACKUP --recursive --summarize | grep "mongo.tgz" | wc -l)
if [ "$totalFoundObjects" -eq "0" ]; then
    echo "This backup doesn't exist!"
    exit 1
fi
# Get the backup from S3:
aws s3 cp $S3_BACKUP_PATH/$BACKUP/mongo.tgz mongo.tgz
# Prepare a clean `to_restore` dir:
rm -rf $csd/../docker/data/mongo/db/backups/to_restore
mkdir -p $csd/../docker/data/mongo/db/backups/to_restore
# Decompress the backup:
tar -xzf mongo.tgz -C $csd/../docker/data/mongo/db/backups/to_restore
rm mongo.tgz
# Restore the backup:
# The name of the backup is not `mongo` due to the way we're creating it,
# it's $BACKUP.
docker exec mongo \
    mongorestore --drop \
    mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT \
    /data/db/backups/to_restore/$BACKUP
# Clean up:
rm -rf $csd/../docker/data/mongo/db/backups/to_restore
--------------------------------------------------------------------------------
/setup-scripts/serverload.sh:
--------------------------------------------------------------------------------
#!/bin/bash

: '
This script writes the CPU usage and the free disk space to a file in a loop.
The results are prepended to the file, so the most recent results are at the
top. This is so that the most recent information can easily be read from the
top of the file and the file can easily be truncated if needed.

This script is run by the serverload.service systemd process. The
serverload.service file should be copied to
/etc/systemd/system/serverload.service.

The systemd process can then be started with the following commands:
sudo systemctl start serverload.service

The status of the process can be checked with:
sudo systemctl is-active serverload.service
'

# Define Loop Interval
loop_interval=60
webportal_repo_setup_scripts="/home/user/skynet-webportal/setup-scripts"
logfile_name="serverload.log"
logfile=$webportal_repo_setup_scripts/$logfile_name
jsonfile="serverload.json"
nginx_docker_path="/usr/local/share"

# Create logfile if it doesn't exist
if [[ ! -e $logfile ]]; then
    echo "init" > $logfile
fi

# Write the output in an infinite loop.
while true; do
    # CPU usage
    cpu=$(echo $[100-$(vmstat 1 2|tail -1|awk '{print $15}')])
    sed -i "1iCPU: ${cpu}" $logfile

    # Disk Usage
    disk=$(df -Ph . | tail -1 | awk '{print $4}')
    sed -i "1iDISK: ${disk}" $logfile
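
    # (Column 15 of vmstat's output is idle CPU, so "cpu" above is 100 minus
    # idle; "disk" is the free-space column of df for the current filesystem.)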

    # Write the timestamp
    timestamp=$(date)
    sed -i "1iTIMESTAMP: ${timestamp}" $logfile

    # Write and copy a json file of the latest results to nginx docker container
    # to serve
    printf '{"cpu":"%s","disk":"%s","timestamp":"%s"}' "$cpu" "$disk" "$timestamp" > $webportal_repo_setup_scripts/$jsonfile
    docker cp $webportal_repo_setup_scripts/$jsonfile nginx:$nginx_docker_path/$jsonfile

    # Sleep
    sleep $loop_interval
done
--------------------------------------------------------------------------------
/scripts/blocklist-skylink.sh:
--------------------------------------------------------------------------------
#! /usr/bin/env bash

# This script is for manual skylink blocking. It accepts either a single
# skylink or a file containing a list of skylinks. The script is intended
# for manual use and it should be run locally on each skynet webportal server.

set -e # exit on first error

if [ -z "$1" ]; then
    echo "Please provide either a skylink or a file with skylinks separated by new lines" && exit 1
fi

#########################################################
# read either a file containing skylinks separated by new
# lines or a single skylink and put them in an array
#########################################################
skylinks=()
if test -f "$1"; then
    line_number=1

    # Read file including the last line even when it doesn't end with newline
    while IFS="" read -r line || [ -n "$line" ];
    do
        if [[ $line =~ (^[a-zA-Z0-9_-]{46}$) ]]; then
            skylinks+=("$line")
        else
            echo "Incorrect skylink at line ${line_number}: $line" && exit 1
        fi
        let line_number+=1
    done < $1;
else
    skylinks=("$1") # just a single skylink passed as input argument
fi

# get local skyd ip address
ipaddress=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' sia)

# get sia api password either from env variable if exists or from apipassword file in sia-data directory
apipassword=$(docker exec sia sh -c '[ ! -z "${SIA_API_PASSWORD}" ] && echo ${SIA_API_PASSWORD} || $(cat /sia-data/apipassword | tr -d '\n')')

# iterate over provided skylinks and block them one by one
for skylink in "${skylinks[@]}"; do
    echo "> Blocking ${skylink} ... "

    # POST /skynet/blocklist always returns 200; in case of failure it prints an error message
    curl -A Sia-Agent -u "":${apipassword} --data "{\"add\":[\"$skylink\"]}" "http://${ipaddress}:9980/skynet/blocklist"
done
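
# Usage examples (arguments are illustrative):
#   ./blocklist-skylink.sh <46-character-skylink>
#   ./blocklist-skylink.sh skylinks.txt   # file with one skylink per line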
--------------------------------------------------------------------------------
/scripts/db_backup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

echo "`date +"%Y-%m-%d-%H%M"` Starting a DB backup."

# First of all, let's pamper awscli because Python is so special:
pip3 install --upgrade awscli

# Get current script directory (pwd doesn't cut it)
csd=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Set the environment. We only grab the entries we need because otherwise we
# need to deal with the edge cases presented by problematic values.
set -o allexport
cat $csd/../.env | grep "AWS_ACCESS_KEY_ID\|AWS_SECRET_ACCESS_KEY\|S3_BACKUP_PATH\|SKYNET_DB_USER\|SKYNET_DB_PASS\|SKYNET_DB_HOST\|SKYNET_DB_PORT" >.tmpenv
source .tmpenv
rm .tmpenv
set +o allexport
# Check for AWS credentials:
if [[ $AWS_ACCESS_KEY_ID == "" || $AWS_SECRET_ACCESS_KEY == "" ]]; then
    echo "Missing AWS credentials!"
    exit 1
fi
# Check for backup path:
if [[ $S3_BACKUP_PATH == "" ]]; then
    echo "Missing S3_BACKUP_PATH!"
    exit 1
fi
# Take the current datetime:
DT=$(date +%Y-%m-%d)

### MONGO DB ###
echo "Creating a backup of MongoDB:"
# Check if a backup already exists:
totalFoundObjects=$(aws s3 ls $S3_BACKUP_PATH/$DT --recursive --summarize | grep "mongo" | wc -l)
if [ "$totalFoundObjects" -ge "1" ]; then
    echo "Backup already exists for today. Skipping."
else
    # Create the backup:
    docker exec mongo \
        mongodump \
        -o /data/db/backups/$DT \
        mongodb://$SKYNET_DB_USER:$SKYNET_DB_PASS@$SKYNET_DB_HOST:$SKYNET_DB_PORT
    docker exec mongo chmod o+rw /data/db/backups/
    if [[ $? > 0 ]]; then
        echo "Creating a MongoDB backup failed. Skipping."
    else
        # Compress the backup:
        cd $csd/../docker/data/mongo/db/backups/ && ls -l && tar -czf mongo.tgz $DT && cd -
        # Upload the backup to S3:
        aws s3 cp $csd/../docker/data/mongo/db/backups/mongo.tgz $S3_BACKUP_PATH/$DT/mongo.tgz
        # Clean up
        rm -rf $DT.tgz $csd/../docker/data/mongo/db/backups/mongo.tgz
        echo "Finished MongoDB backup."
    fi
    docker exec mongo rm -rf /data/db/backups/$DT
fi
--------------------------------------------------------------------------------
/setup-scripts/support/limits.conf:
--------------------------------------------------------------------------------
# /etc/security/limits.conf
#
#Each line describes a limit for a user in the form:
#
#<domain>        <type>  <item>  <value>
#
#Where:
#<domain> can be:
#        - a user name
#        - a group name, with @group syntax
#        - the wildcard *, for default entry
#        - the wildcard %, can be also used with %group syntax,
#                 for maxlogin limit
#        - NOTE: group and wildcard limits are not applied to root.
#          To apply a limit to the root user, <domain> must be
#          the literal username root.
#
#<type> can have the two values:
#        - "soft" for enforcing the soft limits
#        - "hard" for enforcing hard limits
#
#<item> can be one of the following:
#        - core - limits the core file size (KB)
#        - data - max data size (KB)
#        - fsize - maximum filesize (KB)
#        - memlock - max locked-in-memory address space (KB)
#        - nofile - max number of open file descriptors
#        - rss - max resident set size (KB)
#        - stack - max stack size (KB)
#        - cpu - max CPU time (MIN)
#        - nproc - max number of processes
#        - as - address space limit (KB)
#        - maxlogins - max number of logins for this user
#        - maxsyslogins - max number of logins on the system
#        - priority - the priority to run user process with
#        - locks - max number of file locks the user can hold
#        - sigpending - max number of pending signals
#        - msgqueue - max memory used by POSIX message queues (bytes)
#        - nice - max nice priority allowed to raise to values: [-20, 19]
#        - rtprio - max realtime priority
#        - chroot - change root to directory (Debian-specific)
#
#<domain>      <type>  <item>         <value>
#

#*               soft    core            0
#root            hard    core            100000
#*               hard    rss             10000
#@student        hard    nproc           20
#@faculty        soft    nproc           20
#@faculty        hard    nproc           50
#ftp             hard    nproc           0
#ftp             -       chroot          /ftp
#@student        -       maxlogins       4

user             soft    nofile          25000
# End of file
~
--------------------------------------------------------------------------------
/docker-compose.accounts.yml:
--------------------------------------------------------------------------------
version: "3.8"

x-logging: &default-logging
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

services:
  nginx:
    environment:
      - ACCOUNTS_ENABLED=true
      - ACCOUNTS_LIMIT_ACCESS=${ACCOUNTS_LIMIT_ACCESS:-authenticated} # default to authenticated access only
    depends_on:
      - accounts

  health-check:
    environment:
      - ACCOUNTS_ENABLED=true
      - ACCOUNTS_LIMIT_ACCESS=${ACCOUNTS_LIMIT_ACCESS:-authenticated} # default to authenticated access only

  accounts:
    # uncomment "build" and comment out "image" to build from sources
    # build: https://github.com/SkynetLabs/skynet-accounts.git#main
    image: skynetlabs/skynet-accounts:1.3.0
    container_name: accounts
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    environment:
      - ACCOUNTS_EMAIL_URI=${ACCOUNTS_EMAIL_URI}
      - ACCOUNTS_JWKS_FILE=/conf/jwks.json
      - COOKIE_DOMAIN=${COOKIE_DOMAIN}
      - COOKIE_HASH_KEY=${COOKIE_HASH_KEY}
      - COOKIE_ENC_KEY=${COOKIE_ENC_KEY}
      - PORTAL_DOMAIN=${PORTAL_DOMAIN}
      - SERVER_DOMAIN=${SERVER_DOMAIN}
      - SKYNET_DB_HOST=${SKYNET_DB_HOST:-mongo}
      - SKYNET_DB_PORT=${SKYNET_DB_PORT:-27017}
      - SKYNET_DB_USER=${SKYNET_DB_USER}
      - SKYNET_DB_PASS=${SKYNET_DB_PASS}
      - STRIPE_API_KEY=${STRIPE_API_KEY}
      - STRIPE_WEBHOOK_SECRET=${STRIPE_WEBHOOK_SECRET}
      - SKYNET_ACCOUNTS_LOG_LEVEL=${SKYNET_ACCOUNTS_LOG_LEVEL:-info}
    volumes:
      - ./docker/data/accounts:/data
      - ./docker/accounts/conf:/conf
    expose:
      - 3000
    networks:
      shared:
        ipv4_address: 10.10.10.70
    depends_on:
      - mongo

  dashboard:
    # uncomment "build" and comment out "image" to build from sources
    # build:
    #   context: https://github.com/SkynetLabs/webportal-accounts-dashboard.git#main
    #   dockerfile: Dockerfile
    image: skynetlabs/webportal-accounts-dashboard:2.1.1
    container_name: dashboard
    restart: unless-stopped
    logging: *default-logging
    env_file:
      - .env
    volumes:
      - ./docker/data/dashboard/.cache:/usr/app/.cache
      - ./docker/data/dashboard/public:/usr/app/public
    networks:
      shared:
        ipv4_address: 10.10.10.85
    expose:
      - 9000
    depends_on:
      - mongo
--------------------------------------------------------------------------------
/dc:
--------------------------------------------------------------------------------
#!/bin/bash

# The dc command is an alias to docker-compose which also scans the current portal configuration (as defined in .env)
# and selects the right docker-compose files to include in the operation. You can use the command in the same way you
# would use docker-compose with the only difference being that you don't need to specify compose files. For more
# information you can run `./dc` or `./dc help`.

# get current working directory of this script and prefix all files with it to
# be able to call this script from anywhere and not only root directory of
# skynet-webportal project
cwd="$(dirname -- "$0";)";

# get portal modules configuration from .env file (if defined more than once, the last one is used)
if [[ -f "${cwd}/.env" ]]; then
    PORTAL_MODULES=$(grep -e "^PORTAL_MODULES=" ${cwd}/.env | tail -1 | sed "s/PORTAL_MODULES=//")
fi

# include base docker compose file
COMPOSE_FILES="-f ${cwd}/docker-compose.yml"

for i in $(seq 1 ${#PORTAL_MODULES}); do
    # accounts module - alias "a"
    if [[ ${PORTAL_MODULES:i-1:1} == "a" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.accounts.yml"
    fi

    # blocker module - alias "b"
    if [[ ${PORTAL_MODULES:i-1:1} == "b" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml"
    fi

    # jaeger module - alias "j"
    if [[ ${PORTAL_MODULES:i-1:1} == "j" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.jaeger.yml"
    fi

    # malware-scanner module - alias "s"
    if [[ ${PORTAL_MODULES:i-1:1} == "s" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.malware-scanner.yml"
    fi

    # mongodb module - alias "m"
    if [[ ${PORTAL_MODULES:i-1:1} == "m" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml"
    fi

    # abuse-scanner module - alias "u"
    if [[ ${PORTAL_MODULES:i-1:1} == "u" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.blocker.yml -f ${cwd}/docker-compose.abuse-scanner.yml"
    fi

    # pinner module - alias "p"
    if [[ ${PORTAL_MODULES:i-1:1} == "p" ]]; then
        COMPOSE_FILES+=" -f ${cwd}/docker-compose.mongodb.yml -f ${cwd}/docker-compose.pinner.yml"
    fi
done

# override file if exists
if [[ -f docker-compose.override.yml ]]; then
    COMPOSE_FILES+=" -f ${cwd}/docker-compose.override.yml"
fi

docker-compose $COMPOSE_FILES $@
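
# Example: with "PORTAL_MODULES=p" in .env, "./dc up -d" expands to
# "docker-compose -f ./docker-compose.yml -f ./docker-compose.mongodb.yml \
#  -f ./docker-compose.pinner.yml up -d".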
--------------------------------------------------------------------------------
/scripts/backup-aws-s3.sh:
--------------------------------------------------------------------------------
#! /usr/bin/env bash

set -e # exit on first error

echo "`date +"%Y-%m-%d %H:%M"` Starting backup process"

# ensure aws cli is upgraded
pip3 install --quiet --upgrade awscli

# import required environment variables from .env file
ENV_VARIABLES=("AWS_ACCESS_KEY_ID" "AWS_SECRET_ACCESS_KEY" "PORTAL_DOMAIN" "SERVER_DOMAIN" "SERVER_UID")

for ENV_VARIABLE in "${ENV_VARIABLES[@]}"; do
    ENV_VARIABLE_VALUE=$(grep -v '^#' /home/user/skynet-webportal/.env | grep ${ENV_VARIABLE} || true)
    if test -z "${ENV_VARIABLE_VALUE}"; then
        # all variables except SERVER_DOMAIN are required
        if [ "${ENV_VARIABLE}" != "SERVER_DOMAIN" ]; then
            echo "Environment variable ${ENV_VARIABLE} is not set" && exit 1
        fi
    else
        export ${ENV_VARIABLE_VALUE}
    fi
done

# create bucket skynet-backup-[portaldomain] (replace dots with dashes and strip anything other than alnum)
# ie. a siasky.net backup results in the skynet-backup-siasky-net bucket name
BUCKET_NAME=$(echo skynet-backup-${PORTAL_DOMAIN} | tr '.' '-' | tr -cd '[[:alnum:]]-')

# create server prefix
if test -z "${SERVER_DOMAIN}"; then
    # if domain name is empty use just uid (replace dots with dashes and strip anything other than alnum)
    SERVER_PREFIX=$(echo ${SERVER_UID} | tr '.' '-' | tr -cd '[[:alnum:]]-')
else
    # use both uid and server domain if available (replace dots with dashes and strip anything other than alnum)
    SERVER_PREFIX=$(echo ${SERVER_DOMAIN}-${SERVER_UID} | tr '.' '-' | tr -cd '[[:alnum:]]-')
    SERVER_PREFIX_LEGACY=$(echo ${SERVER_UID}-${SERVER_DOMAIN} | tr '.' '-' | tr -cd '[[:alnum:]]-')
fi

aws s3api create-bucket --acl private --bucket ${BUCKET_NAME}

# move old backup dir to new location if legacy backup path exists
if test -n "${SERVER_PREFIX_LEGACY}"; then
    aws s3 mv --recursive s3://${BUCKET_NAME}/${SERVER_PREFIX_LEGACY} s3://${BUCKET_NAME}/${SERVER_PREFIX}
fi

# sync all nginx logs
mkdir -p /home/user/skynet-webportal/docker/data/nginx/logs # ensure path exists
aws s3 sync --no-progress /home/user/skynet-webportal/docker/data/nginx/logs s3://${BUCKET_NAME}/${SERVER_PREFIX}/docker/data/nginx/logs

# generate and sync skylinks dump
SKYLINKS_PATH=logs/skylinks/$(date +"%Y-%m-%d").log
mkdir -p /home/user/skynet-webportal/logs/skylinks # ensure path exists
find /home/user/skynet-webportal/logs/skylinks -type f -mtime +7 -delete # delete skylink dumps older than 7 days
docker exec sia siac skynet ls --recursive --alert-suppress > /home/user/skynet-webportal/${SKYLINKS_PATH}
aws s3 cp --no-progress /home/user/skynet-webportal/${SKYLINKS_PATH} s3://${BUCKET_NAME}/${SERVER_PREFIX}/${SKYLINKS_PATH}

echo "`date +"%Y-%m-%d %H:%M"` Backup finished successfully"
--------------------------------------------------------------------------------
/docker/certbot/entrypoint.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# Portal domain requires 3 domain certificates:
# - exact portal domain, ie. example.com
# - wildcard subdomain on portal domain, ie. *.example.com
#   used for skylinks served from portal subdomain
# - wildcard subdomain on hns portal domain subdomain, ie. *.hns.example.com
#   used for resolving handshake domains
DOMAINS=${PORTAL_DOMAIN},*.${PORTAL_DOMAIN},*.hns.${PORTAL_DOMAIN}
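# e.g. with PORTAL_DOMAIN=example.com the line above expands to:
#   example.com,*.example.com,*.hns.example.com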

# Add server domain when it is not empty and different from portal domain
if [ ! -z "${SERVER_DOMAIN}" ] && [ "${PORTAL_DOMAIN}" != "${SERVER_DOMAIN}" ]; then
    # In the case where server domain is not covered by portal domain's
    # wildcard certificate, add server domain name to domains list.
    # - server-001.example.com is covered by *.example.com
    # - server-001.servers.example.com or server-001.example-servers.com
    #   are not covered by any already requested wildcard certificates
    #
    # The condition checks whether server domain does not match portal domain
    # with exactly one level of subdomain (portal domain wildcard cert):
    # (start) [anything but the dot] + [dot] + [portal domain] (end)
    if ! printf "${SERVER_DOMAIN}" | grep -q -E "^[^\.]+\.${PORTAL_DOMAIN}$"; then
        DOMAINS=${DOMAINS},${SERVER_DOMAIN}
    fi

    # Server domain requires the same set of domain certificates as portal domain.
    # Exact server domain case is handled above.
    DOMAINS=${DOMAINS},*.${SERVER_DOMAIN},*.hns.${SERVER_DOMAIN}
fi

# The "wait" will prevent an exit from the script while background tasks are
# still active, so we are adding the line below as a method to prevent orphaning
# the background child processes. The trap fires when docker terminates the container.
trap exit TERM

while :; do
    # Execute certbot and generate or maintain certificates for given domain string.
    # --non-interactive: we are running this as an automation so we cannot be prompted
    # --agree-tos: required flag marking agreement with letsencrypt tos
    # --cert-name: output directory name
    # --email: required for generating certificates, used for communication with CA
    # --domains: comma separated list of domains (will generate one bundled SAN cert)
    # Use CERTBOT_ARGS env variable to pass any additional arguments, ie --dns-route53
    certbot certonly \
        --non-interactive --agree-tos --cert-name skynet-portal \
        --email ${EMAIL_ADDRESS} --domains ${DOMAINS} ${CERTBOT_ARGS}

    # Run a background sleep process that counts down given time
    # Certbot docs advise running maintenance process every 12 hours
    sleep 12h &

    # Await execution until sleep process is finished (it's a background process)
    # Syntax explanation: ${!} expands to a pid of last ran process
    wait ${!}
done
--------------------------------------------------------------------------------
/setup-scripts/log-checker.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

from bot_utils import setup, send_msg
from subprocess import Popen, PIPE

import sys
import traceback
import asyncio

"""
log-checker checks the docker logs for siad.

Arguments:
1. path to a .env file (default is none so env variables can already be
   preset)

2. docker container name (default: "sia")

3. number of hours to look back in the log (default: 1 hour)

"""

# Get the container name as an argument or use "sia" as default.
CONTAINER_NAME = "sia"
if len(sys.argv) > 2:
    CONTAINER_NAME = sys.argv[2]

# Get the number of hours to look back in the logs or use 1 as default.
CHECK_HOURS = 1
if len(sys.argv) > 3:
    CHECK_HOURS = int(sys.argv[3])
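
# Example invocation, matching the crontab entry in setup-scripts/support:
#   ./log-checker.py /home/user/skynet-webportal/.env sia 8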

# Discord messages have a limit on their length set at 2000 bytes. We use
# a lower limit in order to leave some space for additional message text.
DISCORD_MAX_MESSAGE_LENGTH = 1900

setup()


async def run_checks():
    print("Running Skynet portal log checks")
    try:
        await check_docker_logs()
    except:  # catch all exceptions
        trace = traceback.format_exc()
        await send_msg("```\n{}\n```".format(trace), force_notify=False)


# check_docker_logs checks the docker logs by filtering on the docker image name
async def check_docker_logs():
    print("\nChecking docker logs...")

    since_string = "{}h".format(CHECK_HOURS)

    # Read the logs.
    print(
        "[DEBUG] Will run `docker logs --since {} {}`".format(
            since_string, CONTAINER_NAME
        )
    )
    proc = Popen(
        ["docker", "logs", "--since", since_string, CONTAINER_NAME],
        stdin=PIPE,
        stdout=PIPE,
        stderr=PIPE,
        text=True,
    )
    std_out, std_err = proc.communicate()

    if len(std_err) > 0:
        # Trim the error log to under 1MB.
        one_mb = 1024 * 1024
        if len(std_err) > one_mb:
            pos = std_err.find("\n", -one_mb)
            std_err = std_err[pos + 1 :]
        return await send_msg(
            "Error(s) found in log!", file=std_err, force_notify=True
        )

    # If there are any critical or severe errors, upload the whole log file.
    if "Critical" in std_out or "Severe" in std_out or "panic" in std_out:
        return await send_msg(
            "Critical or Severe error found in log!",
            file=std_out,
            force_notify=True,
        )

    # No critical or severe errors, return a heartbeat type message
    return await send_msg(
        "No critical or severe warnings in log for the past {} hours".format(CHECK_HOURS),
    )


loop = asyncio.get_event_loop()
loop.run_until_complete(run_checks())
--------------------------------------------------------------------------------
/setup-scripts/funds-checker.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

"""
funds-checker runs simple checks on a portal node using the siad API and
dispatches messages to a Discord channel.
"""

from bot_utils import setup, send_msg, siad, sc_precision

import traceback
import asyncio

setup()


async def run_checks():
    print("Running Skynet portal funds checks")
    try:
        await check_funds()
    except:  # catch all exceptions
        trace = traceback.format_exc()
        await send_msg("```\n{}\n```".format(trace), force_notify=True)


# check_funds checks that the wallet is unlocked, that its balance has not
# dropped below a set fraction of the allowance, and that most of the
# allowance has not been spent yet. If all checks pass it sends an
# informational message.
async def check_funds():
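    # Concrete example of the thresholds used below: with a 10 KS allowance,
    # the low-balance alert fires once the wallet drops under 3 KS
    # (WALLET_ALLOWANCE_THRESHOLD = 0.3), and the spend alert fires once 9 KS
    # has been allocated (SPEND_THRESHOLD = 0.9).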
    print("\nChecking wallet/funds health...")
    wallet_get = siad.get_wallet()
    renter_get = siad.get_renter()

    if not wallet_get["unlocked"]:
        await send_msg("Wallet locked", force_notify=True)
        return

    confirmed_coins = int(wallet_get["confirmedsiacoinbalance"])
    unconfirmed_coins = int(wallet_get["unconfirmedincomingsiacoins"])
    unconfirmed_outgoing_coins = int(wallet_get["unconfirmedoutgoingsiacoins"])
    balance = confirmed_coins + unconfirmed_coins - unconfirmed_outgoing_coins
    print("Balance: ", balance / sc_precision)

    allowance = renter_get["settings"]["allowance"]
    allowance_funds = int(allowance["funds"])
    allocated_funds = int(renter_get["financialmetrics"]["totalallocated"])
    unallocated_funds = allowance_funds - allocated_funds

    balance_msg = "Balance: {} SC, Allowance Funds: {} SC".format(
        round(balance / sc_precision), round(allowance_funds / sc_precision)
    )
    alloc_msg = "Unallocated: {} SC, Allocated: {} SC".format(
        round(unallocated_funds / sc_precision), round(allocated_funds / sc_precision)
    )

    # Send an alert if there is less than a certain part of allowance worth of money left in the wallet.
    WALLET_ALLOWANCE_THRESHOLD = 0.3
    if balance < allowance_funds * WALLET_ALLOWANCE_THRESHOLD:
        wallet_address_res = siad.get("/wallet/address")
        wallet_msg = "Address: {}".format(wallet_address_res["address"])
        message = "__Wallet balance running low!__ {} {}".format(
            balance_msg, wallet_msg
        )
        return await send_msg(message, force_notify=True)

    # Alert devs when only a fraction of the allowance is remaining.
    SPEND_THRESHOLD = 0.9
    if allocated_funds >= SPEND_THRESHOLD * allowance_funds:
        message = "__More than {:.0%} of allowance spent!__ {}".format(
            SPEND_THRESHOLD, alloc_msg
        )
        return await send_msg(message, force_notify=True)

    # Send an informational heartbeat if all checks passed.
    await send_msg("Funds checks passed. {} {}".format(balance_msg, alloc_msg))


loop = asyncio.get_event_loop()
loop.run_until_complete(run_checks())
--------------------------------------------------------------------------------
/changelog/changelog-tail.md:
--------------------------------------------------------------------------------
## Mar 8, 2022:
### v0.1.4
**Key Updates**
- Expose generic skylink serving endpoint on domain aliases
- Add abuse scanner service, activated by adding `u` to `PORTAL_MODULES`
- Add malware scanner service, activated by adding `s` to `PORTAL_MODULES`
- Remove ORY Kratos, ORY Oathkeeper, CockroachDB.
- Add `/serverload` endpoint for CPU usage and free disk space

**Bugs Fixed**
- Add missing servers and blocklist command to the manual blocklist script.
- Fixed a bug when accessing a file from a skylink via subdomain with a filename that had escaped characters
- Fix `blocklist-skylink.sh` script that didn't remove blocked skylinks from
  nginx cache.
- Fixed uploaded directory name (was "undefined" before)
- Fixed empty directory upload progress (size was not calculated for directories)

**Other**
- Add new critical health check that scans config and makes sure that all relevant configurations are set
- Add abuse report configuration
- Remove hardcoded Airtable default values from blocklist script. Portal
22 |   operators need to define their own values in portal common config (LastPass).
23 | - Add health check for the blocker container
24 | - Drop `Skynet-Requested-Skylink` header
25 | - Dump disk space usage when health-checker script disables portal due to
26 |   critical free disk space.
27 | - Enable the accounting module for skyd
28 | - Add link to supported setup process in Gitbook.
29 | - Set `min_free` parameter on the `proxy_cache_path` directive to `100g`
30 | - Parameterize MongoDB replicaset in `docker-compose.mongodb.yml` via
31 |   `SKYNET_DB_REPLICASET` from `.env` file.
32 | - Hot reload Nginx after pruning cache files.
33 | - Added script to prune nginx cache.
34 | - Remove hardcoded server list from `blocklist-skylink.sh` so it removes server
35 |   list duplication and can also be called from Ansible.
36 | - Remove outdated portal setup documentation and point to developer docs.
37 | - Block skylinks in batches to improve performance.
38 | - Add trimming Airtable skylinks from Takedown Request table.
39 | - Update handshake to use v3.0.1
40 | 
41 | ## Oct 18, 2021:
42 | ### v0.1.3
43 | **Key Updates**
44 | - Change skyd 307 redirect code to 308
45 | - Set caddy dns entry ttl limit to 15 minutes to remove stranded entries.
46 | - Set skyd up to connect to the local mongodb cluster for storing TUS metadata
47 | - Update health check disable command to require reason.
48 | - Move MongoDB to a separate service (use `PORTAL_MODULES=m` to use it without accounts)
49 | - Add proper handling for options response on /skynet/tus endpoint
50 | - added unpinning skylinks from account dashboard
51 | 
52 | **Bugs Fixed**
53 | - include tus header upload-concat in cors requests
54 | - fixed issue with caddy requesting new certificates instead of using existing ones from file storage
55 | - fixed the latest news link redirect in the news header
56 | - Fix extended checks error by rounding the reported datetime.
57 | 
58 | **Other**
59 | - Remove outdated references to NebulousLabs
60 | 
61 | 
62 | 
63 | ## August 9th, 2021:
64 | ### v0.1.1
65 | Monthly release
66 | 
67 | ## March 24th, 2021:
68 | ### v0.1.0
69 | Initial versioned release
70 | 
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | Version Scheme
2 | --------------
3 | Skynet Webportal uses the following versioning scheme, vX.X.X
4 | - First Digit signifies a major (compatibility breaking) release
5 | - Second Digit signifies a major (non compatibility breaking) release
6 | - Third Digit signifies a minor or patch release
7 | 
8 | Version History
9 | ---------------
10 | 
11 | Latest:
12 | 
13 | ## Mar 8, 2022:
14 | ### v0.1.4
15 | **Key Updates**
16 | - expose generic skylink serving endpoint on domain aliases
17 | - Add abuse scanner service, activated by adding `u` to `PORTAL_MODULES`
18 | - Add malware scanner service, activated by adding `s` to `PORTAL_MODULES`
19 | - Remove ORY Kratos, ORY Oathkeeper, CockroachDB.
20 | - Add `/serverload` endpoint for CPU usage and free disk space
21 | 
22 | **Bugs Fixed**
23 | - Add missing servers and blocklist command to the manual blocklist script.
24 | - fixed a bug when accessing a file from a skylink via a subdomain with a filename that had escaped characters
25 | - Fix `blocklist-skylink.sh` script that didn't remove blocked skylinks from
26 |   nginx cache.
27 | - fixed uploaded directory name (was "undefined" before) 28 | - fixed empty directory upload progress (size was not calculated for directories) 29 | 30 | **Other** 31 | - add new critical health check that scans config and makes sure that all relevant configurations are set 32 | - Add abuse report configuration 33 | - Remove hardcoded Airtable default values from blocklist script. Portal 34 | operators need to define their own values in portal common config (LastPass). 35 | - Add health check for the blocker container 36 | - Drop `Skynet-Requested-Skylink` header 37 | - Dump disk space usage when health-checker script disables portal due to 38 | critical free disk space. 39 | - Enable the accounting module for skyd 40 | - Add link to supported setup process in Gitbook. 41 | - Set `min_free` parameter on the `proxy_cache_path` directive to `100g` 42 | - Parameterize MongoDB replicaset in `docker-compose.mongodb.yml` via 43 | `SKYNET_DB_REPLICASET` from `.env` file. 44 | - Hot reload Nginx after pruning cache files. 45 | - Added script to prune nginx cache. 46 | - Remove hardcoded server list from `blocklist-skylink.sh` so it removes server 47 | list duplication and can also be called from Ansible. 48 | - Remove outdated portal setup documentation and point to developer docs. 49 | - Block skylinks in batches to improve performance. 50 | - Add trimming Airtable skylinks from Takedown Request table. 51 | - Update handshake to use v3.0.1 52 | 53 | ## Oct 18, 2021: 54 | ### v0.1.3 55 | **Key Updates** 56 | - Change skyd 307 redirect code to 308 57 | - Set caddy dns entry ttl limit to 15 minutes to remove stranded entries. 58 | - Set skyd up to connect to the local mongodb cluster for storing TUS metadata 59 | - Update health check disable command to require reason. 60 | - Move MongoDB to a separate service (use `PORTAL_MODULES=m` to use it without accounts) 61 | - Add proper handling for options response on /skynet/tus endpoint 62 | - added unpinning skylinks from account dashboard 63 | 64 | **Bugs Fixed** 65 | - include tus header upload-concat in cors requests 66 | - fixed issue with caddy requesting new certificates instead of using existing ones from file storage 67 | - fixed the latest news link redirect in the news header 68 | - Fix extended checks error by rounding the reported datetime. 69 | 70 | **Other** 71 | - Remove outdated references to NebulousLabs 72 | 73 | 74 | 75 | ## August 9th, 2021: 76 | ### v0.1.1 77 | Monthly release 78 | 79 | ## March 24th, 2021: 80 | ### v0.1.0 81 | Initial versioned release 82 | -------------------------------------------------------------------------------- /docker-compose.jaeger.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | x-logging: &default-logging 4 | driver: json-file 5 | options: 6 | max-size: "10m" 7 | max-file: "3" 8 | 9 | services: 10 | sia: 11 | environment: 12 | - JAEGER_DISABLED=${JAEGER_DISABLED:-false} # Enable/Disable tracing 13 | - JAEGER_SERVICE_NAME=${SERVER_DOMAIN:-Skyd} # change to e.g. eu-ger-1 14 | # Configuration 15 | # See https://github.com/jaegertracing/jaeger-client-go#environment-variables 16 | # for all options. 
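      # Note: with the probabilistic sampler configured below, a sampler param
      # of 0.1 means roughly 10% of traces are sampled and reported.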
17 | - JAEGER_SAMPLER_TYPE=probabilistic 18 | - JAEGER_SAMPLER_PARAM=0.1 19 | - JAEGER_AGENT_HOST=jaeger-agent 20 | - JAEGER_AGENT_PORT=6831 21 | - JAEGER_REPORTER_LOG_SPANS=false 22 | 23 | jaeger-agent: 24 | image: jaegertracing/jaeger-agent:1.38.1 25 | command: 26 | [ 27 | "--reporter.grpc.host-port=jaeger-collector:14250", 28 | "--reporter.grpc.retry.max=1000", 29 | ] 30 | container_name: jaeger-agent 31 | restart: on-failure 32 | logging: *default-logging 33 | expose: 34 | - 6831 35 | - 6832 36 | - 5778 37 | environment: 38 | - LOG_LEVEL=debug 39 | networks: 40 | shared: 41 | ipv4_address: 10.10.10.90 42 | depends_on: 43 | - jaeger-collector 44 | 45 | jaeger-collector: 46 | image: jaegertracing/jaeger-collector:1.38.1 47 | entrypoint: /wait_to_start.sh 48 | container_name: jaeger-collector 49 | restart: on-failure 50 | logging: *default-logging 51 | expose: 52 | - 14269 53 | - 14268 54 | - 14250 55 | environment: 56 | - SPAN_STORAGE_TYPE=elasticsearch 57 | - LOG_LEVEL=debug 58 | - WAIT_START_CMD=/go/bin/collector-linux --es.num-shards=1 --es.num-replicas=0 --es.server-urls=http://elasticsearch:9200 59 | - WAIT_COMMAND=wget -qO index.html http://elasticsearch:9200 60 | - WAIT_SLEEP=1 61 | - WAIT_LOOPS=600 62 | volumes: 63 | - ./scripts/wait_to_start.sh:/wait_to_start.sh:ro 64 | networks: 65 | shared: 66 | ipv4_address: 10.10.10.91 67 | depends_on: 68 | - elasticsearch 69 | 70 | jaeger-query: 71 | image: jaegertracing/jaeger-query:1.38.1 72 | entrypoint: /wait_to_start.sh 73 | container_name: jaeger-query 74 | restart: on-failure 75 | logging: *default-logging 76 | ports: 77 | - "127.0.0.1:16686:16686" 78 | expose: 79 | - 16687 80 | environment: 81 | - SPAN_STORAGE_TYPE=elasticsearch 82 | - LOG_LEVEL=debug 83 | - WAIT_START_CMD=/go/bin/query-linux --es.num-shards=1 --es.num-replicas=0 --es.server-urls=http://elasticsearch:9200 84 | - WAIT_COMMAND=wget -qO index.html http://elasticsearch:9200 85 | - WAIT_SLEEP=1 86 | - WAIT_LOOPS=600 87 | volumes: 88 | - ./scripts/wait_to_start.sh:/wait_to_start.sh:ro 89 | networks: 90 | shared: 91 | ipv4_address: 10.10.10.92 92 | depends_on: 93 | - elasticsearch 94 | 95 | elasticsearch: 96 | image: docker.elastic.co/elasticsearch/elasticsearch:7.17.7 97 | container_name: elasticsearch 98 | restart: on-failure 99 | logging: *default-logging 100 | environment: 101 | - discovery.type=single-node 102 | - "ES_JAVA_OPTS=-Xms2g -Xmx2g" 103 | volumes: 104 | # This dir needs to be chowned to 1000:1000 105 | - ./docker/data/elasticsearch/data:/usr/share/elasticsearch/data 106 | ports: 107 | # We need to expose this port, so we can prune the indexes. 108 | - "127.0.0.1:9200:9200" 109 | networks: 110 | shared: 111 | ipv4_address: 10.10.10.93 112 | -------------------------------------------------------------------------------- /setup-scripts/support/bashrc: -------------------------------------------------------------------------------- 1 | # /home/user/.bashrc: executed by bash(1) for non-login shells. 2 | # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) 3 | # for examples 4 | 5 | # If not running interactively, don't do anything 6 | case $- in 7 | *i*) ;; 8 | *) return;; 9 | esac 10 | 11 | # don't put duplicate lines or lines starting with space in the history. 
12 | # See bash(1) for more options 13 | HISTCONTROL=ignoreboth 14 | 15 | # append to the history file, don't overwrite it 16 | shopt -s histappend 17 | 18 | # for setting history length see HISTSIZE and HISTFILESIZE in bash(1) 19 | HISTSIZE=1000 20 | HISTFILESIZE=2000 21 | 22 | # check the window size after each command and, if necessary, 23 | # update the values of LINES and COLUMNS. 24 | shopt -s checkwinsize 25 | 26 | # If set, the pattern "**" used in a pathname expansion context will 27 | # match all files and zero or more directories and subdirectories. 28 | #shopt -s globstar 29 | 30 | # make less more friendly for non-text input files, see lesspipe(1) 31 | #[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)" 32 | 33 | # set variable identifying the chroot you work in (used in the prompt below) 34 | if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then 35 | debian_chroot=$(cat /etc/debian_chroot) 36 | fi 37 | 38 | # set a fancy prompt (non-color, unless we know we "want" color) 39 | case "$TERM" in 40 | alacritty|xterm-color|*-256color) color_prompt=yes;; 41 | esac 42 | 43 | # uncomment for a colored prompt, if the terminal has the capability; turned 44 | # off by default to not distract the user: the focus in a terminal window 45 | # should be on the output of commands, not on the prompt 46 | #force_color_prompt=yes 47 | 48 | if [ -n "$force_color_prompt" ]; then 49 | if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then 50 | # We have color support; assume it's compliant with Ecma-48 51 | # (ISO/IEC-6429). (Lack of such support is extremely rare, and such 52 | # a case would tend to support setf rather than setaf.) 53 | color_prompt=yes 54 | else 55 | color_prompt= 56 | fi 57 | fi 58 | 59 | if [ "$color_prompt" = yes ]; then 60 | PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ ' 61 | else 62 | PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ ' 63 | fi 64 | unset color_prompt force_color_prompt 65 | 66 | # If this is an xterm set the title to user@host:dir 67 | case "$TERM" in 68 | xterm*|rxvt*) 69 | PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1" 70 | ;; 71 | *) 72 | ;; 73 | esac 74 | 75 | # enable color support of ls and also add handy aliases 76 | if [ -x /usr/bin/dircolors ]; then 77 | test -r /home/user/.dircolors && eval "$(dircolors -b /home/user/.dircolors)" || eval "$(dircolors -b)" 78 | alias ls='ls --color=auto' 79 | #alias dir='dir --color=auto' 80 | #alias vdir='vdir --color=auto' 81 | 82 | #alias grep='grep --color=auto' 83 | #alias fgrep='fgrep --color=auto' 84 | #alias egrep='egrep --color=auto' 85 | fi 86 | 87 | # colored GCC warnings and errors 88 | #export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01' 89 | 90 | # some more ls aliases 91 | #alias ll='ls -l' 92 | #alias la='ls -A' 93 | #alias l='ls -CF' 94 | 95 | # Alias definitions. 96 | # You may want to put all your additions into a separate file like 97 | # /home/user/.bash_aliases, instead of adding them here directly. 98 | # See /usr/share/doc/bash-doc/examples in the bash-doc package. 99 | 100 | if [ -f /home/user/.bash_aliases ]; then 101 | . /home/user/.bash_aliases 102 | fi 103 | 104 | # enable programmable completion features (you don't need to enable 105 | # this, if it's already enabled in /etc/bash.bashrc and /etc/profile 106 | # sources /etc/bash.bashrc). 107 | if ! shopt -oq posix; then 108 | if [ -f /usr/share/bash-completion/bash_completion ]; then 109 | . 
/usr/share/bash-completion/bash_completion 110 | elif [ -f /etc/bash_completion ]; then 111 | . /etc/bash_completion 112 | fi 113 | fi 114 | export PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/go/bin:/home/user/go/bin 115 | 116 | alias siac="docker exec -it sia siac" 117 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Skynet License 1.0 2 | 3 | License URL: https://siasky.net/skynet-license.md 4 | Licensor: Nebulous, Inc. (d/b/a Sia) 5 | 6 | ## Acceptance 7 | 8 | In order to get any license under these terms, you must agree 9 | to them as both strict obligations and conditions to all 10 | your licenses. 11 | 12 | ## Copyright License 13 | 14 | The licensor grants you a copyright license for the 15 | software to do everything you might do with the software 16 | that would otherwise infringe the licensor's copyright 17 | in it for any permitted purpose. However, you may 18 | only distribute the software according to [Distribution 19 | License](#distribution-license) and make changes or new works 20 | based on the software according to [Changes and New Works 21 | License](#changes-and-new-works-license). 22 | 23 | ## Changes and New Works License 24 | 25 | The licensor grants you an additional copyright license to 26 | make changes and new works based on the software for any 27 | permitted purpose. However, it is not a permitted purpose to 28 | remove or disable any portion of the software that effectuates 29 | the following payment to licensor: You acknowledge that the 30 | software will cause a fee of 20% of the amount of all transactions 31 | to be paid to the licensor, or a party designated by it, in 32 | Siacoin on the Sia Platform. 33 | 34 | ## Distribution License 35 | 36 | The licensor grants you an additional copyright license 37 | to distribute copies of the software. Your license 38 | to distribute covers distributing the software with 39 | changes and new works permitted by [Changes and New Works 40 | License](#changes-and-new-works-license). 41 | 42 | ## Notices 43 | 44 | You must ensure that anyone who gets a copy of any part of 45 | the software from you also gets a copy of these terms or the 46 | URL for them above. 47 | 48 | ## Patent License 49 | 50 | The licensor grants you a patent license for the software that 51 | covers patent claims the licensor can license, or becomes able 52 | to license, that you would infringe by using the software. 53 | 54 | ## Fair Use 55 | 56 | You may have "fair use" rights for the software under the 57 | law. These terms do not limit them. 58 | 59 | ## No Other Rights 60 | 61 | These terms do not allow you to sublicense or transfer any of 62 | your licenses to anyone else, or prevent the licensor from 63 | granting licenses to anyone else. These terms do not imply 64 | any other licenses. 65 | 66 | ## Patent Defense 67 | 68 | If you make any written claim that the software or any part of the 69 | Sia platform infringes or contributes to infringement of any patent, 70 | your patent license for the software granted under these terms 71 | ends immediately. If your company makes such a claim, your 72 | patent license ends immediately for work on behalf of your company. 
73 | 74 | ## Violations 75 | 76 | The first time you are notified in writing that you have 77 | violated any of these terms, or done anything with the software 78 | not covered by your licenses, your licenses can nonetheless 79 | continue if you come into full compliance with these terms, 80 | and take practical steps to correct past violations, within 81 | 30 days of receiving notice. Otherwise, all your licenses 82 | end immediately. 83 | 84 | ## No Liability 85 | 86 | **_As far as the law allows, the software comes as is, without 87 | any warranty or condition, and the licensor will not be liable 88 | to you for any damages arising out of these terms or the use 89 | or nature of the software, under any kind of legal claim._** 90 | 91 | ## Definitions 92 | 93 | The **Sia platform** is any version of the Sia storage network 94 | platform that is made generally available by licensor or authorized 95 | by licensor. The current version is available at https://sia.tech/. 96 | 97 | **Transaction** is any transaction made to third parties, either 98 | from you, or by you from others on their behalf, using the software 99 | (including via any modifications or additions you may make to 100 | the software). 101 | 102 | **You** refers to the individual or entity agreeing to these 103 | terms. 104 | 105 | **Your company** is any legal entity, sole proprietorship, 106 | or other kind of organization that you work for, plus all 107 | organizations that have control over, are under the control of, 108 | or are under common control with that organization. **Control** 109 | means ownership of substantially all the assets of an entity, 110 | or the power to direct its management and policies by vote, 111 | contract, or otherwise. Control can be direct or indirect. 112 | 113 | **Your licenses** are all the licenses granted to you for the 114 | software under these terms. 115 | 116 | **Use** means anything you do with the software requiring one 117 | of your licenses. 
118 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | x-logging: &default-logging 4 | driver: json-file 5 | options: 6 | max-size: "10m" 7 | max-file: "3" 8 | 9 | networks: 10 | shared: 11 | ipam: 12 | driver: default 13 | config: 14 | - subnet: 10.10.10.0/24 15 | 16 | services: 17 | sia: 18 | # uncomment "build" and comment out "image" to build from sources 19 | # build: 20 | # context: https://github.com/SkynetLabs/docker-skyd.git#main 21 | # dockerfile: scratch/Dockerfile 22 | # args: 23 | # branch: master 24 | image: skynetlabs/skyd:1.6.9 25 | command: --disable-api-security --api-addr :9980 --modules gctwra 26 | container_name: sia 27 | restart: unless-stopped 28 | stop_grace_period: 5m 29 | logging: *default-logging 30 | environment: 31 | - SKYD_DISK_CACHE_ENABLED=${SKYD_DISK_CACHE_ENABLED:-true} 32 | - SKYD_DISK_CACHE_SIZE=${SKYD_DISK_CACHE_SIZE:-53690000000} # 50GB 33 | - SKYD_DISK_CACHE_MIN_HITS=${SKYD_DISK_CACHE_MIN_HITS:-3} 34 | - SKYD_DISK_CACHE_HIT_PERIOD=${SKYD_DISK_CACHE_HIT_PERIOD:-3600} # 1h 35 | env_file: 36 | - .env 37 | volumes: 38 | - ./docker/data/sia:/sia-data 39 | networks: 40 | shared: 41 | ipv4_address: 10.10.10.10 42 | expose: 43 | - 9980 44 | 45 | certbot: 46 | # replace this image with the image supporting your dns provider from 47 | # https://hub.docker.com/r/certbot/certbot and adjust CERTBOT_ARGS env variable 48 | # note: you will need to authenticate your dns request so consult the plugin docs 49 | # configuration https://eff-certbot.readthedocs.io/en/stable/using.html#dns-plugins 50 | # 51 | # ================================================================================= 52 | # example docker-compose.yml changes required for Cloudflare dns provider: 53 | # 54 | # image: certbot/dns-cloudflare 55 | # environment: 56 | # - CERTBOT_ARGS=--dns-cloudflare --dns-cloudflare-credentials /etc/letsencrypt/cloudflare.ini 57 | # 58 | # create ./docker/data/certbot/cloudflare.ini file with the following content: 59 | # dns_cloudflare_api_token = 60 | # 61 | # make sure that the file has 0400 permissions with: 62 | # chmod 0400 ./docker/data/certbot/cloudflare.ini 63 | image: certbot/dns-route53:v1.31.0 64 | entrypoint: sh /entrypoint.sh 65 | container_name: certbot 66 | restart: unless-stopped 67 | logging: *default-logging 68 | env_file: 69 | - .env 70 | environment: 71 | - CERTBOT_ARGS=--dns-route53 72 | volumes: 73 | - ./docker/certbot/entrypoint.sh:/entrypoint.sh 74 | - ./docker/data/certbot:/etc/letsencrypt 75 | 76 | nginx: 77 | # uncomment "build" and comment out "image" to build from sources 78 | # build: 79 | # context: https://github.com/SkynetLabs/webportal-nginx.git#main 80 | # dockerfile: Dockerfile 81 | image: skynetlabs/webportal-nginx:1.0.1 82 | container_name: nginx 83 | restart: unless-stopped 84 | logging: *default-logging 85 | env_file: 86 | - .env 87 | volumes: 88 | - ./docker/data/nginx/cache:/data/nginx/cache 89 | - ./docker/data/nginx/blocker:/data/nginx/blocker 90 | - ./docker/data/nginx/logs:/usr/local/openresty/nginx/logs 91 | - ./docker/data/nginx/skynet:/data/nginx/skynet:ro 92 | - ./docker/data/sia/apipassword:/data/sia/apipassword:ro 93 | - ./docker/data/certbot:/etc/letsencrypt 94 | networks: 95 | shared: 96 | ipv4_address: 10.10.10.30 97 | ports: 98 | - "443:443" 99 | - "80:80" 100 | depends_on: 101 | - sia 102 | - handshake-api 103 | - dnslink-api 104 | - website 105 | 106 | 
website: 107 | # uncomment "build" and comment out "image" to build from sources 108 | # build: 109 | # context: https://github.com/SkynetLabs/webportal-website.git#main 110 | # dockerfile: Dockerfile 111 | image: skynetlabs/webportal-website:0.2.3 112 | container_name: website 113 | restart: unless-stopped 114 | logging: *default-logging 115 | volumes: 116 | - ./docker/data/website/.cache:/usr/app/.cache 117 | - ./docker/data/website/.public:/usr/app/public 118 | env_file: 119 | - .env 120 | networks: 121 | shared: 122 | ipv4_address: 10.10.10.35 123 | expose: 124 | - 9000 125 | 126 | handshake: 127 | image: handshakeorg/hsd:4.0.2 128 | command: --chain-migrate=3 --no-wallet --no-auth --compact-tree-on-init --network=main --http-host=0.0.0.0 129 | container_name: handshake 130 | restart: unless-stopped 131 | logging: *default-logging 132 | volumes: 133 | - ./docker/data/handshake/.hsd:/root/.hsd 134 | networks: 135 | shared: 136 | ipv4_address: 10.10.10.40 137 | expose: 138 | - 12037 139 | 140 | handshake-api: 141 | # uncomment "build" and comment out "image" to build from sources 142 | # build: 143 | # context: https://github.com/SkynetLabs/webportal-handshake-api.git#main 144 | # dockerfile: Dockerfile 145 | image: skynetlabs/webportal-handshake-api:0.1.3 146 | container_name: handshake-api 147 | restart: unless-stopped 148 | logging: *default-logging 149 | environment: 150 | - HOSTNAME=0.0.0.0 151 | - HSD_HOST=handshake 152 | - HSD_NETWORK=main 153 | - HSD_PORT=12037 154 | env_file: 155 | - .env 156 | networks: 157 | shared: 158 | ipv4_address: 10.10.10.50 159 | expose: 160 | - 3100 161 | depends_on: 162 | - handshake 163 | 164 | dnslink-api: 165 | # uncomment "build" and comment out "image" to build from sources 166 | # build: 167 | # context: https://github.com/SkynetLabs/webportal-dnslink-api.git#main 168 | # dockerfile: Dockerfile 169 | image: skynetlabs/webportal-dnslink-api:0.2.1 170 | container_name: dnslink-api 171 | restart: unless-stopped 172 | logging: *default-logging 173 | networks: 174 | shared: 175 | ipv4_address: 10.10.10.55 176 | expose: 177 | - 3100 178 | 179 | health-check: 180 | # uncomment "build" and comment out "image" to build from sources 181 | # build: 182 | # context: https://github.com/SkynetLabs/webportal-health-check.git#main 183 | # dockerfile: Dockerfile 184 | image: skynetlabs/webportal-health-check:1.0.0 185 | container_name: health-check 186 | restart: unless-stopped 187 | logging: *default-logging 188 | volumes: 189 | - ./docker/data/health-check/state:/usr/app/state 190 | networks: 191 | shared: 192 | ipv4_address: 10.10.10.60 193 | env_file: 194 | - .env 195 | environment: 196 | - HOSTNAME=0.0.0.0 197 | - STATE_DIR=/usr/app/state 198 | expose: 199 | - 3100 200 | -------------------------------------------------------------------------------- /setup-scripts/bot_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from dotenv import load_dotenv 4 | from pathlib import Path 5 | from datetime import datetime 6 | from discord_webhook import DiscordWebhook 7 | 8 | import urllib 9 | import json 10 | import os 11 | import traceback 12 | import sys 13 | import re 14 | import subprocess 15 | import requests 16 | import io 17 | 18 | # Load dotenv file if possible. 
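# Editor's note: the checker scripts are invoked with positional arguments,
# e.g. `python3 funds-checker.py /path/to/.env sia` (hypothetical paths), where
# sys.argv[1] is the .env file path and sys.argv[2] is the container name.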
19 | # TODO: change all scripts to use named flags/params
20 | if len(sys.argv) > 1:
21 |     env_path = Path(sys.argv[1])
22 |     load_dotenv(dotenv_path=env_path, override=True)
23 | 
24 | # Get the container name as an argument or use "sia" as default.
25 | CONTAINER_NAME = "sia"
26 | if len(sys.argv) > 2:
27 |     CONTAINER_NAME = sys.argv[2]
28 | 
29 | # sc_precision is the number of hastings per siacoin
30 | sc_precision = 10**24
31 | 
32 | # Global flag indicating whether setup() has run
33 | setup_done = False
34 | 
35 | 
36 | # get docker container id and return None if container not found
37 | def get_docker_container_id(container_name):
38 |     docker_cmd = "docker ps -q -f name=" + container_name
39 |     output = subprocess.check_output(docker_cmd, shell=True).decode("utf-8")
40 |     return None if output == "" else output
41 | 
42 | 
43 | # find out local siad ip by inspecting its docker container
44 | def get_docker_container_ip(container_name):
45 |     ip_regex = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
46 |     docker_cmd = (
47 |         "docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "
48 |         + container_name
49 |     )
50 |     output = subprocess.check_output(docker_cmd, shell=True).decode("utf-8")
51 |     return ip_regex.findall(output)[0]
52 | 
53 | 
54 | # sia daemon local ip address with port
55 | api_endpoint = "http://{}:{}".format(
56 |     get_docker_container_ip(CONTAINER_NAME), os.getenv("API_PORT", "9980")
57 | )
58 | 
59 | 
60 | # find siad api password by getting it out of the docker container
61 | def get_api_password():
62 |     if os.getenv("SIA_API_PASSWORD"):
63 |         return os.getenv("SIA_API_PASSWORD")
64 | 
65 |     api_password_regex = re.compile(r"^\w+$")
66 |     docker_cmd = "docker exec {} cat /sia-data/apipassword".format(CONTAINER_NAME)
67 |     output = subprocess.check_output(docker_cmd, shell=True).decode("utf-8")
68 |     return api_password_regex.findall(output)[0]
69 | 
70 | 
71 | def setup():
72 |     siad.initialize()
73 | 
74 |     global setup_done
75 |     setup_done = True
76 | 
77 | 
78 | # send_msg sends the msg to the specified discord channel.
79 | # If force_notify is set to true it mentions the configured user/role.
80 | async def send_msg(msg, force_notify=False, file=None):
81 |     try:
82 |         webhook_url = os.getenv("DISCORD_WEBHOOK_URL")
83 |         webhook_mention_user_id = os.getenv("DISCORD_MENTION_USER_ID")
84 |         webhook_mention_role_id = os.getenv("DISCORD_MENTION_ROLE_ID")
85 |         webhook = DiscordWebhook(url=webhook_url, rate_limit_retry=True)
86 | 
87 |         # Add the portal name.
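        # e.g. with SERVER_DOMAIN=eu-ger-1 (hypothetical value), the message
        # "Wallet locked" is sent as "**eu-ger-1**: Wallet locked".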
88 | msg = "**{}**: {}".format(os.getenv("SERVER_DOMAIN"), msg) 89 | 90 | if file and isinstance(file, str): 91 | is_json = is_json_string(file) 92 | content_type = "application/json" if is_json else "text/plain" 93 | ext = "json" if is_json else "txt" 94 | filename = "{}-{}.{}".format( 95 | CONTAINER_NAME, datetime.utcnow().strftime("%Y-%m-%d-%H:%M:%S"), ext 96 | ) 97 | skylink = upload_to_skynet(file, filename, content_type=content_type) 98 | if skylink: 99 | msg = "{} {}".format(msg, skylink) # append skylink to message 100 | else: 101 | webhook.add_file(file=io.BytesIO(file.encode()), filename=filename) 102 | 103 | if force_notify and (webhook_mention_user_id or webhook_mention_role_id): 104 | webhook.allowed_mentions = { 105 | "users": [webhook_mention_user_id], 106 | "roles": [webhook_mention_role_id], 107 | } 108 | msg = "{} /cc".format(msg) # separate message from mentions 109 | if webhook_mention_role_id: 110 | msg = "{} <@&{}>".format(msg, webhook_mention_role_id) 111 | if webhook_mention_user_id: 112 | msg = "{} <@{}>".format(msg, webhook_mention_user_id) 113 | 114 | webhook.content = msg 115 | webhook.execute() 116 | 117 | print("msg > " + msg) # print message to std output for debugging purposes 118 | except: 119 | print("Failed to send message!") 120 | print(traceback.format_exc()) 121 | 122 | 123 | def upload_to_skynet(contents, filename="file.txt", content_type="text/plain"): 124 | files = {"file": (filename, contents, content_type)} 125 | res = requests.post("https://siasky.net/skynet/skyfile", files=files) 126 | if res.status_code == requests.codes["ok"]: 127 | res_json = res.json() 128 | return "https://siasky.net/" + res_json["skylink"] 129 | return None 130 | 131 | 132 | def is_json_string(str): 133 | try: 134 | json.loads(str) 135 | return True 136 | except ValueError: 137 | return False 138 | 139 | 140 | # siad class provides wrappers for the necessary siad commands. 141 | class siad: 142 | # initializes values for using the API (password and 143 | # user-agent) so that all calls to urllib.request.urlopen have these set. 144 | @staticmethod 145 | def initialize(): 146 | # Setup a handler with the API password 147 | username = "" 148 | password_mgr = urllib.request.HTTPPasswordMgrWithDefaultRealm() 149 | password_mgr.add_password(None, api_endpoint, username, get_api_password()) 150 | handler = urllib.request.HTTPBasicAuthHandler(password_mgr) 151 | 152 | # Setup an opener with the correct user agent 153 | opener = urllib.request.build_opener(handler) 154 | opener.addheaders = [("User-agent", "Sia-Agent")] 155 | 156 | # Install the opener. 157 | # Now all calls to urllib.request.urlopen use our opener. 
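        # Note that install_opener() is process-global: every later
        # urllib.request.urlopen() call in these scripts will authenticate
        # with the API password and the "Sia-Agent" user agent set above.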
158 | urllib.request.install_opener(opener) 159 | 160 | # load_json reads the http response and decodes the JSON value 161 | @staticmethod 162 | def load_json(resp): 163 | return json.loads(resp.decode("utf-8")) 164 | 165 | @staticmethod 166 | def get(endpoint): 167 | if not setup_done: 168 | setup() 169 | 170 | resp = urllib.request.urlopen(api_endpoint + endpoint).read() 171 | return siad.load_json(resp) 172 | 173 | @staticmethod 174 | def get_wallet(): 175 | if not setup_done: 176 | setup() 177 | 178 | resp = urllib.request.urlopen(api_endpoint + "/wallet").read() 179 | return siad.load_json(resp) 180 | 181 | @staticmethod 182 | def get_renter(): 183 | if not setup_done: 184 | setup() 185 | 186 | resp = urllib.request.urlopen(api_endpoint + "/renter").read() 187 | return siad.load_json(resp) 188 | 189 | @staticmethod 190 | def get_renter_contracts(): 191 | if not setup_done: 192 | setup() 193 | 194 | resp = urllib.request.urlopen(api_endpoint + "/renter/contracts").read() 195 | return siad.load_json(resp) 196 | -------------------------------------------------------------------------------- /setup-scripts/health-checker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import asyncio 4 | import json 5 | import os 6 | import re 7 | import sys 8 | import time 9 | import traceback 10 | from datetime import datetime, timedelta 11 | 12 | import requests 13 | from bot_utils import setup, send_msg, get_docker_container_id, get_docker_container_ip 14 | 15 | """ 16 | health-checker reads the /health-check endpoint of the portal and dispatches 17 | messages to a Discord channel. 18 | """ 19 | 20 | # Get the container name as an argument or use "sia" as default. 21 | CONTAINER_NAME = "sia" 22 | if len(sys.argv) > 2: 23 | CONTAINER_NAME = sys.argv[2] 24 | 25 | # Get the number of hours to look back in the logs or use 1 as default. 26 | CHECK_HOURS = 1 27 | if len(sys.argv) > 3: 28 | CHECK_HOURS = int(sys.argv[3]) 29 | 30 | # Discord messages have a limit on their length set at 2000 bytes. We use 31 | # a lower limit in order to leave some space for additional message text. 32 | DISCORD_MAX_MESSAGE_LENGTH = 1900 33 | 34 | GB = 1 << 30 # 1 GiB in bytes 35 | 36 | # Free disk space threshold used for notices and shutting down siad. 37 | FREE_DISK_SPACE_THRESHOLD = 100 * GB 38 | FREE_DISK_SPACE_THRESHOLD_CRITICAL = 60 * GB 39 | 40 | # Disk usage dump log file (relative to this .py script). 41 | DISK_USAGE_DUMP_LOG = "../../devops/disk-monitor/disk-usage-dump.log" 42 | 43 | setup() 44 | 45 | 46 | async def run_checks(): 47 | print("Running Skynet portal health checks") 48 | try: 49 | await check_load_average() 50 | await check_disk() 51 | await check_health() 52 | await check_alerts() 53 | await check_portal_size() 54 | except: 55 | trace = traceback.format_exc() 56 | print("[DEBUG] run_checks() failed.") 57 | await send_msg( 58 | "Failed to run the portal health checks!", 59 | file=trace, 60 | force_notify=True, 61 | ) 62 | 63 | 64 | # check_load_average monitors the system load average value and issues a 65 | # warning message if it exceeds 10. 
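# For reference (illustrative output, values hypothetical), a Linux `uptime`
# line looks like:
#   12:01:02 up 10 days, 3:04, 2 users, load average: 0.15, 0.30, 8.00
# and the regex below captures the last (15-minute) figure, here "8.00".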
66 | async def check_load_average():
67 |     uptime_string = os.popen("uptime").read().strip()
68 |     if sys.platform == "Darwin":
69 |         pattern = r"^.*load averages: \d*\.\d* \d*\.\d* (\d*\.\d*)$"
70 |     else:
71 |         pattern = r"^.*load average: \d*\.\d*, \d*\.\d*, (\d*\.\d*)$"
72 |     load_av = re.match(pattern, uptime_string).group(1)
73 |     if float(load_av) > 10:
74 |         message = "High system load detected in uptime output: {}".format(uptime_string)
75 |         # Disabling pings until we have a metrics solution and a process to
76 |         # better address this
77 |         await send_msg(message, force_notify=False)
78 | 
79 | 
80 | # check_disk checks the amount of free space on the volume that contains the
81 | # working directory and issues a warning message if it's under FREE_DISK_SPACE_THRESHOLD.
82 | async def check_disk():
83 |     # We check free disk space in 1024 byte units, so it's easy to convert.
84 |     df = os.popen("df --block-size=1024").read().strip()
85 |     volumes = {}
86 |     # Iterate over the output, ignoring the header line
87 |     for line in df.split("\n")[1:]:
88 |         fields = list(filter(None, line.split(" ")))
89 |         # -1 is "mounted on", 3 is "available space" in KiB which we want in bytes
90 |         volumes[fields[-1]] = int(fields[3]) * 1024
91 |     # List of mount points, longest to shortest. We'll use that to find the best
92 |     # fit for the volume we want to check.
93 |     mount_points = sorted(volumes.keys(), key=len, reverse=True)
94 |     wd = os.popen("pwd").read().strip()
95 |     vol = ""
96 |     for mp in mount_points:
97 |         if wd.startswith(mp):
98 |             vol = mp
99 |             break
100 |     if vol == "":
101 |         message = "Failed to check free disk space! Didn't find a suitable mount point to check."
102 |         return await send_msg(message, file=df)
103 | 
104 |     # if we've reached a critical free disk space threshold we need to send proper notice
105 |     # and shut down sia container so it doesn't get corrupted
106 |     if int(volumes[vol]) < FREE_DISK_SPACE_THRESHOLD_CRITICAL:
107 |         free_space_gb = "{:.2f}".format(int(volumes[vol]) / GB)
108 |         message = "CRITICAL! Very low disk space: {}GiB, **siad stopped**!".format(
109 |             free_space_gb
110 |         )
111 | 
112 |         # dump disk usage
113 |         script_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
114 |         os.popen(
115 |             script_dir + "/disk-usage-dump.sh " + script_dir + "/" + DISK_USAGE_DUMP_LOG
116 |         )
117 | 
118 |         inspect = os.popen("docker inspect sia").read().strip()
119 |         inspect_json = json.loads(inspect)
120 |         if inspect_json[0]["State"]["Running"] is True:
121 |             # mark portal as unhealthy
122 |             os.popen("docker exec health-check cli disable 'critical free disk space'")
123 |             time.sleep(300)  # wait 5 minutes to propagate dns changes
124 |             os.popen("docker stop sia")  # stop sia container
125 |         return await send_msg(message, force_notify=True)
126 | 
127 |     # if we've reached a free disk space threshold we need to send proper notice
128 |     if int(volumes[vol]) < FREE_DISK_SPACE_THRESHOLD:
129 |         free_space_gb = "{:.2f}".format(int(volumes[vol]) / GB)
130 |         message = "WARNING! Low disk space: {}GiB".format(free_space_gb)
131 |         return await send_msg(message, force_notify=True)
132 | 
133 | 
134 | # check_health checks /health-check endpoint and reports recent issues
135 | async def check_health():
136 |     print("\nChecking portal health status...")
137 | 
138 |     # do not try to run health checks if health-check container does not exist
139 |     # possible use case is fresh or taken down server that has only skyd running
140 |     if not get_docker_container_id("health-check"):
141 |         print("Container health-check not found - skipping health checks")
142 |         return
143 | 
144 |     try:
145 |         endpoint = "http://{}:{}".format(get_docker_container_ip("health-check"), 3100)
146 |     except:
147 |         message = "Could not get the health-check service endpoint!"
148 |         return await send_msg(message, force_notify=True)
149 | 
150 |     try:
151 |         res = requests.get(endpoint + "/health-check", verify=False)
152 |         json_check = res.json()
153 | 
154 |         server_failure = (
155 |             res.status_code != requests.codes["ok"]
156 |             and json_check["disabled"] is False
157 |         )
158 | 
159 |         res = requests.get(endpoint + "/health-check/critical", verify=False)
160 |         json_critical = res.json()
161 | 
162 |         res = requests.get(endpoint + "/health-check/extended", verify=False)
163 |         json_extended = res.json()
164 |     except:
165 |         message = traceback.format_exc()
166 |         message += "\nRequest url: " + (res.url if res.url else "-")
167 |         message += "\nStatus code: " + (
168 |             str(res.status_code) if res.status_code else "-"
169 |         )
170 |         message += "\nResponse body: " + (res.text if res.text else "-")
171 |         return await send_msg(
172 |             "Failed to run health checks!", file=message, force_notify=True
173 |         )
174 | 
175 |     critical_checks_total = 0
176 |     critical_checks_failed = 0
177 | 
178 |     extended_checks_total = 0
179 |     extended_checks_failed = 0
180 | 
181 |     failed_records = []
182 |     failed_records_file = None
183 | 
184 |     time_limit = datetime.utcnow().replace(
185 |         minute=0, second=0, microsecond=0
186 |     ) - timedelta(hours=CHECK_HOURS)
187 | 
188 |     for critical in json_critical:
189 |         time = datetime.strptime(critical["date"], "%Y-%m-%dT%H:%M:%S.%fZ")
190 |         if time < time_limit:
191 |             continue
192 |         bad = False
193 |         for check in critical["checks"]:
194 |             critical_checks_total += 1
195 |             if check["up"] is False:
196 |                 critical_checks_failed += 1
197 |                 bad = True
198 |         if bad:
199 |             critical["checks"] = [
200 |                 check for check in critical["checks"] if check["up"] is False
201 |             ]
202 |             failed_records.append(critical)
203 | 
204 |     for extended in json_extended:
205 |         time = datetime.strptime(extended["date"], "%Y-%m-%dT%H:%M:%S.%fZ")
206 |         if time < time_limit:
207 |             continue
208 |         bad = False
209 |         for check in extended["checks"]:
210 |             extended_checks_total += 1
211 |             if check["up"] is False:
212 |                 extended_checks_failed += 1
213 |                 bad = True
214 |         if bad:
215 |             extended["checks"] = [
216 |                 check for check in extended["checks"] if check["up"] is False
217 |             ]
218 |             failed_records.append(extended)
219 | 
220 |     ################################################################################
221 |     # create a message
222 |     ################################################################################
223 | 
224 |     message = ""
225 |     force_notify = False
226 | 
227 |     if server_failure:
228 |         message += "__Server down!!!__ "
229 |         force_notify = True
230 | 
231 |     if critical_checks_failed:
232 |         message += "{}/{} CRITICAL checks failed over the last {} hours! ".format(
233 |             critical_checks_failed, critical_checks_total, CHECK_HOURS
234 |         )
235 |         force_notify = True
236 |     else:
237 |         message += "All {} critical checks passed. ".format(critical_checks_total)
238 | 
239 |     if extended_checks_failed:
240 |         message += "{}/{} extended checks failed over the last {} hours! ".format(
241 |             extended_checks_failed, extended_checks_total, CHECK_HOURS
242 |         )
243 |         force_notify = True
244 |     else:
245 |         message += "All {} extended checks passed. ".format(extended_checks_total)
246 | 
247 |     if len(failed_records):
248 |         failed_records_file = json.dumps(failed_records, indent=2)
249 | 
250 |     # send a message if we force notification, there is a failure dump, or once daily (heartbeat) at 1 AM
251 |     if force_notify or failed_records_file or datetime.utcnow().hour == 1:
252 |         return await send_msg(
253 |             message, file=failed_records_file, force_notify=force_notify
254 |         )
255 | 
256 | 
257 | # contains_string is a simple helper to check if a string contains a substring.
258 | # This is faster and easier than regex for word comparisons
259 | def contains_string(string_to_check, string_to_find):
260 |     return string_to_find in string_to_check
261 | 
262 | 
263 | # check_alerts checks the alerts returned from siad's daemon/alerts API
264 | async def check_alerts():
265 |     print("\nChecking portal siad alerts...")
266 | 
267 |     ################################################################################
268 |     # parse siac
269 |     ################################################################################
270 | 
271 |     # Alerts
272 |     # Execute 'siac alerts' and read the response
273 |     cmd_string = "docker exec {} siac alerts".format(CONTAINER_NAME)
274 |     siac_alert_output = os.popen(cmd_string).read().strip()
275 | 
276 |     # Initialize variables
277 |     num_critical_alerts = 0
278 |     num_error_alerts = 0
279 |     num_warning_alerts = 0
280 |     num_siafile_alerts = 0
281 |     siafile_alerts = []
282 | 
283 |     # Pattern strings to search for
284 |     critical = "Severity: critical"
285 |     error = "Severity: error"
286 |     warning = "Severity: warning"
287 |     health_of = "has a health of"
288 |     siafile_alert_message = (
289 |         "The SiaFile mentioned in the 'Cause' is below 75% redundancy"
290 |     )
291 | 
292 |     # Split the output by line and check for type of alert and siafile alerts
293 |     for line in siac_alert_output.split("\n"):
294 |         # Check for the type of alert
295 |         if contains_string(line, critical):
296 |             num_critical_alerts += 1
297 |         if contains_string(line, error):
298 |             num_error_alerts += 1
299 |         if contains_string(line, warning):
300 |             num_warning_alerts += 1
301 | 
302 |         # Check for siafile alerts in alerts. This is so that the alert
303 |         # severity can change and this doesn't need to be updated
304 |         if contains_string(line, siafile_alert_message):
305 |             num_siafile_alerts += 1
306 |         if contains_string(line, health_of):
307 |             siafile_alerts.append(line)
308 | 
309 |     # Repair Size
310 |     # Execute 'siac renter' and read the response
311 |     cmd_string = "docker exec {} siac renter".format(CONTAINER_NAME)
312 |     siac_renter_output = os.popen(cmd_string).read().strip()
313 | 
314 |     # Initialize variables
315 |     repair_remaining = ""
316 | 
317 |     # Pattern strings to search for
318 |     repair_str = "Repair Data Remaining"
319 | 
320 |     # Split the output by line and check for the repair remaining
321 |     for line in siac_renter_output.split("\n"):
322 |         # Check for the repair remaining line
323 |         if contains_string(line, repair_str):
324 |             repair_remaining = line.split(":")[1].strip()
325 | 
326 |     ################################################################################
327 |     # create a message
328 |     ################################################################################
329 | 
330 |     message = ""
331 |     force_notify = False
332 | 
333 |     if num_critical_alerts > 0:
334 |         message += "{} CRITICAL Alerts found! ".format(num_critical_alerts)
335 |         force_notify = True
336 |     if num_error_alerts > 0:
337 |         message += "{} Error Alerts found! ".format(num_error_alerts)
338 | 
339 |     # Subtract out the siafile alerts from the warning alerts since we announce
340 |     # them separately
341 |     num_warning_alerts -= num_siafile_alerts
342 |     message += "{} Warning Alerts found. ".format(num_warning_alerts)
343 |     message += "{} SiaFiles with bad health found. ".format(num_siafile_alerts)
344 | 
345 |     # Add repair size
346 |     message += "{} of repair remaining. ".format(repair_remaining)
347 | 
348 |     # send a message if we force notification, or just once daily (heartbeat)
349 |     # at 1 AM
350 |     if force_notify or datetime.utcnow().hour == 1:
351 |         return await send_msg(
352 |             message, file=siac_alert_output, force_notify=force_notify
353 |         )
354 | 
355 | 
356 | # check_portal_size checks the number of files that the portal is managing to
357 | # determine if it is time to rotate it out
358 | async def check_portal_size():
359 |     print("\nChecking portal size...")
360 | 
361 |     # Execute siac renter to check the size of the portal
362 |     #
363 |     # NOTE: we should leave this as always trying to execute the docker command
364 |     # against the sia container as this will then fail for maintenance servers
365 |     # where we don't care about this check.
366 |     cmd_string = "docker exec sia siac renter"
367 |     siac_renter_output = os.popen(cmd_string).read().strip()
368 | 
369 |     # Initialize variables
370 |     num_files = 0
371 |     max_files = 1500000  # 1.5 million
372 |     files_text = "Files:"
373 |     for line in siac_renter_output.split("\n"):
374 |         if line.strip().startswith(files_text):
375 |             for el in line.split():
376 |                 if el.isdigit():
377 |                     num_files = int(el)
378 | 
379 |     ################################################################################
380 |     # create a message
381 |     ################################################################################
382 | 
383 |     message = ""
384 |     force_notify = False
385 | 
386 |     if num_files > max_files:
387 |         message += "Portal has {} files! Consider rotating! ".format(num_files)
388 |         # send a notification when more than 40% above the limit
389 |         force_notify = num_files > max_files * 1.4
390 |     else:
391 |         message += "Portal has {} files. ".format(num_files)
392 | 
393 |     # send a message if we force notification, or just once daily (heartbeat) at 1 AM
394 |     if force_notify or datetime.utcnow().hour == 1:
395 |         return await send_msg(message, force_notify=force_notify)
396 | 
397 | 
398 | loop = asyncio.get_event_loop()
399 | loop.run_until_complete(run_checks())
400 | 
--------------------------------------------------------------------------------
/docker/clamav/clamd.conf:
--------------------------------------------------------------------------------
1 | ##
2 | ## Example config file for the Clam AV daemon
3 | ## Please read the clamd.conf(5) manual before editing this file.
4 | ##
5 | 
6 | 
7 | # Comment or remove the line below.
8 | # Example
9 | 
10 | # Uncomment this option to enable logging.
11 | # LogFile must be writable for the user running daemon.
12 | # A full path is required.
13 | # Default: disabled
14 | LogFile /var/log/clamav/clamd.log
15 | 
16 | # By default the log file is locked for writing - the lock protects against
17 | # running clamd multiple times (if you want to run another clamd, please
18 | # copy the configuration file, change the LogFile variable, and run
19 | # the daemon with --config-file option).
20 | # This option disables log file locking.
21 | # Default: no
22 | #LogFileUnlock yes
23 | 
24 | # Maximum size of the log file.
25 | # Value of 0 disables the limit.
26 | # You may use 'M' or 'm' for megabytes (1M = 1m = 1048576 bytes)
27 | # and 'K' or 'k' for kilobytes (1K = 1k = 1024 bytes). To specify the size
28 | # in bytes just don't use modifiers. If LogFileMaxSize is enabled, log
29 | # rotation (the LogRotate option) will always be enabled.
30 | # Default: 1M
31 | LogFileMaxSize 50M
32 | 
33 | # Log time with each message.
34 | # Default: no
35 | LogTime yes
36 | 
37 | # Also log clean files. Useful in debugging but drastically increases the
38 | # log size.
39 | # Default: no
40 | #LogClean yes
41 | 
42 | # Use system logger (can work together with LogFile).
43 | # Default: no
44 | #LogSyslog yes
45 | 
46 | # Specify the type of syslog messages - please refer to 'man syslog'
47 | # for facility names.
48 | # Default: LOG_LOCAL6
49 | #LogFacility LOG_MAIL
50 | 
51 | # Enable verbose logging.
52 | # Default: no
53 | #LogVerbose yes
54 | 
55 | # Enable log rotation. Always enabled when LogFileMaxSize is enabled.
56 | # Default: no
57 | #LogRotate yes
58 | 
59 | # Enable Prelude output.
60 | # Default: no
61 | #PreludeEnable yes
62 | #
63 | # Set the name of the analyzer used by prelude-admin.
64 | # Default: ClamAV
65 | #PreludeAnalyzerName ClamAV
66 | 
67 | # Log additional information about the infected file, such as its
68 | # size and hash, together with the virus name.
69 | #ExtendedDetectionInfo yes
70 | 
71 | # This option allows you to save a process identifier of the listening
72 | # daemon (main thread).
73 | # This file will be owned by root, as long as clamd was started by root.
74 | # It is recommended that the directory where this file is stored is
75 | # also owned by root to keep other users from tampering with it.
76 | # Default: disabled
77 | PidFile /run/lock/clamd.pid
78 | 
79 | # Optional path to the global temporary directory.
80 | # Default: system specific (usually /tmp or /var/tmp).
81 | #TemporaryDirectory /var/tmp
82 | 
83 | # Path to the database directory.
84 | # Default: hardcoded (depends on installation options)
85 | #DatabaseDirectory /var/lib/clamav
86 | 
87 | # Only load the official signatures published by the ClamAV project.
88 | # Default: no 89 | #OfficialDatabaseOnly no 90 | 91 | # The daemon can work in local mode, network mode or both. 92 | # Due to security reasons we recommend the local mode. 93 | 94 | # Path to a local socket file the daemon will listen on. 95 | # Default: disabled (must be specified by a user) 96 | LocalSocket /run/clamav/clamd.sock 97 | 98 | # Sets the group ownership on the unix socket. 99 | # Default: disabled (the primary group of the user running clamd) 100 | #LocalSocketGroup virusgroup 101 | 102 | # Sets the permissions on the unix socket to the specified mode. 103 | # Default: disabled (socket is world accessible) 104 | #LocalSocketMode 660 105 | 106 | # Remove stale socket after unclean shutdown. 107 | # Default: yes 108 | #FixStaleSocket yes 109 | 110 | # TCP port address. 111 | # Default: no 112 | TCPSocket 3310 113 | 114 | # TCP address. 115 | # By default we bind to INADDR_ANY, probably not wise. 116 | # Enable the following to provide some degree of protection 117 | # from the outside world. This option can be specified multiple 118 | # times if you want to listen on multiple IPs. IPv6 is now supported. 119 | # Default: no 120 | TCPAddr 0.0.0.0 121 | 122 | # Maximum length the queue of pending connections may grow to. 123 | # Default: 200 124 | #MaxConnectionQueueLength 30 125 | 126 | # Clamd uses FTP-like protocol to receive data from remote clients. 127 | # If you are using clamav-milter to balance load between remote clamd daemons 128 | # on firewall servers you may need to tune the options below. 129 | 130 | # Close the connection when the data size limit is exceeded. 131 | # The value should match your MTA's limit for a maximum attachment size. 132 | # Default: 25M 133 | StreamMaxLength 100M 134 | 135 | # Limit port range. 136 | # Default: 1024 137 | #StreamMinPort 30000 138 | # Default: 2048 139 | #StreamMaxPort 32000 140 | 141 | # Maximum number of threads running at the same time. 142 | # Default: 10 143 | #MaxThreads 20 144 | 145 | # Waiting for data from a client socket will timeout after this time (seconds). 146 | # Default: 120 147 | #ReadTimeout 300 148 | 149 | # This option specifies the time (in seconds) after which clamd should 150 | # timeout if a client doesn't provide any initial command after connecting. 151 | # Default: 30 152 | #CommandReadTimeout 30 153 | 154 | # This option specifies how long to wait (in milliseconds) if the send buffer 155 | # is full. 156 | # Keep this value low to prevent clamd hanging. 157 | # 158 | # Default: 500 159 | #SendBufTimeout 200 160 | 161 | # Maximum number of queued items (including those being processed by 162 | # MaxThreads threads). 163 | # It is recommended to have this value at least twice MaxThreads if possible. 164 | # WARNING: you shouldn't increase this too much to avoid running out of file 165 | # descriptors, the following condition should hold: 166 | # MaxThreads*MaxRecursion + (MaxQueue - MaxThreads) + 6< RLIMIT_NOFILE (usual 167 | # max is 1024). 168 | # 169 | # Default: 100 170 | #MaxQueue 200 171 | 172 | # Waiting for a new job will timeout after this time (seconds). 173 | # Default: 30 174 | #IdleTimeout 60 175 | 176 | # Don't scan files and directories matching regex 177 | # This directive can be used multiple times 178 | # Default: scan all 179 | #ExcludePath ^/proc/ 180 | #ExcludePath ^/sys/ 181 | 182 | # Maximum depth directories are scanned at. 183 | # Default: 15 184 | #MaxDirectoryRecursion 20 185 | 186 | # Follow directory symlinks. 
187 | # Default: no 188 | #FollowDirectorySymlinks yes 189 | 190 | # Follow regular file symlinks. 191 | # Default: no 192 | #FollowFileSymlinks yes 193 | 194 | # Scan files and directories on other filesystems. 195 | # Default: yes 196 | #CrossFilesystems yes 197 | 198 | # Perform a database check. 199 | # Default: 600 (10 min) 200 | #SelfCheck 600 201 | 202 | # Enable non-blocking (multi-threaded/concurrent) database reloads. 203 | # This feature will temporarily load a second scanning engine while scanning 204 | # continues using the first engine. Once loaded, the new engine takes over. 205 | # The old engine is removed as soon as all scans using the old engine have 206 | # completed. 207 | # This feature requires more RAM, so this option is provided in case users are 208 | # willing to block scans during reload in exchange for lower RAM requirements. 209 | # Default: yes 210 | ConcurrentDatabaseReload no 211 | 212 | # Execute a command when virus is found. In the command string %v will 213 | # be replaced with the virus name and %f will be replaced with the file name. 214 | # Additionally, two environment variables will be defined: $CLAM_VIRUSEVENT_FILENAME 215 | # and $CLAM_VIRUSEVENT_VIRUSNAME. 216 | # Default: no 217 | #VirusEvent /usr/local/bin/send_sms 123456789 "VIRUS ALERT: %v in %f" 218 | 219 | # Run as another user (clamd must be started by root for this option to work) 220 | # Default: don't drop privileges 221 | User clamav 222 | 223 | # Stop daemon when libclamav reports out of memory condition. 224 | #ExitOnOOM yes 225 | 226 | # Don't fork into background. 227 | # Default: no 228 | #Foreground yes 229 | 230 | # Enable debug messages in libclamav. 231 | # Default: no 232 | #Debug yes 233 | 234 | # Do not remove temporary files (for debug purposes). 235 | # Default: no 236 | #LeaveTemporaryFiles yes 237 | 238 | # Permit use of the ALLMATCHSCAN command. If set to no, clamd will reject 239 | # any ALLMATCHSCAN command as invalid. 240 | # Default: yes 241 | #AllowAllMatchScan no 242 | 243 | # Detect Possibly Unwanted Applications. 244 | # Default: no 245 | #DetectPUA yes 246 | 247 | # Exclude a specific PUA category. This directive can be used multiple times. 248 | # See https://github.com/vrtadmin/clamav-faq/blob/master/faq/faq-pua.md for 249 | # the complete list of PUA categories. 250 | # Default: Load all categories (if DetectPUA is activated) 251 | #ExcludePUA NetTool 252 | #ExcludePUA PWTool 253 | 254 | # Only include a specific PUA category. This directive can be used multiple 255 | # times. 256 | # Default: Load all categories (if DetectPUA is activated) 257 | #IncludePUA Spy 258 | #IncludePUA Scanner 259 | #IncludePUA RAT 260 | 261 | # This option causes memory or nested map scans to dump the content to disk. 262 | # If you turn on this option, more data is written to disk and is available 263 | # when the LeaveTemporaryFiles option is enabled. 264 | #ForceToDisk yes 265 | 266 | # This option allows you to disable the caching feature of the engine. By 267 | # default, the engine will store an MD5 in a cache of any files that are 268 | # not flagged as virus or that hit limits checks. Disabling the cache will 269 | # have a negative performance impact on large scans. 270 | # Default: no 271 | #DisableCache yes 272 | 273 | # In some cases (eg. complex malware, exploits in graphic files, and others), 274 | # ClamAV uses special algorithms to detect abnormal patterns and behaviors that 275 | # may be malicious. 
276 | # detected potential threats.
277 | # Default: yes
278 | #HeuristicAlerts yes
279 |
280 | # Allow heuristic alerts to take precedence.
281 | # When enabled, if a heuristic scan (such as phishingScan) detects
282 | # a possible virus/phish, it will stop the scan immediately. Recommended;
283 | # it saves CPU scan time.
284 | # When disabled, a virus/phish detected by heuristic scans will be reported
285 | # only at the end of a scan. If an archive contains both a heuristically
286 | # detected virus/phish and real malware, the real malware will be reported.
287 | #
288 | # Keep this disabled if you intend to handle "Heuristics.*" viruses
289 | # differently from "real" malware.
290 | # If a non-heuristically-detected virus (signature-based) is found first,
291 | # the scan is interrupted immediately, regardless of this config option.
292 | #
293 | # Default: no
294 | #HeuristicScanPrecedence yes
295 |
296 |
297 | ##
298 | ## Heuristic Alerts
299 | ##
300 |
301 | # With this option clamav will try to detect broken executables (both PE and
302 | # ELF) and alert on them with the Broken.Executable heuristic signature.
303 | # Default: no
304 | #AlertBrokenExecutables yes
305 |
306 | # With this option clamav will try to detect broken media files (JPEG,
307 | # TIFF, PNG, GIF) and alert on them with a Broken.Media heuristic signature.
308 | # Default: no
309 | #AlertBrokenMedia yes
310 |
311 | # Alert on encrypted archives _and_ documents with a heuristic signature
312 | # (encrypted .zip, .7zip, .rar, .pdf).
313 | # Default: no
314 | #AlertEncrypted yes
315 |
316 | # Alert on encrypted archives with a heuristic signature (encrypted .zip, .7zip,
317 | # .rar).
318 | # Default: no
319 | #AlertEncryptedArchive yes
320 |
321 | # Alert on encrypted documents with a heuristic signature (encrypted .pdf).
322 | # Default: no
323 | #AlertEncryptedDoc yes
324 |
325 | # With this option enabled, OLE2 files containing VBA macros that were not
326 | # detected by signatures will be marked as "Heuristics.OLE2.ContainsMacros".
327 | # Default: no
328 | #AlertOLE2Macros yes
329 |
330 | # Alert on SSL mismatches in URLs, even if the URL isn't in the database.
331 | # This can lead to false positives.
332 | # Default: no
333 | #AlertPhishingSSLMismatch yes
334 |
335 | # Alert on cloaked URLs, even if the URL isn't in the database.
336 | # This can lead to false positives.
337 | # Default: no
338 | #AlertPhishingCloak yes
339 |
340 | # Alert on raw DMG image files containing partition intersections.
341 | # Default: no
342 | #AlertPartitionIntersection yes
343 |
344 |
345 | ##
346 | ## Executable files
347 | ##
348 |
349 | # PE stands for Portable Executable - it's an executable file format used
350 | # in all 32- and 64-bit versions of Windows operating systems. This option
351 | # allows ClamAV to perform a deeper analysis of executable files, and it's also
352 | # required for decompression of popular executable packers such as UPX, FSG,
353 | # and Petite. If you turn off this option, the original files will still be
354 | # scanned, but without additional processing.
355 | # Default: yes
356 | #ScanPE yes
357 |
358 | # Certain PE files contain an Authenticode signature. By default, we check
359 | # the signature chain in the PE file against a database of trusted and
360 | # revoked certificates if the file being scanned is marked as a virus.
361 | # If any certificate in the chain validates against any trusted root, but
362 | # does not match any revoked certificate, the file is marked as trusted.
363 | # If the file does match a revoked certificate, the file is marked as a virus.
364 | # The following setting completely turns off Authenticode verification.
365 | # Default: no
366 | #DisableCertCheck yes
367 |
368 | # The Executable and Linkable Format (ELF) is a standard format for UN*X
369 | # executables. This option allows you to control the scanning of ELF files.
370 | # If you turn off this option, the original files will still be scanned, but
371 | # without additional processing.
372 | # Default: yes
373 | #ScanELF yes
374 |
375 |
376 | ##
377 | ## Documents
378 | ##
379 |
380 | # This option enables scanning of OLE2 files, such as Microsoft Office
381 | # documents and .msi files.
382 | # If you turn off this option, the original files will still be scanned, but
383 | # without additional processing.
384 | # Default: yes
385 | #ScanOLE2 yes
386 |
387 | # This option enables scanning within PDF files.
388 | # If you turn off this option, the original files will still be scanned, but
389 | # without decoding and additional processing.
390 | # Default: yes
391 | #ScanPDF yes
392 |
393 | # This option enables scanning within SWF files.
394 | # If you turn off this option, the original files will still be scanned, but
395 | # without decoding and additional processing.
396 | # Default: yes
397 | #ScanSWF yes
398 |
399 | # This option enables scanning of XML-based document files supported by libclamav.
400 | # If you turn off this option, the original files will still be scanned, but
401 | # without additional processing.
402 | # Default: yes
403 | #ScanXMLDOCS yes
404 |
405 | # This option enables scanning of HWP3 files.
406 | # If you turn off this option, the original files will still be scanned, but
407 | # without additional processing.
408 | # Default: yes
409 | #ScanHWP3 yes
410 |
411 |
412 | ##
413 | ## Mail files
414 | ##
415 |
416 | # Enable the internal e-mail scanner.
417 | # If you turn off this option, the original files will still be scanned, but
418 | # without parsing individual messages/attachments.
419 | # Default: yes
420 | #ScanMail yes
421 |
422 | # Scan RFC1341 messages split over many emails.
423 | # You will need to periodically clean up the $TemporaryDirectory/clamav-partial
424 | # directory.
425 | # WARNING: This option may open your system to a DoS attack.
426 | # Never use it on loaded servers.
427 | # Default: no
428 | #ScanPartialMessages yes
429 |
430 | # With this option enabled, ClamAV will try to detect phishing attempts by using
431 | # HTML.Phishing and Email.Phishing NDB signatures.
432 | # Default: yes
433 | #PhishingSignatures no
434 |
435 | # With this option enabled, ClamAV will try to detect phishing attempts by
436 | # analyzing URLs found in emails using the WDB and PDB signature databases.
437 | # Default: yes
438 | #PhishingScanURLs no
439 |
440 |
441 | ##
442 | ## Data Loss Prevention (DLP)
443 | ##
444 |
445 | # Enable the DLP module.
446 | # Default: no
447 | #StructuredDataDetection yes
448 |
449 | # This option sets the lowest number of credit card numbers found in a file
450 | # needed to generate a detection.
451 | # Default: 3
452 | #StructuredMinCreditCardCount 5
453 |
454 | # With this option enabled, the DLP module will search for valid credit card
455 | # numbers only. Debit and private label cards will not be searched.
456 | # Default: no
457 | #StructuredCCOnly yes
458 |
459 | # This option sets the lowest number of Social Security numbers found
460 | # in a file needed to generate a detection.
461 | # Default: 3
462 | #StructuredMinSSNCount 5
463 |
464 | # With this option enabled, the DLP module will search for valid
465 | # SSNs formatted as xxx-yy-zzzz.
466 | # Default: yes
467 | #StructuredSSNFormatNormal yes
468 |
469 | # With this option enabled, the DLP module will search for valid
470 | # SSNs formatted as xxxyyzzzz.
471 | # Default: no
472 | #StructuredSSNFormatStripped yes
473 |
474 |
475 | ##
476 | ## HTML
477 | ##
478 |
479 | # Perform HTML normalisation and decryption of MS Script Encoder code.
480 | # If you turn off this option, the original files will still be scanned, but
481 | # without additional processing.
482 | # Default: yes
483 | #ScanHTML yes
484 |
485 |
486 | ##
487 | ## Archives
488 | ##
489 |
490 | # ClamAV can scan within archives and compressed files.
491 | # If you turn off this option, the original files will still be scanned, but
492 | # without unpacking and additional processing.
493 | # Default: yes
494 | #ScanArchive yes
495 |
496 |
497 | ##
498 | ## Limits
499 | ##
500 |
501 | # The options below protect your system against denial-of-service attacks
502 | # using archive bombs.
503 |
504 | # This option sets the maximum amount of time a scan may take.
505 | # In this version, this field only affects the scan time of ZIP archives.
506 | # A value of 0 disables the limit.
507 | # Note: disabling this limit or setting it too high may allow scanning of
508 | # certain files to lock up the scanning process/threads, resulting in a
509 | # denial of service.
510 | # Time is in milliseconds.
511 | # Default: 120000
512 | MaxScanTime 300000
513 |
514 | # This option sets the maximum amount of data to be scanned for each input
515 | # file. Archives and other containers are recursively extracted and scanned
516 | # up to this value.
517 | # A value of 0 disables the limit.
518 | # Note: disabling this limit or setting it too high may result in severe damage
519 | # to the system.
520 | # Default: 100M
521 | MaxScanSize 1024M
522 |
523 | # Files larger than this limit won't be scanned. Affects the input file itself
524 | # as well as files contained inside it (when the input file is an archive, a
525 | # document or some other kind of container).
526 | # A value of 0 disables the limit.
527 | # Note: disabling this limit or setting it too high may result in severe damage
528 | # to the system.
529 | # Technical design limitations prevent ClamAV from scanning files greater than
530 | # 2 GB at this time.
531 | # Default: 25M
532 | MaxFileSize 1024M
533 |
534 | # Nested archives are scanned recursively, e.g. if a Zip archive contains a RAR
535 | # file, all files within it will also be scanned. This option specifies how
536 | # deeply the process should be continued.
537 | # Note: setting this limit too high may result in severe damage to the system.
538 | # Default: 17
539 | #MaxRecursion 10
540 |
541 | # Number of files to be scanned within an archive, a document, or any other
542 | # container file.
543 | # A value of 0 disables the limit.
544 | # Note: disabling this limit or setting it too high may result in severe damage
545 | # to the system.
546 | # Default: 10000
547 | #MaxFiles 15000
548 |
549 | # Maximum size of a file to check for embedded PE. Files larger than this value
550 | # will skip the additional analysis step.
551 | # Note: disabling this limit or setting it too high may result in severe damage
552 | # to the system.
553 | # Default: 10M
554 | #MaxEmbeddedPE 10M
555 |
556 | # Maximum size of an HTML file to normalize. HTML files larger than this value
557 | # will not be normalized or scanned.
558 | # Note: disabling this limit or setting it too high may result in severe damage
559 | # to the system.
560 | # Default: 10M
561 | #MaxHTMLNormalize 10M
562 |
563 | # Maximum size of a normalized HTML file to scan. HTML files larger than this
564 | # value after normalization will not be scanned.
565 | # Note: disabling this limit or setting it too high may result in severe damage
566 | # to the system.
567 | # Default: 2M
568 | #MaxHTMLNoTags 2M
569 |
570 | # Maximum size of a script file to normalize. Script content larger than this
571 | # value will not be normalized or scanned.
572 | # Note: disabling this limit or setting it too high may result in severe damage
573 | # to the system.
574 | # Default: 5M
575 | #MaxScriptNormalize 5M
576 |
577 | # Maximum size of a ZIP file to reanalyze for type recognition. ZIP files larger
578 | # than this value will skip the step to potentially reanalyze as PE.
579 | # Note: disabling this limit or setting it too high may result in severe damage
580 | # to the system.
581 | # Default: 1M
582 | #MaxZipTypeRcg 1M
583 |
584 | # This option sets the maximum number of partitions of a raw disk image to be
585 | # scanned.
586 | # Raw disk images with more partitions than this value will have only up to
587 | # this number of partitions scanned. Negative values are not allowed.
588 | # Note: setting this limit too high may result in severe damage or impact
589 | # performance.
590 | # Default: 50
591 | #MaxPartitions 128
592 |
593 | # This option sets the maximum number of icons within a PE file to be scanned.
594 | # PE files with more icons than this value will have only up to this number
595 | # of icons scanned.
596 | # Negative values are not allowed.
597 | # WARNING: setting this limit too high may result in severe damage or impact
598 | # performance.
599 | # Default: 100
600 | #MaxIconsPE 200
601 |
602 | # This option sets the maximum number of recursive calls for HWP3 parsing
603 | # during scanning. Scanning of HWP3 files that exceed this limit will be
604 | # terminated and the user alerted.
605 | # The scanner will be unable to scan any HWP3 attachments if the recursion
606 | # limit is reached.
607 | # Negative values are not allowed.
608 | # WARNING: setting this limit too high may result in severe damage or impact
609 | # performance.
610 | # Default: 16
611 | #MaxRecHWP3 16
612 |
613 | # This option sets the maximum number of calls to the PCRE match function
614 | # during an instance of regex matching.
615 | # Instances using more than this limit will be terminated and the user
616 | # alerted, but the scan will continue.
617 | # For more information on match_limit, see the PCRE documentation.
618 | # Negative values are not allowed.
619 | # WARNING: setting this limit too high may severely impact performance.
620 | # Default: 100000
621 | #PCREMatchLimit 20000
622 |
623 | # This option sets the maximum number of recursive calls to the PCRE match
624 | # function during an instance of regex matching.
625 | # Instances using more than this limit will be terminated and the user
626 | # alerted, but the scan will continue.
627 | # For more information on match_limit_recursion, see the PCRE documentation.
628 | # Negative values are not allowed, and values > PCREMatchLimit are superfluous.
629 | # WARNING: setting this limit too high may severely impact performance.
630 | # Default: 2000
631 | #PCRERecMatchLimit 10000
632 |
633 | # This option sets the maximum file size for which PCRE subsigs will be
634 | # executed. Files exceeding this limit will not have PCRE subsigs executed
635 | # unless a subsig is encompassed to a smaller buffer.
636 | # Negative values are not allowed.
637 | # Setting this value to zero disables the limit.
638 | # WARNING: setting this limit too high or disabling it may severely impact
639 | # performance.
640 | # Default: 25M
641 | #PCREMaxFileSize 100M
642 |
643 | # When AlertExceedsMax is set, files exceeding the MaxFileSize, MaxScanSize, or
644 | # MaxRecursion limit will be flagged with a virus name starting with
645 | # "Heuristics.Limits.Exceeded".
646 | # Default: no
647 | #AlertExceedsMax yes
648 |
649 | ##
650 | ## On-access Scan Settings
651 | ##
652 |
653 | # Don't scan files larger than OnAccessMaxFileSize.
654 | # A value of 0 disables the limit.
655 | # Default: 5M
656 | #OnAccessMaxFileSize 10M
657 |
658 | # Max number of scanning threads to allocate to the OnAccess thread pool at
659 | # startup. These threads are the ones responsible for creating a connection
660 | # with the daemon and kicking off scanning after an event has been processed.
661 | # To prevent clamonacc from consuming all of clamd's resources, keep this lower
662 | # than clamd's max threads.
663 | # Default: 5
664 | #OnAccessMaxThreads 10
665 |
666 | # Max amount of time (in milliseconds) that the OnAccess client should spend
667 | # for every connect, send, and receive attempt when communicating with clamd
668 | # via curl.
669 | # Default: 5000 (5 seconds)
670 | #OnAccessCurlTimeout 10000
671 |
672 | # Toggles dynamic directory determination (DDD). Allows for recursively
673 | # watching include paths.
674 | # Default: no
675 | #OnAccessDisableDDD yes
676 |
677 | # Set the include paths (all files inside them will be scanned). You can have
678 | # multiple OnAccessIncludePath directives, but each directory must be added
679 | # on a separate line.
680 | # Default: disabled
681 | #OnAccessIncludePath /home
682 | #OnAccessIncludePath /students
683 |
684 | # Set the exclude paths. All subdirectories are also excluded.
685 | # Default: disabled
686 | #OnAccessExcludePath /home/user
687 |
688 | # Modifies fanotify blocking behaviour when handling permission events.
689 | # If off, fanotify will only notify if the scanned file is a virus,
690 | # and not perform any blocking.
691 | # Default: no
692 | #OnAccessPrevention yes
693 |
694 | # When using prevention, if this option is turned on, any errors that occur
695 | # during scanning will result in the event attempt being denied. This could
696 | # potentially lead to unwanted system behaviour with certain configurations,
697 | # so the client defaults this to off and prefers allowing access events in
698 | # case of a scan or connection error.
699 | # Default: no
700 | #OnAccessDenyOnError yes
701 |
702 | # Toggles extra scanning and notifications when a file or directory is
703 | # created or moved.
704 | # Requires the DDD system to kick off extra scans.
705 | # Default: no
706 | #OnAccessExtraScanning yes
707 |
708 | # Set the mount point to be scanned. The mount point specified, or the mount
709 | # point containing the specified directory, will be watched. If any directories
710 | # are specified, this option will preempt (disable and ignore all options
711 | # related to) the DDD system. This option will result in verdicts only.
712 | # Note that prevention is explicitly disallowed, to prevent common, fatal
713 | # misconfigurations (e.g. watching "/" with prevention on and no exclusions
714 | # made on vital system directories).
715 | # It can be used multiple times.
716 | # Default: disabled
717 | #OnAccessMountPath /
718 | #OnAccessMountPath /home/user
719 |
720 | # With this option you can exclude the root UID (0). Processes run under
721 | # root will be able to access all files without triggering scans or
722 | # permission-denied events.
723 | # Note that if clamd cannot check the UID of the process that generated an
724 | # on-access scan event (e.g., because OnAccessPrevention was not enabled, and
725 | # the process already exited), clamd will perform a scan. Thus, setting
726 | # OnAccessExcludeRootUID is not *guaranteed* to prevent every access by the
727 | # root user from triggering a scan (unless OnAccessPrevention is enabled).
728 | # Default: no
729 | #OnAccessExcludeRootUID no
730 |
731 | # With this option you can exclude specific UIDs. Processes with these UIDs
732 | # will be able to access all files without triggering scans or
733 | # permission-denied events.
734 | # This option can be used multiple times (one per line).
735 | # Using a value of 0 on any line will disable this option entirely.
736 | # To exclude the root UID (0), please enable the OnAccessExcludeRootUID
737 | # option.
738 | # Also note that if clamd cannot check the UID of the process that generated an
739 | # on-access scan event (e.g., because OnAccessPrevention was not enabled, and
740 | # the process already exited), clamd will perform a scan. Thus, setting
741 | # OnAccessExcludeUID is not *guaranteed* to prevent every access by the
742 | # specified UID from triggering a scan (unless OnAccessPrevention is enabled).
743 | # Default: disabled
744 | #OnAccessExcludeUID -1
745 |
746 | # This option allows exclusions via user names when using the on-access
747 | # scanning client. It can be used multiple times.
748 | # It has the same potential race-condition limitations as the
749 | # OnAccessExcludeUID option.
750 | # Default: disabled
751 | #OnAccessExcludeUname clamav
752 |
753 | # Number of times the OnAccess client will retry a failed scan due to
754 | # connection problems (or other issues).
755 | # Default: 0
756 | #OnAccessRetryAttempts 3
757 |
758 | ##
759 | ## Bytecode
760 | ##
761 |
762 | # With this option enabled, ClamAV will load bytecode from the database.
763 | # It is highly recommended that you keep this option on; otherwise you'll
764 | # miss detections for many new viruses.
765 | # Default: yes
766 | #Bytecode yes
767 |
768 | # Set the bytecode security level.
769 | # Possible values:
770 | #   None        - No security at all; meant for debugging.
771 | #                 DO NOT USE THIS ON PRODUCTION SYSTEMS.
772 | #                 This value is only available if clamav was built
773 | #                 with --enable-debug!
774 | #   TrustSigned - Trust bytecode loaded from signed .c[lv]d files; insert
775 | #                 runtime safety checks for bytecode loaded from other sources.
776 | #   Paranoid    - Don't trust any bytecode; insert runtime checks for all.
777 | # Recommended: TrustSigned, because bytecode in .cvd files already has these
778 | # checks.
779 | # Note that by default only signed bytecode is loaded; currently you can
780 | # only load unsigned bytecode in --enable-debug mode.
781 | #
782 | # Default: TrustSigned
783 | #BytecodeSecurity TrustSigned
784 |
785 | # Allow loading bytecode from outside digitally signed .c[lv]d files.
786 | # **Caution**: You should NEVER run bytecode signatures from untrusted sources.
787 | # Doing so may result in arbitrary code execution.
788 | # Default: no
789 | #BytecodeUnsigned yes
790 |
791 | # Set the bytecode timeout in milliseconds.
792 | #
793 | # Default: 5000
794 | #BytecodeTimeout 1000
--------------------------------------------------------------------------------
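
Editor's note: the config above exposes clamd both on a local Unix socket
(/run/clamav/clamd.sock) and, via TCPSocket/TCPAddr, on TCP port 3310 bound to
all interfaces, which in this stack is presumably only reachable from the
Docker network. A quick way to verify the daemon is up is to speak the clamd
wire protocol directly: 'z'-prefixed commands are NUL-terminated, and PING is
answered with PONG. The Python sketch below is illustrative only and is not
part of this repository; the host name "clamav" is an assumed compose service
name.

# ping_clamd.py -- minimal reachability check for clamd (sketch).
import socket

def ping_clamd(host="clamav", port=3310, timeout=5.0):
    """Return True if clamd answers PING with PONG."""
    with socket.create_connection((host, port), timeout=timeout) as sock:
        # 'z'-prefixed commands and replies are NUL-delimited.
        sock.sendall(b"zPING\0")
        return sock.recv(64).rstrip(b"\0") == b"PONG"

if __name__ == "__main__":
    print("clamd reachable:", ping_clamd())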
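
StreamMaxLength above is raised from the 25M default to 100M; it caps how much
data a client may push over a single INSTREAM session, which is the usual way
files are submitted to a networked clamd. The protocol is simple: after the
zINSTREAM command, each chunk is a 4-byte big-endian length followed by the
data, and a zero-length chunk terminates the stream. A hedged sketch, again
assuming a "clamav" host name:

# instream_scan.py -- stream a file to clamd with INSTREAM (sketch).
import socket
import struct
import sys

def instream_scan(path, host="clamav", port=3310, chunk_size=8192):
    """Return clamd's verdict line, e.g. b'stream: OK'."""
    with socket.create_connection((host, port)) as sock:
        sock.sendall(b"zINSTREAM\0")
        with open(path, "rb") as fh:
            while chunk := fh.read(chunk_size):
                # Each chunk: 4-byte big-endian length, then the bytes.
                sock.sendall(struct.pack("!I", len(chunk)) + chunk)
        sock.sendall(struct.pack("!I", 0))  # zero-length chunk ends the stream
        return sock.recv(4096).rstrip(b"\0")

if __name__ == "__main__":
    print(instream_scan(sys.argv[1]).decode())

Scanning a file containing the EICAR test string should yield a verdict line
ending in FOUND; a stream exceeding StreamMaxLength is answered with an error
and the connection is closed.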
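
The commented-out VirusEvent directive is the hook point for reacting to
detections: clamd substitutes %v and %f in the command string and also exports
$CLAM_VIRUSEVENT_VIRUSNAME and $CLAM_VIRUSEVENT_FILENAME to the child process.
If a deployment enabled it (it is disabled above), the handler could be as
small as the sketch below; the script path and the logging behaviour are
assumptions, not anything this repository ships.

# virus_event.py -- example VirusEvent handler (sketch), e.g. wired up as:
#   VirusEvent /usr/local/bin/virus_event.py
import os
import sys

def main():
    virus = os.environ.get("CLAM_VIRUSEVENT_VIRUSNAME", "unknown")
    path = os.environ.get("CLAM_VIRUSEVENT_FILENAME", "unknown")
    # A real hook might notify the blocker/abuse-scanner services;
    # this sketch just writes to stderr for the container logs.
    print(f"VIRUS ALERT: {virus} in {path}", file=sys.stderr)

if __name__ == "__main__":
    main()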