├── .gitignore ├── files ├── etc │ ├── update-motd.d │ │ └── 99-one-click │ ├── fail2ban │ │ └── jail.local │ └── iptables │ │ └── rules.v4 ├── var │ └── lib │ │ └── cloud │ │ └── scripts │ │ └── per-instance │ │ └── 001_onboot ├── opt │ ├── mastodon │ │ ├── setup.sh │ │ └── README │ └── upgrade.sh └── home │ └── mastodon │ └── live │ └── lib │ └── tasks │ └── digital_ocean.rake ├── scripts ├── 03-finalize.sh ├── 90-cleanup.sh ├── 02-install.sh ├── 01-prepare.sh └── 99-img_check.sh ├── LICENSE ├── marketplace-image.pkr.hcl └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | do_digitalocean.pem 2 | -------------------------------------------------------------------------------- /files/etc/update-motd.d/99-one-click: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Configured as part of the DigitalOcean 1-Click Image build process 4 | 5 | cat /opt/mastodon/README 6 | -------------------------------------------------------------------------------- /files/etc/fail2ban/jail.local: -------------------------------------------------------------------------------- 1 | [DEFAULT] 2 | destemail = your@email.here 3 | sendername = Fail2Ban 4 | 5 | [sshd] 6 | enabled = true 7 | port = 22 8 | 9 | [sshd-ddos] 10 | enabled = true 11 | port = 22 12 | -------------------------------------------------------------------------------- /files/var/lib/cloud/scripts/per-instance/001_onboot: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Scripts in this directory will be executed by cloud-init on the first boot of droplets 4 | # created from your image. Things like generating passwords, configuration requiring IP address 5 | # or other items that will be unique to each instance should be done in scripts here. 
-------------------------------------------------------------------------------- /scripts/03-finalize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | systemctl restart fail2ban 4 | iptables-restore < /etc/iptables/rules.v4 5 | cp /home/mastodon/live/dist/*.service /etc/systemd/system/ 6 | 7 | chmod +x /opt/mastodon/setup.sh 8 | chmod +x /opt/upgrade.sh 9 | 10 | cp -f /etc/skel/.bashrc /root/.bashrc 11 | echo '/opt/mastodon/setup.sh' >> /root/.bashrc 12 | -------------------------------------------------------------------------------- /scripts/90-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf /tmp/* /var/tmp/* 4 | history -c 5 | cat /dev/null > /root/.bash_history 6 | unset HISTFILE 7 | 8 | apt-get -y purge droplet-agent 9 | apt-get -y autoremove 10 | apt-get -y autoclean 11 | 12 | find /var/log -mtime -1 -type f -exec truncate -s 0 {} \; 13 | rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-???????? /var/log/*.log 14 | rm -rf /var/lib/cloud/instances/* 15 | rm -rf /var/lib/cloud/instance 16 | 17 | rm -f /root/.ssh/authorized_keys /etc/ssh/*key* 18 | 19 | GREEN='\033[0;32m' 20 | NC='\033[0m' 21 | printf "\n${GREEN}Writing zeros to the remaining disk space to securely 22 | erase the unused portion of the file system. 23 | Depending on your disk size this may take several minutes. 
24 | The secure erase will complete successfully when you see:${NC} 25 | dd: writing to '/zerofile': No space left on device\n 26 | Beginning secure erase now\n" 27 | dd if=/dev/zero of=/zerofile bs=4096; sync; rm /zerofile; sync 28 | cat /dev/null > /var/log/lastlog; cat /dev/null > /var/log/wtmp; cat /dev/null > /var/log/auth.log 29 | -------------------------------------------------------------------------------- /files/etc/iptables/rules.v4: -------------------------------------------------------------------------------- 1 | *filter 2 | 3 | # Allow all loopback (lo0) traffic and drop all traffic to 127/8 that doesn't use lo0 4 | -A INPUT -i lo -j ACCEPT 5 | -A INPUT ! -i lo -d 127.0.0.0/8 -j REJECT 6 | 7 | # Accept all established inbound connections 8 | -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT 9 | 10 | # Allow all outbound traffic - you can modify this to only allow certain traffic 11 | -A OUTPUT -j ACCEPT 12 | 13 | # Allow HTTP and HTTPS connections from anywhere (the normal ports for websites and SSL). 
14 | -A INPUT -p tcp --dport 80 -j ACCEPT 15 | -A INPUT -p tcp --dport 443 -j ACCEPT 16 | 17 | # Allow SSH connections 18 | # The --dport number should be the same port number you set in sshd_config 19 | -A INPUT -p tcp -m state --state NEW --dport 22 -j ACCEPT 20 | 21 | # Allow ping 22 | -A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT 23 | 24 | # Log iptables denied calls 25 | -A INPUT -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7 26 | 27 | # Reject all other inbound - default deny unless explicitly allowed policy 28 | -A INPUT -j REJECT 29 | -A FORWARD -j REJECT 30 | 31 | COMMIT 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Andrew Starr-Bochicchio 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /files/opt/mastodon/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | echo "Booting Mastodon's first-time setup wizard..." 6 | 7 | su - mastodon -c "cd /home/mastodon/live && RAILS_ENV=production bundle exec rake digitalocean:setup" 8 | export "$(grep '^LOCAL_DOMAIN=' /home/mastodon/live/.env.production | xargs)" 9 | 10 | echo "Launching Let's Encrypt utility to obtain SSL certificate..." 11 | systemctl stop nginx 12 | certbot certonly --standalone --agree-tos -d $LOCAL_DOMAIN 13 | cp /home/mastodon/live/dist/nginx.conf /etc/nginx/conf.d/mastodon.conf 14 | sed -i -- "s/example.com/$LOCAL_DOMAIN/g" /etc/nginx/conf.d/mastodon.conf 15 | sed -i -- "s/ # ssl_certificate/ ssl_certificate/" /etc/nginx/conf.d/mastodon.conf 16 | rm -f /etc/nginx/conf.d/default.conf 17 | nginx -t 18 | 19 | systemctl start nginx 20 | systemctl enable mastodon-web 21 | systemctl start mastodon-web 22 | systemctl enable mastodon-streaming 23 | systemctl start mastodon-streaming 24 | systemctl enable mastodon-sidekiq 25 | systemctl start mastodon-sidekiq 26 | cp -f /etc/skel/.bashrc /root/.bashrc 27 | rm /home/mastodon/live/lib/tasks/digital_ocean.rake 28 | 29 | set +e 30 | 31 | /opt/upgrade.sh 32 | 33 | echo "Setup is complete! Login at https://$LOCAL_DOMAIN" 34 | -------------------------------------------------------------------------------- /files/opt/mastodon/README: -------------------------------------------------------------------------------- 1 | 2 | ,----,__ __---''--___ 3 | ,-' ,-'\ '--' 4 | , / O \ 5 | <_'---__/- '-_/ 6 | '--___-- 7 | / , 8 | _ _/ ,''--__-''-_ 9 | / '-' ; '-_ 10 | '--__--' \ ; 11 | ; /--__ 12 | | ; ; 13 | | | ; 14 | | | ; 15 | | | ; 16 | | | ; 17 | /ooo___|'' 18 | 19 | Welcome to Mastodon! 
20 | 21 | The documentation is available at https://docs.joinmastodon.org 22 | 23 | You can restart Mastodon with: 24 | 25 | * sudo systemctl restart mastodon-web 26 | * sudo systemctl restart mastodon-streaming 27 | * sudo systemctl restart mastodon-sidekiq 28 | 29 | Mastodon is installed under /home/mastodon/live. To browse or change the 30 | files, login to the mastodon system user with: 31 | 32 | * su - mastodon 33 | 34 | You can browse error logs with: 35 | 36 | * sudo journalctl -u mastodon-web 37 | -------------------------------------------------------------------------------- /scripts/02-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cd /home/mastodon 4 | git clone https://github.com/mastodon/mastodon.git live 5 | cd live 6 | git -c advice.detachedHead=false checkout $(git tag -l | grep '^v[0-9.]*$' | sort -V | tail -n 1) 7 | RUBY_VERSION=$(cat /home/mastodon/live/.ruby-version) 8 | 9 | cd /home/mastodon 10 | git clone https://github.com/rbenv/rbenv.git /home/mastodon/.rbenv 11 | git clone https://github.com/rbenv/ruby-build.git /home/mastodon/.rbenv/plugins/ruby-build 12 | 13 | echo 'export PATH="/home/mastodon/.rbenv/bin:$PATH"' >> /home/mastodon/.profile 14 | echo 'export PATH="/home/mastodon/.rbenv/plugins/ruby-build/bin:$PATH"' >> /home/mastodon/.profile 15 | echo 'eval "$(rbenv init -)"' >> /home/mastodon/.profile 16 | export PATH="/home/mastodon/.rbenv/bin:$PATH" 17 | export PATH="/home/mastodon/.rbenv/plugins/ruby-build/bin:$PATH" 18 | eval "$(rbenv init -)" 19 | 20 | RUBY_CONFIGURE_OPTS=--with-jemalloc rbenv install $RUBY_VERSION 21 | rbenv global $RUBY_VERSION 22 | 23 | cd /home/mastodon/live 24 | gem install bundler --no-document 25 | bundle config set --local deployment 'true' 26 | bundle config set --local without 'development test' 27 | bundle install -j$(getconf _NPROCESSORS_ONLN) 28 | yarn install --pure-lockfile 29 | 30 | RAILS_ENV=production DB_HOST=/var/run/postgresql 
SECRET_KEY_BASE=precompile_placeholder OTP_SECRET=precompile_placeholder SAFETY_ASSURED=1 bin/rails db:create db:schema:load assets:precompile 31 | -------------------------------------------------------------------------------- /files/opt/upgrade.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | read -p "Would you like to upgrade Mastodon and its dependencies before going live? [Y/n] " -n 1 -r 4 | echo 5 | if [[ $REPLY =~ ^[Yy]$|^$ ]]; then 6 | echo "Upgrading Debian packages..." 7 | apt-get update 8 | apt-get dist-upgrade -yq; 9 | 10 | yarn set version classic 11 | 12 | echo "Downloading new Mastodon code..." 13 | GIT_TAG=$(su - mastodon -c "cd /home/mastodon/live && git tag -l | grep '^v[0-9.]*$' | sort -V | tail -n 1") 14 | su - mastodon -c "cd /home/mastodon/live && git fetch --tags && git checkout $GIT_TAG" 15 | RUBY_VERSION=$(cat /home/mastodon/live/.ruby-version) 16 | 17 | echo "Stopping Mastodon services..." 18 | systemctl stop mastodon-web 19 | systemctl stop mastodon-streaming 20 | systemctl stop mastodon-sidekiq 21 | 22 | echo "Upgrading Ruby..." 23 | su - mastodon -c "cd /home/mastodon/live && RUBY_CONFIGURE_OPTS=--with-jemalloc rbenv install $RUBY_VERSION && rbenv global $RUBY_VERSION" 24 | 25 | echo "Upgrading Mastodon dependencies..." 26 | su - mastodon -c "cd /home/mastodon/live && bundle install && yarn install --frozen-lockfile" 27 | 28 | echo "Creating new Mastodon assets and upgrading database..." 29 | su - mastodon -c "cd /home/mastodon/live && RAILS_ENV=production bundle exec rails assets:clobber assets:precompile db:migrate" 30 | 31 | echo "Restarting Mastodon services..." 
32 | systemctl start mastodon-web 33 | systemctl start mastodon-streaming 34 | systemctl start mastodon-sidekiq 35 | fi 36 | -------------------------------------------------------------------------------- /marketplace-image.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | digitalocean = { 4 | version = ">= 1.2.0" 5 | source = "github.com/digitalocean/digitalocean" 6 | } 7 | } 8 | } 9 | 10 | variable "digitalocean_token" { 11 | default = "${env("DIGITALOCEAN_TOKEN")}" 12 | type = string 13 | } 14 | 15 | variable "image_name" { 16 | default = "" 17 | type = string 18 | } 19 | 20 | locals { 21 | timestamp = regex_replace(timestamp(), "[- TZ:]", "") 22 | image_name = var.image_name == "" ? "mastodon-digitalocean-${local.timestamp}" : var.image_name 23 | } 24 | 25 | source "digitalocean" "debian" { 26 | api_token = var.digitalocean_token 27 | image = "debian-12-x64" 28 | region = "nyc3" 29 | size = "s-1vcpu-2gb" 30 | snapshot_name = local.image_name 31 | ssh_username = "root" 32 | } 33 | 34 | build { 35 | sources = ["source.digitalocean.debian"] 36 | 37 | provisioner "shell" { 38 | scripts = ["scripts/01-prepare.sh"] 39 | } 40 | 41 | provisioner "shell" { 42 | execute_command = "chmod +x {{ .Path }}; su -c '{{ .Vars }} {{ .Path }}' - mastodon" 43 | scripts = ["scripts/02-install.sh"] 44 | } 45 | 46 | provisioner "file" { 47 | destination = "/etc/" 48 | source = "files/etc/" 49 | } 50 | 51 | provisioner "file" { 52 | destination = "/var/" 53 | source = "files/var/" 54 | } 55 | 56 | provisioner "file" { 57 | destination = "/opt/" 58 | source = "files/opt/" 59 | } 60 | 61 | provisioner "file" { 62 | destination = "/home/" 63 | source = "files/home/" 64 | } 65 | 66 | provisioner "shell" { 67 | scripts = ["scripts/03-finalize.sh"] 68 | } 69 | 70 | provisioner "shell" { 71 | scripts = [ 72 | "scripts/90-cleanup.sh", 73 | "scripts/99-img_check.sh" 74 | ] 75 | } 76 | } 77 | 
-------------------------------------------------------------------------------- /scripts/01-prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export DEBIAN_FRONTEND=noninteractive 4 | NODE_MAJOR_VERSION=20 5 | 6 | echo "Waiting for cloud initialization to complete..." 7 | cloud-init status --wait 8 | 9 | apt-get update -qq 10 | apt-get dist-upgrade -yqq; 11 | 12 | apt-get install -qqy --no-install-recommends \ 13 | autoconf \ 14 | bison \ 15 | build-essential \ 16 | fail2ban \ 17 | git \ 18 | imagemagick \ 19 | iptables-persistent \ 20 | libffi-dev \ 21 | libgdbm-dev \ 22 | libgmp-dev \ 23 | libicu-dev \ 24 | libidn-dev \ 25 | libjemalloc-dev \ 26 | libncurses5-dev \ 27 | libpq-dev \ 28 | libprotobuf-dev \ 29 | libreadline-dev \ 30 | libssl-dev \ 31 | libxml2-dev \ 32 | libxslt1-dev \ 33 | libyaml-dev \ 34 | pkg-config \ 35 | protobuf-compiler \ 36 | shared-mime-info \ 37 | zlib1g-dev 38 | 39 | curl -sS -o - https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor | tee /usr/share/keyrings/nodesource.gpg 40 | curl -sS -o - https://dl.yarnpkg.com/debian/pubkey.gpg | gpg --dearmor | tee /usr/share/keyrings/yarnkey.gpg 41 | curl -sS -o - https://www.postgresql.org/media/keys/ACCC4CF8.asc | gpg --dearmor | tee /usr/share/keyrings/postgresql.gpg 42 | curl -sS -o - https://packages.redis.io/gpg | gpg --dearmor | tee /usr/share/keyrings/redis.gpg 43 | curl -sS -o - https://nginx.org/keys/nginx_signing.key | gpg --dearmor | tee /usr/share/keyrings/nginx.gpg 44 | echo "deb [signed-by=/usr/share/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_MAJOR_VERSION}.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list 45 | echo "deb [signed-by=/usr/share/keyrings/yarnkey.gpg] https://dl.yarnpkg.com/debian stable main" | tee /etc/apt/sources.list.d/yarn.list 46 | echo "deb [signed-by=/usr/share/keyrings/postgresql.gpg] http://apt.postgresql.org/pub/repos/apt $(lsb_release 
-cs)-pgdg main" | tee /etc/apt/sources.list.d/postgresql.list 47 | echo "deb [signed-by=/usr/share/keyrings/redis.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | tee /etc/apt/sources.list.d/redis.list 48 | echo "deb [signed-by=/usr/share/keyrings/nginx.gpg] http://nginx.org/packages/debian/ $(lsb_release -cs) nginx" | tee -a /etc/apt/sources.list.d/nginx.list 49 | 50 | apt-get update -qq 51 | apt-get install -qqy --no-install-recommends \ 52 | certbot \ 53 | nginx \ 54 | nodejs \ 55 | postgresql \ 56 | postgresql-contrib \ 57 | python3-certbot-nginx \ 58 | redis-server \ 59 | redis-tools \ 60 | yarn 61 | 62 | systemctl enable redis-server.service 63 | 64 | yarn set version classic 65 | adduser --disabled-password --gecos '' --shell /bin/bash mastodon 66 | sudo -u postgres psql -c "CREATE USER mastodon CREATEDB;" 67 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Build Automation with Packer 2 | 3 | [Packer](https://www.packer.io/intro/index.html) is a tool for creating images from a single source configuration. Using this Packer template reduces the entire process of creating, configuring, validating, and snapshotting a build Droplet to a single command: 4 | 5 | ``` 6 | packer build marketplace-image.pkr.hcl 7 | ``` 8 | 9 | This Packer template uses the same LAMP-based example as the [Fabric sample project](../fabric). Like the Fabric sample project, you can modify this template to use as a starting point for your image. 10 | 11 | ## Usage 12 | 13 | To run the LAMP example that this template uses by default, you'll need to [install Packer](https://www.packer.io/intro/getting-started/install.html) and [create a DigitalOcean personal access token](https://www.digitalocean.com/docs/api/create-personal-access-token/) and set it to the `DIGITALOCEAN_TOKEN` environment variable. 
Running `packer build marketplace-image.pkr.hcl` without any other modifications will create a build Droplet configured with Mastodon, clean and verify it, then power it down and snapshot it. 14 | 15 | > ⚠️ The image validation script in `scripts/99-img_check.sh` is copied from the [top-level `scripts` directory](../scripts) in this repository. The top-level location is the script's canonical source, so make sure you're using the latest version from there. 16 | 17 | To start adapting this template for your own image, there are some variables that can be set: 18 | 19 | * `image_name` defines the name of the resulting snapshot, which by default is `mastodon-digitalocean-` with a UNIX timestamp appended. 20 | 21 | You can also modify these variables at runtime by using [the `-var` flag](https://www.packer.io/docs/templates/user-variables.html#setting-variables). 22 | 23 | ## Configuration Details 24 | 25 | By using [Packer's DigitalOcean Builder](https://www.packer.io/docs/builders/digitalocean.html) to integrate with the [DigitalOcean API](https://developers.digitalocean.com/), this template fully automates Marketplace image creation. 26 | 27 | This template uses Packer's [file provisioner](https://www.packer.io/docs/provisioners/file.html) to upload complete directories to the Droplet. The contents of `files/var/` will be uploaded to `/var/`. Likewise, the contents of `files/etc/` will be uploaded to `/etc/`. One important thing to note about the file provisioner, from Packer's docs: 28 | 29 | > The destination directory must already exist. If you need to create it, use a shell provisioner just prior to the file provisioner in order to create the directory. If the destination directory does not exist, the file provisioner may succeed, but it will have undefined results. 
30 | 31 | This template also uses Packer's [shell provisioner](https://www.packer.io/docs/provisioners/shell.html) to run scripts from the `/scripts` directory and install APT packages using an inline task. 32 | 33 | Learn more about using Packer in [the official Packer documentation](https://www.packer.io/docs/index.html). 34 | -------------------------------------------------------------------------------- /files/home/mastodon/live/lib/tasks/digital_ocean.rake: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | require 'tty-prompt' 4 | require 'tty-reader' 5 | 6 | namespace :digitalocean do 7 | desc 'Configure the instance for production use' 8 | task :setup do 9 | prompt = TTY::Prompt.new 10 | env = {} 11 | 12 | begin 13 | prompt.ok('Welcome to the Mastodon first-time setup!') 14 | 15 | env['LOCAL_DOMAIN'] = prompt.ask('Domain name:') do |q| 16 | q.required true 17 | q.modify :strip 18 | q.validate(/\A[a-z0-9\.\-]+\z/i) 19 | q.messages[:valid?] = 'Invalid domain. 
If you intend to use unicode characters, enter punycode here' 20 | end 21 | 22 | %w(SECRET_KEY_BASE OTP_SECRET).each do |key| 23 | env[key] = SecureRandom.hex(64) 24 | end 25 | 26 | vapid_key = Webpush.generate_key 27 | 28 | env['VAPID_PRIVATE_KEY'] = vapid_key.private_key 29 | env['VAPID_PUBLIC_KEY'] = vapid_key.public_key 30 | 31 | using_docker = false 32 | db_connection_works = true 33 | 34 | env['DB_HOST'] = '/var/run/postgresql' 35 | env['DB_PORT'] = 5432 36 | env['DB_NAME'] = 'mastodon_production' 37 | env['DB_USER'] = 'mastodon' 38 | 39 | env['REDIS_HOST'] = 'localhost' 40 | env['REDIS_PORT'] = 6379 41 | 42 | if prompt.yes?('Do you want to store user-uploaded files on the cloud?', default: false) 43 | case prompt.select('Provider', ['DigitalOcean Spaces', 'Amazon S3', 'Wasabi', 'Minio', 'Google Cloud Storage']) 44 | when 'DigitalOcean Spaces' 45 | env['S3_ENABLED'] = 'true' 46 | env['S3_PROTOCOL'] = 'https' 47 | 48 | env['S3_BUCKET'] = prompt.ask('Space name:') do |q| 49 | q.required true 50 | q.default "files.#{env['LOCAL_DOMAIN']}" 51 | q.modify :strip 52 | end 53 | 54 | env['S3_REGION'] = prompt.ask('Space region:') do |q| 55 | q.required true 56 | q.default 'nyc3' 57 | q.modify :strip 58 | end 59 | 60 | env['S3_HOSTNAME'] = prompt.ask('Space endpoint:') do |q| 61 | q.required true 62 | q.default 'nyc3.digitaloceanspaces.com' 63 | q.modify :strip 64 | end 65 | 66 | env['S3_ENDPOINT'] = "https://#{env['S3_HOSTNAME']}" 67 | 68 | env['AWS_ACCESS_KEY_ID'] = prompt.ask('Space access key:') do |q| 69 | q.required true 70 | q.modify :strip 71 | end 72 | 73 | env['AWS_SECRET_ACCESS_KEY'] = prompt.ask('Space secret key:') do |q| 74 | q.required true 75 | q.modify :strip 76 | end 77 | when 'Amazon S3' 78 | env['S3_ENABLED'] = 'true' 79 | env['S3_PROTOCOL'] = 'https' 80 | 81 | env['S3_BUCKET'] = prompt.ask('S3 bucket name:') do |q| 82 | q.required true 83 | q.default "files.#{env['LOCAL_DOMAIN']}" 84 | q.modify :strip 85 | end 86 | 87 | env['S3_REGION'] = 
prompt.ask('S3 region:') do |q| 88 | q.required true 89 | q.default 'us-east-1' 90 | q.modify :strip 91 | end 92 | 93 | env['S3_HOSTNAME'] = prompt.ask('S3 hostname:') do |q| 94 | q.required true 95 | q.default 's3-us-east-1.amazonaws.com' 96 | q.modify :strip 97 | end 98 | 99 | env['AWS_ACCESS_KEY_ID'] = prompt.ask('S3 access key:') do |q| 100 | q.required true 101 | q.modify :strip 102 | end 103 | 104 | env['AWS_SECRET_ACCESS_KEY'] = prompt.ask('S3 secret key:') do |q| 105 | q.required true 106 | q.modify :strip 107 | end 108 | when 'Wasabi' 109 | env['S3_ENABLED'] = 'true' 110 | env['S3_PROTOCOL'] = 'https' 111 | env['S3_REGION'] = 'us-east-1' 112 | env['S3_HOSTNAME'] = 's3.wasabisys.com' 113 | env['S3_ENDPOINT'] = 'https://s3.wasabisys.com/' 114 | 115 | env['S3_BUCKET'] = prompt.ask('Wasabi bucket name:') do |q| 116 | q.required true 117 | q.default "files.#{env['LOCAL_DOMAIN']}" 118 | q.modify :strip 119 | end 120 | 121 | env['AWS_ACCESS_KEY_ID'] = prompt.ask('Wasabi access key:') do |q| 122 | q.required true 123 | q.modify :strip 124 | end 125 | 126 | env['AWS_SECRET_ACCESS_KEY'] = prompt.ask('Wasabi secret key:') do |q| 127 | q.required true 128 | q.modify :strip 129 | end 130 | when 'Minio' 131 | env['S3_ENABLED'] = 'true' 132 | env['S3_PROTOCOL'] = 'https' 133 | env['S3_REGION'] = 'us-east-1' 134 | 135 | env['S3_ENDPOINT'] = prompt.ask('Minio endpoint URL:') do |q| 136 | q.required true 137 | q.modify :strip 138 | end 139 | 140 | env['S3_PROTOCOL'] = env['S3_ENDPOINT'].start_with?('https') ? 
'https' : 'http' 141 | env['S3_HOSTNAME'] = env['S3_ENDPOINT'].gsub(/\Ahttps?:\/\//, '') 142 | 143 | env['S3_BUCKET'] = prompt.ask('Minio bucket name:') do |q| 144 | q.required true 145 | q.default "files.#{env['LOCAL_DOMAIN']}" 146 | q.modify :strip 147 | end 148 | 149 | env['AWS_ACCESS_KEY_ID'] = prompt.ask('Minio access key:') do |q| 150 | q.required true 151 | q.modify :strip 152 | end 153 | 154 | env['AWS_SECRET_ACCESS_KEY'] = prompt.ask('Minio secret key:') do |q| 155 | q.required true 156 | q.modify :strip 157 | end 158 | when 'Google Cloud Storage' 159 | env['S3_ENABLED'] = 'true' 160 | env['S3_PROTOCOL'] = 'https' 161 | env['S3_HOSTNAME'] = 'storage.googleapis.com' 162 | env['S3_ENDPOINT'] = 'https://storage.googleapis.com' 163 | env['S3_MULTIPART_THRESHOLD'] = 50.megabytes 164 | 165 | env['S3_BUCKET'] = prompt.ask('GCS bucket name:') do |q| 166 | q.required true 167 | q.default "files.#{env['LOCAL_DOMAIN']}" 168 | q.modify :strip 169 | end 170 | 171 | env['S3_REGION'] = prompt.ask('GCS region:') do |q| 172 | q.required true 173 | q.default 'us-west1' 174 | q.modify :strip 175 | end 176 | 177 | env['AWS_ACCESS_KEY_ID'] = prompt.ask('GCS access key:') do |q| 178 | q.required true 179 | q.modify :strip 180 | end 181 | 182 | env['AWS_SECRET_ACCESS_KEY'] = prompt.ask('GCS secret key:') do |q| 183 | q.required true 184 | q.modify :strip 185 | end 186 | end 187 | 188 | if prompt.yes?('Do you want to access the uploaded files from your own domain?') 189 | env['S3_ALIAS_HOST'] = prompt.ask('Domain for uploaded files:') do |q| 190 | q.required true 191 | q.default "files.#{env['LOCAL_DOMAIN']}" 192 | q.modify :strip 193 | end 194 | end 195 | end 196 | 197 | loop do 198 | env['SMTP_SERVER'] = prompt.ask('SMTP server:') do |q| 199 | q.required true 200 | q.default 'smtp.mailgun.org' 201 | q.modify :strip 202 | end 203 | 204 | env['SMTP_PORT'] = prompt.ask('SMTP port:') do |q| 205 | q.required true 206 | q.default 587 207 | q.convert :int 208 | end 209 | 210 | 
env['SMTP_LOGIN'] = prompt.ask('SMTP username:') do |q| 211 | q.modify :strip 212 | end 213 | 214 | env['SMTP_PASSWORD'] = prompt.ask('SMTP password:') do |q| 215 | q.echo false 216 | end 217 | 218 | env['SMTP_AUTH_METHOD'] = prompt.ask('SMTP authentication:') do |q| 219 | q.required 220 | q.default 'plain' 221 | q.modify :strip 222 | end 223 | 224 | env['SMTP_OPENSSL_VERIFY_MODE'] = prompt.select('SMTP OpenSSL verify mode:', %w(none peer client_once fail_if_no_peer_cert)) 225 | 226 | env['SMTP_FROM_ADDRESS'] = prompt.ask('E-mail address to send e-mails "from":') do |q| 227 | q.required true 228 | q.default "Mastodon " 229 | q.modify :strip 230 | end 231 | 232 | break unless prompt.yes?('Send a test e-mail with this configuration right now?') 233 | 234 | send_to = prompt.ask('Send test e-mail to:', required: true) 235 | 236 | begin 237 | ActionMailer::Base.smtp_settings = { 238 | port: env['SMTP_PORT'], 239 | address: env['SMTP_SERVER'], 240 | user_name: env['SMTP_LOGIN'].presence, 241 | password: env['SMTP_PASSWORD'].presence, 242 | domain: env['LOCAL_DOMAIN'], 243 | authentication: env['SMTP_AUTH_METHOD'] == 'none' ? nil : env['SMTP_AUTH_METHOD'] || :plain, 244 | openssl_verify_mode: env['SMTP_OPENSSL_VERIFY_MODE'], 245 | enable_starttls_auto: true, 246 | } 247 | 248 | ActionMailer::Base.default_options = { 249 | from: env['SMTP_FROM_ADDRESS'], 250 | } 251 | 252 | mail = ActionMailer::Base.new.mail to: send_to, subject: 'Test', body: 'Mastodon SMTP configuration works!' 253 | mail.deliver 254 | break 255 | rescue StandardError => e 256 | prompt.error 'E-mail could not be sent with this configuration, try again.' 257 | prompt.error e.message 258 | break unless prompt.yes?('Try again?') 259 | end 260 | end 261 | 262 | prompt.ok "Great! Saving this configuration..." 
263 | 264 | File.write(Rails.root.join('.env.production'), "# Generated with mastodon:setup on #{Time.now.utc}\n\n" + env.each_pair.map { |key, value| "#{key}=#{value}" }.join("\n") + "\n") 265 | 266 | prompt.say "Booting up Mastodon..." 267 | 268 | env.each_pair do |key, value| 269 | ENV[key] = value.to_s 270 | end 271 | 272 | require_relative '../../config/environment' 273 | disable_log_stdout! 274 | 275 | if !system(env.transform_values(&:to_s).merge({ 'RAILS_ENV' => 'production' }), 'rails db:seed') 276 | prompt.error 'Could not seed the database, aborting' 277 | exit(1) 278 | end 279 | 280 | prompt.ok "It is time to create an admin account that you'll be able to use from the browser!" 281 | 282 | username = prompt.ask('Username:') do |q| 283 | q.required true 284 | q.default 'admin' 285 | q.validate(/\A[a-z0-9_]+\z/i) 286 | q.modify :strip 287 | end 288 | 289 | email = prompt.ask('E-mail:') do |q| 290 | q.required true 291 | q.modify :strip 292 | end 293 | 294 | password = SecureRandom.hex(16) 295 | 296 | if (existing_user = User.find_by(email: email)) 297 | existing_user.account&.destroy 298 | existing_user.destroy 299 | end 300 | 301 | if (existing_account = Account.find_local(username)) 302 | existing_account.user&.destroy 303 | existing_account.destroy 304 | end 305 | 306 | user = User.new(admin: true, email: email, password: password, confirmed_at: Time.now.utc, account_attributes: { username: username }) 307 | user.save(validate: false) 308 | 309 | prompt.ok "You can login with the password: #{password}" 310 | prompt.ok "The web interface should be momentarily accessible via https://#{env['LOCAL_DOMAIN']}/" 311 | rescue TTY::Reader::InputInterrupt 312 | prompt.ok 'Aborting. Bye!' 313 | exit(1) 314 | end 315 | end 316 | end 317 | 318 | def disable_log_stdout! 
dev_null = Logger.new('/dev/null')

Rails.logger = dev_null
ActiveRecord::Base.logger = dev_null
HttpLog.configuration.logger = dev_null
Paperclip.options[:log] = false
end
--------------------------------------------------------------------------------
/scripts/99-img_check.sh:
--------------------------------------------------------------------------------
#!/bin/bash

# DigitalOcean Marketplace Image Validation Tool
# © 2021-2022 DigitalOcean LLC.
# This code is licensed under Apache 2.0 license (see LICENSE.md for details)
#
# Scans the local system for artifacts that must be removed before an image
# is submitted to the DigitalOcean Marketplace: stale logs, populated SSH
# key files, passworded accounts, uncleared bash history, pending security
# updates, the droplet agent, and a missing firewall or cloud-init.
# Exit status: 0 = only passes/warnings, 1 = one or more failures.

VERSION="v. 1.8.1"
RUNDATE=$( date )

# Script should be run with SUDO
if [ "$EUID" -ne 0 ]
  then echo "[Error] - This script must be run with sudo or as the root user."
  exit 1
fi

# Global result counters. STATUS: 0 = clean, 1 = warnings, 2 = failures.
STATUS=0
PASS=0
WARN=0
FAIL=0

# $1 == command to check for
# returns: 0 == true, 1 == false
cmdExists() {
    if command -v "$1" > /dev/null 2>&1; then
        return 0
    else
        return 1
    fi
}

# Detect the distribution name (OS) and release (VER), preferring the most
# modern source of truth available on this system.
function getDistro {
    if [ -f /etc/os-release ]; then
        # freedesktop.org and systemd
        # shellcheck disable=SC1091
        . /etc/os-release
        OS=$NAME
        VER=$VERSION_ID
    elif type lsb_release >/dev/null 2>&1; then
        # linuxbase.org
        OS=$(lsb_release -si)
        VER=$(lsb_release -sr)
    elif [ -f /etc/lsb-release ]; then
        # For some versions of Debian/Ubuntu without lsb_release command
        # shellcheck disable=SC1091
        . /etc/lsb-release
        OS=$DISTRIB_ID
        VER=$DISTRIB_RELEASE
    elif [ -f /etc/debian_version ]; then
        # Older Debian/Ubuntu/etc.
        OS=Debian
        VER=$(cat /etc/debian_version)
    elif [ -f /etc/SuSe-release ]; then
        # Older SuSE/etc.
        :
    elif [ -f /etc/redhat-release ]; then
        # Older Red Hat, CentOS, etc.
        VER=$(cut -d" " -f3 < /etc/redhat-release | cut -d "." -f1)
        d=$(cut -d" " -f1 < /etc/redhat-release | cut -d "." -f1)
        if [[ $d == "CentOS" ]]; then
            OS="CentOS Linux"
        fi
    else
        # Fall back to uname, e.g. "Linux ", also works for BSD, etc.
        OS=$(uname -s)
        VER=$(uname -r)
    fi
}

# Cache /etc/shadow once so the per-user password checks can iterate over it
# without re-reading the file.
function loadPasswords {
    SHADOW=$(cat /etc/shadow)
}

# FAIL if the DigitalOcean monitoring-agent install directory is present and
# print per-distro removal instructions.
function checkAgent {
    # Check for the presence of the DO directory in the filesystem
    if [ -d /opt/digitalocean ];then
        echo -en "\e[41m[FAIL]\e[0m DigitalOcean directory detected.\n"
        ((FAIL++))
        STATUS=2
        if [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]] || [[ $OS == "AlmaLinux" ]]; then
            echo "To uninstall the agent: 'sudo yum remove droplet-agent'"
            echo "To remove the DO directory: 'find /opt/digitalocean/ -type d -empty -delete'"
        elif [[ $OS == "Ubuntu" ]] || [[ $OS == "Debian" ]]; then
            echo "To uninstall the agent and remove the DO directory: 'sudo apt-get purge droplet-agent'"
        fi
    else
        echo -en "\e[32m[PASS]\e[0m DigitalOcean Monitoring agent was not found\n"
        ((PASS++))
    fi
}

# WARN on rotated log archives and any *.log in /var/log larger than ~50
# bytes; dumps each offending file's contents for review.
function checkLogs {
    cp_ignore="/var/log/cpanel-install.log"
    echo -en "\nChecking for log files in /var/log\n\n"
    # Check if there are log archives or log files that have not been recently cleared.
    for f in /var/log/*-????????; do
        [[ -e $f ]] || break
        if [ "${f}" != "${cp_ignore}" ]; then
            echo -en "\e[93m[WARN]\e[0m Log archive ${f} found; Contents:\n"
            cat "${f}"
            ((WARN++))
            if [[ $STATUS != 2 ]]; then
                STATUS=1
            fi
        fi
    done
    for f in /var/log/*.[0-9];do
        [[ -e $f ]] || break
        echo -en "\e[93m[WARN]\e[0m Log archive ${f} found; Contents:\n"
        cat "${f}"
        ((WARN++))
        if [[ $STATUS != 2 ]]; then
            STATUS=1
        fi
    done
    for f in /var/log/*.log; do
        [[ -e $f ]] || break
        # lfd.log accumulates benign "messages has been reset" noise; only
        # warn when it contains anything beyond those lines.
        if [[ "${f}" = '/var/log/lfd.log' && "$(grep -E -v '/var/log/messages has been reset| Watching /var/log/messages' "${f}" | wc -c)" -gt 50 ]]; then
            if [ "${f}" != "${cp_ignore}" ]; then
                echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found; Contents:\n"
                cat "${f}"
                ((WARN++))
                if [[ $STATUS != 2 ]]; then
                    STATUS=1
                fi
            fi
        elif [[ "${f}" != '/var/log/lfd.log' && "$(wc -c < "${f}")" -gt 50 ]]; then
            if [ "${f}" != "${cp_ignore}" ]; then
                echo -en "\e[93m[WARN]\e[0m un-cleared log file, ${f} found; Contents:\n"
                cat "${f}"
                ((WARN++))
                if [[ $STATUS != 2 ]]; then
                    STATUS=1
                fi
            fi
        fi
    done
}

# Stub — /tmp inspection was never implemented and this function is not
# called by the main flow below.
function checkTMP {
    # Check the /tmp directory to ensure it is empty. Warn on any files found.
    return 1
}

# Audit the root account: shadow password field, files under /root/.ssh,
# and /root/.bash_history.
function checkRoot {
    user="root"
    uhome="/root"
    for usr in $SHADOW
    do
        IFS=':' read -r -a u <<< "$usr"
        if [[ "${u[0]}" == "${user}" ]]; then
            # "!", "!!" and "*" all mean the account has no usable password.
            if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
                echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
                ((PASS++))
            else
                echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account.\n"
                ((FAIL++))
                STATUS=2
            fi
        fi
    done
    if [ -d ${uhome}/ ]; then
        if [ -d ${uhome}/.ssh/ ]; then
            if ls ${uhome}/.ssh/*> /dev/null 2>&1; then
                for key in "${uhome}"/.ssh/*
                do
                    if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
                        if [ "$(wc -c < "${key}")" -gt 50 ]; then
                            echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
                            akey=$(cat "${key}")
                            echo "File Contents:"
                            echo "$akey"
                            echo "--------------"
                            ((FAIL++))
                            STATUS=2
                        fi
                    elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
                        if [ "$(wc -c < "${key}")" -gt 0 ]; then
                            echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
                            akey=$(cat "${key}")
                            echo "File Contents:"
                            echo "$akey"
                            echo "--------------"
                            ((FAIL++))
                            STATUS=2
                        else
                            echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
                            ((WARN++))
                            if [[ $STATUS != 2 ]]; then
                                STATUS=1
                            fi
                        fi
                    elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
                        echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory at \e[93m${key}\e[0m\n"
                        ((WARN++))
                        if [[ $STATUS != 2 ]]; then
                            STATUS=1
                        fi
                    else
                        if [ "$(wc -c < "${key}")" -gt 50 ]; then
                            echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a populated known_hosts file in \e[93m${key}\e[0m\n"
                            ((WARN++))
                            if [[ $STATUS != 2 ]]; then
                                STATUS=1
                            fi
                        fi
                    fi
                done
            else
                echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
            fi
        else
            echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
        fi
        if [ -f /root/.bash_history ];then
            BH_S=$(wc -c < /root/.bash_history)
            # Anything under 200 bytes is treated as effectively cleared.
            if [[ $BH_S -lt 200 ]]; then
                echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
                ((PASS++))
            else
                echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
                ((FAIL++))
                STATUS=2
            fi
            return 1;
        else
            echo -en "\e[32m[PASS]\e[0m The Root User's Bash History is not present\n"
            ((PASS++))
        fi
    else
        echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
    fi
    echo -en "\n\n"
    return 1
}

# Audit every user-created account (UID >= 1000, minus known system users):
# password field, ~/.ssh contents, and ~/.bash_history.
function checkUsers {
    # Check each user-created account
    awk -F: '$3 >= 1000 && $1 != "nobody" {print $1}' < /etc/passwd | while IFS= read -r user;
    do
        # Skip some other non-user system accounts
        if [[ $user == "centos" ]]; then
            :
        elif [[ $user == "nfsnobody" ]]; then
            :
        else
            echo -en "\nChecking user: ${user}...\n"
            for usr in $SHADOW
            do
                IFS=':' read -r -a u <<< "$usr"
                if [[ "${u[0]}" == "${user}" ]]; then
                    if [[ ${u[1]} == "!" ]] || [[ ${u[1]} == "!!" ]] || [[ ${u[1]} == "*" ]]; then
                        echo -en "\e[32m[PASS]\e[0m User ${user} has no password set.\n"
                        # shellcheck disable=SC2030
                        ((PASS++))
                    else
                        echo -en "\e[41m[FAIL]\e[0m User ${user} has a password set on their account. Only system users are allowed on the image.\n"
                        # shellcheck disable=SC2030
                        ((FAIL++))
                        STATUS=2
                    fi
                fi
            done
            #echo "User Found: ${user}"
            uhome="/home/${user}"
            if [ -d "${uhome}/" ]; then
                if [ -d "${uhome}/.ssh/" ]; then
                    # BUGFIX: the glob must be outside the quotes; the original
                    # quoted 'ls "${uhome}/.ssh/*"' looked for a literal '*'
                    # file and silently skipped key auditing for every user.
                    if ls "${uhome}"/.ssh/* > /dev/null 2>&1; then
                        for key in "${uhome}"/.ssh/*
                        do
                            if [ "${key}" == "${uhome}/.ssh/authorized_keys" ]; then
                                if [ "$(wc -c < "${key}")" -gt 50 ]; then
                                    echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a populated authorized_keys file in \e[93m${key}\e[0m\n"
                                    akey=$(cat "${key}")
                                    echo "File Contents:"
                                    echo "$akey"
                                    echo "--------------"
                                    ((FAIL++))
                                    STATUS=2
                                fi
                            elif [ "${key}" == "${uhome}/.ssh/id_rsa" ]; then
                                if [ "$(wc -c < "${key}")" -gt 0 ]; then
                                    echo -en "\e[41m[FAIL]\e[0m User \e[1m${user}\e[0m has a private key file in \e[93m${key}\e[0m\n"
                                    akey=$(cat "${key}")
                                    echo "File Contents:"
                                    echo "$akey"
                                    echo "--------------"
                                    ((FAIL++))
                                    STATUS=2
                                else
                                    echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has empty private key file in \e[93m${key}\e[0m\n"
                                    # shellcheck disable=SC2030
                                    ((WARN++))
                                    if [[ $STATUS != 2 ]]; then
                                        STATUS=1
                                    fi
                                fi
                            elif [ "${key}" != "${uhome}/.ssh/known_hosts" ]; then
                                echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a file in their .ssh directory named \e[93m${key}\e[0m\n"
                                ((WARN++))
                                if [[ $STATUS != 2 ]]; then
                                    STATUS=1
                                fi
                            else
                                if [ "$(wc -c < "${key}")" -gt 50 ]; then
                                    echo -en "\e[93m[WARN]\e[0m User \e[1m${user}\e[0m has a known_hosts file in \e[93m${key}\e[0m\n"
                                    ((WARN++))
                                    if [[ $STATUS != 2 ]]; then
                                        STATUS=1
                                    fi
                                fi
                            fi
                        done
                    else
                        echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m has no SSH keys present\n"
                    fi
                else
                    echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have an .ssh directory\n"
                fi
            else
                echo -en "\e[32m[ OK ]\e[0m User \e[1m${user}\e[0m does not have a directory in /home\n"
            fi

            # Check for an uncleared .bash_history for this user
            if [ -f "${uhome}/.bash_history" ]; then
                BH_S=$(wc -c < "${uhome}/.bash_history")
                if [[ $BH_S -lt 200 ]]; then
                    echo -en "\e[32m[PASS]\e[0m ${user}'s Bash History appears to have been cleared\n"
                    ((PASS++))
                else
                    echo -en "\e[41m[FAIL]\e[0m ${user}'s Bash History should be cleared to prevent sensitive information from leaking\n"
                    ((FAIL++))
                    STATUS=2
                fi
                echo -en "\n\n"
            fi
        fi
    done
}

# Determine whether a firewall service is active, setting FW_VER to the
# message printed by the caller.
# BUGFIX: the original wrapped 'systemctl' in '[[ $( ... >/dev/null ) ]]'
# string tests, which are constant-valued (redirected output is always empty;
# 'echo 1 || echo 0' is always non-empty).  The exit status of
# 'systemctl is-active --quiet' is now tested directly.
function checkFirewall {
    if [[ $OS == "Ubuntu" ]]; then
        fw="ufw"
        ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
        if [[ $ufwa == "active" ]]; then
            FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
            # shellcheck disable=SC2031
            ((PASS++))
        else
            FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
            # shellcheck disable=SC2031
            ((WARN++))
        fi
    elif [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]] || [[ $OS == "AlmaLinux" ]]; then
        if [ -f /usr/lib/systemd/system/csf.service ]; then
            fw="csf"
            if systemctl is-active --quiet "$fw"; then
                FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
                ((PASS++))
            elif cmdExists "firewall-cmd"; then
                if systemctl is-active --quiet firewalld; then
                    FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
                    ((PASS++))
                else
                    FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
                    ((WARN++))
                fi
            else
                FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
                ((WARN++))
            fi
        else
            fw="firewalld"
            if systemctl is-active --quiet firewalld; then
                FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
                ((PASS++))
            else
                FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
                ((WARN++))
            fi
        fi
    elif [[ "$OS" =~ Debian.* ]]; then
        # user could be using a number of different services for managing their firewall
        # we will check some of the most common
        if cmdExists 'ufw'; then
            fw="ufw"
            ufwa=$(ufw status |head -1| sed -e "s/^Status:\ //")
            if [[ $ufwa == "active" ]]; then
                FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
                ((PASS++))
            else
                FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
                ((WARN++))
            fi
        elif cmdExists "firewall-cmd"; then
            fw="firewalld"
            if systemctl is-active --quiet "$fw"; then
                FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
                ((PASS++))
            else
                FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
                ((WARN++))
            fi
        else
            # user could be using vanilla iptables, check if kernel module is loaded
            fw="iptables"
            if lsmod | grep -q '^ip_tables' 2>/dev/null; then
                FW_VER="\e[32m[PASS]\e[0m Firewall service (${fw}) is active\n"
                ((PASS++))
            else
                FW_VER="\e[93m[WARN]\e[0m No firewall is configured. Ensure ${fw} is installed and configured\n"
                ((WARN++))
            fi
        fi
    fi
}

# FAIL if any security updates are pending for the detected distribution.
function checkUpdates {
    if [[ $OS == "Ubuntu" ]] || [[ "$OS" =~ Debian.* ]]; then
        # Ensure /tmp exists and has the proper permissions before
        # checking for security updates
        # https://github.com/digitalocean/marketplace-partners/issues/94
        if [[ ! -d /tmp ]]; then
            mkdir /tmp
        fi
        chmod 1777 /tmp

        echo -en "\nUpdating apt package database to check for security updates, this may take a minute...\n\n"
        apt-get -y update > /dev/null

        # Each pending security update is reported twice by --just-print,
        # hence the halving below.
        uc=$(apt-get --just-print upgrade | grep -i "security" -c)
        if [[ $uc -gt 0 ]]; then
            update_count=$(( uc / 2 ))
        else
            update_count=0
        fi

        if [[ $update_count -gt 0 ]]; then
            echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
            echo -en
            echo -en "Here is a list of the security updates that are not installed:\n"
            sleep 2
            apt-get --just-print upgrade | grep -i security | awk '{print $2}' | awk '!seen[$0]++'
            echo -en
            # shellcheck disable=SC2031
            ((FAIL++))
            STATUS=2
        else
            echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n\n"
            ((PASS++))
        fi
    elif [[ $OS == "CentOS Linux" ]] || [[ $OS == "CentOS Stream" ]] || [[ $OS == "Rocky Linux" ]] || [[ $OS == "AlmaLinux" ]]; then
        echo -en "\nChecking for available security updates, this may take a minute...\n\n"

        update_count=$(yum check-update --security --quiet | wc -l)
        if [[ $update_count -gt 0 ]]; then
            echo -en "\e[41m[FAIL]\e[0m There are ${update_count} security updates available for this image that have not been installed.\n"
            ((FAIL++))
            STATUS=2
        else
            echo -en "\e[32m[PASS]\e[0m There are no pending security updates for this image.\n"
            ((PASS++))
        fi
    else
        echo "Error encountered"
        exit 1
    fi

    return 1;
}

# FAIL if cloud-init is not installed; result message is stored in CI.
function checkCloudInit {
    if hash cloud-init 2>/dev/null; then
        CI="\e[32m[PASS]\e[0m Cloud-init is installed.\n"
        ((PASS++))
    else
        CI="\e[41m[FAIL]\e[0m No valid version of cloud-init was found.\n"
        ((FAIL++))
        STATUS=2
    fi
    return 1
}

# True when $1 > $2 by version-sort order (currently unused helper).
function version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }


clear
echo "DigitalOcean Marketplace Image Validation Tool ${VERSION}"
echo "Executed on: ${RUNDATE}"
echo "Checking local system for Marketplace compatibility..."

getDistro

echo -en "\n\e[1mDistribution:\e[0m ${OS}\n"
echo -en "\e[1mVersion:\e[0m ${VER}\n\n"

# ost: 1 = supported OS; osv: 1 = supported release, 2 = unsupported release.
ost=0
osv=0

if [[ $OS == "Ubuntu" ]]; then
    ost=1
    if [[ $VER == "22.10" ]] || [[ $VER == "22.04" ]] || [[ $VER == "20.04" ]] || [[ $VER == "18.04" ]] || [[ $VER == "16.04" ]]; then
        osv=1
    fi
elif [[ "$OS" =~ Debian.* ]]; then
    ost=1
    case "$VER" in
        9)
            osv=1
            ;;
        10)
            osv=1
            ;;
        11)
            osv=1
            ;;
        12)
            osv=1
            ;;
        *)
            osv=2
            ;;
    esac
elif [[ $OS == "CentOS Linux" ]]; then
    ost=1
    if [[ $VER == "8" ]]; then
        osv=1
    elif [[ $VER == "7" ]]; then
        osv=1
    elif [[ $VER == "6" ]]; then
        osv=1
    else
        osv=2
    fi
elif [[ $OS == "CentOS Stream" ]]; then
    ost=1
    if [[ $VER == "8" ]]; then
        osv=1
    elif [[ $VER == "9" ]]; then
        osv=1
    else
        osv=2
    fi
elif [[ $OS == "Rocky Linux" ]]; then
    ost=1
    if [[ $VER =~ 8\. ]] || [[ $VER =~ 9\. ]]; then
        osv=1
    else
        osv=2
    fi
elif [[ $OS == "AlmaLinux" ]]; then
    ost=1
    # BUGFIX: test $VER like every other branch; $VERSION is this tool's own
    # version string (and is clobbered by sourcing /etc/os-release).
    if [[ "$VER" =~ 8.* ]] || [[ "$VER" =~ 9.* ]]; then
        osv=1
    else
        osv=2
    fi
else
    ost=0
fi

if [[ $ost == 1 ]]; then
    echo -en "\e[32m[PASS]\e[0m Supported Operating System Detected: ${OS}\n"
    ((PASS++))
else
    echo -en "\e[41m[FAIL]\e[0m ${OS} is not a supported Operating System\n"
    ((FAIL++))
    STATUS=2
fi

if [[ $osv == 1 ]]; then
    echo -en "\e[32m[PASS]\e[0m Supported Release Detected: ${VER}\n"
    ((PASS++))
elif [[ $ost == 1 ]]; then
    echo -en "\e[41m[FAIL]\e[0m ${OS} ${VER} is not a supported Operating System Version\n"
    ((FAIL++))
    STATUS=2
else
    echo "Exiting..."
    exit 1
fi

checkCloudInit

echo -en "${CI}"

checkFirewall

echo -en "${FW_VER}"

checkUpdates

loadPasswords

checkLogs

echo -en "\n\nChecking all user-created accounts...\n"
checkUsers

echo -en "\n\nChecking the root account...\n"
checkRoot

checkAgent


# Summary
echo -en "\n\n---------------------------------------------------------------------------------------------------\n"

if [[ $STATUS == 0 ]]; then
    echo -en "Scan Complete.\n\e[32mAll Tests Passed!\e[0m\n"
elif [[ $STATUS == 1 ]]; then
    echo -en "Scan Complete. \n\e[93mSome non-critical tests failed. Please review these items.\e[0m\e[0m\n"
else
    echo -en "Scan Complete. \n\e[41mOne or more tests failed. Please review these items and re-test.\e[0m\n"
fi
echo "---------------------------------------------------------------------------------------------------"
echo -en "\e[1m${PASS} Tests PASSED\e[0m\n"
echo -en "\e[1m${WARN} WARNINGS\e[0m\n"
echo -en "\e[1m${FAIL} Tests FAILED\e[0m\n"
echo -en "---------------------------------------------------------------------------------------------------\n"

if [[ $STATUS == 0 ]]; then
    echo -en "We did not detect any issues with this image. Please be sure to manually ensure that all software installed on the base system is functional, secure and properly configured (or facilities for configuration on first-boot have been created).\n\n"
    exit 0
elif [[ $STATUS == 1 ]]; then
    echo -en "Please review all [WARN] items above and ensure they are intended or resolved. If you do not have a specific requirement, we recommend resolving these items before image submission\n\n"
    exit 0
else
    echo -en "Some critical tests failed. These items must be resolved and this scan re-run before you submit your image to the DigitalOcean Marketplace.\n\n"
    exit 1
fi
--------------------------------------------------------------------------------