├── .eslintrc.js
├── .gitattributes
├── .gitignore
├── .npmignore
├── .travis.yml
├── CHANGELOG.md
├── LICENSE
├── README.md
├── bin
├── overcast-docs.js
├── overcast-init
├── overcast-ssh
└── overcast-vagrant
├── fixtures
├── example.clusters.json
└── example.variables.json
├── overcast.js
├── package.json
├── recipes
├── base-snapshot
├── discourse
├── discourse-digitalocean
├── discourse-linode
├── git-server
│ ├── README.md
│ ├── deploy-digitalocean
│ └── install
├── lamp-server
├── nodejs-app
├── single-redis-instance
└── single-redis-instance-from-snapshot
├── scripts
├── add_user
├── authorize_key
├── change_ssh_port
├── chroot_user
├── create_apache_virtualhost
├── harden_ssh
├── health
├── install
│ ├── apache
│ ├── core
│ ├── couchdb
│ ├── discourse_docker
│ ├── docker
│ ├── emacs
│ ├── haproxy
│ ├── imagemagick
│ ├── iptables
│ ├── mysql
│ ├── nginx
│ ├── nodejs
│ ├── phantomjs
│ ├── php
│ ├── postgres
│ ├── redis
│ ├── sftp
│ └── vim
├── list_exposed_ports
├── set_redis_password
├── set_script_var
└── swap_usage_by_pid
├── src
├── cli.js
├── commands
│ ├── aliases.js
│ ├── cluster.js
│ ├── completions.js
│ ├── digitalocean.js
│ ├── expose.js
│ ├── exposed.js
│ ├── help.js
│ ├── index.js
│ ├── info.js
│ ├── init.js
│ ├── instance.js
│ ├── list.js
│ ├── ping.js
│ ├── port.js
│ ├── pull.js
│ ├── push.js
│ ├── run.js
│ ├── scriptvar.js
│ ├── slack.js
│ ├── ssh.js
│ ├── sshkey.js
│ ├── tunnel.js
│ ├── vars.js
│ ├── virtualbox.js
│ └── wait.js
├── constants.js
├── filters.js
├── log.js
├── provider.js
├── providers
│ ├── digitalocean.js
│ ├── index.js
│ ├── mock.js
│ └── virtualbox.js
├── rsync.js
├── scp.js
├── ssh.js
├── store.js
└── utils.js
├── test
├── fixtures
│ └── overcast.key.pub
└── integration
│ ├── aliases.spec.js
│ ├── cluster.spec.js
│ ├── completions.spec.js
│ ├── digitalocean.spec.js
│ ├── info.spec.js
│ ├── init.spec.js
│ ├── instance.spec.js
│ ├── list.spec.js
│ ├── port.spec.js
│ ├── pull.spec.js
│ ├── push.spec.js
│ ├── run.spec.js
│ ├── slack.spec.js
│ ├── sshkey.spec.js
│ ├── store.spec.js
│ ├── tunnel.spec.js
│ ├── utils.js
│ ├── utils.spec.js
│ ├── vars.spec.js
│ └── virtualbox.spec.js
└── yarn.lock
/.eslintrc.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | "env": {
3 | "es2021": true,
4 | "node": true
5 | },
6 | "extends": "eslint:recommended",
7 | "parserOptions": {
8 | "ecmaVersion": 12,
9 | "sourceType": "module"
10 | },
11 | "rules": {
12 | }
13 | };
14 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Disable CRLF line-ending conversion for all files
2 | * -crlf
3 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | test/integration/.overcast
2 | .DS_Store
3 | .vscode
4 | .idea
5 | node_modules
6 | npm-debug.log
7 | libpeerconnection.log
8 | temp.md
9 | .overcast
10 |
--------------------------------------------------------------------------------
/.npmignore:
--------------------------------------------------------------------------------
1 | /test
2 | /.gitattributes
3 | /.travis.yml
4 | /CHANGELOG.md
5 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: node_js
2 | node_js:
3 | - "5.1"
4 | sudo: false
5 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2014 Andrew Childs
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the 'Software'), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/bin/overcast-docs.js:
--------------------------------------------------------------------------------
1 | // Generates Markdown-formatted API documentation from module/command help functions.
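// Usage (the "docs" script in package.json): node bin/overcast-docs.js > temp.md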
2 |
3 | import { compileHelp } from '../src/cli.js';
4 | import * as utils from '../src/utils.js';
5 | import * as log from '../src/log.js';
6 | import { setConfigDirs } from '../src/store.js';
7 | import allCommands from '../src/commands/index.js';
8 |
9 | setConfigDirs('/path/to/.overcast');
10 |
11 | utils.eachObject(allCommands, ({ commands }, name) => {
12 | utils.eachObject(commands, (subcommand) => {
13 | if (subcommand.alias === true) {
14 | return;
15 | }
16 | const subcommandName = subcommand.name && name !== subcommand.name ?
17 | ' ' + subcommand.name : '';
18 | console.log('### overcast ' + name + subcommandName);
19 | log.br();
20 | console.log('```');
21 | compileHelp(subcommand, true);
22 | console.log('```');
23 | log.br();
24 | });
25 | });
26 |
--------------------------------------------------------------------------------
/bin/overcast-init:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
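# Called by `overcast init`: seeds the destination config directory with a keys
# directory, an empty clusters.json and a default variables.json (if missing),
# and copies in the example fixture files.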
3 | set -e
4 | set -x
5 |
6 | if [ -z "$OVERCAST_FIXTURE_DIR" ] || [ -z "$OVERCAST_DEST_DIR" ]; then
7 | echo "Don't use this script directly. Run 'overcast init' from the directory"
8 | echo "you want to install an overcast configuration in."
9 | exit 1
10 | fi
11 |
12 | mkdir -p "$OVERCAST_DEST_DIR/keys"
13 |
14 | if [ ! -f "$OVERCAST_DEST_DIR/clusters.json" ]; then
15 | echo "{}" > "$OVERCAST_DEST_DIR/clusters.json"
16 | fi
17 |
18 | if [ ! -f "$OVERCAST_DEST_DIR/variables.json" ]; then
19 | cp "$OVERCAST_FIXTURE_DIR/example.variables.json" "$OVERCAST_DEST_DIR/variables.json"
20 | fi
21 |
22 | cp "$OVERCAST_FIXTURE_DIR/example.clusters.json" "$OVERCAST_DEST_DIR/example.clusters.json"
23 | cp "$OVERCAST_FIXTURE_DIR/example.variables.json" "$OVERCAST_DEST_DIR/example.variables.json"
24 |
--------------------------------------------------------------------------------
/bin/overcast-ssh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
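# Runs a command (OVERCAST_COMMAND) or a local script file (OVERCAST_SCRIPT_FILE) on a
# remote host over SSH, using the connection details Overcast passes in as environment
# variables (OVERCAST_KEY, OVERCAST_PORT, OVERCAST_USER, OVERCAST_IP, ...). If
# OVERCAST_PASSWORD is set, sshpass is used instead of key-based authentication.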
3 | if [ -z "$OVERCAST_SCRIPT_FILE" ]; then
4 | if [ -z "$OVERCAST_HIDE_COMMAND" ]; then
5 | echo ""
6 | echo "\$ $OVERCAST_ENV$OVERCAST_COMMAND"
7 | fi
8 | if [ -z "$OVERCAST_PASSWORD" ]; then
9 | ssh -i $OVERCAST_KEY -p $OVERCAST_PORT $OVERCAST_USER@$OVERCAST_IP \
10 | -o StrictHostKeyChecking=no $OVERCAST_SSH_ARGS "$OVERCAST_ENV $OVERCAST_COMMAND"
11 | else
12 | sshpass "-p$OVERCAST_PASSWORD" \
13 | ssh -p $OVERCAST_PORT $OVERCAST_USER@$OVERCAST_IP \
14 | -o StrictHostKeyChecking=no -o PubkeyAuthentication=no $OVERCAST_SSH_ARGS "$OVERCAST_ENV $OVERCAST_COMMAND"
15 | fi
16 | else
17 | if [ -z "$SHELL_COMMAND" ]; then
18 | SHELL_COMMAND="/bin/bash -s"
19 | fi
20 | if [ -z "$OVERCAST_HIDE_COMMAND" ]; then
21 | echo ""
22 | echo "\$ $OVERCAST_ENV$SHELL_COMMAND $OVERCAST_SCRIPT_FILE"
23 | fi
24 | if [ -z "$OVERCAST_PASSWORD" ]; then
25 | ssh -i $OVERCAST_KEY -p $OVERCAST_PORT $OVERCAST_USER@$OVERCAST_IP \
26 | -o StrictHostKeyChecking=no $OVERCAST_SSH_ARGS "$OVERCAST_ENV $SHELL_COMMAND" < "$OVERCAST_SCRIPT_FILE"
27 | else
28 | sshpass "-p$OVERCAST_PASSWORD" \
29 | ssh -p $OVERCAST_PORT $OVERCAST_USER@$OVERCAST_IP \
30 | -o StrictHostKeyChecking=no -o PubkeyAuthentication=no $OVERCAST_SSH_ARGS "$OVERCAST_ENV $SHELL_COMMAND" < "$OVERCAST_SCRIPT_FILE"
31 | fi
32 | fi
33 |
--------------------------------------------------------------------------------
/bin/overcast-vagrant:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Create a new Virtualbox instance using Vagrant.
4 |
5 | # Usage:
6 | # overcast virtualbox create [name]
7 |
8 | # Expected variables:
9 | # VM_BOX
10 | # VM_IP
11 | # VM_PUB_KEY
12 | # VM_RAM
13 | # VM_CPUS
14 |
15 | VM_DIR="$HOME/.overcast-vagrant/$VM_IP"
16 |
17 | if [ -d "$VM_DIR" ]; then
18 | echo "$VM_DIR already exists, no action taken."
19 | exit 1
20 | fi
21 |
22 | mkdir -p "$VM_DIR"
23 |
24 | cat > "$VM_DIR/Vagrantfile" << EOF
25 |
26 | VM_NAME = "$VM_BOX.$VM_IP"
27 | VM_BOX = "$VM_BOX"
28 | VM_IP = "$VM_IP"
29 | VM_RAM = "$VM_RAM"
30 | VM_CPUS = "$VM_CPUS"
31 | VM_PUB_KEY = "$VM_PUB_KEY"
32 |
33 | Vagrant.configure("2") do |config|
34 | config.vm.box = VM_BOX
35 | config.vm.network "private_network", ip: VM_IP
36 |
37 | config.vm.provider "virtualbox" do |v|
38 | v.name = VM_NAME
39 | v.memory = VM_RAM
40 | v.cpus = VM_CPUS
41 | end
42 |
43 | # Copy SSH public key to temp file.
44 | config.vm.provision "file",
45 | source: VM_PUB_KEY,
46 | destination: "/home/vagrant/overcast_pub_key"
47 |
48 | # Use Overcast key for root access.
49 | config.vm.provision "shell",
50 | inline: "chmod 600 overcast_pub_key && sudo mv overcast_pub_key /root/.ssh/authorized_keys && sudo chown root:root /root/.ssh/authorized_keys"
51 | end
52 |
53 | EOF
54 |
55 | cd "$VM_DIR"
56 | vagrant up
57 |
58 | exit 0
59 |
--------------------------------------------------------------------------------
/fixtures/example.clusters.json:
--------------------------------------------------------------------------------
1 | {
2 | "dummy-app-cluster": {
3 | "instances": {
4 | "dummy.app.01": {
5 | "ip": "127.0.0.1",
6 | "name": "dummy.01",
7 | "ssh_key": "/Users/username/.ssh/id_rsa",
8 | "ssh_port": "22222",
9 | "user": "root"
10 | },
11 | "dummy.app.02": {
12 | "ip": "127.0.0.2",
13 | "name": "dummy.02",
14 | "ssh_key": "overcast.key",
15 | "ssh_port": "22",
16 | "user": "root"
17 | }
18 | }
19 | },
20 | "dummy-db-cluster": {
21 | "instances": {
22 | "dummy.db.01": {
23 | "ip": "127.0.0.3",
24 | "name": "dummy.03",
25 | "ssh_key": "overcast.key",
26 | "ssh_port": "22",
27 | "user": "root"
28 | }
29 | }
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/fixtures/example.variables.json:
--------------------------------------------------------------------------------
1 | {
2 | "DIGITALOCEAN_API_TOKEN": "",
3 | "SLACK_WEBHOOK_URL": "",
4 | "OVERCAST_SSH_KEY": "",
5 | "OVERCAST_SSH_USER": ""
6 | }
7 |
--------------------------------------------------------------------------------
/overcast.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | import { init } from './src/cli.js';
4 |
5 | init();
6 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "overcast",
3 | "description": "A simple command line program that makes it easy to spin up, configure, and manage virtual machines over SSH.",
4 | "version": "2.2.7",
5 | "repository": "https://github.com/andrewchilds/overcast.git",
6 | "author": {
7 | "name": "Andrew Childs",
8 | "email": "tidy.desk9426@fastmail.com"
9 | },
10 | "license": "MIT",
11 | "engines": {
12 | "node": ">=13.2.x"
13 | },
14 | "type": "module",
15 | "main": "overcast.js",
16 | "scripts": {
17 | "test": "cd test && npx jasmine --random=false integration/*.spec.js",
18 | "docs": "node bin/overcast-docs.js > temp.md"
19 | },
20 | "bin": {
21 | "overcast": "./overcast.js"
22 | },
23 | "dependencies": {
24 | "chalk": "5.3.0",
25 | "minimist": "1.2.8",
26 | "do-wrapper": "github:andrewchilds/do-wrapper#main",
27 | "slack-notify": "2.0.6"
28 | },
29 | "devDependencies": {
30 | "eslint": "8.9.0",
31 | "jasmine": "4.0.2"
32 | }
33 | }
34 |
--------------------------------------------------------------------------------
/recipes/base-snapshot:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Recipe to create a base configuration snapshot on DigitalOcean.
4 | #
5 | # - Ubuntu 12.04, 512mb, nyc2 region
6 | # - iptables configured to only allow SSH connections, SSH password logins disabled
7 | # - Full system upgrade performed, core packages installed (git, vim, less, wget)
8 | # - Creates a snapshot of the configured instance
9 | # - Destroys the instance and removes the cluster once the snapshot is created
10 | #
11 | # This script takes around 5 minutes to run.
12 |
13 | ###############################################################################
14 |
15 | # Instructions:
16 |
17 | # 1. Install Overcast:
18 | #
19 | # npm -g install overcast
20 | #
21 | # 2. Add your DigitalOcean API credentials to $HOME/.overcast/variables.json:
22 | #
23 | # {
24 | # "DIGITALOCEAN_API_TOKEN": "YOUR_API_TOKEN"
25 | # }
26 | #
27 | # 3. Download, configure and run this script from the terminal:
28 | #
29 | # curl https://raw.githubusercontent.com/andrewchilds/overcast/master/recipes/base-snapshot > ./base-snapshot
30 | # chmod +x ./base-snapshot
31 | # (Configure with your favorite editor...)
32 | # ./base-snapshot
33 | #
34 | # 4. Wait 5 minutes.
35 | #
36 | # 5. You can now use "base.001.snapshot" when creating new instances:
37 | #
38 | # overcast digitalocean create db.001 --cluster db --image-name base.001.snapshot
39 |
40 | ###############################################################################
41 |
42 | # Configuration:
43 |
44 | cluster="base"
45 | instance="base.001"
46 |
47 | # List droplet sizes with `overcast digitalocean sizes`
48 | sizeSlug="512mb"
49 |
50 | # List droplet regions with `overcast digitalocean regions`
51 | regionSlug="nyc2"
52 |
53 | ###############################################################################
54 |
55 | # You shouldn't have to edit anything below.
56 |
57 | set -e
58 |
59 | echo "Creating cluster $cluster if it doesn't already exist..."
60 | overcast cluster create $cluster
61 |
62 | echo "Creating instance $instance on DigitalOcean..."
63 | overcast digitalocean create $instance --cluster $cluster --size-slug $sizeSlug \
64 | --region-slug $regionSlug --image-slug "ubuntu-12-04-x64"
65 |
66 | echo "Disallowing password access through SSH..."
67 | overcast run $instance harden_ssh
68 |
69 | echo "Full system upgrade, installing core packages..."
70 | overcast run $instance install/core
71 | overcast digitalocean reboot $instance
72 |
73 | echo "Configuring iptables to only expose SSH..."
74 | overcast expose $instance 22
75 |
76 | echo "Creating snapshot \"$instance.snapshot\"..."
77 | overcast digitalocean snapshot $instance $instance.snapshot
78 |
79 | echo "Destroying instance"...
80 | overcast digitalocean destroy $instance
81 | overcast cluster remove $cluster
82 |
83 | echo "Done."
84 | overcast digitalocean snapshots
85 |
--------------------------------------------------------------------------------
/recipes/discourse:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Discourse Recipe
4 |
5 | # Deploy on a DigitalOcean 2GB:
6 | #   overcast cluster create discourse
7 | # overcast digitalocean create discourse-do-01 --cluster discourse --size-slug 2gb
8 | # recipes/discourse discourse-do-01
9 |
10 | # Deploy on a Linode 2048:
11 | #   overcast cluster create discourse
12 | # overcast linode create discourse-linode-01 --cluster discourse
13 | # recipes/discourse discourse-linode-01
14 |
15 | ###############################################################################
16 |
17 | # Configuration:
18 | if [ -z "$1" ]; then
19 | echo "Usage: recipes/discourse [instance]"
20 | exit 1
21 | fi
22 |
23 | instance=$1
24 |
25 | # Discourse configuration
26 | discourseDevEmails="email@example.com"
27 | discourseHostname="discourse.example.com"
28 | discourseSMTPAddress="smtp.mandrillapp.com"
29 | discourseSMTPPort="587"
30 | discourseSMTPUsername="email@example.com"
31 | discourseSMTPPassword="my.api.key"
32 |
33 | ###############################################################################
34 |
35 | # You shouldn't need to edit anything below.
36 |
37 | set -e
38 |
39 | echo "Disallowing password access through SSH..."
40 | overcast run $instance harden_ssh
41 |
42 | echo "Full system upgrade, installing core packages..."
43 | overcast run $instance install/core
44 | overcast reboot $instance
45 |
46 | echo "Configuring iptables to only expose HTTP, HTTPS and SSH..."
47 | overcast expose $instance 22 80 443
48 |
49 | echo "Creating SSH key for root that Discourse can use..."
50 | overcast run $instance "ssh-keygen -t rsa -N \"\" -f /root/.ssh/id_rsa"
51 |
52 | echo "Installing Docker..."
53 | overcast run $instance install/docker
54 |
55 | echo "Installing Discourse..."
56 | overcast run $instance install/discourse_docker \
57 | --env "discourseHostname=\"$discourseHostname\" \
58 | discourseDevEmails=\"$discourseDevEmails\" \
59 | discourseSMTPAddress=\"$discourseSMTPAddress\" \
60 | discourseSMTPPort=\"$discourseSMTPPort\" \
61 | discourseSMTPUsername=\"$discourseSMTPUsername\" \
62 | discourseSMTPPassword=\"$discourseSMTPPassword\""
63 |
64 | echo "Starting Discourse..."
65 | overcast run $instance "cd /var/docker; ./launcher start app --skip-prereqs"
66 |
67 | echo "Done!"
68 | echo "Your new Discourse instance is ready to go at http://`overcast instance get $instance ip`. Enjoy!"
69 |
--------------------------------------------------------------------------------
/recipes/discourse-digitalocean:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Deploy Discourse on DigitalOcean.
4 |
5 | # This script takes around 15-20 minutes to run.
6 |
7 | ###############################################################################
8 |
9 | # Instructions:
10 |
11 | # 1. Install Overcast:
12 | #
13 | # npm -g install overcast
14 | #
15 | # 2. Add your DigitalOcean API credentials to $HOME/.overcast/variables.json:
16 | #
17 | # {
18 | # "DIGITALOCEAN_API_TOKEN": "YOUR_API_TOKEN"
19 | # }
20 | #
21 | # 3. Sign up for an account on https://mandrillapp.com/ to handle email
22 | #    notifications, click "Get SMTP Credentials", and add the SMTP settings to recipes/discourse.
23 | #
24 | # 4. Download, configure and run this script from the terminal:
25 | #
26 | #    git clone https://github.com/andrewchilds/overcast.git
27 | #    (Configure with your favorite editor...)
28 | #    overcast/recipes/discourse-digitalocean
29 | #
30 | # 5. Wait 15-20 minutes.
31 | #
32 | # 6. Go to your assigned IP address and set up your new Discourse install.
33 |
34 | ###############################################################################
35 |
36 | # Configuration:
37 |
38 | cluster="discourse"
39 | instance="discourse.001"
40 |
41 | # List droplet sizes with `overcast digitalocean sizes`
42 | sizeSlug="2gb"
43 |
44 | # List droplet regions with `overcast digitalocean regions`
45 | regionSlug="nyc2"
46 |
47 | ###############################################################################
48 |
49 | # You shouldn't need to edit anything below.
50 |
51 | dirName=$(dirname "$0")
52 |
53 | overcast cluster create $cluster
54 | overcast digitalocean create $instance --cluster $cluster --size-slug $sizeSlug \
55 | --region-slug $regionSlug --image-slug "ubuntu-12-04-x64"
56 | $dirName/discourse $instance
57 |
--------------------------------------------------------------------------------
/recipes/discourse-linode:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Deploy Discourse on Linode.
4 |
5 | # This script takes around 10-15 minutes to run.
6 |
7 | ###############################################################################
8 |
9 | # Instructions:
10 |
11 | # 1. Install Overcast:
12 | #
13 | # npm -g install overcast
14 | #
15 | # 2. Add your Linode API key to $HOME/.overcast/variables.json:
16 | #
17 | # {
18 | # "LINODE_API_KEY": "YOUR_API_KEY"
19 | # }
20 | #
21 | # 3. Sign up for an account on https://mandrillapp.com/ to handle email
22 | #    notifications, click "Get SMTP Credentials", and add the SMTP settings to recipes/discourse.
23 | #
24 | # 4. Download, configure and run this script from the terminal:
25 | #
26 | #    git clone https://github.com/andrewchilds/overcast.git
27 | #    (Configure with your favorite editor...)
28 | #    overcast/recipes/discourse-linode
29 | #
30 | # 5. Wait 10-15 minutes.
31 | #
32 | # 6. Go to your assigned IP address and set up your new Discourse install.
33 |
34 | ###############################################################################
35 |
36 | # Configuration
37 |
38 | cluster="discourse"
39 | instance="discourse-01"
40 |
41 | ###############################################################################
42 |
43 | dirName=$(dirname "$0")
44 |
45 | overcast cluster create $cluster
46 | overcast linode create $instance --cluster $cluster
47 | $dirName/discourse $instance
48 |
--------------------------------------------------------------------------------
/recipes/git-server/README.md:
--------------------------------------------------------------------------------
1 | # Creating a Private Git Server
2 |
3 | A private git server is an easy way to work securely with sensitive data. You directly control who has access to the server, and there is no large web application to install, maintain, and worry about. SSH passwords are disabled by default (using the `harden_ssh` script), iptables is configured to only allow connections on port 22, and all repos are owned by the `git` user.
4 |
5 | ## Instructions
6 |
7 | Spin up and configure your git server on DigitalOcean. This uses an Ubuntu 20.04 image on a 512mb droplet in the nyc3 region with backups enabled. The instance will be named `git-001`.
8 |
9 | ```sh
10 | ./deploy-digitalocean
11 | ```
12 |
13 | To create a new git repo:
14 |
15 | ```sh
16 | overcast run git-001 "git init --bare my-repo-name.git"
17 | ```
18 |
19 | Since you're using Overcast's SSH key to connect to the server, you need to add an entry in your `~/.ssh/config` file to allow git to connect:
20 |
21 | ```
22 | Host git.mydomain.com
23 | HostName [your assigned IP address]
24 | IdentityFile /path/to/your/overcast.key
25 | Port 22
26 | User git
27 | ```
28 |
29 | To work with the new repo on your local machine:
30 |
31 | ```sh
32 | cd /path/to/my/project
33 | git init
34 | git remote add origin git.mydomain.com:my-repo-name.git
35 | git add .
36 | git commit -m "First commit"
37 | git push origin master
38 | ```
39 |
40 | To give repo access to additional users, add their public SSH key to `/home/git/.ssh/authorized_keys`:
41 |
42 | ```sh
43 | overcast run git-001 "echo [pub-key-data] >> ~/.ssh/authorized_keys"
44 | ```
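
To check out the repo from another machine (using the same `Host` alias defined in `~/.ssh/config` above), you can clone it directly:

```sh
git clone git.mydomain.com:my-repo-name.git
```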
45 |
--------------------------------------------------------------------------------
/recipes/git-server/deploy-digitalocean:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Deploy a Git server on DigitalOcean.
4 |
5 | ###############################################################################
6 |
7 | # Configuration:
8 |
9 | instance="git-001"
10 |
11 | ###############################################################################
12 |
13 | # You shouldn't have to edit anything below.
14 |
15 | set -e
16 |
17 | dirName="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
18 |
19 | echo "Creating new DigitalOcean droplet..."
20 | overcast digitalocean create $instance --backups-enabled --size-slug 512mb \
21 | --region-slug nyc3 --image-slug ubuntu-20-04-x64
22 |
23 | $dirName/install $instance
24 |
--------------------------------------------------------------------------------
/recipes/git-server/install:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Git on an existing instance.
4 |
5 | ###############################################################################
6 |
7 | # Configuration:
8 |
9 | if [ -z "$1" ]; then
10 | echo "Usage: ./install [instance]"
11 | exit 1
12 | fi
13 |
14 | instance="$1"
15 | username="git"
16 |
17 | ###############################################################################
18 |
19 | # You shouldn't have to edit anything below.
20 |
21 | set -e
22 |
23 | echo "Disallowing password access through SSH..."
24 | overcast run $instance harden_ssh
25 |
26 | echo "Full system upgrade, installing core packages..."
27 | overcast run $instance install/core
28 | overcast reboot $instance
29 |
30 | echo "Configure iptables to only expose SSH..."
31 | overcast expose $instance 22
32 |
33 | echo "Switching to user \"$username\" instead of root..."
34 | overcast run $instance add_user --env "username=$username"
35 | overcast instance update $instance --user $username
36 |
37 | echo ""
38 | echo "Done! Your new git server is ready."
39 | echo ""
40 | echo "To create a repo on this server:"
41 | echo "$ overcast run $instance \"git init --bare my-repo-name.git\""
42 | echo ""
43 | echo "To connect to this server using git, add this to your ~/.ssh/config:"
44 | echo "Host git.mydomain.com"
45 | echo " HostName `overcast instance get $instance ip`"
46 | echo " IdentityFile `overcast key get overcast --private-path`"
47 | echo " Port 22"
48 | echo " User $username"
49 | echo ""
50 | echo "To use that repo on your local machine:"
51 | echo "$ cd /path/to/my/project"
52 | echo "$ git init"
53 | echo "$ git remote add origin git.mydomain.com:my-repo-name.git"
54 | echo "$ git push origin master"
55 |
--------------------------------------------------------------------------------
/recipes/lamp-server:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Ubuntu 14.04 LAMP Server Recipe
4 |
5 | # Deploy on a DigitalOcean 512mb:
6 | # overcast digitalocean create lamp-do-01
7 | # recipes/lamp-server lamp-do-01
8 |
9 | ###############################################################################
10 |
11 | # Configuration:
12 | if [ -z "$1" ]; then
13 | echo "Usage: recipes/lamp-server [instance]"
14 | exit 1
15 | fi
16 |
17 | instance=$1
18 |
19 | ###############################################################################
20 |
21 | # You shouldn't need to edit anything below.
22 |
23 | set -e
24 |
25 | echo "Full system upgrade, installing core packages..."
26 | overcast run $instance install/core install/sftp install/apache install/mysql install/php
27 | overcast reboot $instance
28 |
29 | echo "Configuring iptables to only expose HTTP, HTTPS and SSH..."
30 | overcast expose $instance 22 80 443
31 |
32 | echo "Done!"
33 |
--------------------------------------------------------------------------------
/recipes/nodejs-app:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Recipe to provision a DigitalOcean droplet using Overcast, with the following:
4 | #
5 | # - Ubuntu 12.04, 512mb, nyc2 region
6 | # - NodeJS, Redis, and Nginx installed
7 | # - A user account created with SSH access using the overcast SSH key
8 | # - Optionally, a snapshot of the configured instance (see the commented-out step below)
9 | #
10 | # This script takes around 5 minutes to run.
11 |
12 | ###############################################################################
13 |
14 | # Configuration:
15 |
16 | cluster="example"
17 | # instance="example.001"
18 | username="appuser"
19 | sshPort="50022"
20 |
21 | ###############################################################################
22 |
23 | # You shouldn't have to edit anything below.
24 |
25 | set -e
26 |
27 | echo "Creating cluster $cluster if it doesn't already exist..."
28 | overcast cluster create $cluster
29 |
30 | if [ -z "$instance" ]; then
31 | # Autogenerate instance name based on number of existing instances in the cluster.
32 | id=$(printf '%03d' $(expr $(overcast cluster count $cluster) + 1))
33 | instance="$cluster.$id"
34 | fi
35 |
36 | echo "Creating instance $instance on DigitalOcean..."
37 | overcast digitalocean create $instance --cluster $cluster
38 |
39 | echo "System upgrade, installing core packages, Nginx, Redis, NodeJS, configure SFTP..."
40 | overcast run $instance install/core install/sftp install/nginx install/redis install/nodejs
41 | overcast reboot $instance
42 |
43 | echo "Changing SSH port to $sshPort, configuring iptables to only expose HTTP, HTTPS, and SSH..."
44 | overcast run $instance harden_ssh
45 | overcast expose $instance 22 $sshPort
46 | overcast port $instance $sshPort
47 | overcast expose $instance 80 443 $sshPort
48 |
49 | echo "Pushing nginx configuration..."
50 | overcast push $instance nginx/example.conf /etc/nginx/sites-enabled/myapp.conf
51 |
52 | echo "Adding user $username..."
53 | overcast run $instance add_user --env "username=$username"
54 |
55 | echo "Switching default SSH user to $username..."
56 | echo "Going forward, to run commands on this instance as root, use --user root"
57 | overcast instance update $instance --user $username
58 |
59 | # Run your own application install script, like so:
60 | # overcast run $instance /path/to/my-app-install-script
61 | # overcast run $instance /path/to/my-app-start-script
62 |
63 | # echo "Creating snapshot..."
64 | # overcast digitalocean snapshot $instance $instance.snapshot
65 |
66 | echo "Done."
67 |
--------------------------------------------------------------------------------
/recipes/single-redis-instance:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Recipe to spin up and configure a Redis instance on DigitalOcean.
4 | #
5 | # - Ubuntu 12.04, 512mb, nyc2 region
6 | # - Redis installed, configured with autogenerated 256-bit password
7 | # - IP allowlisting so only specified IPs can connect
8 | # - A non-root user account created with SSH access using the overcast SSH key
9 | # - A snapshot of the configured instance, in case you need to rebuild it or spin up more
10 | #
11 | # This script takes around 5 minutes to run.
12 |
13 | ###############################################################################
14 |
15 | # Configuration:
16 |
17 | cluster="redis"
18 | # instance="redis.001"
19 | username="appuser"
20 | sshPort="50022"
21 | allowlist=""
22 |
23 | ###############################################################################
24 |
25 | # You shouldn't have to edit anything below.
26 |
27 | set -e
28 |
29 | echo "Creating cluster $cluster if it doesn't already exist..."
30 | overcast cluster create $cluster
31 |
32 | if [ -z "$instance" ]; then
33 | # Autogenerate instance name based on number of existing instances in the cluster.
34 | id=$(printf '%03d' $(expr $(overcast cluster count $cluster) + 1))
35 | instance="$cluster.$id"
36 | fi
37 |
38 | echo "Creating instance $instance on DigitalOcean..."
39 | overcast digitalocean create $instance --cluster $cluster
40 |
41 | echo "Changing SSH port to $sshPort, configuring iptables..."
42 | overcast run $instance harden_ssh
43 | overcast expose $instance 22 $sshPort
44 | overcast port $instance $sshPort
45 | overcast expose $instance 6379 $sshPort --allowlist "$allowlist"
46 |
47 | echo "System upgrade, installing core packages, installing redis..."
48 | overcast run $instance install/core install/redis
49 | overcast reboot $instance
50 |
51 | echo "Setting redis password..."
52 | overcast run $instance set_redis_password
53 |
54 | echo "Adding user $username..."
55 | overcast run $instance add_user --env "username=$username"
56 |
57 | echo "Switching default SSH user to $username..."
58 | echo "Going forward, to run commands on this instance as root, use --user root"
59 | overcast instance update $instance --user $username
60 |
61 | echo "Creating snapshot..."
62 | overcast digitalocean snapshot $instance $instance.snapshot
63 |
64 | echo "Done."
65 |
--------------------------------------------------------------------------------
/recipes/single-redis-instance-from-snapshot:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Recipe to spin up and configure a Redis instance on DigitalOcean using a snapshot.
4 | # Expects the "base-snapshot" script to have been run previously.
5 | #
6 | # - Ubuntu 12.04, 512mb, nyc2 region
7 | # - Redis installed, configured with autogenerated 256-bit password
8 | # - IP allowlisting so only specified IPs can connect
9 | # - A non-root user account created with SSH access using the overcast SSH key
10 | #
11 | # This script takes around 2 minutes to run.
12 |
13 | ###############################################################################
14 |
15 | # Configuration:
16 |
17 | cluster="redis"
18 | # instance="redis.001"
19 | username="appuser"
20 | sshPort="50022"
21 | allowlist=""
22 |
23 | ###############################################################################
24 |
25 | # You shouldn't have to edit anything below.
26 |
27 | set -e
28 |
29 | echo "Creating cluster $cluster if it doesn't already exist..."
30 | overcast cluster create $cluster
31 |
32 | if [ -z "$instance" ]; then
33 | # Autogenerate instance name based on number of existing instances in the cluster.
34 | id=$(printf '%03d' $(expr $(overcast cluster count $cluster) + 1))
35 | instance="$cluster.$id"
36 | fi
37 |
38 | echo "Creating instance $instance on DigitalOcean..."
39 | overcast digitalocean create $instance --cluster $cluster --image-name base.001.snapshot --ssh-port $sshPort
40 |
41 | echo "Configuring iptables to expose SSH ($sshPort) and Redis (6379) ports..."
42 | overcast expose $instance 6379 $sshPort --allowlist "$allowlist"
43 |
44 | echo "System upgrade, installing core packages, installing redis..."
45 | overcast run $instance install/core install/redis
46 | overcast reboot $instance
47 |
48 | echo "Setting redis password..."
49 | overcast run $instance set_redis_password
50 |
51 | echo "Adding user $username..."
52 | overcast run $instance add_user --env "username=$username"
53 |
54 | echo "Switching default SSH user to $username..."
55 | echo "Going forward, to run commands on this instance as root, use --user root"
56 | overcast instance update $instance --user $username
57 |
58 | echo "Done."
59 |
--------------------------------------------------------------------------------
/scripts/add_user:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Add new user account.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster add_user --env "username=newuser"
12 |
13 | # set -x
14 |
15 | usage="Usage: overcast run myInstanceOrCluster add_user --env \"username=newuser\""
16 |
17 | if [ "$(id -u)" != "0" ]; then
18 | echo "This script must be run as root." 1>&2
19 | echo $usage
20 | exit 1
21 | fi
22 |
23 | if [ -z "$username" ]; then
24 | echo "No username defined." 1>&2
25 | echo $usage
26 | exit 1
27 | fi
28 |
29 | if id -u "$username" >/dev/null 2>&1; then
30 | echo "User $username already exists!" 1>&2
31 | exit 1
32 | fi
33 |
34 | if [ -z "$password" ]; then
35 | password=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 32 | head -n 1`
36 | echo "Using autogenerated password:"
37 | echo "$password"
38 | fi
39 |
40 | # Add user.
41 |
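# Hash the password with crypt(3), using the username as the salt, since useradd -p expects an already-encrypted value.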
42 | encrypted_password=$(perl -e "print crypt(\"$password\", \"$username\")")
43 | useradd -m -s /bin/bash -p "$encrypted_password" "$username"
44 |
45 | # Set up SSH access.
46 |
47 | mkdir -p /home/$username/.ssh
48 | cp /root/.ssh/authorized_keys /home/$username/.ssh/authorized_keys
49 | chown -R $username:$username /home/$username/.ssh
50 | chmod 700 /home/$username/.ssh
51 | chmod 600 /home/$username/.ssh/authorized_keys
52 |
53 | echo "User $username created. Home directory is /home/$username."
54 |
55 | exit 0
56 |
--------------------------------------------------------------------------------
/scripts/authorize_key:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Push public SSH key to instance
4 |
5 | # Verified on:
6 | # Ubuntu 20.04
7 | # Ubuntu 14.04
8 |
9 | # Usage:
10 | # overcast sshkey create newKey
11 | # overcast sshkey push vm-01 newKey
12 | # overcast instance update vm-01 --ssh-key newKey.key
13 |
14 | set -e
15 | # set -x
16 |
17 | if [ -z "$PUBLIC_KEY" ]; then
18 | echo "Usage: overcast sshkey push vm-01 myKeyName"
19 | exit 1
20 | fi
21 |
22 | sshDir="$HOME/.ssh"
23 | authorizedKeysFile="$sshDir/authorized_keys"
24 |
25 | mkdir -p "$sshDir"
26 |
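# When SHOULD_APPEND=true the key is appended and the file de-duplicated; otherwise authorized_keys is replaced outright.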
27 | if [ "$SHOULD_APPEND" = "true" ]; then
28 | echo "$PUBLIC_KEY" >> "$authorizedKeysFile"
29 | sort --unique "--output=$authorizedKeysFile" "$authorizedKeysFile"
30 | else
31 | echo "$PUBLIC_KEY" > "$authorizedKeysFile"
32 | fi
33 |
34 | chmod 700 "$sshDir"
35 | chmod 600 "$authorizedKeysFile"
36 |
--------------------------------------------------------------------------------
/scripts/change_ssh_port:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Change SSH listener port.
4 |
5 | # Verified on:
6 | # Ubuntu 24.04
7 | # Ubuntu 20.04
8 | # Ubuntu 14.04
9 | # Ubuntu 12.04
10 | # Debian 7.0
11 |
12 | # Usage:
13 | # overcast port myInstanceOrCluster 55522
14 |
15 | # set -x
16 |
17 | if [ "$(id -u)" != "0" ]; then
18 | echo "This script must be run as root." 1>&2
19 | exit 1
20 | fi
21 |
22 | if [ -z "$new_ssh_port" ]; then
23 | echo "No port defined, no action taken."
24 | exit 1
25 | fi
26 |
27 | current_ssh_port=`cat /etc/ssh/sshd_config | grep 'Port ' | grep -v 'GatewayPorts' | awk '{ print $2 }'`
28 | exposed_ports=`iptables -L -n | grep 'ACCEPT' | grep 'tcp dpt:' | awk '{ print $7 }' | tr -d 'dpt:' | paste -sd ' '`
29 |
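# Refuse to change the port unless the new port is already open in iptables, otherwise we could lock ourselves out.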
30 | new_ssh_port_not_exposed=true
31 | for exposed_port in $exposed_ports; do
32 | if [ "$new_ssh_port" -eq "$exposed_port" ]; then
33 | new_ssh_port_not_exposed=false
34 | fi
35 | done
36 |
37 | if $new_ssh_port_not_exposed; then
38 | echo "New SSH port ($new_ssh_port) not exposed by iptables, no action taken."
39 | exit 1
40 | fi
41 |
42 | echo Setting port to $new_ssh_port...
43 |
44 | sed -i -r "s/^\#?Port [0-9]+/Port $new_ssh_port/g" /etc/ssh/sshd_config
45 |
46 | # Disable SSH socket, which is enforcing the old port
47 | systemctl disable ssh.socket || systemctl disable sshd.socket
48 | systemctl stop ssh.socket || systemctl stop sshd.socket
49 |
50 | # Handle service restart for both old and new Ubuntu versions
51 | if command -v systemctl >/dev/null 2>&1; then
52 | systemctl restart ssh.service || systemctl restart sshd.service
53 | else
54 | service ssh restart || service sshd restart
55 | fi
56 |
57 | # Verify service is running
58 | if command -v systemctl >/dev/null 2>&1; then
59 | systemctl is-active ssh.service || systemctl is-active sshd.service
60 | else
61 | service ssh status || service sshd status
62 | fi
63 |
64 | exit 0
65 |
--------------------------------------------------------------------------------
/scripts/chroot_user:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Set an existing user account to be chrooted SFTP only.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster chroot_user --env "username=existinguser"
12 |
13 | # set -x
14 |
15 | usage="Usage: overcast run myInstanceOrCluster chroot_user --env \"username=existinguser\""
16 |
17 | if [ "$(id -u)" != "0" ]; then
18 | echo "This script must be run as root." 1>&2
19 | echo $usage
20 | exit 1
21 | fi
22 |
23 | if [ -z "$username" ]; then
24 | echo "No username defined." 1>&2
25 | echo $usage
26 | exit 1
27 | fi
28 |
29 | if [ ! -d "/home/$username" ]; then
30 | echo "User \"$username\" not found." 1>&2
31 | exit 1
32 | fi
33 |
34 | # Add sftp group if it doesn't already exist.
35 |
36 | groupadd -f sftp
37 |
38 | # Add user to the sftp group.
39 |
40 | usermod -aG sftp $username
41 |
42 | # Add nologin as a valid shell if it isn't already listed.
43 |
44 | NOLOGIN_PATH="`which nologin`"
45 |
46 | if ! grep -q "$NOLOGIN_PATH" /etc/shells; then
47 | echo "$NOLOGIN_PATH" >> /etc/shells
48 | fi
49 |
50 | # Set permissions.
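# sshd's ChrootDirectory requires the chroot target (/home/$username) to be owned by root and not writable by any other user.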
51 |
52 | mkdir -p /home/$username/sites
53 | chown $username:$username /home/$username/sites
54 | chown root:root /home/$username
55 | chmod 755 /home/$username
56 |
57 | # Set user shell
58 |
59 | chsh -s "$NOLOGIN_PATH" $username
60 |
61 | echo "User \"$username\" is now a chrooted SFTP user."
62 |
63 | exit 0
64 |
--------------------------------------------------------------------------------
/scripts/create_apache_virtualhost:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Create a new Apache Virtualhost and enable it.
4 |
5 | # Tested on:
6 | # Ubuntu 14.04
7 |
8 | # Usage:
9 | # overcast run myInstanceOrCluster create_apache_virtualhost --env "site_path=/path/to/site site_domain=domain.com"
10 |
11 | # set -x
12 | # set -e
13 |
14 | usage="Usage: overcast run myInstanceOrCluster create_apache_virtualhost --env \"site_path=/path/to/site site_domain=domain.com\""
15 |
16 | if [ "$(id -u)" != "0" ]; then
17 | echo "This script must be run as root." 1>&2
18 | echo $usage
19 | exit 1
20 | fi
21 |
22 | if [ -z "$site_path" ]; then
23 | echo "No site_path defined, no action taken."
24 | echo $usage
25 | exit 1
26 | fi
27 |
28 | if [ -z "$site_domain" ]; then
29 | echo "No site_domain defined, no action taken."
30 | echo $usage
31 | exit 1
32 | fi
33 |
34 | virtualhost_file="/etc/apache2/sites-available/$site_domain.conf"
35 |
36 | if [ -f "$virtualhost_file" ]; then
37 | echo "$virtualhost_file already exists, no action taken."
38 | exit 1
39 | fi
40 |
41 | mkdir -p $site_path
42 |
43 | cat >> $virtualhost_file << EOF
44 |
45 |
46 | AllowOverride All
47 | Options FollowSymLinks
48 | Require all granted
49 |
50 |
51 |
52 | ServerName $site_domain
53 | ServerAlias www.$site_domain
54 | ServerAdmin admin@$site_domain
55 | DocumentRoot $site_path
56 | ErrorLog \${APACHE_LOG_DIR}/error.log
57 | CustomLog \${APACHE_LOG_DIR}/access.log vhost_combined
58 |
59 |
60 | #
61 | # SSLEngine On
62 | # SSLCertificateFile /etc/apache2/ssl/apache.pem
63 | # SSLCertificateKeyFile /etc/apache2/ssl/apache.key
64 | # ServerAdmin admin@$site_domain
65 | # ServerName $site_domain
66 | # ServerAlias www.$site_domain
67 | # DocumentRoot $site_path
68 | # ErrorLog \${APACHE_LOG_DIR}/secure.error.log
69 | # CustomLog \${APACHE_LOG_DIR}/secure.access.log vhost_combined
70 | #
71 |
72 | EOF
73 |
74 | a2ensite "$site_domain.conf"
75 |
76 | service apache2 reload
77 |
78 | exit 0
79 |
--------------------------------------------------------------------------------
/scripts/harden_ssh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # Configures SSH to enhance security.
4 | # - Disallows password logins.
5 | # - Decreases LoginGraceTime.
6 | #
7 | # Tested on:
8 | # Ubuntu 22.04, 20.04, 14.04, 12.04
9 | # Debian 7.0
10 |
11 | # Usage:
12 | # overcast run myInstanceOrCluster harden_ssh
13 |
14 | # set -x
15 |
16 | if [ "$(id -u)" != "0" ]; then
17 | echo "This script must be run as root." 1>&2
18 | exit 1
19 | fi
20 |
21 | cp /etc/ssh/sshd_config "/etc/ssh/sshd_config_backup-$(date +%F-%T)"
22 |
23 | # Ref: https://www.sshaudit.com/hardening_guides.html#ubuntu_22_04_lts
24 |
25 | # Re-generate the RSA and ED25519 keys
26 | rm /etc/ssh/ssh_host_*
27 | ssh-keygen -t rsa -b 4096 -f /etc/ssh/ssh_host_rsa_key -N ""
28 | ssh-keygen -t ed25519 -f /etc/ssh/ssh_host_ed25519_key -N ""
29 |
30 | # Remove small Diffie-Hellman moduli
31 | awk '$5 >= 3071' /etc/ssh/moduli > /etc/ssh/moduli.safe
32 | mv /etc/ssh/moduli.safe /etc/ssh/moduli
33 |
34 | # Enable the RSA and ED25519 keys
35 | sed -i 's/^\#HostKey \/etc\/ssh\/ssh_host_\(rsa\|ed25519\)_key$/HostKey \/etc\/ssh\/ssh_host_\1_key/g' /etc/ssh/sshd_config
36 |
37 | echo -e "\n# Restrict key exchange, cipher, and MAC algorithms, as per sshaudit.com\n# hardening guide.\nKexAlgorithms sntrup761x25519-sha512@openssh.com,curve25519-sha256,curve25519-sha256@libssh.org,gss-curve25519-sha256-,diffie-hellman-group16-sha512,gss-group16-sha512-,diffie-hellman-group18-sha512,diffie-hellman-group-exchange-sha256\nCiphers chacha20-poly1305@openssh.com,aes256-gcm@openssh.com,aes128-gcm@openssh.com,aes256-ctr,aes192-ctr,aes128-ctr\nMACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com\nHostKeyAlgorithms ssh-ed25519,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519@openssh.com,sk-ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256,rsa-sha2-256-cert-v01@openssh.com" > /etc/ssh/sshd_config.d/ssh-audit_hardening.conf
38 |
39 | # Set LoginGraceTime to 30 seconds
40 | sed -i -r 's/^#?(LoginGraceTime)[[:space:]]+[0-9]+m?/\1 30/g' /etc/ssh/sshd_config
41 |
42 | # Disable PAM
43 | sed -i 's/UsePAM yes/UsePAM no/g' /etc/ssh/sshd_config
44 |
45 | # These are off by default in Ubuntu 20.04, leaving in for backwards compatibility.
46 | sed -i -r 's/^#?PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config
47 | sed -i 's/ChallengeResponseAuthentication yes/ChallengeResponseAuthentication no/g' /etc/ssh/sshd_config
48 |
49 | # Check the SSH configuration for errors
50 | sshd -t
51 | if [ $? -ne 0 ]; then
52 | echo "Error in SSH configuration. Not reloading SSH."
53 | exit 1
54 | fi
55 |
56 | # Using `reload` instead of `restart` to avoid killing existing sessions.
57 | service ssh reload
58 |
59 | exit 0
60 |
--------------------------------------------------------------------------------
/scripts/health:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Basic health statistics
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 9.10
8 | # Ubuntu 12.04
9 | # Ubuntu 14.04
10 |
11 | # Usage:
12 | # overcast health myInstanceOrCluster
13 |
14 | cpu_load=`cat /proc/loadavg`
15 | disk_space=`df -m / | tail -n 1`
16 | memory_usage=`free -m | grep Mem`
17 | cache_usage=`free -m | grep "buffers/cache"`
18 | swap_usage=`free -m | grep Swap`
19 | io_usage=`cat /sys/block/xvda/stat 2>/dev/null || cat /sys/block/vda/stat 2>/dev/null || cat /sys/block/sda/stat 2>/dev/null`
20 |
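# Emit the statistics as a JSON-formatted object, built line by line with echo.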
21 | echo {
22 |
23 | echo \"cpu_1min\": \"`echo $cpu_load | awk '{ print $1 }'`\",
24 | echo \"cpu_5min\": \"`echo $cpu_load | awk '{ print $2 }'`\",
25 | echo \"cpu_15min\": \"`echo $cpu_load | awk '{ print $3 }'`\",
26 |
27 | echo \"disk_total\": \"`echo $disk_space | awk '{ print $2 }'`\",
28 | echo \"disk_used\": \"`echo $disk_space | awk '{ print $3 }'`\",
29 | echo \"disk_free\": \"`echo $disk_space | awk '{ print $4 }'`\",
30 |
31 | echo \"mem_total\": \"`echo $memory_usage | awk '{ print $2 }'`\",
32 | echo \"mem_used\": \"`echo $memory_usage | awk '{ print $3 }'`\",
33 | echo \"mem_free\": \"`echo $memory_usage | awk '{ print $4 }'`\",
34 |
35 | echo \"cache_used\": \"`echo $cache_usage | awk '{ print $3 }'`\",
36 | echo \"cache_free\": \"`echo $cache_usage | awk '{ print $4 }'`\",
37 |
38 | echo \"swap_total\": \"`echo $swap_usage | awk '{ print $2 }'`\",
39 | echo \"swap_used\": \"`echo $swap_usage | awk '{ print $3 }'`\",
40 | echo \"swap_free\": \"`echo $swap_usage | awk '{ print $4 }'`\",
41 |
42 | echo \"tcp\": \"`wc -l /proc/net/tcp | awk '{ print $1 }'`\",
43 |
44 | echo \"rx_bytes\": \"`cat /sys/class/net/eth0/statistics/rx_bytes`\",
45 | echo \"tx_bytes\": \"`cat /sys/class/net/eth0/statistics/tx_bytes`\",
46 |
47 | echo \"io_reads\": \"`echo $io_usage | awk '{ print $3 }'`\",
48 | echo \"io_writes\": \"`echo $io_usage | awk '{ print $7 }'`\",
49 |
50 | echo \"processes\": [
51 | echo `ps aux | sed '1 d' | sed 's/"//g' | sed 's/\s\s*/ /g' | cut -d' ' -f1,2,3,4,10,11- | sed -e 's/\(.*\)/"\1",/'`
52 | echo \"\"
53 | echo ]
54 |
55 | echo }
56 |
57 | exit 0
58 |
--------------------------------------------------------------------------------
/scripts/install/apache:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Apache 2.x.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 14.04
8 |
9 | # Usage:
10 | # overcast run myInstanceOrCluster install/apache
11 |
12 | # set -x
13 |
14 | if [ "$(id -u)" != "0" ]; then
15 | echo "This script must be run as root." 1>&2
16 | exit 1
17 | fi
18 |
19 | apt-get install -y apache2
20 |
21 | # Create self-signed SSL certificate
22 | # mkdir /etc/apache2/ssl
23 | # openssl req -new -x509 -days 365 -nodes -out /etc/apache2/ssl/apache.pem -keyout /etc/apache2/ssl/apache.key
24 |
25 | # Disable default Apache virtualhost
26 | a2dissite default
27 |
28 | # Enable rewrite and SSL modules
29 | a2enmod rewrite ssl
30 |
31 | service apache2 restart
32 |
33 | exit 0
34 |
--------------------------------------------------------------------------------
/scripts/install/core:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Full system upgrade and install core packages.
4 |
5 | # Verified on:
6 | # Ubuntu 20.04
7 |
8 | # Usage:
9 | # overcast run myInstanceOrCluster install/core
10 |
11 | # set -x
12 |
13 | if [ "$(id -u)" != "0" ]; then
14 | echo "This script must be run as root." 1>&2
15 | exit 1
16 | fi
17 |
18 | # Install core packages
19 |
20 | # Update the package list
21 | apt-get update
22 |
23 | # Upgrade all installed packages
24 | # --force-confdef tells dpkg to use the default action
25 | # --force-confold tells it to keep the current version of a configuration file if one has been modified
26 | DEBIAN_FRONTEND=noninteractive apt-get upgrade -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold"
27 |
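# Pre-answer the iptables-persistent prompts so the install below stays non-interactive.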
28 | # Ref: https://askubuntu.com/a/340846
29 | echo iptables-persistent iptables-persistent/autosave_v4 boolean true | sudo debconf-set-selections
30 | echo iptables-persistent iptables-persistent/autosave_v6 boolean true | sudo debconf-set-selections
31 |
32 | apt-get install -y git-core wget less build-essential iptables-persistent
33 |
34 | exit 0
35 |
--------------------------------------------------------------------------------
/scripts/install/couchdb:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install CouchDB.
4 |
5 | # Tested on:
6 | # Ubuntu 12.04
7 | # Ubuntu 14.04
8 |
9 | # Usage:
10 | # overcast run myInstanceOrCluster install/couchdb
11 |
12 | # set -x
13 |
14 | if [ "$(id -u)" != "0" ]; then
15 | echo "This script must be run as root." 1>&2
16 | exit 1
17 | fi
18 |
19 | apt-get install -y couchdb
20 |
21 | exit 0
22 |
--------------------------------------------------------------------------------
/scripts/install/discourse_docker:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install a Discourse Docker container.
4 |
5 | # Tested on:
6 | # Ubuntu 12.04
7 | # Ubuntu 14.04
8 |
9 | # References:
10 | # https://github.com/discourse/discourse/blob/master/docs/INSTALL-digital-ocean.md
11 | # https://github.com/eviltrout/discourse-droplet
12 |
13 | # Usage:
14 | # overcast run myInstanceOrCluster install/discourse_docker \
15 | # --env "discourseHostname=\"$discourseHostname\" \
16 | # discourseDevEmails=\"$discourseDevEmails\" \
17 | # discourseSMTPAddress=\"$discourseSMTPAddress\" \
18 | # discourseSMTPPort=\"$discourseSMTPPort\" \
19 | # discourseSMTPUsername=\"$discourseSMTPUsername\" \
20 | # discourseSMTPPassword=\"$discourseSMTPPassword\""
21 |
22 | # See also:
23 | # recipes/discourse-docker
24 |
25 | # set -x
26 |
27 | if [ "$(id -u)" != "0" ]; then
28 | echo "This script must be run as root." 1>&2
29 | exit 1
30 | fi
31 |
32 | apt-get install -y cgroup-lite
33 |
34 | mkdir /var/docker
35 | git clone https://github.com/discourse/discourse_docker.git /var/docker
36 | cd /var/docker
37 | cp samples/standalone.yml containers/app.yml
38 |
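# Fill in the Discourse settings in containers/app.yml with the values passed via --env.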
39 | sed -i "s/DISCOURSE_DEVELOPER_EMAILS\: .*/DISCOURSE_DEVELOPER_EMAILS\: \'$discourseDevEmails\'/g" containers/app.yml
40 | sed -i "s/DISCOURSE_HOSTNAME\: .*/DISCOURSE_HOSTNAME\: \'$discourseHostname\'/g" containers/app.yml
41 | sed -i "s/DISCOURSE_SMTP_ADDRESS\: .*/DISCOURSE_SMTP_ADDRESS\: \'$discourseSMTPAddress\'/g" containers/app.yml
42 | sed -i "s/\#\? \?DISCOURSE_SMTP_PORT\: .*/DISCOURSE_SMTP_PORT\: \'$discourseSMTPPort\'/g" containers/app.yml
43 | sed -i "s/\#\? \?DISCOURSE_SMTP_USER_NAME\: .*/DISCOURSE_SMTP_USER_NAME\: \'$discourseSMTPUsername\'/g" containers/app.yml
44 | sed -i "s/\#\? \?DISCOURSE_SMTP_PASSWORD\: .*/DISCOURSE_SMTP_PASSWORD\: \'$discourseSMTPPassword\'/g" containers/app.yml
45 |
46 | ./launcher bootstrap app --skip-prereqs
47 |
48 | # Running this separately in recipes/discourse-docker.
49 | # ./launcher start app --skip-prereqs
50 |
51 | exit 0
52 |
--------------------------------------------------------------------------------
/scripts/install/docker:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Docker.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster install/docker
12 |
13 | # set -x
14 |
15 | if [ "$(id -u)" != "0" ]; then
16 | echo "This script must be run as root." 1>&2
17 | exit 1
18 | fi
19 |
20 | wget -qO- https://get.docker.com/ | sh
21 |
22 | exit 0
23 |
--------------------------------------------------------------------------------
/scripts/install/emacs:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Emacs.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster install/emacs
12 |
13 | # set -x
14 |
15 | if [ "$(id -u)" != "0" ]; then
16 | echo "This script must be run as root." 1>&2
17 | exit 1
18 | fi
19 |
20 | # Install emacs
21 |
22 | apt-get install -y emacs
23 |
24 | exit 0
25 |
--------------------------------------------------------------------------------
/scripts/install/haproxy:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install HAProxy 2.8 (LTS)
4 |
5 | # Verified on:
6 | # Ubuntu 20.04
7 |
8 | # Usage:
9 | # overcast run myInstanceOrCluster install/haproxy
10 |
11 | # set -x
12 |
13 | if [ "$(id -u)" != "0" ]; then
14 | echo "This script must be run as root." 1>&2
15 | exit 1
16 | fi
17 |
18 | add-apt-repository -y ppa:vbernat/haproxy-2.8
19 | apt-get update
20 | apt-get install -y haproxy
21 |
22 | exit 0
23 |
--------------------------------------------------------------------------------
/scripts/install/imagemagick:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install ImageMagick.
4 |
5 | # Tested on:
6 | # Ubuntu 14.04
7 |
8 | # Usage:
9 | # overcast run myInstanceOrCluster install/imagemagick
10 |
11 | # set -x
12 |
13 | if [ "$(id -u)" != "0" ]; then
14 | echo "This script must be run as root." 1>&2
15 | exit 1
16 | fi
17 |
18 | apt-get install -y imagemagick
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/scripts/install/iptables:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Configure iptables.
4 |
5 | # Verified on:
6 | # Ubuntu 20.04
7 | # Ubuntu 14.04
8 | # Ubuntu 12.04
9 | # Debian 7.0
10 |
11 | # Usage:
12 | # overcast expose myInstanceOrCluster 22 80 443 3000
13 | # overcast expose myInstanceOrCluster 22 80 443 3000 --allowlist "1.1.1.1 2.2.2.2 192.168.0.0/16"
14 |
15 | # set -x
16 |
17 | if [ "$(id -u)" != "0" ]; then
18 | echo "This script must be run as root." 1>&2
19 | exit 1
20 | fi
21 |
22 | current_ssh_port=`cat /etc/ssh/sshd_config | grep 'Port ' | grep -v 'GatewayPorts' | awk '{ print $2 }'`
23 |
24 | if [ -z "$exposed_ports" ]; then
25 | echo "No exposed ports defined, no action taken."
26 | exit 1
27 | fi
28 |
29 | ssh_port_not_included=true
30 | for exposed_port in $exposed_ports; do
31 | if [ "$current_ssh_port" == "$exposed_port" ]; then
32 | ssh_port_not_included=false
33 | fi
34 | done
35 |
36 | if $ssh_port_not_included; then
37 | echo "Current SSH port ($current_ssh_port) not included in exposed port list, no action taken."
38 | exit 1
39 | fi
40 |
41 | # Flush all current rules from iptables
42 | iptables -F
43 |
44 | # Accept packets belonging to established and related connections
45 | iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT
46 |
47 | # Localhost
48 | iptables -A INPUT -i lo -j ACCEPT
49 |
50 | for exposed_port in $exposed_ports; do
51 | port_allowlist_attribute="allowlist_$exposed_port"
52 | port_allowlist="${!port_allowlist_attribute}"
53 | if [ -z "$allowlist" ] && [ -z "$port_allowlist" ]; then
54 | iptables -A INPUT -p tcp --dport $exposed_port -j ACCEPT
55 | else
56 | if [ -z "$port_allowlist" ]; then
57 | for ip in $allowlist; do
58 | iptables -A INPUT -p tcp -s $ip --dport $exposed_port -j ACCEPT
59 | done
60 | else
61 | for ip in $port_allowlist; do
62 | iptables -A INPUT -p tcp -s $ip --dport $exposed_port -j ACCEPT
63 | done
64 | fi
65 | fi
66 | done
67 |
68 | # Ping
69 | # iptables -A INPUT -p icmp -m icmp --icmp-type 8 -j ACCEPT
70 |
71 | # Log Dropped Connections
72 | # iptables -A INPUT -m limit --limit 30/minute -j LOG --log-level 7 --log-prefix "Dropped by firewall: "
73 | # iptables -A INPUT -j LOG --log-level 7 --log-prefix "Dropped by firewall: "
74 |
75 | # Set default policies for INPUT, FORWARD and OUTPUT chains
76 | iptables -P INPUT DROP
77 | iptables -P FORWARD DROP
78 | iptables -P OUTPUT ACCEPT
79 |
80 | # List rules
81 | iptables -L -v
82 |
83 | mkdir -p /etc/iptables
84 | iptables-save > /etc/iptables/rules.v4
85 |
86 | exit 0
87 |
--------------------------------------------------------------------------------
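The least obvious part of install/iptables above is the per-port allowlist lookup, which relies on bash indirect expansion. Below is a minimal sketch of just that lookup, using hypothetical ports and addresses and with iptables replaced by echo so it can be run safely anywhere; in the real script these variables arrive as environment variables set by "overcast expose".

    #!/bin/bash
    # Hypothetical values; normally injected by "overcast expose".
    exposed_ports="22 80 6379"
    allowlist=""               # global allowlist (empty means allow from anywhere)
    allowlist_6379="1.2.3.4"   # per-port allowlist, Redis only

    for exposed_port in $exposed_ports; do
      port_allowlist_attribute="allowlist_$exposed_port"
      port_allowlist="${!port_allowlist_attribute}"   # bash indirect expansion
      if [ -z "$allowlist" ] && [ -z "$port_allowlist" ]; then
        echo "iptables -A INPUT -p tcp --dport $exposed_port -j ACCEPT"
      else
        # The per-port allowlist takes precedence over the global one,
        # mirroring the branches in the script above.
        for ip in ${port_allowlist:-$allowlist}; do
          echo "iptables -A INPUT -p tcp -s $ip --dport $exposed_port -j ACCEPT"
        done
      fi
    done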
/scripts/install/mysql:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install MySQL.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster install/mysql
12 |
13 | # set -x
14 |
15 | if [ "$(id -u)" != "0" ]; then
16 | echo "This script must be run as root." 1>&2
17 | exit 1
18 | fi
19 |
20 | DEBIAN_FRONTEND=noninteractive apt-get -q -y install mysql-server libmysqld-dev
21 |
22 | exit 0
23 |
--------------------------------------------------------------------------------
/scripts/install/nginx:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Nginx.
4 |
5 | # Verified on:
6 | # Ubuntu 20.04
7 | # Ubuntu 14.04
8 | # Ubuntu 12.04
9 | # Debian 7.0
10 |
11 | # Usage:
12 | # overcast run myInstanceOrCluster install/nginx
13 |
14 | # set -x
15 |
16 | if [ "$(id -u)" != "0" ]; then
17 | echo "This script must be run as root." 1>&2
18 | exit 1
19 | fi
20 |
21 | apt-get install -y nginx
22 |
23 | exit 0
24 |
--------------------------------------------------------------------------------
/scripts/install/nodejs:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Node LTS
4 | # https://github.com/joyent/node/wiki/Installing-Node.js-via-package-manager
5 |
6 | # Verified on:
7 | # Ubuntu 20.04
8 |
9 | # Usage:
10 | # overcast run myInstanceOrCluster install/nodejs
11 |
12 | # set -x
13 |
14 | if [ "$(id -u)" != "0" ]; then
15 | echo "This script must be run as root." 1>&2
16 | exit 1
17 | fi
18 |
19 | curl -fsSL https://deb.nodesource.com/setup_lts.x | sudo -E bash -
20 | apt-get install -y nodejs
21 |
22 | exit 0
23 |
--------------------------------------------------------------------------------
/scripts/install/phantomjs:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install PhantomJS 1.9.7.
4 | # http://phantomjs.org/download.html
5 |
6 | # Tested on:
7 | # Debian 7.0
8 | # Ubuntu 12.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster install/phantomjs
12 |
13 | # set -x
14 |
15 | if [ "$(id -u)" != "0" ]; then
16 | echo "This script must be run as root." 1>&2
17 | exit 1
18 | fi
19 |
20 | apt-get install -y libfontconfig
21 |
22 | cd /usr/local/share
23 | wget https://bitbucket.org/ariya/phantomjs/downloads/phantomjs-1.9.7-linux-x86_64.tar.bz2
24 | tar xvf phantomjs-1.9.7-linux-x86_64.tar.bz2
25 | rm -f phantomjs-1.9.7-linux-x86_64.tar.bz2
26 | ln -s /usr/local/share/phantomjs-1.9.7-linux-x86_64/bin/phantomjs /usr/local/bin/phantomjs
27 |
28 | exit 0
29 |
--------------------------------------------------------------------------------
/scripts/install/php:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install PHP 5.x.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 14.04
8 |
9 | # Usage:
10 | # overcast run myInstanceOrCluster install/php
11 |
12 | # set -x
13 |
14 | if [ "$(id -u)" != "0" ]; then
15 | echo "This script must be run as root." 1>&2
16 | exit 1
17 | fi
18 |
19 | apt-get install -y php5 php5-mysql php5-gd libapache-mod-ssl libapache2-mod-php5 php5-curl
20 |
21 | exit 0
22 |
--------------------------------------------------------------------------------
/scripts/install/postgres:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Postgres
4 |
5 | # Verified on:
6 | # Ubuntu 20.04
7 |
8 | # Usage:
9 | # overcast run myInstanceOrCluster install/postgres
10 |
11 | set -x
12 |
13 | if [ "$(id -u)" != "0" ]; then
14 | echo "This script must be run as root." 1>&2
15 | exit 1
16 | fi
17 |
18 | # Ref: https://www.digitalocean.com/community/tutorials/how-to-install-postgresql-on-ubuntu-20-04-quickstart
19 | apt-get install -y postgresql postgresql-contrib
20 | systemctl start postgresql.service
21 |
--------------------------------------------------------------------------------
/scripts/install/redis:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install Redis.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster install/redis
12 |
13 | # set -x
14 |
15 | if [ "$(id -u)" != "0" ]; then
16 | echo "This script must be run as root." 1>&2
17 | exit 1
18 | fi
19 |
20 | apt-get install -y redis-server
21 |
22 | exit 0
23 |
--------------------------------------------------------------------------------
/scripts/install/sftp:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Configure chrooted SFTP.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Once configured, users requiring SFTP access must be added to the "sftp" group.
11 |
12 | # Usage:
13 | # overcast run myInstanceOrCluster install/sftp
14 | # overcast run myInstanceOrCluster add_user --env "username=myuser"
15 | # overcast run myInstanceOrCluster chroot_user --env "username=myuser"
16 |
17 | # set -x
18 |
19 | if [ "$(id -u)" != "0" ]; then
20 | echo "This script must be run as root." 1>&2
21 | exit 1
22 | fi
23 |
24 | groupadd -f sftp
25 |
26 | sed -i "s/^Subsystem sftp \/usr\/lib\/openssh\/sftp\-server/# Subsystem sftp \/usr\/lib\/openssh\/sftp-server/g" /etc/ssh/sshd_config
27 |
28 | if ! grep -q 'Subsystem sftp internal-sftp' /etc/ssh/sshd_config; then
29 | cat >> /etc/ssh/sshd_config << EOF
30 |
31 | Subsystem sftp internal-sftp
32 |
33 | Match group sftp
34 | ChrootDirectory %h
35 | X11Forwarding no
36 | AllowTcpForwarding no
37 | ForceCommand internal-sftp
38 | EOF
39 |
40 | echo "Chrooted SFTP configured."
41 | else
42 | echo "Chrooted SFTP already configured."
43 | fi
44 |
45 | service ssh restart
46 |
47 | exit 0
48 |
--------------------------------------------------------------------------------
/scripts/install/vim:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Install vim.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast run myInstanceOrCluster install/vim
12 |
13 | # set -x
14 |
15 | if [ "$(id -u)" != "0" ]; then
16 | echo "This script must be run as root." 1>&2
17 | exit 1
18 | fi
19 |
20 | # Install vim
21 |
22 | apt-get install -y vim
23 |
24 | exit 0
25 |
--------------------------------------------------------------------------------
/scripts/list_exposed_ports:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # List exposed TCP ports.
4 |
5 | # Tested on:
6 | # Debian 7.0
7 | # Ubuntu 12.04
8 | # Ubuntu 14.04
9 |
10 | # Usage:
11 | # overcast exposed myInstanceOrCluster
12 |
13 | if [ "$(id -u)" != "0" ]; then
14 | echo "This script must be run as root." 1>&2
15 | exit 1
16 | fi
17 |
18 | iptables -L -n | grep 'ACCEPT' | grep 'tcp dpt:' | awk '{ print $7 }' | tr -d 'dpt:' | paste -sd ' '
19 |
20 | exit 0
21 |
--------------------------------------------------------------------------------
/scripts/set_redis_password:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Configure Redis to use password authentication. If redisPassword is not set,
4 | # an autogenerated 256-bit password will be returned by the script.
5 |
6 | # Tested on:
7 | # Debian 7.0
8 | # Ubuntu 12.04
9 | # Ubuntu 14.04
10 |
11 | # Usage:
12 | # overcast run myInstanceOrCluster set_redis_password
13 | # overcast run myInstanceOrCluster set_redis_password --env "redisPassword=myPredefinedPassword"
14 |
15 | # set -x
16 |
17 | if [ "$(id -u)" != "0" ]; then
18 | echo "This script must be run as root." 1>&2
19 | exit 1
20 | fi
21 |
22 | if [ -z "$redisPassword" ]; then
23 | redisPassword=`cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 43 | head -n 1`
24 | echo "Using autogenerated password:"
25 | echo "$redisPassword"
26 | fi
27 |
28 | sed -i "s/#\? \?requirepass .*/requirepass $redisPassword/g" /etc/redis/redis.conf
29 |
30 | # Allow remote connections to Redis:
31 | sed -i "s/#\? \?bind 127.0.0.1/bind 0.0.0.0/g" /etc/redis/redis.conf
32 |
33 | service redis-server restart
34 |
35 | exit 0
36 |
--------------------------------------------------------------------------------
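Because the script above also rebinds Redis to 0.0.0.0, a quick sanity check from a workstation is to ping the server with and without the password. Host and password here are hypothetical, and this assumes port 6379 has been exposed and redis-cli is installed locally:

    redis-cli -h 1.2.3.4 -a myPredefinedPassword ping
    # Expected reply: PONG
    redis-cli -h 1.2.3.4 ping
    # Expected reply: a NOAUTH authentication error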
/scripts/set_script_var:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Helper script to set a named variable in a remote file.
4 |
5 | # Tested on:
6 | # Ubuntu 14.04
7 |
8 | # Usage:
9 | # overcast scriptvar myInstanceOrCluster /path/to/remote/file "var_name" "var_value"
10 |
11 | set -x
12 |
13 | sed -i "s/^$VAR_NAME=.*$/$VAR_NAME=\"$VAR_VALUE\"/g" $VAR_FILENAME
14 |
--------------------------------------------------------------------------------
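A standalone illustration of what that sed does, using a hypothetical local file and values; in normal use VAR_FILENAME, VAR_NAME and VAR_VALUE are supplied by the "overcast scriptvar" command referenced in the Usage comment above:

    echo 'MY_API_TOKEN="old_value"' > /tmp/example.sh
    VAR_FILENAME=/tmp/example.sh VAR_NAME=MY_API_TOKEN VAR_VALUE=abc123 \
      bash -c 'sed -i "s/^$VAR_NAME=.*$/$VAR_NAME=\"$VAR_VALUE\"/g" $VAR_FILENAME'
    cat /tmp/example.sh
    # MY_API_TOKEN="abc123"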
/scripts/swap_usage_by_pid:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Get current swap usage for all running processes
4 | # Erik Ljungstrom 27/05/2011
5 | # Modified by Mikko Rantalainen 2012-08-09
6 | # Pipe the output to "sort -nk3" to get sorted output
7 | # http://stackoverflow.com/questions/479953/how-to-find-out-which-processes-are-swapping-in-linux
8 |
9 | # Tested on:
10 | # Ubuntu 12.04
11 | # Ubuntu 14.04
12 |
13 | # Usage:
14 | # overcast run myInstanceOrCluster swap_usage_by_pid
15 |
16 | SUM=0
17 | OVERALL=0
18 | for DIR in `find /proc/ -maxdepth 1 -type d -regex "^/proc/[0-9]+"`
19 | do
20 | PID=`echo $DIR | cut -d / -f 3`
21 | PROGNAME=`ps -p $PID -o command --no-headers`
22 | for SWAP in `grep Swap $DIR/smaps 2>/dev/null | awk '{ print $2 }'`
23 | do
24 | let SUM=$SUM+$SWAP
25 | done
26 | if (( $SUM > 0 )); then
27 | echo "PID=$PID swapped $SUM KB $PROGNAME"
28 | fi
29 | let OVERALL=$OVERALL+$SUM
30 | SUM=0
31 | done
32 | echo "Overall swap used: $OVERALL KB"
33 |
34 | exit 0
35 |
--------------------------------------------------------------------------------
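As the header comment suggests, the output can be piped to sort by swap usage; using the --mr flag of "overcast run" suppresses the per-server prefix so the column positions stay intact (instance name here is hypothetical):

    overcast run app-01 swap_usage_by_pid --mr | sort -nk3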
/src/cli.js:
--------------------------------------------------------------------------------
1 | import minimist from 'minimist';
2 | import chalk from 'chalk';
3 |
4 | import * as utils from './utils.js';
5 | import * as store from './store.js';
6 | import * as log from './log.js';
7 | import allCommands from './commands/index.js';
8 |
9 | const DEFAULT_COMMAND = 'help';
10 |
11 | export function convertArgsToString(argv) {
12 | argv = argv.slice(2);
13 |
14 | return argv.map(chunk => {
15 | if (chunk.includes(' ')) {
16 | return `"${chunk}"`;
17 | } else {
18 | return chunk;
19 | }
20 | }).join(' ');
21 | }
22 |
23 | export function init(argString = '', nextFn = () => {}) {
24 | if (!argString) {
25 | argString = convertArgsToString(process.argv);
26 | }
27 | if (!argString) {
28 | argString = DEFAULT_COMMAND;
29 | }
30 | store.setArgString(argString);
31 | utils.findConfig(() => {
32 | utils.createKeyIfMissing(() => {
33 | execute(store.getArgString(), nextFn);
34 | });
35 | });
36 | }
37 |
38 | export function execute(argString, nextFn = () => {}) {
39 | if (utils.isTestRun()) {
40 | log.alert(chalk.bgRed('TEST RUN: Be aware that some things are mocked for testing.'));
41 | }
42 |
43 | if (!argString) {
44 | return utils.die('Nothing to execute (cli.execute).');
45 | }
46 |
47 | let argArray = utils.tokenize(argString);
48 |
49 | const args = minimist(argArray);
50 | utils.argShift(args, 'command');
51 |
52 | const command = allCommands[args.command] || allCommands.help;
53 |
54 | if ((args._[0] === 'help' || args.help) && command.help) {
55 | command.help(args);
56 | } else {
57 | if (command.commands) {
58 | const matchingCommand = findMatchingCommand(command.commands, args);
59 | if (matchingCommand) {
60 | run(matchingCommand, args, nextFn);
61 | } else {
62 | missingCommand(command, args, nextFn);
63 | }
64 | } else {
65 | return utils.die(`${args.command} is missing commands array (cli.execute).`);
66 | }
67 | }
68 | }
69 |
70 | export function findMatchingCommand(commands, args) {
71 | const names = Object.keys(commands);
72 | if (names.length === 1) {
73 | return commands[names[0]];
74 | } else {
75 | utils.argShift(args, 'subcommand');
76 | return commands[args.subcommand];
77 | }
78 | }
79 |
80 | export function run(command, args, nextFn) {
81 | let shortCircuit = false;
82 | args = args || { _: [] };
83 |
84 | if (args._[0] === 'help') {
85 | return compileHelp(command);
86 | }
87 |
88 | (command.required || []).forEach((required) => {
89 | if (utils.isString(required)) {
90 | required = { name: required };
91 | }
92 |
93 | const key = required.varName || required.name;
94 | if (required.greedy) {
95 | args[key] = args._.join(' ');
96 | } else if (required.raw) {
97 | args[key] = args._.shift();
98 | } else {
99 | utils.argShift(args, key);
100 | }
101 |
102 | if (!args[key] && !required.optional) {
103 | log.br();
104 | log.failure(`Missing [${required.name}] argument.`);
105 | shortCircuit = true;
106 | missingArguments(command);
107 | }
108 |
109 | if (args[key]) {
110 | utils.forceArray(required.filters).forEach((filter) => {
111 | if (shortCircuit !== true && utils.isFunction(filter)) {
112 | // Allow filters to short-circuit a command run without
113 | // needing process.exit.
114 | if (filter(args[key], args) === false) {
115 | shortCircuit = true;
116 | return false;
117 | }
118 | }
119 | });
120 | }
121 | });
122 |
123 | if (shortCircuit) {
124 | return nextFn();
125 | }
126 |
127 | command.run(args, nextFn);
128 | }
129 |
130 | export function missingArguments(command) {
131 | compileHelp(command);
132 | if (utils.isTestRun()) {
133 | return false;
134 | } else {
135 | process.exit(1);
136 | }
137 | }
138 |
139 | export function missingCommand({banner, commands}, args, nextFn = () => {}) {
140 | let exitCode = 0;
141 | if (args.subcommand && args.subcommand !== 'help' && args.command !== 'help') {
142 | log.failure('Missing or unknown command.');
143 | exitCode = 1;
144 | }
145 |
146 | if (banner) {
147 | printLines(banner);
148 | }
149 |
150 | if (Object.keys(commands).length > 1) {
151 | log.br();
152 | log.log(`overcast ${args.command} [command] help`);
153 | printLines('View extended help.', { color: 'cyan', pad: 2 });
154 | }
155 |
156 | utils.eachObject(commands, ({ alias, usage, description }) => {
157 | if (alias === true) {
158 | return;
159 | }
160 |
161 | log.br();
162 | printLines(usage);
163 | printLines(description, { color: 'cyan', pad: 2 });
164 | });
165 |
166 | if (utils.isTestRun()) {
167 | nextFn();
168 | } else {
169 | process.exit(exitCode);
170 | }
171 | }
172 |
173 | export function compileHelp(command, skipFirstLine) {
174 | ['usage', 'description', 'options', 'examples'].forEach((key) => {
175 | if (command[key]) {
176 | // Used by bin/docs:
177 | if (skipFirstLine !== true) {
178 | log.br();
179 | }
180 | skipFirstLine = false;
181 | if (key === 'options') {
182 | printCommandOptions(command.options);
183 | } else {
184 | log.log(`${utils.capitalize(key)}:`);
185 | printLines(command[key], { color: 'cyan', pad: 2 });
186 | }
187 | }
188 | });
189 | }
190 |
191 | export function printCommandOptions(options) {
192 | let hasDefaults = false;
193 | const maxLength = utils.maxValueFromArray(options, (option) => {
194 | if (option.default) {
195 | hasDefaults = true;
196 | }
197 | return option.usage.length;
198 | }).usage.length + 4;
199 | let headline = 'Options:';
200 | if (hasDefaults) {
201 | headline = `${utils.padRight(headline, maxLength + 2)}Defaults:`;
202 | }
203 | log.log(headline);
204 | options.forEach((option) => {
205 | log.info(` ${utils.padRight(option.usage, maxLength)}${option.default || ''}`);
206 | });
207 | }
208 |
209 | export function printLines(strOrArray, options) {
210 | options = options || {};
211 | utils.forceArray(strOrArray).forEach((str) => {
212 | if (options.pad) {
213 | utils.times(options.pad, () => {
214 | str = ` ${str}`;
215 | });
216 | }
217 | if (options.color) {
218 | log.log(chalk[options.color](str));
219 | } else {
220 | log.log(str);
221 | }
222 | });
223 | }
224 |
--------------------------------------------------------------------------------
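To make the dispatch flow above concrete, here is an informal trace of one invocation against the cluster command defined further down in src/commands/cluster.js:

    # overcast cluster rename db db-old
    #
    # convertArgsToString()  -> 'cluster rename db db-old'
    # execute()              -> argShift moves 'cluster' into args.command and
    #                           resolves the cluster command module
    # findMatchingCommand()  -> argShift moves 'rename' into args.subcommand
    # run()                  -> remaining positionals are consumed in the order of
    #                           the "required" spec: name='db' (checked by
    #                           findMatchingCluster), newName='db-old' (checked by
    #                           shouldBeNewCluster), then commands.rename.run() runs
    overcast cluster rename db db-old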
/src/commands/aliases.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import { log, alert } from '../log.js';
3 |
4 | export const commands = {};
5 |
6 | commands.aliases = {
7 | name: 'aliases',
8 | usage: ['overcast aliases'],
9 | description: [
10 | 'Return a list of bash aliases for SSHing to your instances.',
11 | '',
12 | 'To use, add this to your .bash_profile:',
13 | ' test -f $HOME/.overcast_aliases && source $HOME/.overcast_aliases',
14 | '',
15 | 'And then create the .overcast_aliases file:',
16 | ' overcast aliases > $HOME/.overcast_aliases',
17 | '',
18 | 'Or to automatically refresh aliases in every new terminal window',
19 | '(which will add a couple hundred milliseconds to your startup time),',
20 | 'add this to your .bash_profile:',
21 | ' overcast aliases > $HOME/.overcast_aliases',
22 | ' source $HOME/.overcast_aliases'
23 | ],
24 | run: (args, nextFn) => {
25 | const clusters = utils.getClusters();
26 |
27 | if (Object.keys(clusters).length > 0) {
28 | utils.eachObject(clusters, ({ instances }) => {
29 | utils.eachObject(instances, instance => {
30 | log(`alias ssh.${instance.name}="ssh -i ${utils.normalizeKeyPath(instance.ssh_key)} -p ${instance.ssh_port} ${instance.user}@${instance.ip}"`);
31 | });
32 | });
33 | } else {
34 | log('# No overcast clusters defined');
35 | }
36 |
37 | nextFn();
38 | }
39 | };
40 |
--------------------------------------------------------------------------------
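For reference, the output is one bash alias per instance, following the template in the run function above; the instance name, key path and address below are hypothetical:

    alias ssh.app-01="ssh -i /path/to/.overcast/keys/overcast.key -p 22 root@1.2.3.4"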
/src/commands/cluster.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as filters from '../filters.js';
3 | import * as log from '../log.js';
4 |
5 | export const commands = {};
6 |
7 | commands.count = {
8 | name: 'count',
9 | usage: ['overcast cluster count [name]'],
10 | description: 'Return the number of instances in a cluster.',
11 | examples: [
12 | '$ overcast cluster count db',
13 | '> 0',
14 | '$ overcast instance create db.01 --cluster db',
15 | '> ...',
16 | '$ overcast cluster count db',
17 | '> 1'
18 | ],
19 | required: [{ name: 'name', filters: filters.findMatchingCluster }],
20 | run: ({ cluster }, nextFn) => {
21 | log.log(Object.keys(cluster.instances).length);
22 | nextFn();
23 | }
24 | };
25 |
26 | commands.add = {
27 | name: 'add',
28 | usage: ['overcast cluster add [name]'],
29 | description: 'Adds a new cluster.',
30 | examples: '$ overcast cluster add db',
31 | required: [{ name: 'name', filters: filters.shouldBeNewCluster }],
32 | run: ({ name }, nextFn) => {
33 | const clusters = utils.getClusters();
34 | // We shouldn't have to guard against an existing cluster here,
35 | // because of the shouldBeNewCluster filter above.
36 | clusters[name] = { instances: {} };
37 |
38 | utils.saveClusters(clusters, () => {
39 | log.success(`Cluster "${name}" has been added.`);
40 | nextFn();
41 | });
42 | }
43 | };
44 |
45 | commands.rename = {
46 | name: 'rename',
47 | usage: ['overcast cluster rename [name] [new-name]'],
48 | description: 'Renames a cluster.',
49 | examples: '$ overcast cluster rename app-cluster app-cluster-renamed',
50 | required: [
51 | { name: 'name', filters: filters.findMatchingCluster },
52 | { name: 'new-name', varName: 'newName', filters: filters.shouldBeNewCluster }
53 | ],
54 | run: ({ newName, name }, nextFn) => {
55 | const clusters = utils.getClusters();
56 |
57 | clusters[newName] = clusters[name];
58 | delete clusters[name];
59 |
60 | utils.saveClusters(clusters, () => {
61 | log.success(`Cluster "${name}" has been renamed to "${newName}".`);
62 | nextFn();
63 | });
64 | }
65 | };
66 |
67 | commands.remove = {
68 | name: 'remove',
69 | usage: ['overcast cluster remove [name]'],
70 | description: [
71 | 'Removes a cluster from the index. If the cluster has any instances',
72 | 'attached to it, they will be moved to an "orphaned" cluster.'
73 | ],
74 | examples: '$ overcast cluster remove db',
75 | required: [
76 | { name: 'name', filters: filters.findMatchingCluster }
77 | ],
78 | run: ({ name }, nextFn) => {
79 | const clusters = utils.getClusters();
80 |
81 | let orphaned = 0;
82 | if (clusters[name].instances && Object.keys(clusters[name].instances).length > 0) {
83 | orphaned = Object.keys(clusters[name].instances).length;
84 | clusters.orphaned = clusters.orphaned || { instances: {} };
85 | Object.assign(clusters.orphaned.instances, clusters[name].instances);
86 | }
87 |
88 | delete clusters[name];
89 |
90 | utils.saveClusters(clusters, () => {
91 | log.success(`Cluster "${name}" has been removed.`);
92 | if (orphaned) {
93 | if (name === 'orphaned') {
94 | log.alert(`The ${orphaned} instance(s) in the "orphaned" cluster were removed.`);
95 | } else {
96 | log.alert(`The ${orphaned} instance(s) from this cluster were moved to the "orphaned" cluster.`);
97 | }
98 | }
99 | nextFn();
100 | });
101 | }
102 | };
103 |
--------------------------------------------------------------------------------
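One behavior worth calling out from commands.remove above: instances in a removed cluster are moved to an "orphaned" cluster rather than dropped (removing "orphaned" itself is the exception). With a hypothetical "db" cluster holding two instances:

    overcast cluster remove db
    # Cluster "db" has been removed.
    # The 2 instance(s) from this cluster were moved to the "orphaned" cluster.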
/src/commands/completions.js:
--------------------------------------------------------------------------------
1 | import * as log from '../log.js';
2 | import * as utils from '../utils.js';
3 | import allCommands from './index.js';
4 |
5 | export const commands = {};
6 |
7 | commands.completions = {
8 | name: 'completions',
9 | usage: ['overcast completions'],
10 | description: [
11 | 'Return an array of commands, cluster names, and instance names for use',
12 | 'in bash tab completion.',
13 | '',
14 | 'To enable tab completion in bash, add this to your .bash_profile:',
15 | '',
16 | '_overcast_completions() {',
17 | ' local cur=${COMP_WORDS[COMP_CWORD]}',
18 | ' COMPREPLY=($(compgen -W "`overcast completions`" -- "$cur"))',
19 | ' return 0',
20 | '}',
21 | 'complete -F _overcast_completions overcast'
22 | ],
23 | run: (args, nextFn) => {
24 | log.log(getCompletions().join(' '));
25 | nextFn();
26 | }
27 | };
28 |
29 | function getCompletions() {
30 | const list = [];
31 |
32 | function pushWords(signature) {
33 | signature.split(' ').forEach((word) => {
34 | if (word && word.length > 3 && word.charAt(0) !== '[' && !list.includes(word)) {
35 | list.push(word);
36 | }
37 | });
38 | }
39 |
40 | utils.eachObject(allCommands, (command) => {
41 | if (command.commands) {
42 | utils.eachObject(command.commands, command => {
43 | command.usage.forEach((usage) => {
44 | pushWords(usage);
45 | });
46 | });
47 | }
48 | });
49 |
50 | const clusters = utils.getClusters();
51 | utils.eachObject(clusters, ({ instances }, clusterName) => {
52 | if (!list.includes(clusterName)) {
53 | list.push(clusterName);
54 | }
55 | Object.keys(instances).forEach((instanceName) => {
56 | if (!list.includes(instanceName)) {
57 | list.push(instanceName);
58 | }
59 | });
60 | });
61 |
62 | return list;
63 | }
64 |
--------------------------------------------------------------------------------
/src/commands/digitalocean.js:
--------------------------------------------------------------------------------
1 | import * as filters from '../filters.js';
2 | import * as provider from '../provider.js';
3 | import { isTestRun } from '../utils.js';
4 | import { api } from '../providers/digitalocean.js';
5 | import { mockAPI } from '../providers/mock.js';
6 |
7 | function getAPI() {
8 | return isTestRun() ? mockAPI : api;
9 | }
10 |
11 | export const commands = {};
12 |
13 | commands.boot = {
14 | name: 'boot',
15 | usage: ['overcast digitalocean boot [name]'],
16 | description: 'Boot up an instance if powered off, otherwise do nothing.',
17 | required: [
18 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] }
19 | ],
20 | run: (args, nextFn) => {
21 | provider.boot(getAPI(), args, nextFn);
22 | }
23 | };
24 |
25 | commands.poweron = Object.assign({ alias: true }, commands.boot);
26 |
27 | commands.create = {
28 | name: 'create',
29 | usage: ['overcast digitalocean create [name] [options...]'],
30 | description: ['Creates a new instance on DigitalOcean.'],
31 | examples: [
32 | '# Match using slugs:',
33 | '$ overcast digitalocean create vm-01 --size 2gb --region sfo1',
34 | '',
35 | '# Match using IDs or names:',
36 | '$ overcast digitalocean create vm-02 --region "London 1" --image 6374128'
37 | ],
38 | required: [
39 | { name: 'name', filters: filters.shouldBeNewInstance }
40 | ],
41 | options: [
42 | { usage: '--cluster CLUSTER', default: 'default' },
43 | { usage: '--ssh-port PORT', default: '22' },
44 | { usage: '--ssh-key PATH', default: 'overcast.key' },
45 | { usage: '--ssh-pub-key PATH', default: 'overcast.key.pub' },
46 | { usage: '--region REGION', default: api.DEFAULT_REGION },
47 | { usage: '--image IMAGE', default: api.DEFAULT_IMAGE },
48 | { usage: '--size SIZE', default: api.DEFAULT_SIZE },
49 | { usage: '--backups', default: 'false' },
50 | { usage: '--monitoring', default: 'false' },
51 | { usage: '--private-networking', default: 'false' },
52 | { usage: '--vpc-uuid', default: '' },
53 | { usage: '--with-droplet-agent', default: 'false' }
54 | ],
55 | run: (args, nextFn) => {
56 | provider.create(getAPI(), args, nextFn);
57 | }
58 | };
59 |
60 | commands.destroy = {
61 | name: 'destroy',
62 | usage: ['overcast digitalocean destroy [name] [options...]'],
63 | description: [
64 | 'Destroys a DigitalOcean droplet and removes it from your account.',
65 | 'Using --force overrides the confirm dialog.'
66 | ],
67 | examples: [
68 | '$ overcast digitalocean destroy vm-01'
69 | ],
70 | required: [
71 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] }
72 | ],
73 | options: [
74 | { usage: '--force', default: 'false' }
75 | ],
76 | run: (args, nextFn) => {
77 | provider.destroy(getAPI(), args, nextFn);
78 | }
79 | };
80 |
81 | commands.images = {
82 | name: 'images',
83 | usage: ['overcast digitalocean images'],
84 | description: 'List all images, including snapshots.',
85 | run: (args, nextFn) => {
86 | provider.images(getAPI(), nextFn);
87 | }
88 | };
89 |
90 | commands.instances = {
91 | name: 'instances',
92 | usage: ['overcast digitalocean instances'],
93 | description: 'List all instances in your account.',
94 | run: (args, nextFn) => {
95 | provider.instances(getAPI(), args, nextFn);
96 | }
97 | };
98 |
99 | commands.droplets = Object.assign({ alias: true }, commands.instances);
100 |
101 | commands.reboot = {
102 | name: 'reboot',
103 | usage: ['overcast digitalocean reboot [name]'],
104 | description: 'Reboot an instance using the provider API.',
105 | required: [
106 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] }
107 | ],
108 | run: (args, nextFn) => {
109 | provider.reboot(getAPI(), args, nextFn);
110 | }
111 | };
112 |
113 | commands.regions = {
114 | name: 'regions',
115 | usage: ['overcast digitalocean regions'],
116 | description: 'List all available regions.',
117 | run: (args, nextFn) => {
118 | provider.regions(getAPI(), nextFn);
119 | }
120 | };
121 |
122 | commands.rebuild = {
123 | name: 'rebuild',
124 | usage: ['overcast digitalocean rebuild [name] [image]'],
125 | description: [
126 | 'Rebuilds an existing instance on DigitalOcean, preserving the IP address.',
127 | '[image] can be image ID, name or slug.'
128 | ],
129 | examples: [
130 | '# Rebuild an instance using a readymade image:',
131 | '$ overcast digitalocean rebuild vm-01 ubuntu-14-04-x64',
132 | '',
133 | '# Rebuild an instance using a snapshot:',
134 | '$ overcast digitalocean rebuild vm-01 "vm-01 backup"'
135 | ],
136 | required: [
137 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] },
138 | { name: 'image' }
139 | ],
140 | run: (args, nextFn) => {
141 | provider.rebuild(getAPI(), args, nextFn);
142 | }
143 | };
144 |
145 | commands.resize = {
146 | name: 'resize',
147 | usage: ['overcast digitalocean resize [name] [size] [options...]'],
148 | description: [
149 | 'Shutdown, resize, and reboot a DigitalOcean instance.',
150 | '[size] must be a valid size slug.',
151 | 'If the --skip-boot flag is used, the instance will stay powered off.'
152 | ],
153 | examples: [
154 | '# Resize an instance to 2gb:',
155 | '$ overcast digitalocean resize vm-01 2gb'
156 | ],
157 | required: [
158 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] },
159 | { name: 'size' }
160 | ],
161 | options: [
162 | { usage: '--skip-boot', default: 'false' }
163 | ],
164 | run: (args, nextFn) => {
165 | provider.resize(getAPI(), args, nextFn);
166 | }
167 | };
168 |
169 | commands.snapshot = {
170 | name: 'snapshot',
171 | usage: ['overcast digitalocean snapshot [name] [snapshot-name]'],
172 | description: 'Creates a named snapshot of a droplet. This will reboot the instance.',
173 | examples: '$ overcast digitalocean snapshot vm-01 vm-01-snapshot',
174 | required: [
175 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] },
176 | { name: 'snapshot-name', varName: 'snapshotName' }
177 | ],
178 | run: (args, nextFn) => {
179 | provider.snapshot(getAPI(), args, nextFn);
180 | }
181 | };
182 |
183 | commands.snapshots = {
184 | name: 'snapshots',
185 | usage: ['overcast digitalocean snapshots'],
186 | description: 'List all available snapshots in your account.',
187 | run: (args, nextFn) => {
188 | provider.snapshots(getAPI(), nextFn);
189 | }
190 | };
191 |
192 | commands.shutdown = {
193 | name: 'shutdown',
194 | usage: ['overcast digitalocean shutdown [name]'],
195 | description: 'Shut down an instance using the provider API.',
196 | required: [
197 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeDigitalOcean] }
198 | ],
199 | run: (args, nextFn) => {
200 | provider.shutdown(getAPI(), args, nextFn);
201 | }
202 | };
203 |
204 | commands.sizes = {
205 | name: 'sizes',
206 | usage: ['overcast digitalocean sizes'],
207 | description: 'List all available instance sizes.',
208 | run: (args, nextFn) => {
209 | provider.sizes(getAPI(), nextFn);
210 | }
211 | };
212 |
213 | commands.types = Object.assign({ alias: true }, commands.sizes);
214 |
215 | commands.sync = {
216 | name: 'sync',
217 | usage: ['overcast digitalocean sync [name]'],
218 | description: 'Fetch and update instance metadata.',
219 | required: [
220 | { name: 'name', filters: filters.findFirstMatchingInstance }
221 | ],
222 | run: (args, nextFn) => {
223 | provider.sync(getAPI(), args, nextFn);
224 | }
225 | };
226 |
--------------------------------------------------------------------------------
/src/commands/expose.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as ssh from '../ssh.js';
3 | import { findMatchingInstances } from '../filters.js';
4 |
5 | export const commands = {};
6 |
7 | commands.expose = {
8 | name: 'expose',
9 | usage: ['overcast expose [instance|cluster|all] [port...] [options]'],
10 | description: [
11 | 'Reset the exposed ports on the instance or cluster using iptables.',
12 | 'This will fail if you don\'t include the current SSH port.',
13 | 'Specifying --allowlist will restrict all ports to the specified address(es).',
14 | 'These can be individual IPs or CIDR ranges, such as "192.168.0.0/24".',
15 | '',
16 | 'Expects an Ubuntu server, untested on other distributions.'
17 | ],
18 | required: [
19 | { name: 'instance|cluster|all', varName: 'name', filters: findMatchingInstances },
20 | { name: 'port...', varName: 'ports', greedy: true }
21 | ],
22 | options: [
23 | { usage: '--user USERNAME' },
24 | { usage: '--password PASSWORD' },
25 | { usage: '--allowlist "IP|RANGE"' },
26 | { usage: '--allowlist-PORT "IP|RANGE"' }
27 | ],
28 | examples: [
29 | 'Allow SSH, HTTP and HTTPS connections from anywhere:',
30 | '$ overcast expose app 22 80 443',
31 | '',
32 | 'Allow SSH from anywhere, only allow Redis connections from 1.2.3.4:',
33 | '$ overcast expose redis 22 6379 --allowlist-6379 "1.2.3.4"',
34 | '',
35 | 'Only allow SSH and MySQL connections from 1.2.3.4 or from 5.6.7.xxx:',
36 | '$ overcast expose mysql 22 3306 --allowlist "1.2.3.4 5.6.7.0/24"'
37 | ],
38 | run: (args, nextFn) => {
39 | args.env = {
40 | exposed_ports: args.ports
41 | };
42 | args._ = ['install/iptables'];
43 | utils.eachObject(args, (val, key) => {
44 | if (key.indexOf('allowlist') === 0) {
45 | args.env[key.replace('-', '_')] = val;
46 | }
47 | });
48 | ssh.run(args, nextFn);
49 | }
50 | };
51 |
--------------------------------------------------------------------------------
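The expose command above is a thin wrapper that sets environment variables and delegates to the install/iptables script shown earlier; its second example should be roughly equivalent to calling run directly, using the same hypothetical instance names:

    # overcast expose redis 22 6379 --allowlist-6379 "1.2.3.4"
    # ...is roughly:
    overcast run redis install/iptables --env "exposed_ports='22 6379' allowlist_6379='1.2.3.4'"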
/src/commands/exposed.js:
--------------------------------------------------------------------------------
1 | import * as ssh from '../ssh.js';
2 |
3 | export const commands = {};
4 |
5 | commands.exposed = {
6 | name: 'exposed',
7 | usage: ['overcast exposed [instance|cluster|all]'],
8 | description: [
9 | 'List the exposed ports on the instance or cluster.',
10 | 'Expects an Ubuntu server, untested on other distributions.'
11 | ],
12 | options: [
13 | { usage: '--user USERNAME' },
14 | { usage: '--password PASSWORD' },
15 | { usage: '--machine-readable, --mr' }
16 | ],
17 | required: [{ name: 'instance|cluster|all', varName: 'name' }],
18 | run: (args, nextFn) => {
19 | args._ = ['list_exposed_ports'];
20 | ssh.run(args, nextFn);
21 | }
22 | };
23 |
--------------------------------------------------------------------------------
/src/commands/help.js:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk';
2 |
3 | import * as constants from '../constants.js';
4 | import * as utils from '../utils.js';
5 | import { getConfigDir } from '../store.js';
6 | import allCommands from './index.js';
7 |
8 | export const commands = {};
9 |
10 | commands.help = {
11 | name: 'help',
12 | usage: ['overcast help'],
13 | description: [
14 | 'Provides help about Overcast and specific commands.'
15 | ],
16 | run: (arg, nextFn) => {
17 | const signatures = [];
18 | let row = ' ';
19 | utils.eachObject(allCommands, (command, name) => {
20 | if (name !== 'help' && (command.signatures || command.commands)) {
21 | if (row.length > 58) {
22 | signatures.push(row);
23 | row = ' ';
24 | }
25 | row += ` ${name}`;
26 | }
27 | });
28 | signatures.push(row);
29 |
30 | utils.printArray([
31 | '',
32 | `This is Overcast v${constants.VERSION}`,
33 | '',
34 | 'Documentation, source code, feedback:',
35 | chalk.cyan(' https://github.com/andrewchilds/overcast'),
36 | '',
37 | 'Usage:',
38 | chalk.cyan(' overcast [command] [options...]'),
39 | '',
40 | 'Help:',
41 | chalk.cyan(' overcast help'),
42 | chalk.cyan(' overcast [command] help'),
43 | '',
44 | 'Commands:'
45 | ]);
46 | utils.printArray(signatures, 'cyan');
47 | utils.printArray([
48 | '',
49 | 'Config directory:',
50 | chalk.cyan(` ${getConfigDir()}`)
51 | ]);
52 |
53 | nextFn();
54 | }
55 | };
56 |
--------------------------------------------------------------------------------
/src/commands/index.js:
--------------------------------------------------------------------------------
1 | import * as aliases from './aliases.js';
2 | import * as cluster from './cluster.js';
3 | import * as completions from './completions.js';
4 | import * as digitalocean from './digitalocean.js';
5 | import * as expose from './expose.js';
6 | import * as exposed from './exposed.js';
7 | import * as help from './help.js';
8 | import * as info from './info.js';
9 | import * as init from './init.js';
10 | import * as instance from './instance.js';
11 | import * as list from './list.js';
12 | import * as ping from './ping.js';
13 | import * as port from './port.js';
14 | import * as pull from './pull.js';
15 | import * as push from './push.js';
16 | import * as run from './run.js';
17 | import * as scriptvar from './scriptvar.js';
18 | import * as slack from './slack.js';
19 | import * as ssh from './ssh.js';
20 | import * as sshkey from './sshkey.js';
21 | import * as tunnel from './tunnel.js';
22 | import * as vars from './vars.js';
23 | import * as virtualbox from './virtualbox.js';
24 | import * as wait from './wait.js';
25 |
26 | export default {
27 | aliases,
28 | cluster,
29 | completions,
30 | digitalocean,
31 | expose,
32 | exposed,
33 | help,
34 | info,
35 | init,
36 | instance,
37 | list,
38 | ping,
39 | port,
40 | pull,
41 | push,
42 | run,
43 | scriptvar,
44 | slack,
45 | ssh,
46 | sshkey,
47 | tunnel,
48 | vars,
49 | virtualbox,
50 | wait
51 | };
52 |
--------------------------------------------------------------------------------
/src/commands/info.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as filters from '../filters.js';
3 | import * as log from '../log.js';
4 | import { getClustersJSON } from '../store.js';
5 |
6 | export const commands = {};
7 |
8 | commands.info = {
9 | name: 'info',
10 | usage: ['overcast info', 'overcast info [name]'],
11 | description: ['Prints the complete .overcast/clusters.json file.',
12 | 'Optionally display only instances matching [name].'],
13 | required: [{ name: 'name', optional: true, filters: filters.findMatchingInstances }],
14 | run: ({ instances }, nextFn) => {
15 | const clusters = utils.getClusters();
16 |
17 | log.faded(`Using ${getClustersJSON()}`);
18 |
19 | if (Object.keys(clusters).length === 0) {
20 | log.br();
21 | log.alert('No clusters found.');
22 | return nextFn();
23 | }
24 |
25 | if (instances && instances.length > 0) {
26 | log.br();
27 | instances.forEach((instance) => {
28 | log.log(instance.name);
29 | utils.prettyPrint(instance, 2);
30 | });
31 |
32 | return nextFn();
33 | }
34 |
35 | utils.eachObject(clusters, ({ instances }, clusterName) => {
36 | log.br();
37 | log.log(clusterName);
38 | utils.eachObject(instances, instance => {
39 | log.br();
40 | log.log(` ${instance.name}`);
41 | utils.prettyPrint(instance, 4);
42 | });
43 | });
44 |
45 | nextFn();
46 | }
47 | };
48 |
--------------------------------------------------------------------------------
/src/commands/init.js:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import * as utils from '../utils.js';
3 | import * as log from '../log.js';
4 | import * as store from '../store.js';
5 |
6 | export const commands = {};
7 |
8 | commands.init = {
9 | name: 'init',
10 | usage: ['overcast init'],
11 | description: [
12 | 'Create an .overcast config directory in the current working directory.',
13 | 'No action taken if one already exists.'
14 | ],
15 | run: (args, nextFn) => {
16 | const cwd = process.cwd();
17 |
18 | if (fs.existsSync(`${cwd}/.overcast`)) {
19 | log.alert(`An .overcast directory already exists here (${cwd}).`);
20 | nextFn();
21 | } else {
22 | utils.initOvercastDir(cwd, () => {
23 | // Override, in case we already have an existing dir elsewhere:
24 | store.setConfigDirs(cwd + '/.overcast');
25 | utils.createKeyIfMissing(nextFn);
26 | });
27 | }
28 | }
29 | };
30 |
--------------------------------------------------------------------------------
/src/commands/instance.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as filters from '../filters.js';
3 | import * as log from '../log.js';
4 |
5 | export const commands = {};
6 |
7 | commands.get = {
8 | name: 'get',
9 | usage: ['overcast instance get [instance|cluster|all] [attr...] [options...]'],
10 | description: [
11 | 'Returns the attribute(s) for the instance or cluster, one per line,',
12 | 'or space-delimited using the --single-line option.',
13 | 'Deeply nested arrays and objects are supported.',
14 | '"origin" is a compound attribute that returns user@ip:ssh-port.'
15 | ],
16 | examples: [
17 | '$ overcast instance get app-01 origin',
18 | 'root@1.2.3.4:22',
19 | '',
20 | '$ overcast instance get app-cluster ip',
21 | '127.0.0.1',
22 | '127.0.0.2',
23 | '127.0.0.3',
24 | '',
25 | '$ overcast instance get app-01 digitalocean.image.id',
26 | '103510828'
27 | ],
28 | options: [
29 | { usage: '--single-line, -s', default: 'false' }
30 | ],
31 | required: [
32 | { name: 'instance|cluster|all', varName: 'name', filters: filters.findMatchingInstances },
33 | { name: 'attr...', varName: 'attr', greedy: true }
34 | ],
35 | run: (args, nextFn) => {
36 | const output = [];
37 | args.attr = args.attr.split(' ');
38 |
39 | args.instances.forEach((instance) => {
40 | args.attr.forEach((attr) => {
41 | attr = attr.replace(/-/g, '_');
42 | if (attr === 'origin') {
43 | output.push(`${instance.user}@${instance.ip}:${instance.ssh_port}`);
44 | } else {
45 | const v = utils.deepGet(instance, attr);
46 | if (v !== undefined) {
47 | output.push(v);
48 | }
49 | }
50 | });
51 | });
52 |
53 | if (args.s || args['single-line']) {
54 | log.log(output.join(' '));
55 | } else {
56 | output.forEach((line) => {
57 | log.log(line);
58 | });
59 | }
60 |
61 | nextFn();
62 | }
63 | };
64 |
65 | commands.add = {
66 | name: 'add',
67 | usage: ['overcast instance add [name] [ip] [options...]'],
68 | description: 'Adds an existing instance to a cluster.',
69 | examples: [
70 | '$ overcast instance add app.01 127.0.0.1 --cluster app \\',
71 | ' --ssh-port 22222 --ssh-key $HOME/.ssh/id_rsa'
72 | ],
73 | required: [
74 | { name: 'name', filters: filters.shouldBeNewInstance },
75 | { name: 'ip' }
76 | ],
77 | options: [
78 | { usage: '--cluster CLUSTER', default: 'default' },
79 | { usage: '--ssh-port PORT', default: '22' },
80 | { usage: '--ssh-key PATH', default: 'overcast.key' },
81 | { usage: '--user USERNAME', default: 'root' },
82 | { usage: '--password PASSWORD' },
83 | ],
84 | run: (args, nextFn) => {
85 | const instance = {
86 | ip: args.ip,
87 | name: args.name,
88 | ssh_port: args['ssh-port'] || '22',
89 | ssh_key: args['ssh-key'] || 'overcast.key',
90 | user: args.user || 'root',
91 | password: args.password || ''
92 | };
93 |
94 | utils.saveInstanceToCluster(args.cluster, instance, () => {
95 | log.success(`Instance "${args.name}" (${args.ip}) has been added to the "${args.cluster}" cluster.`);
96 | nextFn();
97 | });
98 | }
99 | };
100 |
101 | commands.list = {
102 | name: 'list',
103 | usage: ['overcast instance list [cluster...]'],
104 | description: [
105 | 'Returns all instance names, one per line.',
106 | 'Optionally limit to one or more clusters.'
107 | ],
108 | examples: [
109 | '$ overcast instance list',
110 | '$ overcast instance list app-cluster db-cluster'
111 | ],
112 | run: (args, nextFn) => {
113 | const clusters = utils.getClusters();
114 | const scope = (args._ && args._.length > 0) ? args._ : Object.keys(clusters);
115 |
116 | utils.eachObject(clusters, ({instances}, clusterName) => {
117 | if (scope.findIndex(s => s === clusterName) !== -1) {
118 | utils.eachObject(instances, ({name}) => {
119 | log.log(name);
120 | });
121 | }
122 | });
123 |
124 | nextFn();
125 | }
126 | };
127 |
128 | commands.remove = {
129 | name: 'remove',
130 | usage: ['overcast instance remove [name]'],
131 | description: [
132 | 'Removes an instance from the index.',
133 | 'The server itself is not affected by this action.'
134 | ],
135 | examples: [
136 | '$ overcast instance remove app-01'
137 | ],
138 | required: [{ name: 'name', filters: filters.findFirstMatchingInstance }],
139 | run: ({ instance }, nextFn) => {
140 | utils.deleteInstance(instance, () => {
141 | log.success(`Instance "${instance.name}" removed.`);
142 | nextFn();
143 | });
144 | }
145 | };
146 |
147 | commands.update = {
148 | name: 'update',
149 | usage: ['overcast instance update [instance|cluster|all] [options...]'],
150 | description: [
151 | 'Update any instance property. Specifying --cluster will move the instance',
152 | 'to that cluster. Specifying --name will rename the instance.'
153 | ],
154 | examples: [
155 | '# Update the user and ssh-key of an instance:',
156 | '$ overcast instance update app.01 --user myuser --ssh-key /path/to/key',
157 | '',
158 | '# Update ssh-port of a cluster:',
159 | '$ overcast instance update app-cluster --ssh-port 22222'
160 | ],
161 | required: [
162 | { name: 'instance|cluster|all', varName: 'oldName', filters: filters.findMatchingInstances }
163 | ],
164 | options: [
165 | { usage: '--name NAME' },
166 | { usage: '--cluster CLUSTER' },
167 | { usage: '--ip IP' },
168 | { usage: '--ssh-port PORT' },
169 | { usage: '--ssh-key PATH' },
170 | { usage: '--user USERNAME' },
171 | { usage: '--password PASSWORD' }
172 | ],
173 | run: (args, nextFn) => {
174 | const clusters = utils.getClusters();
175 |
176 | if (!args.name) {
177 | args.name = args.oldName;
178 | args.oldName = null;
179 | }
180 |
181 | const instances = utils.findMatchingInstances(args.oldName || args.name);
182 | const messages = [];
183 |
184 | utils.eachObject(instances, (instance) => {
185 | return updateInstance(args, messages, clusters, instance);
186 | });
187 |
188 | utils.saveClusters(clusters, () => {
189 | messages.forEach(log.success);
190 | nextFn();
191 | });
192 | }
193 | };
194 |
195 | export function updateInstance(args, messages, clusters, instance) {
196 | let parentClusterName = utils.findClusterNameForInstance(instance);
197 |
198 | if (args.cluster) {
199 | if (!clusters[args.cluster]) {
200 | return utils.die(`No "${args.cluster}" cluster found. Known clusters are: ${Object.keys(clusters).join(', ')}.`);
201 | }
202 | if (clusters[args.cluster].instances[instance.name]) {
203 | return utils.die(`An instance named "${instance.name}" already exists in the "${args.cluster}" cluster.`);
204 | }
205 |
206 | delete clusters[parentClusterName].instances[instance.name];
207 | clusters[args.cluster].instances[instance.name] = instance;
208 | parentClusterName = args.cluster;
209 | messages.push(`Instance "${instance.name}" has been moved to the "${args.cluster}" cluster.`);
210 | }
211 |
212 | if (args.oldName) {
213 | if (clusters[parentClusterName].instances[args.name]) {
214 | return utils.die(`An instance named "${args.name}" already exists in the "${parentClusterName}" cluster.`);
215 | }
216 |
217 | instance.name = args.name;
218 | delete clusters[parentClusterName].instances[args.oldName];
219 | clusters[parentClusterName].instances[args.name] = instance;
220 | messages.push(`Instance "${args.oldName}" has been renamed to "${args.name}".`);
221 | }
222 |
223 | ['ip', 'ssh-key', 'ssh-port', 'user', 'password'].forEach((prop) => {
224 | if (prop in args) {
225 | if (args[prop]) {
226 | clusters[parentClusterName].instances[instance.name][prop.replace('-', '_')] = args[prop];
227 | messages.push(`Instance property "${prop}" has been updated to "${args[prop]}".`);
228 | } else {
229 | delete clusters[parentClusterName].instances[instance.name][prop.replace('-', '_')];
230 | messages.push(`Instance property "${prop}" has been unset.`);
231 | }
232 | }
233 | });
234 | }
235 |
--------------------------------------------------------------------------------
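A detail of updateInstance above that the built-in examples don't show: passing an empty value unsets the stored property instead of writing an empty string (instance name is hypothetical):

    overcast instance update app-01 --password ""
    # Instance property "password" has been unset.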
/src/commands/list.js:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk';
2 | import * as utils from '../utils.js';
3 | import * as log from '../log.js';
4 | import { getClustersJSON } from '../store.js';
5 |
6 | export const commands = {};
7 |
8 | commands.list = {
9 | name: 'list',
10 | usage: ['overcast list'],
11 | description: 'List your cluster and instance definitions.',
12 | run: (args, nextFn = () => {}) => {
13 | const clusters = utils.getClusters();
14 |
15 | log.faded(`Using ${getClustersJSON()}`);
16 |
17 | if (Object.keys(clusters).length === 0) {
18 | log.br();
19 | log.alert('No clusters found.');
20 | return nextFn();
21 | }
22 |
23 | utils.eachObject(clusters, ({ instances }, clusterName) => {
24 | log.br();
25 | log.log(clusterName);
26 | utils.eachObject(instances, (instance) => {
27 | const origin = `(${instance.user}@${instance.ip}:${instance.ssh_port || 22})`;
28 | const provider = getProviderName(instance);
29 | const str = ` ${chalk.cyan(instance.name)} ${origin} (${chalk.green(provider || 'unknown provider')})`;
30 | log.log(str);
31 | });
32 | });
33 |
34 | nextFn();
35 | }
36 | };
37 |
38 | function getProviderName(instance) {
39 | let name = '';
40 | ['digitalocean', 'virtualbox', 'linode', 'aws'].forEach((provider) => {
41 | if (instance[provider]) {
42 | name = `${provider}`;
43 | }
44 | });
45 | return name;
46 | }
47 |
--------------------------------------------------------------------------------
/src/commands/ping.js:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk';
2 | import cp from 'child_process';
3 |
4 | import * as log from '../log.js';
5 | import * as utils from '../utils.js';
6 | import * as filters from '../filters.js';
7 | import { fsync } from 'fs';
8 |
9 | export const commands = {};
10 |
11 | commands.ping = {
12 | name: 'ping',
13 | usage: ['overcast ping [instance|cluster|all] [options]'],
14 | description: 'Display the average ping time for an instance or cluster.',
15 | examples: [
16 | '$ overcast ping app-01',
17 | '$ overcast ping db --count 5'
18 | ],
19 | required: [
20 | { name: 'name', filters: filters.findMatchingInstances }
21 | ],
22 | options: [{ usage: '--count N, -c N', default: '3' }],
23 | run: (args, nextFn) => {
24 | const count = args.count || args.c || 3;
25 | const fns = [];
26 |
27 | args.instances.forEach((instance) => {
28 | fns.push((nextFn) => {
29 | ping(instance, count, nextFn);
30 | });
31 | });
32 |
33 | utils.allInParallelThen(fns, nextFn);
34 | }
35 | };
36 |
37 | function ping({ ip, name }, count, nextFn) {
38 | cp.exec(`ping -c ${count} ${ip}`, (err, stdout) => {
39 | const color = utils.getNextColor();
40 | const averagePing = stdout.match(/ ([\d\.]+)\/([\d\.]+)\/([\d\.]+)\/([\d\.]+) ms/);
41 | const prefix = `${name}: `;
42 | log.log(`${chalk[color](prefix) + averagePing[2]} ms`);
43 | nextFn();
44 | });
45 | }
46 |
--------------------------------------------------------------------------------
/src/commands/port.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as ssh from '../ssh.js';
3 | import * as filters from '../filters.js';
4 |
5 | export const commands = {};
6 |
7 | commands.port = {
8 | name: 'port',
9 | usage: ['overcast port [instance|cluster|all] [port]'],
10 | description: [
11 | 'Change the SSH port for an instance or a cluster. This command fails',
12 | 'if the new port has not been previously opened by iptables.',
13 | 'See also the "expose" and "exposed" commands.'
14 | ],
15 | examples: [
16 | '# Expose only necessary ports:',
17 | '$ overcast expose vm-01 22 55522 80 443',
18 | '',
19 | '# Update SSH port from 22 to 55522:',
20 | '$ overcast port vm-01 55522',
21 | '',
22 | '# Close port 22:',
23 | '$ overcast expose vm-01 55522 80 443'
24 | ],
25 | required: [
26 | { name: 'instance|cluster|all', varName: 'name', filters: filters.findMatchingInstances },
27 | { name: 'port' }
28 | ],
29 | run: (args, nextFn) => {
30 | const new_ssh_port = `${args.port}`;
31 | args.env = {
32 | new_ssh_port
33 | };
34 |
35 | args._ = ['change_ssh_port'];
36 | ssh.run(args, () => {
37 | const fns = [];
38 | args.instances.forEach((instance) => {
39 | fns.push((nextFn) => {
40 | utils.updateInstance(instance.name, {
41 | ssh_port: new_ssh_port
42 | }, nextFn);
43 | });
44 | });
45 |
46 | utils.allInParallelThen(fns, nextFn);
47 | });
48 | }
49 | };
50 |
--------------------------------------------------------------------------------
/src/commands/pull.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as scp from '../scp.js';
3 | import * as rsync from '../rsync.js';
4 | import { findMatchingInstances } from '../filters.js';
5 |
6 | export const commands = {};
7 |
8 | commands.pull = {
9 | name: 'pull',
10 | usage: ['overcast pull [instance|cluster|all] [source] [dest] [options...]'],
11 | description: [
12 | 'Pull a file or directory from an instance or cluster using scp by default,',
13 | 'or using rsync if the --rsync flag is used. Source is absolute or relative',
14 | 'to the home directory. Destination can be absolute or relative to the',
15 | '.overcast/files directory. Any reference to {instance} in the destination',
16 | 'will be replaced with the instance name.'
17 | ],
18 | examples: [
19 | 'Assuming instances "app.01" and "app.02", this will expand to:',
20 | ' - .overcast/files/app.01.bashrc',
21 | ' - .overcast/files/app.02.bashrc',
22 | '$ overcast pull app .bashrc {instance}.bashrc'
23 | ],
24 | required: [
25 | { name: 'instance|cluster|all', varName: 'name', filters: findMatchingInstances },
26 | { name: 'source', raw: true },
27 | { name: 'dest', raw: true }
28 | ],
29 | options: [
30 | { usage: '--rsync', default: 'false' },
31 | { usage: '--user USERNAME' },
32 | { usage: '--password PASSWORD' }
33 | ],
34 | run: (args, nextFn) => {
35 | args.direction = 'pull';
36 |
37 | if (utils.argIsTruthy(args.rsync)) {
38 | rsync.run(args, nextFn);
39 | } else {
40 | scp.run(args, nextFn);
41 | }
42 | }
43 | };
44 |
--------------------------------------------------------------------------------
/src/commands/push.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as scp from '../scp.js';
3 | import * as rsync from '../rsync.js';
4 | import { findMatchingInstances } from '../filters.js';
5 |
6 | export const commands = {};
7 |
8 | commands.push = {
9 | name: 'push',
10 | usage: ['overcast push [instance|cluster|all] [source] [dest] [options...]'],
11 | description: [
12 | 'Push a file or directory to an instance or cluster using scp by default,',
13 | 'or rsync if the --rsync flag is used. Source can be absolute or relative',
14 | 'to the .overcast/files directory. Destination can be absolute or relative',
15 | 'to the home directory. Any reference to {instance} in the source will be',
16 | 'replaced with the instance name. The --exclude flag only works with rsync.'
17 | ],
18 | examples: [
19 | 'Assuming instances "app.01" and "app.02", this will expand to:',
20 | ' - .overcast/files/app.01.bashrc',
21 | ' - .overcast/files/app.02.bashrc',
22 | '$ overcast push app {instance}.bashrc .bashrc'
23 | ],
24 | required: [
25 | { name: 'instance|cluster|all', varName: 'name', filters: findMatchingInstances },
26 | { name: 'source', raw: true },
27 | { name: 'dest', raw: true }
28 | ],
29 | options: [
30 | { usage: '--rsync', default: 'false' },
31 | { usage: '--user USERNAME' },
32 | { usage: '--password PASSWORD' },
33 | { usage: '--exclude FILE_OR_DIRECTORY' }
34 | ],
35 | run: (args, nextFn) => {
36 | args.direction = 'push';
37 |
38 | if (utils.argIsTruthy(args.rsync)) {
39 | rsync.run(args, nextFn);
40 | } else {
41 | scp.run(args, nextFn);
42 | }
43 | }
44 | };
45 |
--------------------------------------------------------------------------------
/src/commands/run.js:
--------------------------------------------------------------------------------
1 | import { findMatchingInstances } from '../filters.js';
2 | import * as ssh from '../ssh.js';
3 |
4 | export const commands = {};
5 |
6 | commands.run = {
7 | name: 'run',
8 | usage: ['overcast run [instance|cluster|all] [command|file...]'],
9 | description: [
10 | 'Execute commands or script files on an instance or cluster over SSH.',
11 | 'Commands will execute sequentially unless the --parallel flag is used.',
12 | 'An error will stop execution unless the --continueOnError flag is used.',
13 |     'Script files can be specified as an absolute or relative path.',
14 | ],
15 | examples: [
16 |     '# Run arbitrary commands and files in sequence across all instances:',
17 | '$ overcast run all uptime "free -m" "df -h" /path/to/my/script',
18 | '',
19 | '# Setting environment variables:',
20 | '$ overcast run app --env "foo=\'bar bar\' testing=123" env',
21 | '',
22 | '# Use machine-readable output (no server prefix):',
23 | '$ overcast run app-01 uptime --mr',
24 | '',
25 | '# Run bundled and custom scripts in sequence:',
26 | '$ overcast run db-* install/core install/redis ./my/install/script',
27 | '',
28 | '# Pass along arbitrary SSH arguments, for example to force a pseudo-tty:',
29 | '$ overcast run all /my/install/script --ssh-args "-tt"'
30 | ],
31 | required: [
32 | { name: 'instance|cluster|all', varName: 'name', filters: findMatchingInstances },
33 | { name: 'command|file', varName: 'firstCommandOrFile', raw: true }
34 | ],
35 | options: [
36 | { usage: '--env "KEY=VAL KEY=\'1 2 3\'"' },
37 | { usage: '--user USERNAME' },
38 | { usage: '--password PASSWORD' },
39 | { usage: '--ssh-key PATH' },
40 | { usage: '--ssh-args ARGS' },
41 | { usage: '--continueOnError', default: 'false' },
42 | { usage: '--machine-readable, --mr', default: 'false' },
43 | { usage: '--parallel, -p', default: 'false' },
44 | { usage: '--shell-command "COMMAND"', default: 'bash -s' },
45 | { usage: '--only-once', default: 'false' }
46 | ],
47 | run: (args, nextFn) => {
48 | args._.unshift(args.firstCommandOrFile);
49 | delete args.firstCommandOrFile;
50 |
51 | ssh.run(args, nextFn);
52 | }
53 | };
54 |
--------------------------------------------------------------------------------
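The run() handler above only reassembles the positional arguments before handing them to ssh.run(): the first command or file is mapped onto args.firstCommandOrFile and folded back into args._ so the SSH layer sees one ordered list. A small sketch of that reshuffle (the argument shape below is an assumption based on the required/raw definitions above, not taken from src/cli.js):

// Sketch only — roughly how "overcast run all uptime 'free -m' /path/to/script"
// arrives, assuming the CLI maps positionals onto the varNames declared above.
const args = {
  name: 'all',                        // resolved to args.instances by the filter
  firstCommandOrFile: 'uptime',
  _: ['free -m', '/path/to/script']   // remaining commands/files
};

args._.unshift(args.firstCommandOrFile);
delete args.firstCommandOrFile;

console.log(args._); // ['uptime', 'free -m', '/path/to/script'] — run in sequence,
                     // or in parallel when --parallel is passed to ssh.run().
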
/src/commands/scriptvar.js:
--------------------------------------------------------------------------------
1 | import { findMatchingInstances } from '../filters.js';
2 | import * as ssh from '../ssh.js';
3 |
4 | export const commands = {};
5 |
6 | commands.scriptvar = {
7 | name: 'scriptvar',
8 | usage: ['overcast scriptvar [instance|cluster|all] [filename] [key] [value]'],
9 | description: [
10 | 'Set a named variable in a remote file on an instance or cluster.',
11 | 'Expects a shell variable format, for example MY_VAR_NAME="my_value"'
12 | ],
13 | examples: [
14 | '$ overcast scriptvar app-01 /path/to/file.sh MY_API_TOKEN abc123'
15 | ],
16 | required: [
17 | { name: 'instance|cluster|all', varName: 'name', filters: findMatchingInstances },
18 | { name: 'filename', varName: 'var_filename', raw: true },
19 | { name: 'key', varName: 'var_name', raw: true },
20 | { name: 'value', varName: 'var_value', raw: true }
21 | ],
22 | options: [
23 | { usage: '--user USERNAME' },
24 | { usage: '--password PASSWORD' },
25 | { usage: '--continueOnError', default: 'false' },
26 | { usage: '--machine-readable, --mr', default: 'false' },
27 | { usage: '--parallel, -p', default: 'false' }
28 | ],
29 | run: (args, nextFn) => {
30 | args._ = ['set_script_var'];
31 | args.env = {
32 | VAR_FILENAME: args.var_filename,
33 | VAR_NAME: args.var_name,
34 | VAR_VALUE: args.var_value
35 | };
36 |
37 | ssh.run(args, nextFn);
38 | }
39 | };
40 |
--------------------------------------------------------------------------------
/src/commands/slack.js:
--------------------------------------------------------------------------------
1 | import SlackNotify from 'slack-notify';
2 | import * as utils from '../utils.js';
3 | import * as log from '../log.js';
4 | import { getVariablesJSON } from '../store.js';
5 |
6 | export const commands = {};
7 |
8 | commands.slack = {
9 | name: 'slack',
10 | usage: ['overcast slack [message] [options...]'],
11 | description: [
12 | 'Sends a message to a Slack channel.',
13 | 'Requires a SLACK_WEBHOOK_URL property to be set in variables.json.',
14 | 'You can set that with the following command:',
15 |     'overcast vars set SLACK_WEBHOOK_URL https://foo.slack.com/blah'
16 | ],
17 | examples: [
18 |     '$ overcast slack "Deploy completed." --icon-emoji ":satellite:"',
19 | '$ overcast slack "Server stats" --channel "#general" --cpu "0.54 0.14 0.09"'
20 | ],
21 | required: [
22 | { name: 'message', raw: true }
23 | ],
24 | options: [
25 | { usage: '--channel NAME', default: '#alerts' },
26 | { usage: '--icon-emoji EMOJI', default: ':cloud:' },
27 | { usage: '--icon-url URL' },
28 | { usage: '--user NAME', default: 'Overcast' },
29 | { usage: '--KEY VALUE' }
30 | ],
31 | run: (args, nextFn) => {
32 | const options = {
33 | channel: args.channel || '#alerts',
34 | icon_emoji: args['icon-emoji'] || ':cloud:',
35 | icon_url: args['icon-url'] || null,
36 | text: args.message,
37 | username: args.user || 'Overcast'
38 | };
39 |
40 | const custom_fields = Object.assign({}, args);
41 |     ['_', 'channel', 'command', 'message', 'icon-emoji', 'icon-url', 'user'].forEach((key) => {
42 | delete custom_fields[key];
43 | });
44 |
45 | options.fields = custom_fields;
46 |
47 | send(options, nextFn);
48 | }
49 | };
50 |
51 | export function send(options, nextFn = () => {}) {
52 | const vars = utils.getVariables();
53 |
54 | if (!vars.SLACK_WEBHOOK_URL) {
55 | log.faded('No message sent.');
56 | log.faded(`Please add SLACK_WEBHOOK_URL to ${getVariablesJSON()}.`);
57 |
58 | return nextFn();
59 | }
60 |
61 | if (utils.isTestRun()) {
62 |     log.success('Message sent to Slack. (Pretending because this is a test run. Options = ' + JSON.stringify(options) + ')');
63 | nextFn();
64 | } else {
65 | const slack = SlackNotify(vars.SLACK_WEBHOOK_URL);
66 | slack.send(options).then(() => {
67 | log.success('Message sent to Slack.');
68 | nextFn();
69 | }).catch((err) => {
70 | log.failure(`Unable to send message to Slack. ${err}`);
71 | nextFn();
72 | });
73 | }
74 | }
75 |
--------------------------------------------------------------------------------
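Because send() is exported, notifications can also be fired programmatically. A minimal sketch, assuming SLACK_WEBHOOK_URL has already been stored with `overcast vars set` and that the import path is relative to the repository root:

import { send } from './src/commands/slack.js';

send({
  channel: '#alerts',
  icon_emoji: ':cloud:',
  username: 'Overcast',
  text: 'Deploy completed.',
  fields: { host: 'app-01' } // arbitrary extra fields, like --KEY VALUE on the CLI
}, () => {
  // Reached after the webhook call resolves, or immediately if no webhook is set.
});
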
/src/commands/ssh.js:
--------------------------------------------------------------------------------
1 | import cp from 'child_process';
2 | import * as utils from '../utils.js';
3 | import * as filters from '../filters.js';
4 | import * as log from '../log.js';
5 | import { getConfigDir } from '../store.js';
6 |
7 | export const commands = {};
8 |
9 | commands.ssh = {
10 | name: 'ssh',
11 | usage: ['overcast ssh [instance] [options...]'],
12 | description: [
13 | 'Opens an interactive SSH connection to an instance.'
14 | ],
15 | required: [
16 | { name: 'instance', varName: 'name', filters: filters.findFirstMatchingInstance }
17 | ],
18 | examples: [
19 | '$ overcast ssh instance-01',
20 | '# To use a personal username and key in variables.json:',
21 | '$ overcast vars set OVERCAST_SSH_USER my-username',
22 | '$ overcast vars set OVERCAST_SSH_KEY /path/to/my.key',
23 | '$ overcast ssh instance-01 # will use the above variables to attempt a connection'
24 | ],
25 | options: [
26 | { usage: '--user USERNAME' },
27 | { usage: '--password PASSWORD' },
28 | { usage: '--ssh-key PATH' }
29 | ],
30 | run: (args, nextFn) => {
31 | // This fixes a "possible EventEmitter memory leak detected" error.
32 | // Ref: https://github.com/andrewchilds/overcast/issues/14
33 | process.stdin.setMaxListeners(0);
34 |
35 | connect(args.instance, args, nextFn);
36 | }
37 | };
38 |
39 | function connect(instance, args, nextFn = () => {}) {
40 | const vars = utils.getVariables();
41 |
42 | const privateKeyFile = utils.normalizeKeyPath(args['ssh-key'] || vars.OVERCAST_SSH_KEY || instance.ssh_key || `${getConfigDir()}/keys/overcast.key`);
43 | const sshPort = instance.ssh_port || '22';
44 | const host = `${args.user || vars.OVERCAST_SSH_USER || instance.user || 'root'}@${instance.ip}`;
45 | const password = (args.password || instance.password || '');
46 |
47 | const command = [];
48 | if (password) {
49 | command.push('sshpass');
50 | command.push(`-p${password}`);
51 | }
52 | command.push('ssh');
53 | command.push('-tt');
54 | if (!password) {
55 | command.push('-i');
56 | command.push(privateKeyFile);
57 | }
58 | command.push('-p');
59 | command.push(sshPort);
60 | command.push('-o');
61 | command.push('StrictHostKeyChecking=no');
62 | if (password) {
63 | command.push('-o');
64 | command.push('PubkeyAuthentication=no');
65 | }
66 | command.push(host);
67 |
68 | log.log(command.join(' '));
69 |
70 | const ssh = cp.spawn(command.shift(), command, {
71 | stdio: 'inherit'
72 | });
73 |
74 | ssh.on('error', (err) => {
75 | log.failure('There was an error running this command. ' + err);
76 | if (password) {
77 | log.failure('You need the "sshpass" program installed to use password-based');
78 | log.failure('SSH authentication. Do you have that installed?');
79 | }
80 | });
81 |
82 | ssh.on('exit', code => {
83 | process.stdin.pause();
84 |
85 | if (code !== 0) {
86 | const str = `SSH connection exited with a non-zero code (${code}).`;
87 | utils.die(str);
88 | }
89 |
90 | nextFn();
91 | });
92 | }
93 |
--------------------------------------------------------------------------------
/src/commands/sshkey.js:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import * as utils from '../utils.js';
3 | import * as filters from '../filters.js';
4 | import * as ssh from '../ssh.js';
5 | import * as log from '../log.js';
6 | import { getConfigDir } from '../store.js';
7 |
8 | export const commands = {};
9 |
10 | commands.create = {
11 | name: 'create',
12 | usage: ['overcast sshkey create [name]'],
13 | description: 'Creates a new SSH key in the current .overcast config.',
14 | examples: [
15 | '$ overcast sshkey create myKeyName',
16 | 'New SSH key "myKeyName" created.',
17 | ' - /path/to/.overcast/keys/myKeyName.key',
18 | ' - /path/to/.overcast/keys/myKeyName.key.pub'
19 | ],
20 | required: [{ name: 'name', filters: filters.shouldBeNewKey }],
21 | run: ({ name }, nextFn) => {
22 | utils.createKey(name, nextFn);
23 | }
24 | };
25 |
26 | commands.delete = {
27 | name: 'delete',
28 | usage: ['overcast sshkey delete [name]'],
29 | description: 'Deletes SSH public/private key files from the current .overcast config.',
30 | examples: [
31 | '$ overcast sshkey delete myKeyName',
32 | 'SSH key "myKeyName" deleted.'
33 | ],
34 | required: [{ name: 'name', filters: filters.shouldBeExistingKey }],
35 | run: ({ name }, nextFn) => {
36 | utils.deleteKey(name, () => {
37 | log.success(`SSH key "${name}" deleted.`);
38 | nextFn();
39 | });
40 | }
41 | };
42 |
43 | commands.get = {
44 | name: 'get',
45 | usage: ['overcast sshkey get [name] [option]'],
46 | description: [
47 | 'Display the requested SSH key data or path from the current .overcast config.',
48 |     'Defaults to displaying the public key data if no option is given.'
49 | ],
50 | options: [
51 | { usage: '--public-data' },
52 | { usage: '--private-data' },
53 | { usage: '--public-path' },
54 | { usage: '--private-path' }
55 | ],
56 | examples: [
57 | '$ overcast sshkey get myKeyName',
58 | '[public key data]',
59 | '$ overcast sshkey get myKeyName --private-data',
60 | '[private key data]'
61 | ],
62 | required: [{ name: 'name', filters: filters.shouldBeExistingKey }],
63 | run: (args, nextFn) => {
64 | const keyFile = utils.getKeyFileFromName(args.name);
65 | const publicKeyFile = `${keyFile}.pub`;
66 |
67 | if (args['private-data']) {
68 | printFile(keyFile);
69 | } else if (args['private-path']) {
70 | log.log(keyFile);
71 | } else if (args['public-path']) {
72 | log.log(publicKeyFile);
73 | } else {
74 | printFile(publicKeyFile);
75 | }
76 |
77 | nextFn();
78 | }
79 | };
80 |
81 | commands.list = {
82 | name: 'list',
83 | usage: ['overcast sshkey list'],
84 |   description: 'List the SSH key names found in the current .overcast config.',
85 | examples: [
86 | '$ overcast sshkey list',
87 | 'myKeyName',
88 | 'overcast'
89 | ],
90 | run: (args, nextFn) => {
91 | listKeys(nextFn);
92 | }
93 | };
94 |
95 | commands.push = {
96 | name: 'push',
97 | usage: ['overcast sshkey push [instance|cluster|all] [name|path] [options...]'],
98 | description: [
99 | 'Push a public SSH key to an instance or cluster. Accepts a key name,',
100 | 'filename, or full path. This will overwrite the existing authorized_keys',
101 | 'file, unless you use --append.'
102 | ],
103 | examples: [
104 | '# Generate new SSH key pair:',
105 | '$ overcast sshkey create newKey',
106 | '',
107 | '# Push public key to instance, update instance config to use private key:',
108 | '$ overcast sshkey push vm-01 newKey',
109 | '$ overcast instance update vm-01 --ssh-key newKey.key',
110 | '',
111 | '# Same as above but using key path instead of key name:',
112 | '$ overcast sshkey push vm-02 "~/.ssh/id_rsa.pub"',
113 | '$ overcast instance update vm-02 --ssh-key "~/.ssh/id_rsa"',
114 | '',
115 | '# Push public key to instance using arbitrary user:',
116 | '$ overcast sshkey push vm-03 newKey --user myOtherUser',
117 | '',
118 | '# Append public key to authorized_keys instead of overwriting:',
119 | '$ overcast sshkey push vm-04 newKey --append'
120 | ],
121 | required: [
122 | { name: 'instance|cluster|all', varName: 'name', filters: filters.findMatchingInstances },
123 | { name: 'name|path', varName: 'path', raw: true }
124 | ],
125 | options: [
126 | { usage: '--user USERNAME' },
127 | { usage: '--append, -a', default: 'false' }
128 | ],
129 | run: (args, nextFn) => {
130 | const keyPath = getKeyPath(args.path);
131 | args.env = {
132 | PUBLIC_KEY: fs.readFileSync(keyPath, { encoding: 'utf8' }),
133 | SHOULD_APPEND: utils.argIsTruthy(args.append) || utils.argIsTruthy(args.a)
134 | };
135 |
136 | args._ = ['authorize_key'];
137 | args.mr = true; // machine readable
138 | ssh.run(args, () => {
139 | log.success(`Key updated on ${args.instances.length} instance(s).`);
140 | log.info('If this is the default user you use to SSH in,');
141 | log.info('you need to update the instance configuration. For example:');
142 | log.info(`overcast instance update ${args.name} --ssh-key myPrivateKey.key`);
143 |
144 | nextFn();
145 | });
146 | }
147 | };
148 |
149 | export function getKeyPath(path) {
150 | let keyPath = utils.normalizeKeyPath(path);
151 | if (!fs.existsSync(keyPath)) {
152 | if (fs.existsSync(`${keyPath}.key.pub`)) {
153 | keyPath += '.key.pub';
154 | } else if (fs.existsSync(`${keyPath}.pub`)) {
155 | keyPath += '.pub';
156 | } else {
157 | return utils.die(`Key "${keyPath}" not found.`);
158 | }
159 | }
160 |
161 | return keyPath;
162 | }
163 |
164 | function printFile(file) {
165 | const data = fs.readFileSync(file, { encoding: 'utf8' });
166 | log.log(data.toString());
167 | }
168 |
169 | function listKeys(nextFn) {
170 | fs.readdir(`${getConfigDir()}/keys/`, (err, data) => {
171 | const obj = {};
172 | data.forEach((name) => {
173 | name = name.replace('.pub', '').replace('.key', '');
174 | obj[name] = true;
175 | });
176 | Object.keys(obj).forEach(row => {
177 | log.log(row);
178 | });
179 |
180 | nextFn();
181 | });
182 | }
183 |
--------------------------------------------------------------------------------
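getKeyPath() is what lets `sshkey push` accept a key name, a filename, or a full path: it normalizes the value and then falls back to the .key.pub and .pub variants before giving up. A short sketch of the lookup, assuming a key created with `overcast sshkey create newKey`, that utils.normalizeKeyPath resolves bare names inside the config's keys directory, and that the import path is relative to the repository root:

import { getKeyPath } from './src/commands/sshkey.js';

// Lookup order, per the implementation above:
//   1. the normalized path itself, if it exists on disk
//   2. "<path>.key.pub"  -> so "newKey" can resolve to .overcast/keys/newKey.key.pub
//   3. "<path>.pub"      -> so "~/.ssh/id_rsa" can resolve to ~/.ssh/id_rsa.pub
//   4. otherwise the process exits via utils.die()
console.log(getKeyPath('newKey'));
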
/src/commands/tunnel.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as filters from '../filters.js';
3 | import * as log from '../log.js';
4 |
5 | export const commands = {};
6 |
7 | commands.tunnel = {
8 | name: 'tunnel',
9 | usage: ['overcast tunnel [instance] [local-port((:hostname):remote-port)...]'],
10 | description: [
11 | 'Opens an SSH tunnel to the port(s) specified.',
12 |     'If only one port is specified, the same port is used locally and remotely.',
13 |     'If no remote host is specified, the tunnel targets the instance itself (127.0.0.1).',
14 | 'Multiple tunnels can be opened over a single connection.'
15 | ],
16 | examples: [
17 | '# Tunnel local 5984 to remote 5984',
18 | '$ overcast tunnel app-01 5984',
19 | '',
20 | '# Tunnel local 8000 to remote 5984, local 8001 to remote 3000',
21 | '$ overcast tunnel app-01 8000:5984 8001:3000',
22 | '',
23 | '# Tunnel local 3000 to otherhost.com:4000',
24 | '$ overcast tunnel app-01 3000:otherhost.com:4000'
25 | ],
26 | required: [
27 | { name: 'instance', varName: 'name', filters: filters.findFirstMatchingInstance },
28 | { name: 'local-port((:hostname):remote-port)...', varName: 'firstPort', raw: true }
29 | ],
30 | options: [
31 | { usage: '--user USERNAME' },
32 | { usage: '--password PASSWORD' },
33 | { usage: '--ssh-key PATH' }
34 | ],
35 | run: (args, nextFn) => {
36 | args._.unshift(args.firstPort);
37 | delete args.firstPort;
38 |
39 | connect(args.instance, args, nextFn);
40 | }
41 | };
42 |
43 | function connect(instance, args, nextFn) {
44 | const password = (args.password || instance.password || '');
45 |
46 | const sshArgs = [];
47 | if (password) {
48 | sshArgs.push('sshpass');
49 | sshArgs.push(`-p${password}`);
50 | }
51 | sshArgs.push('ssh');
52 | if (!password) {
53 | sshArgs.push('-i');
54 | sshArgs.push(utils.normalizeKeyPath(args['ssh-key'] || instance.ssh_key || 'overcast.key'));
55 | }
56 | sshArgs.push('-p');
57 | sshArgs.push((instance.ssh_port || '22'));
58 | sshArgs.push('-o');
59 | sshArgs.push('StrictHostKeyChecking=no');
60 | if (password) {
61 | sshArgs.push('-o');
62 | sshArgs.push('PubkeyAuthentication=no');
63 | }
64 |
65 | const ports = normalizePorts(args._);
66 | ports.forEach(({ localPort, remoteHost, remotePort }) => {
67 | sshArgs.push(`-L ${localPort}:${remoteHost}:${remotePort}`);
68 | });
69 |
70 | sshArgs.push(`${args.user || instance.user || 'root'}@${instance.ip}`);
71 | sshArgs.push('-N'); // Don't run a command.
72 |
73 | log.faded(sshArgs.join(' '));
74 |
75 | if (utils.isTestRun()) {
76 | log.log('mocked call of SSH command');
77 |
78 | return nextFn();
79 | }
80 |
81 | const ssh = utils.spawn(sshArgs);
82 |
83 | ports.forEach(({ localPort, remoteHost, remotePort }) => {
84 | log.info(`Tunneling from ${localPort} to ${remoteHost}:${remotePort}.`);
85 | });
86 |
87 | ssh.stdout.on('data', data => {
88 | log.faded(data.toString());
89 | });
90 |
91 | ssh.stderr.on('data', data => {
92 | log.alert(data.toString());
93 | });
94 |
95 | ssh.on('exit', code => {
96 | if (code !== 0) {
97 | utils.die(`SSH connection exited with a non-zero code (${code}). Stopping execution...`);
98 | }
99 | log.br();
100 |
101 | nextFn();
102 | });
103 | }
104 |
105 | export function normalizePorts(arr) {
106 | const ports = [];
107 |
108 | arr.forEach(str => {
109 | str = (`${str}`).split(':');
110 | const hasHostDefined = str.length === 3; // e.g. 3000:otherhost.com:4000
111 | const hasRemotePortDefined = str.length >= 2; // e.g. 80:8080
112 | ports.push({
113 | localPort: str[0],
114 | remoteHost: hasHostDefined ? str[1] : '127.0.0.1',
115 | remotePort: hasRemotePortDefined ? str[str.length - 1] : str[0]
116 | });
117 | });
118 |
119 | return ports;
120 | }
121 |
--------------------------------------------------------------------------------
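normalizePorts() is exported and shows exactly how the [local-port((:hostname):remote-port)] specs are interpreted. A quick sketch, assuming an import path relative to the repository root:

import { normalizePorts } from './src/commands/tunnel.js';

console.log(normalizePorts(['5984', '8000:5984', '3000:otherhost.com:4000']));
// [
//   { localPort: '5984', remoteHost: '127.0.0.1', remotePort: '5984' },
//   { localPort: '8000', remoteHost: '127.0.0.1', remotePort: '5984' },
//   { localPort: '3000', remoteHost: 'otherhost.com', remotePort: '4000' }
// ]
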
/src/commands/vars.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as log from '../log.js';
3 | import { getVariablesJSON } from '../store.js';
4 |
5 | export const commands = {};
6 |
7 | commands.list = {
8 | name: 'list',
9 | usage: ['overcast vars list'],
10 | description: `List all variables in your current config.`,
11 | run: (args, nextFn) => {
12 | const vars = utils.getVariables();
13 | log.faded(`Using ${getVariablesJSON()}`);
14 | log.br();
15 | utils.eachObject(vars, (value, name) => {
16 | if (value === '') {
17 | log.log(`${name} = ''`);
18 | } else if (value === null) {
19 | log.log(`${name} = null`);
20 | } else {
21 | log.log(`${name} = ${value}`);
22 | }
23 | });
24 |
25 | nextFn();
26 | }
27 | };
28 |
29 | commands.set = {
30 | name: 'set',
31 | usage: ['overcast vars set [name] [value]'],
32 | description: `Set a variable in your current config.`,
33 | examples: [
34 | '$ overcast vars set AWS_KEY myawskey12345',
35 | '$ overcast vars set MY_CUSTOM_VARIABLE_NAME foo'
36 | ],
37 | required: ['name', { name: 'value', raw: true }],
38 | run: ({ name, value }, nextFn) => {
39 | const vars = utils.getVariables();
40 | vars[name] = value;
41 | utils.saveVariables(vars, () => {
42 | log.success(`Variable "${name}" saved.`);
43 | nextFn();
44 | });
45 | }
46 | };
47 |
48 | commands.get = {
49 | name: 'get',
50 | usage: ['overcast vars get [name]'],
51 | description: `Get a variable from your current config.`,
52 | examples: [
53 | '$ overcast vars get AWS_KEY',
54 | '> myawskey12345',
55 | '',
56 | '$ overcast vars get MY_CUSTOM_VARIABLE_NAME',
57 | '> foo'
58 | ],
59 | required: ['name'],
60 | run: ({ name }, nextFn) => {
61 | const vars = utils.getVariables();
62 | if (vars[name]) {
63 | log.log(vars[name]);
64 | } else {
65 | utils.die(`Variable "${name}" not found.`);
66 | }
67 |
68 | nextFn();
69 | }
70 | };
71 |
72 | commands.delete = {
73 | name: 'delete',
74 | usage: ['overcast vars delete [name]'],
75 | description: `Delete a variable from your current config.`,
76 | examples: [
77 | '$ overcast vars delete MY_CUSTOM_VARIABLE_NAME'
78 | ],
79 | required: ['name'],
80 | run: ({ name }, nextFn) => {
81 | const vars = utils.getVariables();
82 | if (vars[name]) {
83 | vars[name] = '';
84 | utils.saveVariables(vars, nextFn);
85 | } else {
86 | log.alert(`Variable "${name}" not found. No action taken.`);
87 | nextFn();
88 | }
89 | }
90 | };
91 |
--------------------------------------------------------------------------------
/src/commands/virtualbox.js:
--------------------------------------------------------------------------------
1 | import * as filters from '../filters.js';
2 | import * as provider from '../provider.js';
3 | import { isTestRun } from '../utils.js';
4 | import { api } from '../providers/virtualbox.js';
5 | import { mockAPI } from '../providers/mock.js';
6 |
7 | function getAPI() {
8 | return isTestRun() ? mockAPI : api;
9 | }
10 |
11 | export const commands = {};
12 |
13 | export const banner = [
14 | 'These commands require VirtualBox and Vagrant to be installed on',
15 | 'your local machine. Vagrant files are stored in ~/.overcast-vagrant.'
16 | ];
17 |
18 | commands.boot = {
19 | name: 'boot',
20 | usage: ['overcast virtualbox boot [name]'],
21 |   description: 'Boot up a VirtualBox instance.',
22 | required: [
23 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeVirtualbox] }
24 | ],
25 | run: (args, nextFn) => {
26 | provider.boot(getAPI(), args, nextFn);
27 | }
28 | };
29 |
30 | commands.start = Object.assign({ alias: true }, commands.boot);
31 |
32 | commands.create = {
33 | name: 'create',
34 | usage: ['overcast virtualbox create [name] [options...]'],
35 | description: [
36 |     'Creates a new VirtualBox instance.'
37 | ],
38 | examples: [
39 | '$ overcast virtualbox create vm-01',
40 | '$ overcast virtualbox create vm-02 --ram 1024 --image precise64'
41 | ],
42 | required: [
43 | { name: 'name', filters: filters.shouldBeNewInstance }
44 | ],
45 | options: [
46 | { usage: '--cluster CLUSTER', default: 'default' },
47 | { usage: '--cpus COUNT', default: '1' },
48 | { usage: '--image NAME', default: 'trusty64' },
49 | { usage: '--ram MB', default: '512' },
50 | { usage: '--ip ADDRESS', default: '192.168.22.10' },
51 | { usage: '--ssh-key PATH', default: 'overcast.key' },
52 | { usage: '--ssh-pub-key PATH', default: 'overcast.key.pub' }
53 | ],
54 | run: (args, nextFn) => {
55 | provider.create(getAPI(), args, nextFn);
56 | }
57 | };
58 |
59 | commands.destroy = {
60 | name: 'destroy',
61 | usage: ['overcast virtualbox destroy [name] [options...]'],
62 | description: [
63 |     'Destroys a VirtualBox instance.',
64 | 'Using --force overrides the confirm dialog.'
65 | ],
66 | examples: [
67 | '$ overcast virtualbox destroy vm-01'
68 | ],
69 | required: [
70 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeVirtualbox] }
71 | ],
72 | options: [
73 | { usage: '--force', default: 'false' }
74 | ],
75 | run: (args, nextFn) => {
76 | provider.destroy(getAPI(), args, nextFn);
77 | }
78 | };
79 |
80 | commands.reboot = {
81 | name: 'reboot',
82 | usage: ['overcast virtualbox reboot [name]'],
83 |   description: 'Reboots a VirtualBox instance.',
84 | required: [
85 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeVirtualbox] }
86 | ],
87 | run: (args, nextFn) => {
88 | provider.reboot(getAPI(), args, nextFn);
89 | }
90 | };
91 |
92 | commands.shutdown = {
93 | name: 'shutdown',
94 | usage: ['overcast virtualbox shutdown [name]'],
95 |   description: 'Shut down a VirtualBox instance.',
96 | required: [
97 | { name: 'name', filters: [filters.findFirstMatchingInstance, filters.shouldBeVirtualbox] }
98 | ],
99 | run: (args, nextFn) => {
100 | provider.shutdown(getAPI(), args, nextFn);
101 | }
102 | };
103 |
104 | commands.stop = Object.assign({ alias: true }, commands.shutdown);
105 |
--------------------------------------------------------------------------------
/src/commands/wait.js:
--------------------------------------------------------------------------------
1 | import * as utils from '../utils.js';
2 | import * as log from '../log.js';
3 |
4 | export const commands = {};
5 |
6 | commands.wait = {
7 | name: 'wait',
8 | usage: ['overcast wait [seconds]'],
9 | description: [
10 | 'Show a progress bar for a specified number of seconds.'
11 | ],
12 | examples: [
13 | '$ overcast wait 30'
14 | ],
15 | required: ['seconds'],
16 | run: ({ seconds }, nextFn) => {
17 | utils.fixedWait(seconds, () => {
18 | log.success('Done!');
19 | nextFn();
20 | });
21 | }
22 | };
23 |
--------------------------------------------------------------------------------
/src/constants.js:
--------------------------------------------------------------------------------
1 | export const VERSION = '2.2.7';
2 |
3 | export const SSH_COLORS = [
4 | 'cyan',
5 | 'green',
6 | 'red',
7 | 'yellow',
8 | 'magenta',
9 | 'blue',
10 | 'white'
11 | ];
12 |
--------------------------------------------------------------------------------
/src/filters.js:
--------------------------------------------------------------------------------
1 | import * as utils from './utils.js';
2 | import * as log from './log.js';
3 |
4 | export function findMatchingInstances(name, args) {
5 | args.instances = utils.findMatchingInstances(name);
6 |
7 | if (args.instances.length === 0) {
8 | utils.dieWithList(`No instances found matching "${name}".`);
9 | return false;
10 | }
11 | }
12 |
13 | export function findFirstMatchingInstance(name, args) {
14 | args.instance = utils.findFirstMatchingInstance(name);
15 |
16 | if (!args.instance) {
17 | utils.dieWithList(`No instance found matching "${name}".`);
18 | return false;
19 | }
20 | }
21 |
22 | export function findMatchingCluster(name, args) {
23 | const clusters = utils.getClusters();
24 | args.cluster = clusters[name];
25 |
26 | if (!args.cluster) {
27 | utils.dieWithList(`No clusters found matching "${name}".`);
28 | return false;
29 | }
30 | }
31 |
32 | export function shouldBeNewCluster(name, args) {
33 | const clusters = utils.getClusters();
34 |
35 | if (clusters[name]) {
36 | log.alert(`The cluster "${name}" already exists. No action taken.`);
37 | return false;
38 | }
39 | }
40 |
41 | export function shouldBeNewInstance(name, args) {
42 | const clusters = utils.getClusters();
43 |
44 | if (!args.cluster) {
45 | log.faded('Using "default" cluster.');
46 | args.cluster = 'default';
47 | }
48 |
49 | if (clusters[name]) {
50 | return utils.die(`"${name}" is already in use as a cluster name.`);
51 | } else if (name === 'all') {
52 | return utils.die('"all" is a special keyword that cannot be used for instance names.');
53 | } else if (name.includes('*')) {
54 | return utils.die('Instance names cannot include asterisk characters.');
55 | } else if (utils.findMatchingInstancesByInstanceName(name).length > 0) {
56 | return utils.die(`Instance "${name}" already exists.`);
57 | }
58 | }
59 |
60 | export function shouldBeNewKey(name, args) {
61 | if (utils.keyExists(name)) {
62 | log.alert(`The key "${name}" already exists. No action taken.`);
63 | return false;
64 | }
65 | }
66 |
67 | export function shouldBeExistingKey(name, args) {
68 | if (!utils.keyExists(name)) {
69 | log.alert(`The key "${name}" was not found. No action taken.`);
70 | return false;
71 | }
72 | }
73 |
74 | export function shouldBeDigitalOcean(name, {instance}) {
75 | if (!instance || !instance.digitalocean) {
76 | log.failure('This instance has no DigitalOcean metadata attached.');
77 | log.failure('Run this command and then try again:');
78 | return utils.die(`overcast digitalocean sync "${instance.name}"`);
79 | }
80 | }
81 |
82 | export function shouldBeVirtualbox(name, {instance}) {
83 | if (!instance || !instance.virtualbox) {
84 |     return utils.die('This instance has no VirtualBox metadata attached.');
85 | }
86 | }
87 |
--------------------------------------------------------------------------------
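Every filter shares the same contract: it receives the raw positional value plus the mutable args object, attaches whatever it resolves (args.instance, args.instances, args.cluster), and returns false or exits via utils.die() to veto the command. The command definitions above pass either a single filter or an array of them. A sketch of a driver that honours that contract (the real wiring lives in src/cli.js, which is not shown here, so treat this as an assumption about how the filters are applied):

// Sketch only — a simplified driver for the filter contract described above.
function applyFilters(filters, value, args) {
  const list = Array.isArray(filters) ? filters : [filters];
  for (const filter of list) {
    if (filter(value, args) === false) {
      return false; // the filter already logged why it refused
    }
  }
  return true;
}

// e.g. for "overcast virtualbox boot vm-01" the definition above would run:
// applyFilters([findFirstMatchingInstance, shouldBeVirtualbox], 'vm-01', args)
// and leave the resolved instance on args.instance when both pass.
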
/src/log.js:
--------------------------------------------------------------------------------
1 | import chalk from 'chalk';
2 |
3 | import { isTestRun } from './utils.js';
4 | import { appendToStore, getStore } from './store.js';
5 |
6 | const STORE_KEY = 'LOGS';
7 |
8 | export const faded = (str) => {
9 | log(str, chalk.blackBright);
10 | }
11 |
12 | export const success = (str) => {
13 | log(str, chalk.green);
14 | };
15 |
16 | export const info = (str) => {
17 | log(str, chalk.cyan);
18 | };
19 |
20 | export const alert = (str) => {
21 | log(str, chalk.yellow);
22 | };
23 |
24 | export const failure = (str) => {
25 | log(str, chalk.red);
26 | };
27 |
28 | export const br = () => {
29 | log('');
30 | };
31 |
32 | export const log = (str, colorFn = null) => {
33 | if (isTestRun()) {
34 | appendToStore(STORE_KEY, str);
35 | } else {
36 | console.log(colorFn ? colorFn(str) : str);
37 | }
38 | };
39 |
40 | export const getLogs = () => {
41 | return getStore(STORE_KEY);
42 | };
43 |
--------------------------------------------------------------------------------
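All output funnels through log(): in a normal run it prints to the console with an optional chalk color, while during a test run (utils.isTestRun()) the same lines are appended to the LOGS key in the store so the specs can assert on them via getLogs(). A minimal usage sketch, assuming an import path relative to the repository root:

import * as log from './src/log.js';

log.faded('Using mock provider...');        // dim grey
log.success('Instance "vm-01" booted.');    // green
log.alert('Variable "FOO" not found.');     // yellow
log.failure('Unable to connect.');          // red

// In a test run none of the above is printed; log.getLogs() returns the
// captured lines instead.
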
/src/provider.js:
--------------------------------------------------------------------------------
1 | import readline from 'readline';
2 | import * as utils from './utils.js';
3 | import * as log from './log.js';
4 |
5 | export function handleCommandNotFound(fn) {
6 | if (!utils.isFunction(fn)) {
7 | return utils.die('Command not supported by provider.');
8 | }
9 | }
10 |
11 | export function create(api, args, nextFn) {
12 | handleCommandNotFound(api.create);
13 |
14 | log.faded(`Creating new instance "${args.name}" on ${api.name}...`);
15 | api.create(args, instance => {
16 | utils.saveInstanceToCluster(args.cluster, instance, () => {
17 | log.success(`Instance "${args.name}" (${instance.ip}) saved.`);
18 | waitForBoot(instance, nextFn);
19 | });
20 | });
21 | }
22 |
23 | export function destroy(api, args, nextFn) {
24 | handleCommandNotFound(api.destroy);
25 |
26 | const onDestroy = () => {
27 | utils.deleteInstance(args.instance, () => {
28 | log.success(`Instance "${args.instance.name}" destroyed.`);
29 | nextFn()
30 | });
31 | };
32 |
33 | if (args.force || utils.isTestRun()) {
34 | return api.destroy(args.instance, onDestroy);
35 | }
36 |
37 | const rl = readline.createInterface({
38 | input: process.stdin,
39 | output: process.stdout
40 | });
41 |
42 | const q = `Do you really want to destroy "${args.instance.name}"? [Y/n]`;
43 | rl.question(q, answer => {
44 | rl.close();
45 | if (answer !== '' && answer !== 'Y' && answer !== 'y') {
46 | log.faded('No action taken.');
47 | } else {
48 | api.destroy(args.instance, onDestroy);
49 | }
50 | });
51 | }
52 |
53 | export function boot(api, args, nextFn) {
54 | handleCommandNotFound(api.boot);
55 |
56 | log.faded(`Booting "${args.instance.name}"...`);
57 | api.boot(args.instance, () => {
58 | log.success(`Instance "${args.instance.name}" booted.`);
59 | waitForBoot(args.instance, nextFn);
60 | });
61 | }
62 |
63 | export function shutdown(api, args, nextFn) {
64 | handleCommandNotFound(api.shutdown);
65 |
66 | log.faded(`Shutting down "${args.instance.name}"...`);
67 | api.shutdown(args.instance, () => {
68 | log.success(`Instance "${args.instance.name}" has been shut down.`);
69 | if (utils.isFunction(nextFn)) {
70 | nextFn();
71 | }
72 | });
73 | }
74 |
75 | export function reboot(api, args, nextFn) {
76 | handleCommandNotFound(api.reboot);
77 |
78 | log.faded(`Rebooting "${args.instance.name}"...`);
79 | api.reboot(args.instance, () => {
80 | log.success(`Instance "${args.instance.name}" rebooted.`);
81 | waitForBoot(args.instance, nextFn);
82 | });
83 | }
84 |
85 | export function rebuild(api, args, nextFn) {
86 | handleCommandNotFound(api.rebuild);
87 |
88 | log.faded(`Rebuilding "${args.instance.name}" using image "${args.image}"...`);
89 | api.rebuild(args.instance, args.image, () => {
90 | updateInstanceMetadata(api, args, () => {
91 | log.success(`Instance "${args.instance.name}" rebuilt.`);
92 | waitForBoot(args.instance, nextFn);
93 | });
94 | });
95 | }
96 |
97 | export function resize(api, args, nextFn) {
98 | handleCommandNotFound(api.resize);
99 |
100 | log.faded(`Resizing "${args.instance.name}" to "${args.size}"...`);
101 | api.resize(args.instance, args.size, () => {
102 | updateInstanceMetadata(api, args, () => {
103 | log.success(`Instance "${args.instance.name}" resized.`);
104 | if (args.skipBoot || args['skip-boot']) {
105 | log.faded('Skipping boot since --skip-boot flag was used.');
106 | if (utils.isFunction(nextFn)) {
107 | nextFn();
108 | }
109 | } else {
110 | boot(api, args, nextFn);
111 | }
112 | });
113 | });
114 | }
115 |
116 | export function snapshot(api, args, nextFn) {
117 | handleCommandNotFound(api.snapshot);
118 |
119 | log.faded(`Saving snapshot "${args.snapshotName}" of "${args.instance.name}"...`);
120 | api.snapshot(args.instance, args.snapshotName, () => {
121 | log.success(`Snapshot "${args.snapshotName}" of "${args.instance.name}" saved.`);
122 | waitForBoot(args.instance, nextFn);
123 | });
124 | }
125 |
126 | // AKA distributions (Linode).
127 | export function images(api, nextFn) {
128 | handleCommandNotFound(api.getImages);
129 |
130 | api.getImages(images => {
131 | utils.printCollection('images', images);
132 | if (utils.isFunction(nextFn)) {
133 | nextFn();
134 | }
135 | });
136 | }
137 |
138 | // AKA droplets (DO) or linodes (Linode).
139 | export function instances(api, args, nextFn) {
140 | handleCommandNotFound(api.getInstances);
141 |
142 | // AWS needs args.region, DigitalOcean does not.
143 | api.getInstances(args, instances => {
144 | utils.printCollection('instances', instances);
145 | if (utils.isFunction(nextFn)) {
146 | nextFn();
147 | }
148 | });
149 | }
150 |
151 | export function instance(api, args, nextFn) {
152 | handleCommandNotFound(api.getInstance);
153 |
154 | api.getInstance(args.instance, nextFn);
155 | }
156 |
157 | export function updateInstanceMetadata(api, args, nextFn) {
158 | handleCommandNotFound(api.updateInstanceMetadata);
159 |
160 | api.updateInstanceMetadata(args.instance, nextFn);
161 | }
162 |
163 | export function sync(api, args, nextFn) {
164 | handleCommandNotFound(api.sync);
165 |
166 | log.faded(`Fetching metadata for "${args.instance.name}"...`);
167 | api.sync(args.instance, () => {
168 | log.success(`Metadata for "${args.instance.name}" updated.`);
169 | if (utils.isFunction(nextFn)) {
170 | nextFn();
171 | }
172 | });
173 | }
174 |
175 | export function kernels(api, nextFn) {
176 | handleCommandNotFound(api.getKernels);
177 |
178 | api.getKernels(kernels => {
179 | utils.printCollection('kernels', kernels);
180 | if (utils.isFunction(nextFn)) {
181 | nextFn();
182 | }
183 | });
184 | }
185 |
186 | // AKA datacenters (Linode).
187 | export function regions(api, nextFn) {
188 | handleCommandNotFound(api.getRegions);
189 |
190 | api.getRegions(regions => {
191 | utils.printCollection('regions', regions);
192 | if (utils.isFunction(nextFn)) {
193 | nextFn();
194 | }
195 | });
196 | }
197 |
198 | // AKA types (AWS) or plans (Linode).
199 | export function sizes(api, nextFn) {
200 | handleCommandNotFound(api.getSizes);
201 |
202 | api.getSizes(sizes => {
203 | utils.printCollection('sizes', sizes);
204 | if (utils.isFunction(nextFn)) {
205 | nextFn();
206 | }
207 | });
208 | }
209 |
210 | export function snapshots(api, nextFn) {
211 | handleCommandNotFound(api.getSnapshots);
212 |
213 | api.getSnapshots(snapshots => {
214 | utils.printCollection('snapshots', snapshots);
215 | if (utils.isFunction(nextFn)) {
216 | nextFn();
217 | }
218 | });
219 | }
220 |
221 | export function waitForBoot(instance, nextFn = () => {}, startTime) {
222 | if (!startTime) {
223 | startTime = utils.now();
224 | log.faded('Waiting until we can connect to ' + instance.name + '...');
225 | }
226 |
227 | testConnection(instance, canConnect => {
228 | const delayBetweenPolls = 2000;
229 |
230 | if (canConnect) {
231 | const duration = (utils.now() - startTime) / 1000;
232 | log.success('Connection established after ' + Math.ceil(duration) + ' seconds.');
233 | nextFn();
234 | } else {
235 | setTimeout(() => {
236 | waitForBoot(instance, nextFn, startTime);
237 | }, delayBetweenPolls);
238 | }
239 | });
240 | }
241 |
242 | export function testConnection(instance, nextFn = () => {}) {
243 |   if (utils.isTestRun()) {
244 | return nextFn(true);
245 | }
246 |
247 | const key = utils.normalizeKeyPath(utils.escapeWindowsPath(instance.ssh_key));
248 | const port = instance.ssh_port || 22;
249 | const host = instance.user + '@' + instance.ip;
250 | const command = 'ssh -i ' + key + ' -p ' + port + ' ' + host +
251 | ' -o StrictHostKeyChecking=no "echo hi"';
252 |
253 | const ssh = utils.spawn(command);
254 | const timeout = setTimeout(() => {
255 | callbackOnce(false);
256 | ssh.kill();
257 | }, 8000);
258 |
259 | let alreadyCalled = false;
260 | const callbackOnce = (result) => {
261 | if (!alreadyCalled) {
262 | clearTimeout(timeout);
263 | alreadyCalled = true;
264 | nextFn(result);
265 | }
266 | };
267 |
268 | ssh.on('exit', (code) => {
269 | if (code === 0) {
270 | callbackOnce(true);
271 | } else {
272 | callbackOnce(false);
273 | }
274 | });
275 | }
276 |
--------------------------------------------------------------------------------
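These helpers are provider-agnostic: each one only calls the method it needs on the api object it is handed, and handleCommandNotFound() turns a missing method into a clean error. A sketch of the minimum shape a provider needs in order to be driven through boot/shutdown/destroy (mirroring src/providers/mock.js below; the method bodies are placeholders, not a real backend):

// Sketch only — the smallest provider shape these helpers check for.
const myProvider = {
  id: 'my-provider',
  name: 'MyProvider',
  boot: (instance, nextFn) => { /* start the machine, then */ nextFn(); },
  shutdown: (instance, nextFn) => { /* stop the machine, then */ nextFn(); },
  destroy: (instance, nextFn) => { /* remove the machine, then */ nextFn(); }
};

// provider.boot(myProvider, { instance }, nextFn) logs progress, calls
// myProvider.boot(instance, ...) and then polls waitForBoot() until an SSH
// "echo hi" succeeds; provider.destroy() prompts first unless --force is used.
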
/src/providers/index.js:
--------------------------------------------------------------------------------
1 | import * as digitalocean from './digitalocean.js';
2 | import * as virtualbox from './virtualbox.js';
3 |
4 | export default {
5 | digitalocean,
6 | virtualbox
7 | };
8 |
--------------------------------------------------------------------------------
/src/providers/mock.js:
--------------------------------------------------------------------------------
1 |
2 | export const mockAPI = {
3 | id: 'mock',
4 | name: 'MockProvider'
5 | };
6 |
7 | // Provider interface
8 |
9 | mockAPI.create = (args, nextFn) => {
10 | nextFn(mockInstance({
11 | name: args.name,
12 | ssh_port: args['ssh-port'],
13 | // Including these here to bypass filters
14 | virtualbox: {},
15 | digitalocean: {}
16 | }));
17 | }
18 |
19 | mockAPI.destroy = (instance, nextFn) => {
20 | nextFn();
21 | }
22 |
23 | mockAPI.boot = (instance, nextFn) => {
24 | nextFn();
25 | }
26 |
27 | mockAPI.shutdown = (instance, nextFn) => {
28 | nextFn();
29 | }
30 |
31 | mockAPI.reboot = (instance, nextFn) => {
32 | nextFn();
33 | }
34 |
35 | mockAPI.rebuild = (instance, image, nextFn) => {
36 | nextFn();
37 | }
38 |
39 | mockAPI.resize = (instance, size, nextFn) => {
40 | nextFn();
41 | }
42 |
43 | mockAPI.snapshot = (instance, snapshotName, nextFn) => {
44 | nextFn();
45 | }
46 |
47 | mockAPI.getImages = (nextFn) => {
48 | nextFn(MOCK_IMAGES);
49 | }
50 |
51 | mockAPI.getInstances = (args, nextFn) => {
52 | nextFn(MOCK_INSTANCES);
53 | }
54 |
55 | mockAPI.getInstance = (instance, nextFn) => {
56 | nextFn(instance);
57 | }
58 |
59 | mockAPI.sync = (instance, nextFn) => {
60 | mockAPI.updateInstanceMetadata(instance, nextFn);
61 | }
62 |
63 | mockAPI.updateInstanceMetadata = (instance, nextFn) => {
64 | nextFn();
65 | }
66 |
67 | mockAPI.getRegions = (nextFn) => {
68 | nextFn(MOCK_REGIONS);
69 | }
70 |
71 | mockAPI.getSizes = (nextFn) => {
72 | nextFn(MOCK_SIZES);
73 | }
74 |
75 | mockAPI.getSnapshots = (nextFn) => {
76 | nextFn([]);
77 | }
78 |
79 | mockAPI.getKeys = (nextFn) => {
80 | nextFn(MOCK_KEYS);
81 | }
82 |
83 | mockAPI.createKey = (keyData, nextFn) => {
84 | nextFn();
85 | }
86 |
87 | // Mock data
88 |
89 | let IP_ID = 100;
90 |
91 | function mockIP() {
92 | const ip = '192.168.100.' + IP_ID;
93 | IP_ID += 1;
94 |
95 | return ip;
96 | }
97 |
98 | function mockInstance(args) {
99 | return Object.assign({
100 | ip: mockIP(),
101 | name: 'mock-01',
102 | ssh_key: 'overcast.key',
103 | ssh_port: '22',
104 | user: 'root'
105 | }, args);
106 | }
107 |
108 | const MOCK_IMAGES = [
109 | { name: '20.04' },
110 | { name: '16.04' }
111 | ];
112 |
113 | const MOCK_REGIONS = [
114 | { name: 'nyc1' },
115 | { name: 'sfo1' }
116 | ];
117 |
118 | const MOCK_KEYS = [];
119 |
120 | const MOCK_INSTANCES = [
121 | mockInstance({ name: 'mock-01' })
122 | ];
123 |
--------------------------------------------------------------------------------
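The mock provider answers through the same callback-style interface as the real ones, which is what the specs under test/integration rely on. Exercising it directly, assuming an import path relative to the repository root:

import { mockAPI } from './src/providers/mock.js';

mockAPI.getInstances({}, (instances) => {
  console.log(instances); // [{ name: 'mock-01', ip: '192.168.100.100', ... }]
});

mockAPI.create({ name: 'mock-02' }, (instance) => {
  console.log(instance.name, instance.ip); // mock-02 plus the next mock IP
});
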
/src/providers/virtualbox.js:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import cp from 'child_process';
3 | import * as utils from '../utils.js';
4 | import * as log from '../log.js';
5 |
6 | const FIRST_IP = '192.168.22.10';
7 | const OVERCAST_VAGRANT_DIR = utils.getUserHome() + '/.overcast-vagrant';
8 |
9 | export const api = {
10 | id: 'virtualbox',
11 | name: 'VirtualBox'
12 | };
13 |
14 | const BUNDLED_IMAGE_URLS = {
15 | 'trusty64': 'https://cloud-images.ubuntu.com/vagrant/trusty/current/trusty-server-cloudimg-amd64-vagrant-disk1.box',
16 | 'precise64': 'https://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box',
17 | 'focal64': 'https://app.vagrantup.com/bento/boxes/ubuntu-20.04/versions/202112.19.0/providers/virtualbox.box'
18 | };
19 |
20 | // Provider interface
21 |
22 | api.boot = (instance, nextFn = () => {}) => {
23 | startInstance(instance)
24 | .catch(genericCatch)
25 | .then(() => {
26 | nextFn();
27 | });
28 | };
29 |
30 | api.create = (args, nextFn = () => {}) => {
31 | args = Object.assign({
32 | ssh_port: 22,
33 | user: 'root',
34 | ssh_key: utils.normalizeKeyPath(args['ssh-key'] || 'overcast.key'),
35 | ssh_pub_key: utils.normalizeKeyPath(args['ssh-pub-key'] || 'overcast.key.pub'),
36 | image: args.image || 'focal64',
37 | ram: args.ram || '1024',
38 | cpus: args.cpus || '1'
39 | }, args);
40 |
41 | getVagrantImages(args)
42 | .then(createVagrantBox)
43 | .then(createInstance)
44 | .catch(genericCatch)
45 | .then(args => {
46 | const instance = {
47 | name: args.name,
48 | ip: args.ip,
49 | ssh_key: args.ssh_key,
50 | ssh_port: '22',
51 | user: 'root',
52 | virtualbox: {
53 | dir: args.dir,
54 | name: args.image + '.' + args.ip,
55 | image: args.image,
56 | ram: args.ram,
57 | cpus: args.cpus
58 | }
59 | };
60 |
61 | nextFn(instance);
62 | });
63 | };
64 |
65 | api.destroy = (instance, nextFn = () => {}) => {
66 | destroyInstance(instance)
67 | .catch(genericCatch)
68 | .then(() => {
69 | nextFn();
70 | });
71 | };
72 |
73 | api.reboot = (instance, nextFn = () => {}) => {
74 | stopInstance(instance)
75 | .then(startInstance)
76 | .catch(genericCatch)
77 | .then(instance => {
78 | nextFn();
79 | });
80 | };
81 |
82 | api.shutdown = (instance, nextFn = () => {}) => {
83 | stopInstance(instance)
84 | .catch(genericCatch)
85 | .then(() => {
86 | nextFn();
87 | });
88 | };
89 |
90 | // Internal functions
91 |
92 | export function parseCSV(str) {
93 | var arr = [];
94 | (str || '').split("\n").forEach((row) => {
95 | row = row.trim();
96 | if (row) {
97 | // TODO: This doesn't handle double quotes or escaped commas. Fix me.
98 | arr.push(row.split(','));
99 | }
100 | });
101 | return arr;
102 | }
103 |
104 | export function getVagrantImages(args) {
105 | return new Promise((resolve, reject) => {
106 | const vagrant = utils.spawn(['vagrant box list --machine-readable']);
107 | let stdout = '';
108 |
109 | vagrant.stdout.on('data', data => {
110 | stdout += data + '';
111 | });
112 |
113 | vagrant.on('exit', code => {
114 | if (code !== 0) {
115 | reject();
116 | } else {
117 | const images = [];
118 | parseCSV(stdout).forEach((row) => {
119 | if (row[2] === 'box-name') {
120 | images.push(row[3]);
121 | }
122 | });
123 | args.vagrantImages = images;
124 | resolve(args);
125 | }
126 | });
127 | });
128 | }
129 |
130 | export function createVagrantBox(args) {
131 | return new Promise((resolve, reject) => {
132 | if (args.vagrantImages && args.vagrantImages.indexOf(args.image) !== -1) {
133 | log.faded('Image "' + args.image + '" found.');
134 | resolve(args);
135 | } else if (BUNDLED_IMAGE_URLS[args.image]) {
136 | var color = utils.getNextColor();
137 | var vagrant = utils.spawn(['vagrant box add --name ' + args.image + ' ' + BUNDLED_IMAGE_URLS[args.image]]);
138 |
139 | vagrant.stdout.on('data', data => {
140 | utils.prefixPrint(args.name, color, data);
141 | });
142 |
143 | vagrant.stderr.on('data', data => {
144 | utils.prefixPrint(args.name, color, data, 'grey');
145 | });
146 |
147 | vagrant.on('exit', code => {
148 | if (code !== 0) {
149 | reject();
150 | } else {
151 | resolve(args);
152 | }
153 | });
154 | } else {
155 | log.failure('Image "' + args.image + '" not found. Please add this using Vagrant:');
156 | return utils.die('vagrant box add --name "' + args.image + '" [image-url]');
157 | }
158 | });
159 | }
160 |
161 | export function nextAvailableIP(ip) {
162 | if (fs.existsSync(OVERCAST_VAGRANT_DIR + '/' + ip)) {
163 | var existing = fs.readdirSync(OVERCAST_VAGRANT_DIR);
164 | return findNextAvailableIP(existing);
165 | } else {
166 | return ip;
167 | }
168 | }
169 |
170 | export function findNextAvailableIP(existing) {
171 | var ip = FIRST_IP;
172 |
173 | while (existing.indexOf(ip) !== -1) {
174 | ip = ip.split('.');
175 | if (ip[3] === '255') {
176 | if (ip[2] === '255') {
177 | log.failure('Congratulations! You seem to have used all available IP addresses in the 192.168 block.');
178 | return utils.die('Please destroy some of these instances before making a new one.');
179 | }
180 | ip[2] = parseInt(ip[2], 10) + 1;
181 | ip[3] = '10';
182 | } else {
183 | ip[3] = parseInt(ip[3], 10) + 1;
184 | }
185 | ip = ip.join('.');
186 | }
187 |
188 | return ip;
189 | }
190 |
191 | export function createInstance(args) {
192 | return new Promise((resolve, reject) => {
193 | var ip = nextAvailableIP(args.ip || FIRST_IP);
194 | log.faded('Using IP address ' + ip + '.');
195 |
196 | args.ip = ip;
197 | args.dir = OVERCAST_VAGRANT_DIR + '/' + ip;
198 |
199 | var color = utils.getNextColor();
200 |
201 | var bashArgs = [
202 | utils.escapeWindowsPath(utils.getFileDirname() + '../bin/overcast-vagrant')
203 | ];
204 |
205 | var bashEnv = Object.assign({}, process.env, {
206 | VM_BOX: args.image,
207 | VM_IP: args.ip,
208 | VM_RAM: args.ram,
209 | VM_CPUS: args.cpus,
210 | VM_PUB_KEY: args.ssh_pub_key
211 | });
212 |
213 | var bash = cp.spawn('bash', bashArgs, { env: bashEnv });
214 |
215 | bash.stdout.on('data', data => {
216 | utils.prefixPrint(args.name, color, data);
217 | });
218 |
219 | bash.stderr.on('data', data => {
220 | utils.prefixPrint(args.name, color, data, 'grey');
221 | });
222 |
223 | bash.on('exit', code => {
224 | if (code !== 0) {
225 | reject();
226 | } else {
227 | resolve(args);
228 | }
229 | });
230 | });
231 | }
232 |
233 | export function stopInstance(instance) {
234 | return new Promise((resolve, reject) => {
235 | var color = utils.getNextColor();
236 | var vagrant = utils.spawn('vagrant halt', {
237 | cwd: instance.virtualbox.dir
238 | });
239 |
240 | vagrant.stdout.on('data', data => {
241 | utils.prefixPrint(instance.name, color, data);
242 | });
243 |
244 | vagrant.stderr.on('data', data => {
245 | utils.prefixPrint(instance.name, color, data, 'grey');
246 | });
247 |
248 | vagrant.on('exit', code => {
249 | if (code !== 0) {
250 | reject();
251 | } else {
252 | resolve(instance);
253 | }
254 | });
255 | });
256 | }
257 |
258 | export function startInstance(instance) {
259 | return new Promise((resolve, reject) => {
260 | var color = utils.getNextColor();
261 | var vagrant = utils.spawn('vagrant up', {
262 | cwd: instance.virtualbox.dir
263 | });
264 |
265 | vagrant.stdout.on('data', data => {
266 | utils.prefixPrint(instance.name, color, data);
267 | });
268 |
269 | vagrant.stderr.on('data', data => {
270 | utils.prefixPrint(instance.name, color, data, 'grey');
271 | });
272 |
273 | vagrant.on('exit', code => {
274 | if (code !== 0) {
275 | reject();
276 | } else {
277 | resolve(instance);
278 | }
279 | });
280 | });
281 | }
282 |
283 | export function destroyInstance(instance) {
284 | return new Promise((resolve, reject) => {
285 | var color = utils.getNextColor();
286 | var vagrant = utils.spawn('vagrant destroy -f', {
287 | cwd: instance.virtualbox.dir
288 | });
289 |
290 | vagrant.stdout.on('data', data => {
291 | utils.prefixPrint(instance.name, color, data);
292 | });
293 |
294 | vagrant.stderr.on('data', data => {
295 | utils.prefixPrint(instance.name, color, data, 'grey');
296 | });
297 |
298 | vagrant.on('exit', code => {
299 | if (code !== 0) {
300 | reject();
301 | } else {
302 | utils.rmDir(instance.virtualbox.dir, () => {
303 | resolve(instance);
304 | });
305 | }
306 | });
307 | });
308 | }
309 |
310 | function genericCatch(err) {
311 | if (err) {
312 |     return utils.die(`Got an error from the VirtualBox API: ${err.message || err}`);
313 | }
314 | }
315 |
--------------------------------------------------------------------------------
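Local VMs get their addresses from findNextAvailableIP(), which walks up from FIRST_IP and skips any address that already has a directory under ~/.overcast-vagrant. A quick sketch of the allocation, assuming an import path relative to the repository root:

import { findNextAvailableIP } from './src/providers/virtualbox.js';

// Nothing allocated yet: the first address in the range is handed out.
console.log(findNextAvailableIP([]));                                   // 192.168.22.10

// .10 and .11 already have Vagrant directories, so the next free one is used.
console.log(findNextAvailableIP(['192.168.22.10', '192.168.22.11']));   // 192.168.22.12
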
/src/rsync.js:
--------------------------------------------------------------------------------
1 | import * as utils from './utils.js';
2 | import * as log from './log.js';
3 |
4 | export function run(args, nextFn) {
5 | const instances = utils.findMatchingInstances(args.name);
6 | utils.handleInstanceOrClusterNotFound(instances, args);
7 |
8 | if (args.parallel || args.p) {
9 | instances.forEach((instance) => {
10 | runOnInstance(instance, utils.deepClone(args), nextFn);
11 | });
12 | } else {
13 | runOnInstances(instances, args, nextFn);
14 | }
15 | }
16 |
17 | function runOnInstances(instances, args, nextFn) {
18 | const instance = instances.shift();
19 | runOnInstance(instance, utils.deepClone(args), () => {
20 | if (instances.length > 0) {
21 |       runOnInstances(instances, args, nextFn);
22 | } else {
23 | nextFn();
24 | }
25 | });
26 | }
27 |
28 | function runOnInstance(instance, args, nextFn = () => {}) {
29 | const vars = utils.getVariables();
30 |
31 | rsync({
32 | ip: instance.ip,
33 | name: instance.name,
34 | user: args.user || vars.OVERCAST_SSH_USER || instance.user,
35 | password: args.password || instance.password,
36 | ssh_key: args['ssh-key'] || vars.OVERCAST_SSH_KEY || instance.ssh_key,
37 | ssh_port: instance.ssh_port,
38 | env: args.env,
39 | exclude: args.exclude,
40 | direction: args.direction,
41 | source: args.source,
42 | dest: args.dest
43 | }, nextFn);
44 | }
45 |
46 | function rsync(options, nextFn = () => {}) {
47 | if (!options.ip) {
48 | return utils.die('IP missing.');
49 | }
50 |
51 | const color = utils.getNextColor();
52 |
53 | options.ssh_key = utils.normalizeKeyPath(options.ssh_key);
54 | options.ssh_port = options.ssh_port || '22';
55 | options.user = options.user || 'root';
56 | options.name = options.name || 'Unknown';
57 |
58 | const ssh = [];
59 | if (options.password) {
60 | ssh.push('sshpass');
61 | ssh.push('-p' + options.password);
62 | }
63 | ssh.push('ssh');
64 | ssh.push('-p');
65 | ssh.push(options.ssh_port);
66 | if (options.password) {
67 | ssh.push('-o');
68 | ssh.push('PubkeyAuthentication=no');
69 | } else {
70 | ssh.push('-i');
71 | ssh.push(options.ssh_key);
72 | }
73 |
74 | const args = [
75 | 'rsync',
76 | '-e "' + ssh.join(' ') + '"',
77 | '-varuzP',
78 | '--delete',
79 | '--ignore-errors'
80 | ];
81 |
82 | if (options.exclude) {
83 | args.push('--exclude');
84 | args.push(options.exclude);
85 | }
86 |
87 | if (options.direction === 'pull') {
88 | options.dest = utils.convertToAbsoluteFilePath(options.dest);
89 | options.dest = utils.replaceInstanceName(options.name, options.dest);
90 | args.push(options.user + '@' + options.ip + ':' + options.source);
91 | args.push(options.dest);
92 | } else if (options.direction === 'push') {
93 | options.source = utils.convertToAbsoluteFilePath(options.source);
94 | options.source = utils.replaceInstanceName(options.name, options.source);
95 | args.push(options.source);
96 | args.push(options.user + '@' + options.ip + ':' + options.dest);
97 | } else {
98 | return utils.die('No direction specified.');
99 | }
100 |
101 | log.faded(args.join(' '));
102 |
103 | if (utils.isTestRun()) {
104 | log.log('mocked call of Rsync command');
105 |
106 | return nextFn();
107 | }
108 |
109 | const rsyncProcess = utils.spawn(args);
110 |
111 | rsyncProcess.stdout.on('data', data => {
112 | utils.prefixPrint(options.name, color, data);
113 | });
114 |
115 | rsyncProcess.stderr.on('data', data => {
116 | utils.prefixPrint(options.name, color, data, 'grey');
117 | });
118 |
119 | rsyncProcess.on('exit', code => {
120 | if (code !== 0) {
121 | const str = 'rsync exited with a non-zero code (' + code + '). Stopping execution...';
122 | utils.prefixPrint(options.name, color, str, 'red');
123 | process.exit(1);
124 | }
125 | log.success(options.source + ' transferred to ' + options.dest);
126 | log.br();
127 |
128 | nextFn();
129 | });
130 | }
131 |
--------------------------------------------------------------------------------
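The direction decides which end of the transfer is remote: pull appends `user@ip:source` followed by the locally expanded destination, while push appends the expanded local source followed by `user@ip:dest`. A small stand-alone sketch of that ordering, mirroring rsync() above and scpExec() in src/scp.js below (the helper and values here are hypothetical, for illustration only):

// Sketch only — mirrors the endpoint ordering used for each direction.
const endpointsFor = (direction, { user, ip, source, dest }) =>
  direction === 'pull'
    ? [`${user}@${ip}:${source}`, dest]   // remote source, local destination
    : [source, `${user}@${ip}:${dest}`];  // local source, remote destination

console.log(endpointsFor('pull', {
  user: 'root', ip: '1.2.3.4', source: '.bashrc', dest: '.overcast/files/app.01.bashrc'
}));
console.log(endpointsFor('push', {
  user: 'root', ip: '1.2.3.4', source: '.overcast/files/app.01.bashrc', dest: '.bashrc'
}));
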
/src/scp.js:
--------------------------------------------------------------------------------
1 | import * as utils from './utils.js';
2 | import * as log from './log.js';
3 |
4 | export function run(args, nextFn) {
5 | const instances = utils.findMatchingInstances(args.name);
6 | utils.handleInstanceOrClusterNotFound(instances, args);
7 |
8 | if (args.parallel || args.p) {
9 | instances.forEach((instance) => {
10 | runOnInstance(instance, utils.deepClone(args), nextFn);
11 | });
12 | } else {
13 | runOnInstances(instances, args, nextFn);
14 | }
15 | }
16 |
17 | function runOnInstances(stack, args, nextFn) {
18 | const instance = stack.shift();
19 | runOnInstance(instance, utils.deepClone(args), () => {
20 | if (stack.length > 0) {
21 |       runOnInstances(stack, args, nextFn);
22 | } else {
23 | nextFn();
24 | }
25 | });
26 | }
27 |
28 | function runOnInstance(instance, args, nextFn = () => {}) {
29 | const vars = utils.getVariables();
30 |
31 | scpExec({
32 | ip: instance.ip,
33 | name: instance.name,
34 | user: args.user || vars.OVERCAST_SSH_USER || instance.user,
35 | password: args.password || instance.password,
36 | ssh_key: args['ssh-key'] || vars.OVERCAST_SSH_KEY || instance.ssh_key,
37 | ssh_port: instance.ssh_port,
38 | env: args.env,
39 | direction: args.direction,
40 | source: args.source,
41 | dest: args.dest
42 | }, nextFn);
43 | }
44 |
45 | function scpExec(options, nextFn = () => {}) {
46 | if (!options.ip) {
47 | return utils.die('IP missing.');
48 | }
49 |
50 | const color = utils.getNextColor();
51 |
52 | options.ssh_key = utils.normalizeKeyPath(options.ssh_key);
53 | options.ssh_port = options.ssh_port || '22';
54 | options.user = options.user || 'root';
55 | options.name = options.name || 'Unknown';
56 |
57 | const args = [];
58 | if (options.password) {
59 | args.push('sshpass');
60 | args.push('-p' + options.password);
61 | }
62 | args.push('scp');
63 | args.push('-r');
64 | if (!options.password) {
65 | args.push('-i');
66 | args.push(options.ssh_key);
67 | }
68 | args.push('-P');
69 | args.push(options.ssh_port);
70 | args.push('-o');
71 | args.push('StrictHostKeyChecking=no');
72 | if (options.password) {
73 | args.push('-o');
74 | args.push('PubkeyAuthentication=no');
75 | }
76 |
77 | if (options.direction === 'pull') {
78 | options.dest = utils.convertToAbsoluteFilePath(options.dest);
79 | options.dest = utils.replaceInstanceName(options.name, options.dest);
80 | args.push((options.user || 'root') + '@' + options.ip + ':' + options.source);
81 | args.push(options.dest);
82 | } else if (options.direction === 'push') {
83 | options.source = utils.convertToAbsoluteFilePath(options.source);
84 | options.source = utils.replaceInstanceName(options.name, options.source);
85 | args.push(options.source);
86 | args.push((options.user || 'root') + '@' + options.ip + ':' + options.dest);
87 | } else {
88 | return utils.die('No direction specified.');
89 | }
90 |
91 | log.faded(args.join(' '));
92 |
93 | if (utils.isTestRun()) {
94 | log.log('mocked call of SCP command');
95 |
96 | return nextFn();
97 | }
98 |
99 | const scp = utils.spawn(args);
100 |
101 | scp.stdout.on('data', data => {
102 | utils.prefixPrint(options.name, color, data);
103 | });
104 |
105 | scp.stderr.on('data', data => {
106 | utils.prefixPrint(options.name, color, data, 'grey');
107 | });
108 |
109 | scp.on('exit', code => {
110 | if (code !== 0) {
111 | const str = 'SCP connection exited with a non-zero code (' + code + '). Stopping execution...';
112 | utils.prefixPrint(options.name, color, str, 'red');
113 | process.exit(1);
114 | }
115 | log.success(options.source + ' transferred to ' + options.dest);
116 | log.br();
117 |
118 | nextFn();
119 | });
120 | }
121 |
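A minimal sketch (not part of the source file above) of the argument list scpExec() builds for a push over key-based auth, again with a placeholder key path:

// Illustrative only: roughly what `overcast push vm-01 /path/to/src /path/to/dest`
// assembles for an instance at 1.2.3.4 with the default port.
const keyPath = '/path/to/resolved/key'; // placeholder
const exampleArgs = [
  'scp', '-r', '-i', keyPath,
  '-P', '22',
  '-o', 'StrictHostKeyChecking=no',
  '/path/to/src', 'root@1.2.3.4:/path/to/dest'
];
// With a password set, the list is instead prefixed with `sshpass -p<password>`,
// the `-i keyPath` pair is dropped, and `-o PubkeyAuthentication=no` is appended.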
--------------------------------------------------------------------------------
/src/ssh.js:
--------------------------------------------------------------------------------
1 | import fs from 'fs';
2 | import path from 'path';
3 | import cp from 'child_process';
4 | import * as utils from './utils.js';
5 | import * as log from './log.js';
6 | import { decreaseSSHCount } from './store.js';
7 |
8 | export function run(args, nextFn) {
9 | // Handle cases where minimist mistakenly parses ssh-args (e.g. "-tt" becomes { t: true }).
10 | if (args['ssh-args'] === true) {
11 | const rawArgs = process.argv.slice(2);
12 | const rawArgsIndex = rawArgs.findIndex(arg => arg === '--ssh-args') + 1;
13 | if (rawArgs[rawArgsIndex]) {
14 | args['ssh-args'] = rawArgs[rawArgsIndex];
15 | }
16 | }
17 |
18 | let instances = utils.findMatchingInstances(args.name);
19 | utils.handleInstanceOrClusterNotFound(instances, args);
20 |
21 | if (instances.length > 1 && utils.argIsTruthy(args['only-once'])) {
22 | instances = [instances[0]];
23 | }
24 |
25 | if (args.parallel || args.p) {
26 | runOnInstancesInParallel(instances, args, nextFn);
27 | } else {
28 | runOnInstances(instances, args, nextFn);
29 | }
30 | }
31 |
32 | function runOnInstancesInParallel(instances, args, nextFn) {
33 | const fns = instances.map((instance) => {
34 | return (nextFn) => {
35 | runOnInstance(instance, utils.deepClone(args), nextFn);
36 | };
37 | });
38 |
39 | utils.allInParallelThen(fns, nextFn);
40 | }
41 |
42 | function runOnInstances(instances, args, nextFn = () => {}) {
43 | const instance = instances.shift();
44 | runOnInstance(instance, utils.deepClone(args), () => {
45 | if (instances.length > 0) {
46 | runOnInstances(instances, args, nextFn);
47 | } else {
48 | nextFn();
49 | }
50 | });
51 | }
52 |
53 | function runOnInstance(instance, args, nextFn) {
54 | const command = args._.shift();
55 | const vars = utils.getVariables();
56 |
57 | sshExec({
58 | ip: instance.ip,
59 | name: instance.name,
60 | user: args.user || vars.OVERCAST_SSH_USER || instance.user,
61 | password: args.password || instance.password,
62 | ssh_key: args['ssh-key'] || vars.OVERCAST_SSH_KEY || instance.ssh_key,
63 | ssh_args: utils.isString(args['ssh-args']) ? args['ssh-args'] : '',
64 | ssh_port: instance.ssh_port,
65 | continueOnError: args.continueOnError,
66 | machineReadable: args['mr'] || args['machine-readable'],
67 | env: args.env,
68 | command,
69 | shell_command: args['shell-command']
70 | }, () => {
71 | if (args._.length > 0) {
72 | runOnInstance(instance, args, nextFn);
73 | } else if (utils.isFunction(nextFn)) {
74 | nextFn();
75 | }
76 | });
77 | }
78 |
79 | function sshExec(options, nextFn) {
80 | if (!options.ip) {
81 | utils.die('IP missing.');
82 | return nextFn();
83 | }
84 |
85 | const color = utils.getNextColor();
86 |
87 | options.ssh_key = utils.normalizeKeyPath(options.ssh_key);
88 | options.ssh_port = options.ssh_port || '22';
89 | options.user = options.user || 'root';
90 | options.password = options.password || '';
91 | options.name = options.name || 'Unknown';
92 |
93 | const args = [
94 | utils.escapeWindowsPath(utils.getFileDirname() + '/../bin/overcast-ssh')
95 | ];
96 |
97 | const sshEnv = {
98 | OVERCAST_KEY: utils.escapeWindowsPath(options.ssh_key),
99 | OVERCAST_PORT: options.ssh_port,
100 | OVERCAST_USER: options.user,
101 | OVERCAST_PASSWORD: options.password,
102 | OVERCAST_IP: options.ip,
103 | OVERCAST_SSH_ARGS: options.ssh_args
104 | };
105 |
106 | if (options.env) {
107 | if (utils.isObject(options.env)) {
108 | sshEnv.OVERCAST_ENV = utils.mapObject(options.env, (val, key) => {
109 | return key + '="' + (val + '').replace(/"/g, '\\"') + '"';
110 | }).join(' ');
111 | } else if (utils.isArray(options.env)) {
112 | sshEnv.OVERCAST_ENV = options.env.join(' ');
113 | } else if (utils.isString(options.env)) {
114 | sshEnv.OVERCAST_ENV = options.env.trim();
115 | }
116 | if (sshEnv.OVERCAST_ENV) {
117 | sshEnv.OVERCAST_ENV += ' ';
118 | }
119 | }
120 |
121 | const cwdScriptFile = commandAsScriptFile(options.command, process.cwd());
122 | const bundledScriptFile = commandAsScriptFile(options.command, utils.getFileDirname() + '/../scripts');
123 |
124 | if (fs.existsSync(cwdScriptFile)) {
125 | sshEnv.OVERCAST_SCRIPT_FILE = utils.escapeWindowsPath(cwdScriptFile);
126 | } else if (fs.existsSync(bundledScriptFile)) {
127 | sshEnv.OVERCAST_SCRIPT_FILE = utils.escapeWindowsPath(bundledScriptFile);
128 | } else {
129 | sshEnv.OVERCAST_COMMAND = options.command;
130 | }
131 |
132 | if (options.shell_command) {
133 | sshEnv.SHELL_COMMAND = options.shell_command;
134 | }
135 |
136 | if (options.machineReadable) {
137 | sshEnv.OVERCAST_HIDE_COMMAND = 1;
138 | }
139 |
140 | if (utils.isTestRun()) {
141 | log.log('mocked call of SSH command');
142 | log.log(args);
143 | log.log(sshEnv);
144 |
145 | return nextFn();
146 | }
147 |
148 | const ssh = cp.spawn('bash', args, { env: Object.assign({}, process.env, sshEnv) });
149 | let connectionProblem = false;
150 |
151 | ssh.stdout.on('data', data => {
152 | if (options.machineReadable) {
153 | process.stdout.write(data + '');
154 | } else {
155 | utils.prefixPrint(options.name, color, data);
156 | }
157 | });
158 |
159 | ssh.stderr.on('data', data => {
160 | if (data.toString().includes('Operation timed out') ||
161 | data.toString().includes('No route to host') ||
162 | data.toString().includes('Host is down')) {
163 | connectionProblem = true;
164 | }
165 |
166 | if (options.machineReadable) {
167 | process.stdout.write(data + '');
168 | } else {
169 | utils.prefixPrint(options.name, color, data, 'grey');
170 | }
171 | });
172 |
173 | ssh.on('exit', code => {
174 | if (connectionProblem && code === 255) {
175 | options.retries = options.retries ? options.retries + 1 : 1;
176 | options.maxRetries = options.maxRetries || 3;
177 |
178 | if (options.retries <= options.maxRetries) {
179 | utils.prefixPrint(options.name, color, 'Retrying (' +
180 | options.retries + ' of ' + options.maxRetries + ' attempts)...', 'red');
181 | log.br();
182 | // Do this to keep the same color for this session.
183 | decreaseSSHCount();
184 | sshExec(options, nextFn);
185 | return false;
186 | } else {
187 | // TODO: implement events
188 | // events.trigger('ssh.timeout', options);
189 | utils.prefixPrint(options.name, color, 'Giving up!', 'red');
190 | }
191 | }
192 | if (code !== 0 && !options.continueOnError) {
193 | const str = 'SSH connection exited with a non-zero code (' + code + '). Stopping execution...';
194 | utils.prefixPrint(options.name, color, str, 'red');
195 | process.exit(1);
196 | }
197 | if (!options.machineReadable) {
198 | log.br();
199 | }
200 | if (utils.isFunction(nextFn)) {
201 | nextFn();
202 | }
203 | });
204 | }
205 |
206 | function commandAsScriptFile(str, scriptDir) {
207 | return str.charAt(0) === '/' ? str : path.normalize(scriptDir + '/' + str);
208 | }
209 |
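A minimal sketch (not part of the source file above) of the environment sshExec() hands to bin/overcast-ssh for a simple command run, assuming the defaults, a placeholder key path, and that no script file named "uptime" exists in the cwd or bundled scripts directory:

// Illustrative only: for `overcast run vm-01 uptime` against an instance at 1.2.3.4,
// sshExec() spawns `bash bin/overcast-ssh` with roughly these variables merged into process.env.
const keyPath = '/path/to/resolved/key'; // placeholder
const exampleSshEnv = {
  OVERCAST_KEY: keyPath,
  OVERCAST_PORT: '22',
  OVERCAST_USER: 'root',
  OVERCAST_PASSWORD: '',
  OVERCAST_IP: '1.2.3.4',
  OVERCAST_SSH_ARGS: '',
  OVERCAST_COMMAND: 'uptime' // no matching script file, so the raw command is passed through
};
// When --env values are passed they are serialized into OVERCAST_ENV as `key="value" ` pairs,
// and --machine-readable additionally sets OVERCAST_HIDE_COMMAND. On connection failures
// (exit code 255 plus a timeout / no-route / host-down message) the call retries up to 3 times.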
--------------------------------------------------------------------------------
/src/store.js:
--------------------------------------------------------------------------------
1 | // A module to contain all in-memory application state.
2 |
3 | let STORE = {};
4 |
5 | export function clearStore() {
6 | STORE = {};
7 | }
8 |
9 | export function dump() {
10 | return STORE;
11 | }
12 |
13 | export function setStore(key, val) {
14 | STORE[key] = val;
15 | }
16 |
17 | export function getStore(key) {
18 | return STORE[key];
19 | }
20 |
21 | export function appendToStore(key, val) {
22 | STORE[key] = STORE[key] || [];
23 | STORE[key].push(val);
24 | }
25 |
26 | export function setArgString(str) {
27 | STORE.ARG_STRING = str;
28 | }
29 |
30 | export function getArgString() {
31 | return STORE.ARG_STRING || '';
32 | }
33 |
34 | export function setConfigDirs(path) {
35 | setConfigDir(path);
36 | setClustersJSON(path + '/clusters.json');
37 | setVariablesJSON(path + '/variables.json');
38 | }
39 |
40 | export function setConfigDir(path) {
41 | return STORE.CONFIG_DIR = path;
42 | }
43 |
44 | export function getConfigDir() {
45 | return STORE.CONFIG_DIR;
46 | }
47 |
48 | export function setClustersJSON(path) {
49 | return STORE.CLUSTERS_JSON = path;
50 | }
51 |
52 | export function getClustersJSON() {
53 | return STORE.CLUSTERS_JSON;
54 | }
55 |
56 | export function setVariablesJSON(path) {
57 | return STORE.VARIABLES_JSON = path;
58 | }
59 |
60 | export function getVariablesJSON() {
61 | return STORE.VARIABLES_JSON;
62 | }
63 |
64 | export function getSSHCount() {
65 | return STORE.SSH_COUNT || 0;
66 | }
67 |
68 | export function setSSHCount(c) {
69 | return STORE.SSH_COUNT = c;
70 | }
71 |
72 | export function increaseSSHCount() {
73 | return setSSHCount(getSSHCount() + 1);
74 | }
75 |
76 | export function decreaseSSHCount() {
77 | return setSSHCount(Math.max(0, getSSHCount() - 1));
78 | }
79 |
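A minimal usage sketch (not part of the source file above) showing how this in-memory state is typically read and written:

// Illustrative only; the config path is hypothetical and the import path is as used from within src/.
import * as store from './store.js';

store.setConfigDirs('/home/user/.overcast');  // hypothetical config directory
store.getConfigDir();                         // '/home/user/.overcast'
store.getClustersJSON();                      // '/home/user/.overcast/clusters.json'
store.getVariablesJSON();                     // '/home/user/.overcast/variables.json'

store.increaseSSHCount();                     // 1
store.increaseSSHCount();                     // 2
store.decreaseSSHCount();                     // 1 (never drops below zero)
store.clearStore();                           // reset everything, as the test suite does between specs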
--------------------------------------------------------------------------------
/test/fixtures/overcast.key.pub:
--------------------------------------------------------------------------------
1 | keydata
2 |
--------------------------------------------------------------------------------
/test/integration/aliases.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('aliases', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | describe('when there are no instances', () => {
9 | it('should print a message saying no clusters are defined', (nextFn) => {
10 | overcast('aliases', (logs) => {
11 | expectInLog(expect, logs, 'No overcast clusters defined');
12 | nextFn();
13 | });
14 | });
15 | });
16 |
17 | describe('when there are instances', () => {
18 | it('should print the ssh bash aliases for each', (nextFn) => {
19 | overcast('instance add instance.01 127.0.0.1', () => {
20 | overcast('aliases', (logs) => {
21 | const str = 'alias ssh.instance.01="ssh -i';
22 | expectInLog(expect, logs, str);
23 | nextFn();
24 | });
25 | });
26 | });
27 | });
28 |
29 | });
30 |
--------------------------------------------------------------------------------
/test/integration/cluster.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog, expectInLogExact } from './utils.js';
2 |
3 | const name = 'testCluster';
4 |
5 | describe('cluster', () => {
6 | beforeAll((nextFn) => {
7 | tearDown(nextFn);
8 | });
9 |
10 | describe('add', () => {
11 | it('should fail if there was no name added', (nextFn) => {
12 | overcast(`cluster add`, (logs) => {
13 | expectInLog(expect, logs, 'Missing [name] argument.');
14 | nextFn();
15 | });
16 | });
17 |
18 | it('should allow me to add a new cluster', (nextFn) => {
19 | overcast(`cluster add ${name}`, (logs) => {
20 | expectInLog(expect, logs, `Cluster "${name}" has been added`);
21 | nextFn();
22 | });
23 | });
24 | });
25 |
26 | describe('count', () => {
27 | it('should return the instance count for an existing cluster', (nextFn) => {
28 | overcast(`cluster count ${name}`, (logs) => {
29 | expectInLogExact(expect, logs, 0);
30 | overcast(`instance add myName 1.2.3.4 --cluster ${name}`, () => {
31 | overcast(`cluster count ${name}`, (logs) => {
32 | expectInLogExact(expect, logs, 1);
33 | nextFn();
34 | });
35 | });
36 | });
37 | });
38 | });
39 |
40 | describe('rename', () => {
41 | it('should not allow me to rename a missing cluster', (nextFn) => {
42 | overcast(`cluster rename foo bar`, (logs) => {
43 | expectInLog(expect, logs, 'No clusters found matching "foo"');
44 | nextFn();
45 | });
46 | });
47 |
48 | it('should allow me to rename an existing cluster', (nextFn) => {
49 | overcast(`cluster rename ${name} foo`, (logs) => {
50 | expectInLog(expect, logs, `Cluster "${name}" has been renamed to "foo"`);
51 | overcast('cluster count foo', (logs) => {
52 | expectInLogExact(expect, logs, 1);
53 | nextFn();
54 | });
55 | });
56 | });
57 | });
58 |
59 | describe('remove', () => {
60 | it('should not allow me to remove a missing cluster', (nextFn) => {
61 | overcast(`cluster remove bar`, (logs) => {
62 | expectInLog(expect, logs, 'No clusters found matching "bar"');
63 | nextFn();
64 | });
65 | });
66 |
67 | it('should allow me to remove an existing cluster, and move instances to an orphaned cluster', (nextFn) => {
68 | overcast(`cluster remove foo`, (logs) => {
69 | expectInLog(expect, logs, 'Cluster "foo" has been removed');
70 | overcast(`cluster count orphaned`, (logs) => {
71 | expectInLogExact(expect, logs, 1);
72 | nextFn();
73 | });
74 | });
75 | });
76 | });
77 |
78 | });
79 |
--------------------------------------------------------------------------------
/test/integration/completions.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('completions', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should print a list of keywords', (nextFn) => {
9 | overcast('completions', (logs) => {
10 | expectInLog(expect, logs, 'overcast');
11 | expectInLog(expect, logs, 'cluster');
12 | expectInLog(expect, logs, 'instance');
13 | nextFn();
14 | });
15 | });
16 |
17 | it('should also print a list of clusters and instances', (nextFn) => {
18 | overcast('instance add myInstanceName 1.2.3.4 --cluster helloCluster', () => {
19 | overcast('completions', (logs) => {
20 | expectInLog(expect, logs, 'myInstanceName');
21 | expectInLog(expect, logs, 'helloCluster');
22 | nextFn();
23 | });
24 | });
25 | });
26 |
27 | });
28 |
--------------------------------------------------------------------------------
/test/integration/digitalocean.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog, expectNotInLog } from './utils.js';
2 |
3 | describe('digitalocean', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | describe('create', () => {
9 | it('should create an instance', (nextFn) => {
10 | overcast('digitalocean create TEST_NAME', (logs) => {
11 | expectInLog(expect, logs, 'Instance "TEST_NAME" (192.168.100.101) saved');
12 | expectInLog(expect, logs, 'Connection established');
13 | overcast('list', (logs) => {
14 | expectInLog(expect, logs, '(root@192.168.100.101:22)');
15 | nextFn();
16 | });
17 | });
18 | });
19 |
20 | it('should not allow duplicate instance names', (nextFn) => {
21 | overcast('digitalocean create TEST_NAME', (logs) => {
22 | expectInLog(expect, logs, 'Instance "TEST_NAME" already exists');
23 | nextFn();
24 | });
25 | });
26 | });
27 |
28 | describe('boot', () => {
29 | it('should not allow an invalid instance to be booted', (nextFn) => {
30 | overcast('digitalocean boot INVALID_NAME', (logs) => {
31 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
32 | nextFn();
33 | });
34 | });
35 |
36 | it('should boot a valid instance', (nextFn) => {
37 | overcast('digitalocean boot TEST_NAME', (logs) => {
38 | expectInLog(expect, logs, 'Instance "TEST_NAME" booted');
39 | nextFn();
40 | });
41 | });
42 | });
43 |
44 | describe('reboot', () => {
45 | it('should not allow an invalid instance to be rebooted', (nextFn) => {
46 | overcast('digitalocean reboot INVALID_NAME', (logs) => {
47 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
48 | nextFn();
49 | });
50 | });
51 |
52 | it('should reboot an instance', (nextFn) => {
53 | overcast('digitalocean reboot TEST_NAME', (logs) => {
54 | expectInLog(expect, logs, 'Instance "TEST_NAME" rebooted');
55 | nextFn();
56 | });
57 | });
58 | });
59 |
60 | describe('shutdown', () => {
61 | it('should not allow an invalid instance to be shut down', (nextFn) => {
62 | overcast('digitalocean shutdown INVALID_NAME', (logs) => {
63 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
64 | nextFn();
65 | });
66 | });
67 |
68 | it('should shut down an instance', (nextFn) => {
69 | overcast('digitalocean shutdown TEST_NAME', (logs) => {
70 | expectInLog(expect, logs, 'Instance "TEST_NAME" has been shut down');
71 | nextFn();
72 | });
73 | });
74 | });
75 |
76 | describe('destroy', () => {
77 | it('should not allow an invalid instance to be destroyed', (nextFn) => {
78 | overcast('digitalocean destroy INVALID_NAME', (logs) => {
79 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
80 | nextFn();
81 | });
82 | });
83 |
84 | it('should destroy an instance', (nextFn) => {
85 | overcast('digitalocean destroy TEST_NAME', (logs) => {
86 | expectInLog(expect, logs, 'Instance "TEST_NAME" destroyed');
87 | overcast('list', (logs) => {
88 | expectNotInLog(expect, logs, '(root@192.168.100.101:22)');
89 | nextFn();
90 | });
91 | });
92 | });
93 | });
94 |
95 | });
96 |
--------------------------------------------------------------------------------
/test/integration/info.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('info', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should display nothing when no clusters are defined', (nextFn) => {
9 | overcast('info', (logs) => {
10 | expectInLog(expect, logs, 'No clusters found');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should display info when clusters and instances are added', (nextFn) => {
16 | overcast('cluster add info-test', () => {
17 | overcast('instance add info.01 --cluster info-test --ip 127.0.0.1', () => {
18 | overcast('info', (logs) => {
19 | expectInLog(expect, logs, 'info-test');
20 | nextFn();
21 | });
22 | });
23 | });
24 | });
25 |
26 | it('should display nothing after the test cluster is removed', (nextFn) => {
27 | overcast('instance remove info.01', () => {
28 | overcast('cluster remove info-test', () => {
29 | overcast('info', (logs) => {
30 | expectInLog(expect, logs, 'No clusters found');
31 | nextFn();
32 | });
33 | });
34 | });
35 | });
36 | });
37 |
--------------------------------------------------------------------------------
/test/integration/init.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('init', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should allow me to init in the cwd', (nextFn) => {
9 | overcast('init', (logs) => {
10 | expectInLog(expect, logs, 'Created an .overcast directory');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should tell me if a config directory already exists', (nextFn) => {
16 | overcast('init', (logs) => {
17 | expectInLog(expect, logs, 'An .overcast directory already exists');
18 | nextFn();
19 | });
20 | });
21 |
22 | });
23 |
--------------------------------------------------------------------------------
/test/integration/instance.spec.js:
--------------------------------------------------------------------------------
1 | import { getClusters, saveClusters } from '../../src/utils.js';
2 | import { overcast, tearDown, expectInLog, expectInLogExact } from './utils.js';
3 |
4 | describe('instance', () => {
5 | beforeAll((nextFn) => {
6 | tearDown(nextFn);
7 | });
8 |
9 | describe('add', () => {
10 | it('should complain if no instance name or IP is provided', (nextFn) => {
11 | overcast('instance add', (logs) => {
12 | expectInLog(expect, logs, 'Missing [name] argument');
13 | expectInLog(expect, logs, 'Missing [ip] argument');
14 | nextFn();
15 | });
16 | });
17 |
18 | it('should allow me to add an instance', (nextFn) => {
19 | overcast('cluster add instance-test', () => {
20 | overcast('instance add instance.01 127.0.0.1 --cluster instance-test', (logs) => {
21 | expectInLog(expect, logs, 'Instance "instance.01" (127.0.0.1) has been added to the "instance-test" cluster');
22 | overcast('instance add instance.02 1.2.3.4 --cluster instance-test', (logs) => {
23 | expectInLog(expect, logs, 'Instance "instance.02" (1.2.3.4) has been added to the "instance-test" cluster');
24 | nextFn();
25 | });
26 | });
27 | });
28 | });
29 |
30 | it('should complain if instance name already exists as a cluster', (nextFn) => {
31 | overcast('instance add instance-test', (logs) => {
32 | expectInLog(expect, logs, '"instance-test" is already in use as a cluster name');
33 | nextFn();
34 | });
35 | });
36 |
37 | it('should complain if instance name already exists as an instance', (nextFn) => {
38 | overcast('instance add instance.01', (logs) => {
39 | expectInLog(expect, logs, 'Instance "instance.01" already exists');
40 | nextFn();
41 | });
42 | });
43 |
44 | });
45 |
46 | describe('get', () => {
47 | it('should complain if no instance name is provided', (nextFn) => {
48 | overcast('instance get', (logs) => {
49 | expectInLog(expect, logs, 'Missing [instance|cluster|all] argument');
50 | nextFn();
51 | });
52 | });
53 |
54 | it('should complain if a non-existing instance name is provided', (nextFn) => {
55 | overcast('instance get missing-01', (logs) => {
56 | expectInLog(expect, logs, 'No instances found matching "missing-01"');
57 | nextFn();
58 | });
59 | });
60 |
61 | it('should complain if no key is provided', (nextFn) => {
62 | overcast('instance get instance.01', (logs) => {
63 | expectInLog(expect, logs, 'Missing [attr...] argument');
64 | nextFn();
65 | });
66 | });
67 |
68 | it('should output the instance attributes', (nextFn) => {
69 | overcast('instance get instance.01 name ip', (logs) => {
70 | expectInLogExact(expect, logs, 'instance.01');
71 | expectInLogExact(expect, logs, '127.0.0.1');
72 | nextFn();
73 | });
74 | });
75 |
76 | it('should output the nested instance attributes', (nextFn) => {
77 | const clusters = getClusters();
78 | // 'instance-test': { instances: { 'instance.01': [Object], 'instance.02': [Object] } }
79 | clusters['instance-test'].instances['instance.01'].nested = {
80 | a: {
81 | b: [
82 | { c: 'foo' },
83 | { d: 'bar' }
84 | ]
85 | }
86 | };
87 | saveClusters(clusters, () => {
88 | overcast('instance get instance.01 nested.a.b[1].d', (logs) => {
89 | expectInLogExact(expect, logs, 'bar');
90 | nextFn();
91 | });
92 | });
93 | });
94 |
95 | it('should handle --single-line option', (nextFn) => {
96 | overcast('instance get instance.* origin --single-line', (logs) => {
97 | expectInLogExact(expect, logs, 'root@127.0.0.1:22 root@1.2.3.4:22');
98 | nextFn();
99 | });
100 | });
101 | });
102 |
103 | describe('list', () => {
104 | it('should list all instances', (nextFn) => {
105 | overcast('instance list', (logs) => {
106 | expectInLogExact(expect, logs, 'instance.01');
107 | expectInLogExact(expect, logs, 'instance.02');
108 | nextFn();
109 | });
110 | });
111 | });
112 |
113 | describe('update', () => {
114 | it('should complain if no instance name is provided', (nextFn) => {
115 | overcast('instance update', (logs) => {
116 | expectInLog(expect, logs, 'Missing [instance|cluster|all] argument');
117 | nextFn();
118 | });
119 | });
120 |
121 | it('should complain if an incorrect cluster name is provided', (nextFn) => {
122 | overcast('instance update instance.01 --cluster NEW_CLUSTER', (logs) => {
123 | expectInLog(expect, logs, 'No "NEW_CLUSTER" cluster found.');
124 | nextFn();
125 | });
126 | });
127 |
128 | it('should allow me to move an instance to a different cluster', (nextFn) => {
129 | overcast('cluster add NEW_CLUSTER', () => {
130 | overcast('instance update instance.01 --cluster NEW_CLUSTER', (logs) => {
131 | expectInLog(expect, logs, 'Instance "instance.01" has been moved to the "NEW_CLUSTER" cluster.');
132 | nextFn();
133 | });
134 | });
135 | });
136 |
137 | it('should allow me to rename an instance', (nextFn) => {
138 | overcast('instance update instance.01 --name instance.01.renamed', (logs) => {
139 | expectInLog(expect, logs, 'Instance "instance.01" has been renamed to "instance.01.renamed"');
140 | nextFn();
141 | });
142 | });
143 | });
144 |
145 | describe('remove', () => {
146 | it('should complain if no instance name is provided', (nextFn) => {
147 | overcast('instance remove', (logs) => {
148 | expectInLog(expect, logs, 'Missing [name] argument');
149 | nextFn();
150 | });
151 | });
152 |
153 | it('should complain if an incorrect instance name is provided', (nextFn) => {
154 | overcast('instance remove MISSING_NAME', (logs) => {
155 | expectInLog(expect, logs, 'No instance found matching "MISSING_NAME".');
156 | nextFn();
157 | });
158 | });
159 |
160 | it('should allow me to remove an instance', (nextFn) => {
161 | overcast('instance remove instance.01.renamed', (logs) => {
162 | expectInLog(expect, logs, 'Instance "instance.01.renamed" removed');
163 | overcast('cluster remove instance-test', (logs) => {
164 | expectInLog(expect, logs, 'Cluster "instance-test" has been removed');
165 | nextFn();
166 | });
167 | });
168 | });
169 | });
170 |
171 | });
172 |
--------------------------------------------------------------------------------
/test/integration/list.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('list', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should display nothing when no clusters are defined', (nextFn) => {
9 | overcast('list', (logs) => {
10 | expectInLog(expect, logs, 'No clusters found');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should display info when clusters are defined', (nextFn) => {
16 | overcast('cluster add list-test', () => {
17 | overcast('list', (logs) => {
18 | expectInLog(expect, logs, 'list-test');
19 | nextFn();
20 | });
21 | });
22 | });
23 |
24 | it('should display nothing after the test cluster is removed', (nextFn) => {
25 | overcast('cluster remove list-test', () => {
26 | overcast('list', (logs) => {
27 | expectInLog(expect, logs, 'No clusters found');
28 | nextFn();
29 | });
30 | });
31 | });
32 | });
33 |
--------------------------------------------------------------------------------
/test/integration/port.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog, expectInLogExact } from './utils.js';
2 |
3 | describe('port', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should complain if no instance name is provided', (nextFn) => {
9 | overcast('port', (logs) => {
10 | expectInLog(expect, logs, 'Missing [instance|cluster|all] argument');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should complain if a missing instance name is provided', (nextFn) => {
16 | overcast('port MISSING', (logs) => {
17 | expectInLog(expect, logs, 'No instances found matching "MISSING"');
18 | nextFn();
19 | });
20 | });
21 |
22 | it('should complain if no port is provided', (nextFn) => {
23 | overcast('instance add vm-01 1.2.3.4', () => {
24 | overcast('port vm-01', (logs) => {
25 | expectInLog(expect, logs, 'Missing [port] argument');
26 | nextFn();
27 | });
28 | });
29 | });
30 |
31 | it('should otherwise update the port', (nextFn) => {
32 | overcast('instance get vm-01 ssh_port', (logs) => {
33 | expectInLogExact(expect, logs, '22');
34 | overcast('port vm-01 22222', (logs) => {
35 | expectInLog(expect, logs, 'mocked call of SSH command');
36 | expectInLog(expect, logs, { OVERCAST_PORT: '22' });
37 | expectInLog(expect, logs, { OVERCAST_ENV: 'new_ssh_port="22222" ' });
38 | overcast('instance get vm-01 ssh_port', (logs) => {
39 | expectInLogExact(expect, logs, '22222');
40 | nextFn();
41 | });
42 | });
43 | });
44 | });
45 |
46 | });
47 |
--------------------------------------------------------------------------------
/test/integration/pull.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('pull', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should throw an error if name is missing', (nextFn) => {
9 | overcast('pull', (logs) => {
10 | expectInLog(expect, logs, 'Missing [instance|cluster|all] argument');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should throw an error if the instance is not found', (nextFn) => {
16 | overcast('pull MISSING', (logs) => {
17 | expectInLog(expect, logs, 'No instances found matching "MISSING"');
18 | nextFn();
19 | });
20 | });
21 |
22 | it('should throw an error if dest is missing', (nextFn) => {
23 | overcast('instance add vm-01 1.2.3.4', () => {
24 | overcast('pull vm-01 /path/to/src', (logs) => {
25 | expectInLog(expect, logs, 'Missing [dest] argument');
26 | nextFn();
27 | });
28 | });
29 | });
30 |
31 | it('should otherwise call scp if everything exists', (nextFn) => {
32 | overcast('pull vm-01 /path/to/src /path/to/dest', (logs) => {
33 | expectInLog(expect, logs, 'mocked call of SCP command');
34 | expectInLog(expect, logs, 'scp -r -i ');
35 | expectInLog(expect, logs, ' -P 22 -o StrictHostKeyChecking=no root@1.2.3.4:/path/to/src /path/to/dest');
36 | nextFn();
37 | });
38 | });
39 |
40 | it('should otherwise call rsync if flag is set', (nextFn) => {
41 | overcast('pull vm-01 /path/to/src /path/to/dest --rsync', (logs) => {
42 | expectInLog(expect, logs, 'mocked call of Rsync command');
43 | expectInLog(expect, logs, 'rsync -e "ssh -p 22 -i ');
44 | expectInLog(expect, logs, ' -varuzP --delete --ignore-errors root@1.2.3.4:/path/to/src /path/to/dest');
45 | nextFn();
46 | });
47 | });
48 |
49 | });
50 |
--------------------------------------------------------------------------------
/test/integration/push.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('push', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should throw an error if name is missing', (nextFn) => {
9 | overcast('push', (logs) => {
10 | expectInLog(expect, logs, 'Missing [instance|cluster|all] argument');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should throw an error if the instance is not found', (nextFn) => {
16 | overcast('push MISSING', (logs) => {
17 | expectInLog(expect, logs, 'No instances found matching "MISSING"');
18 | nextFn();
19 | });
20 | });
21 |
22 | it('should throw an error if dest is missing', (nextFn) => {
23 | overcast('instance add vm-01 1.2.3.4', () => {
24 | overcast('push vm-01 /path/to/src', (logs) => {
25 | expectInLog(expect, logs, 'Missing [dest] argument');
26 | nextFn();
27 | });
28 | });
29 | });
30 |
31 | it('should otherwise call scp if everything exists', (nextFn) => {
32 | overcast('push vm-01 /path/to/src /path/to/dest', (logs) => {
33 | expectInLog(expect, logs, 'mocked call of SCP command');
34 | expectInLog(expect, logs, 'scp -r -i ');
35 | expectInLog(expect, logs, ' -P 22 -o StrictHostKeyChecking=no /path/to/src root@1.2.3.4:/path/to/dest');
36 | nextFn();
37 | });
38 | });
39 |
40 | it('should otherwise call rsync if flag is set', (nextFn) => {
41 | overcast('push vm-01 /path/to/src /path/to/dest --rsync', (logs) => {
42 | expectInLog(expect, logs, 'mocked call of Rsync command');
43 | expectInLog(expect, logs, 'rsync -e "ssh -p 22 -i ');
44 | expectInLog(expect, logs, ' -varuzP --delete --ignore-errors /path/to/src root@1.2.3.4:/path/to/dest');
45 | nextFn();
46 | });
47 | });
48 |
49 | });
50 |
--------------------------------------------------------------------------------
/test/integration/run.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('run', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should complain if no instance name is provided', (nextFn) => {
9 | overcast('run', (logs) => {
10 | expectInLog(expect, logs, 'Missing [instance|cluster|all] argument');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should complain if a missing instance name is provided', (nextFn) => {
16 | overcast('run MISSING', (logs) => {
17 | expectInLog(expect, logs, 'No instances found matching "MISSING"');
18 | nextFn();
19 | });
20 | });
21 |
22 | it('should complain if no command is provided', (nextFn) => {
23 | overcast('instance add vm-01 1.2.3.4', () => {
24 | overcast('run vm-01', (logs) => {
25 | expectInLog(expect, logs, 'Missing [command|file] argument');
26 | nextFn();
27 | });
28 | });
29 | });
30 |
31 | it('should otherwise run the command', (nextFn) => {
32 | overcast('run vm-01 uptime', (logs) => {
33 | expectInLog(expect, logs, 'mocked call of SSH command');
34 | expectInLog(expect, logs, { OVERCAST_COMMAND: 'uptime' });
35 | nextFn();
36 | });
37 | });
38 |
39 | });
40 |
--------------------------------------------------------------------------------
/test/integration/slack.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('slack', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | describe('without a message set', () => {
9 | it('should complain', (nextFn) => {
10 | overcast('slack', (logs) => {
11 | expectInLog(expect, logs, 'Missing [message] argument');
12 | nextFn();
13 | });
14 | });
15 | });
16 |
17 | describe('without a token set', () => {
18 | it('should complain', (nextFn) => {
19 | overcast('slack "Hello!"', (logs) => {
20 | expectInLog(expect, logs, 'Please add SLACK_WEBHOOK_URL');
21 | nextFn();
22 | });
23 | });
24 | });
25 |
26 | describe('with a valid message and token set', () => {
27 | it('should try to send the slack message', (nextFn) => {
28 | overcast('vars set SLACK_WEBHOOK_URL https://example.slack.com', () => {
29 | overcast('slack "Hello!" --channel "#random" --ram "256mb"', (logs) => {
30 | expectInLog(expect, logs, 'Message sent to Slack');
31 | expectInLog(expect, logs, '"text":"Hello!"');
32 | expectInLog(expect, logs, '"channel":"#random"');
33 | expectInLog(expect, logs, '"fields":{"ram":"256mb"');
34 | nextFn();
35 | });
36 | });
37 | });
38 | });
39 |
40 | });
41 |
--------------------------------------------------------------------------------
/test/integration/sshkey.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog, expectInLogExact } from './utils.js';
2 |
3 | describe('sshkey', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | describe('without a name set', () => {
9 | it('should complain and fail', (nextFn) => {
10 | overcast('sshkey create', (logs) => {
11 | expectInLog(expect, logs, 'Missing [name] argument');
12 | nextFn();
13 | });
14 | });
15 | });
16 |
17 | describe('with an existing name provided', () => {
18 | it('should complain and fail', (nextFn) => {
19 | overcast('sshkey create overcast', (logs) => {
20 | expectInLog(expect, logs, 'The key "overcast" already exists');
21 | nextFn();
22 | });
23 | });
24 | });
25 |
26 | describe('with a correct new name set', () => {
27 | it('should create the new key', (nextFn) => {
28 | overcast('sshkey create myNewKey', (logs) => {
29 | expectInLog(expect, logs, 'Created new SSH key at');
30 | nextFn();
31 | });
32 | });
33 | });
34 |
35 | describe('list', () => {
36 | it('should list the keys', (nextFn) => {
37 | overcast('sshkey list', (logs) => {
38 | expectInLogExact(expect, logs, 'myNewKey');
39 | expectInLogExact(expect, logs, 'overcast');
40 | nextFn();
41 | });
42 | });
43 | });
44 | });
45 |
--------------------------------------------------------------------------------
/test/integration/store.spec.js:
--------------------------------------------------------------------------------
1 | import * as store from '../../src/store.js';
2 |
3 | describe('store', () => {
4 | beforeEach(() => {
5 | store.clearStore();
6 | });
7 |
8 | describe('increaseSSHCount', () => {
9 | it('should increase the SSH count', () => {
10 | expect(store.increaseSSHCount()).toEqual(1);
11 | expect(store.increaseSSHCount()).toEqual(2);
12 | expect(store.increaseSSHCount()).toEqual(3);
13 | });
14 | });
15 |
16 | describe('decreaseSSHCount', () => {
17 | it('should decrease the SSH count to zero', () => {
18 | expect(store.increaseSSHCount()).toEqual(1);
19 | expect(store.increaseSSHCount()).toEqual(2);
20 | expect(store.decreaseSSHCount()).toEqual(1);
21 | expect(store.decreaseSSHCount()).toEqual(0);
22 | expect(store.decreaseSSHCount()).toEqual(0);
23 | });
24 | });
25 | });
26 |
--------------------------------------------------------------------------------
/test/integration/tunnel.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('tunnel', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | it('should throw an error if instance is missing', (nextFn) => {
9 | overcast('tunnel', (logs) => {
10 | expectInLog(expect, logs, 'Missing [instance] argument');
11 | nextFn();
12 | });
13 | });
14 |
15 | it('should throw an error if the instance is not found', (nextFn) => {
16 | overcast('tunnel MISSING', (logs) => {
17 | expectInLog(expect, logs, 'No instance found matching "MISSING"');
18 | nextFn();
19 | });
20 | });
21 |
22 | it('should throw an error if dest is missing', (nextFn) => {
23 | overcast('instance add vm-01 1.2.3.4', () => {
24 | overcast('tunnel vm-01', (logs) => {
25 | expectInLog(expect, logs, 'Missing [local-port((:hostname):remote-port)...] argument');
26 | nextFn();
27 | });
28 | });
29 | });
30 |
31 | it('should handle [port] syntax', (nextFn) => {
32 | overcast('tunnel vm-01 5000', (logs) => {
33 | expectInLog(expect, logs, 'mocked call of SSH command');
34 | expectInLog(expect, logs, '-p 22 -o StrictHostKeyChecking=no -L 5000:127.0.0.1:5000 root@1.2.3.4 -N');
35 | nextFn();
36 | });
37 | });
38 |
39 | it('should handle [port:port] syntax', (nextFn) => {
40 | overcast('tunnel vm-01 8080:80', (logs) => {
41 | expectInLog(expect, logs, 'mocked call of SSH command');
42 | expectInLog(expect, logs, '-p 22 -o StrictHostKeyChecking=no -L 8080:127.0.0.1:80 root@1.2.3.4 -N');
43 | nextFn();
44 | });
45 | });
46 |
47 | });
48 |
--------------------------------------------------------------------------------
/test/integration/utils.js:
--------------------------------------------------------------------------------
1 | import cp from 'child_process';
2 | import { init } from '../../src/cli.js';
3 | import { getLogs } from '../../src/log.js';
4 | import { clearStore } from '../../src/store.js';
5 | import { isObject } from '../../src/utils.js';
6 |
7 | export const overcast = (args, nextFn = () => {}) => {
8 | clearStore();
9 | init(args + ' --is-test-run', () => {
10 | nextFn(getLogs());
11 | });
12 | };
13 |
14 | export const tearDown = (nextFn = () => {}) => {
15 | const args = 'rm -rf .overcast';
16 | cp.exec(args, () => {
17 | nextFn();
18 | });
19 | };
20 |
21 | function findInLog(logs, str) {
22 | return logs.some(log => {
23 | return log.includes && log.includes(str);
24 | });
25 | }
26 |
27 | export const expectInLog = (expect, logs, str) => {
28 | if (isObject(str)) {
29 | return expectInLogObject(expect, logs, str);
30 | }
31 |
32 | const found = findInLog(logs, str);
33 | if (found !== true) {
34 | console.log(logs);
35 | }
36 | expect(found).toBe(true);
37 | };
38 |
39 | export const expectInLogObject = (expect, logs, obj) => {
40 | let found = false;
41 |
42 | logs.forEach(log => {
43 | if (isObject(log)) {
44 | let match = true;
45 | Object.keys(obj).forEach((k) => {
46 | if (!log[k] || log[k] !== obj[k]) {
47 | match = false;
48 | }
49 | });
50 | if (match) {
51 | found = true;
52 | }
53 | }
54 | });
55 |
56 | if (found !== true) {
57 | console.log(logs);
58 | }
59 | expect(found).toBe(true);
60 | };
61 |
62 | export const expectInLogExact = (expect, logs, str) => {
63 | const found = logs.some(log => log === str);
64 | if (found !== true) {
65 | console.log(logs);
66 | }
67 | expect(found).toBe(true);
68 | }
69 |
70 | export const expectNotInLog = (expect, logs, str) => {
71 | const found = findInLog(logs, str);
72 | if (found !== false) {
73 | console.log(logs);
74 | }
75 | expect(found).toBe(false);
76 | };
77 |
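A minimal sketch (not part of the source file above) of the testing pattern the *.spec.js files in this directory follow: run a CLI string through the helper (which appends --is-test-run), then assert on the captured log lines.

// Illustrative only; the expected message is taken from the aliases spec above.
import { overcast, tearDown, expectInLog } from './utils.js';

describe('example', () => {
  beforeAll((nextFn) => {
    tearDown(nextFn);
  });

  it('reports that no clusters are defined yet', (nextFn) => {
    overcast('aliases', (logs) => {
      expectInLog(expect, logs, 'No overcast clusters defined');
      nextFn();
    });
  });
});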
--------------------------------------------------------------------------------
/test/integration/utils.spec.js:
--------------------------------------------------------------------------------
1 | import * as store from '../../src/store.js';
2 | import * as utils from '../../src/utils.js';
3 |
4 | describe('utils', () => {
5 | beforeEach(() => {
6 | store.clearStore();
7 | });
8 |
9 | describe('getNextColor', () => {
10 | it('sends back the next color and advances the state count', () => {
11 | expect(utils.getNextColor()).toBe('cyan');
12 | expect(utils.getNextColor()).toBe('green');
13 | expect(utils.getNextColor()).toBe('red');
14 | expect(utils.getNextColor()).toBe('yellow');
15 | expect(utils.getNextColor()).toBe('magenta');
16 | expect(utils.getNextColor()).toBe('blue');
17 | expect(utils.getNextColor()).toBe('white');
18 | expect(utils.getNextColor()).toBe('cyan');
19 | })
20 | });
21 |
22 | describe('deepGet', () => {
23 | const testObj = {
24 | a: {
25 | b: {
26 | c: {
27 | d: 123
28 | }
29 | },
30 | e: [
31 | { f: 9 },
32 | { g: 10 }
33 | ]
34 | }
35 | };
36 |
37 | const falseyObj = {
38 | isUndefined: undefined,
39 | isNull: null,
40 | isZero: 0,
41 | isEmptyString: ''
42 | };
43 |
44 | const testArr = [
45 | { id: 1, comments: [{ text: 'hello' }, { text: 'goodbye' }] },
46 | { id: 2, comments: [] }
47 | ];
48 |
49 | it('handles nested objects', () => {
50 | expect(utils.deepGet(testObj, 'a.b.c.d')).toBe(123);
51 | });
52 |
53 | it('handles arrays inside an object', () => {
54 | expect(utils.deepGet(testObj, 'a.e[0].f')).toBe(9);
55 | });
56 |
57 | it('handles objects inside an array', () => {
58 | expect(utils.deepGet(testArr, '[0].comments[1].text')).toBe('goodbye');
59 | });
60 |
61 | it('returns the default value if query was not found', () => {
62 | const defaultVal = 'oh no';
63 | expect(utils.deepGet(testObj, 'invalid.not[0].found', defaultVal)).toBe(defaultVal);
64 | });
65 |
66 | it('returns undefined if query was not found and no default is set', () => {
67 | expect(utils.deepGet(testObj, 'invalid.not[0].found')).toBe(undefined);
68 | });
69 |
70 | it('returns falsey values', () => {
71 | const defaultVal = 'my default';
72 | expect(utils.deepGet(falseyObj, 'isUndefined', defaultVal)).toBe(undefined);
73 | expect(utils.deepGet(falseyObj, 'isNull', defaultVal)).toBe(null);
74 | expect(utils.deepGet(falseyObj, 'isZero', defaultVal)).toBe(0);
75 | expect(utils.deepGet(falseyObj, 'isEmptyString', defaultVal)).toBe('');
76 | });
77 | });
78 |
79 | describe('deepClone', () => {
80 | it('creates a deep clone of an object', () => {
81 | const original = {
82 | foo: {
83 | a: 1,
84 | b: new Date()
85 | },
86 | bar: true,
87 | arr: ['a', 'b', { a: 9 }],
88 | b: null,
89 | c: undefined,
90 | d: 123,
91 | e: 234.56,
92 | f: 'asdfjkh',
93 | fn: () => { return true; },
94 | r: /[a-z]/i,
95 | };
96 | const clone = utils.deepClone(original);
97 | expect(clone).toEqual(original);
98 | expect(clone.foo.b).not.toBe(original.foo.b);
99 | expect(clone.foo.b.getTime()).toBe(original.foo.b.getTime());
100 | expect(clone.arr).not.toBe(original.arr);
101 | expect(clone.arr).toEqual(original.arr);
102 | expect(clone.arr[2].a).toBe(original.arr[2].a);
103 | expect(clone.fn).toBe(original.fn);
104 | expect(clone.r).toEqual(original.r);
105 | expect(clone.r).not.toBe(original.r);
106 | expect(clone.fn()).toBe(original.fn());
107 | });
108 |
109 | it('makes a deep clone of an array', () => {
110 | const original = [1, 2, { a: 1, b: 2 }];
111 | let clone = utils.deepClone(original);
112 | expect(clone).not.toBe(original);
113 | expect(clone).toEqual(original);
114 | });
115 |
116 | it('makes a clone of an object with object references', () => {
117 | const other = { a: 1, b: 2 };
118 | let original = {
119 | a: other,
120 | b: {
121 | a: other
122 | }
123 | };
124 | const clone = utils.deepClone(original);
125 | expect(clone).toEqual(original);
126 | other.a = 3;
127 | expect(clone.a.a).toBe(1);
128 | expect(clone.b.a.a).toBe(1);
129 | });
130 |
131 | it('makes a clone of a string', () => {
132 | let original = 'hello';
133 | const clone = utils.deepClone(original);
134 | expect(clone).toBe(original);
135 | expect(clone).toEqual(original);
136 | original = 'updated';
137 | expect(clone).not.toBe(original);
138 | });
139 |
140 | it('handles undefined', () => {
141 | const original = undefined;
142 | const clone = utils.deepClone(original);
143 | expect(clone).toBe(original);
144 | expect(clone).toEqual(original);
145 | });
146 |
147 | it('handles null', () => {
148 | const original = null;
149 | const clone = utils.deepClone(original);
150 | expect(clone).toBe(original);
151 | expect(clone).toEqual(original);
152 | });
153 | });
154 |
155 | describe('findMatchingInstances', () => {
156 | const subject = utils.findMatchingInstances;
157 | const clusters = {
158 | dummy: {
159 | instances: {
160 | 'dummy.01': { name: 'dummy.01' },
161 | 'dummy.02': { name: 'dummy.02' }
162 | }
163 | },
164 | test: {
165 | instances: {
166 | 'test-01': { name: 'test-01' },
167 | 'test-02': { name: 'test-02' },
168 | 'test-03': { name: 'test-03' }
169 | }
170 | }
171 | };
172 |
173 | describe('name is "all"', () => {
174 | it('should return all instances', () => {
175 | expect(subject('all', clusters)).toEqual([
176 | { name: 'dummy.01' },
177 | { name: 'dummy.02' },
178 | { name: 'test-01' },
179 | { name: 'test-02' },
180 | { name: 'test-03' }
181 | ]);
182 | });
183 | });
184 |
185 | describe('name matches a cluster', () => {
186 | it('should return all instances from that cluster', () => {
187 | expect(subject('dummy', clusters)).toEqual([
188 | { name: 'dummy.01' },
189 | { name: 'dummy.02' }
190 | ]);
191 | });
192 | });
193 |
194 | describe('name matches an instance', () => {
195 | it('should return the matching instance', () => {
196 | expect(subject('test-03', clusters)).toEqual([
197 | { name: 'test-03' }
198 | ]);
199 | });
200 | });
201 |
202 | describe('name includes a wildcard', () => {
203 | it('should return the matching instances', () => {
204 | expect(subject('test-0*', clusters)).toEqual([
205 | { name: 'test-01' },
206 | { name: 'test-02' },
207 | { name: 'test-03' }
208 | ]);
209 | expect(subject('*01', clusters)).toEqual([
210 | { name: 'dummy.01' },
211 | { name: 'test-01' }
212 | ]);
213 | expect(subject('*.*', clusters)).toEqual([
214 | { name: 'dummy.01' },
215 | { name: 'dummy.02' }
216 | ]);
217 | });
218 | });
219 | });
220 |
221 | describe('tokenize', () => {
222 | const subject = utils.tokenize;
223 |
224 | it('should handle double-quoted tokens', () => {
225 | expect(subject('"my first token" second, third'))
226 | .toEqual(['my first token', 'second,', 'third']);
227 | });
228 |
229 | it('should handle single-quotes in double-quoted tokens and vice-versa', () => {
230 | return expect(subject('"first token\'s value" \'second "token quote"\' third'))
231 | .toEqual(['first token\'s value', 'second "token quote"', 'third']);
232 | });
233 |
234 | it('should handle single-quoted tokens', () => {
235 | return expect(subject('"my first token" \'my second token\' third'))
236 | .toEqual(['my first token', 'my second token', 'third']);
237 | });
238 |
239 | it('should handle simple tokens with irregular spacing', () => {
240 | expect(subject(' first second --third'))
241 | .toEqual(['first', 'second', '--third']);
242 | });
243 |
244 | });
245 |
246 | describe('sanitize', () => {
247 | const subject = utils.sanitize;
248 |
249 | it('should sanitize the input string', () => {
250 | expect(subject('foo ~`!@#$%^&*()-=_+[]\\{}|;:\'",./<>?bar'))
251 | .toBe('foo *-_.bar');
252 | });
253 |
254 | it('should handle numbers', () => {
255 | expect(subject(12345)).toBe('12345');
256 | });
257 |
258 | it('should return empty strings for null and undefined', () => {
259 | expect(subject(null)).toBe('');
260 | expect(subject()).toBe('');
261 | });
262 | });
263 | });
264 |
--------------------------------------------------------------------------------
/test/integration/vars.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog } from './utils.js';
2 |
3 | describe('vars', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | describe('set', () => {
9 | it('should set vars', (nextFn) => {
10 | overcast('vars set TEST_NAME test_value', (logs) => {
11 | expectInLog(expect, logs, 'Variable "TEST_NAME" saved');
12 | nextFn();
13 | });
14 | });
15 | });
16 |
17 | describe('get', () => {
18 | it('should get vars', (nextFn) => {
19 | overcast('vars get TEST_NAME', (logs) => {
20 | expectInLog(expect, logs, 'test_value');
21 | nextFn();
22 | });
23 | });
24 |
25 | it('should handle missing vars', (nextFn) => {
26 | overcast('vars get BOGUS', (logs) => {
27 | expectInLog(expect, logs, 'Variable "BOGUS" not found');
28 | nextFn();
29 | });
30 | });
31 | });
32 |
33 | describe('delete', () => {
34 | it('should delete vars', (nextFn) => {
35 | overcast('vars delete TEST_NAME', () => {
36 | overcast('vars get TEST_NAME', (logs) => {
37 | expectInLog(expect, logs, 'Variable "TEST_NAME" not found');
38 | nextFn();
39 | });
40 | });
41 | });
42 |
43 | it('should handle missing vars', (nextFn) => {
44 | overcast('vars delete MISSING_NAME', (logs) => {
45 | expectInLog(expect, logs, 'Variable "MISSING_NAME" not found');
46 | nextFn();
47 | });
48 | });
49 | });
50 | });
51 |
--------------------------------------------------------------------------------
/test/integration/virtualbox.spec.js:
--------------------------------------------------------------------------------
1 | import { overcast, tearDown, expectInLog, expectNotInLog } from './utils.js';
2 |
3 | describe('virtualbox', () => {
4 | beforeAll((nextFn) => {
5 | tearDown(nextFn);
6 | });
7 |
8 | describe('create', () => {
9 | it('should create an instance', (nextFn) => {
10 | overcast('virtualbox create TEST_NAME', (logs) => {
11 | expectInLog(expect, logs, 'Instance "TEST_NAME" (192.168.100.102) saved');
12 | expectInLog(expect, logs, 'Connection established');
13 | overcast('list', (logs) => {
14 | expectInLog(expect, logs, '(root@192.168.100.102:22)');
15 | nextFn();
16 | });
17 | });
18 | });
19 |
20 | it('should not allow duplicate instance names', (nextFn) => {
21 | overcast('virtualbox create TEST_NAME', (logs) => {
22 | expectInLog(expect, logs, 'Instance "TEST_NAME" already exists');
23 | nextFn();
24 | });
25 | });
26 | });
27 |
28 | describe('boot', () => {
29 | it('should not allow an invalid instance to be booted', (nextFn) => {
30 | overcast('virtualbox boot INVALID_NAME', (logs) => {
31 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
32 | nextFn();
33 | });
34 | });
35 |
36 | it('should boot an instance', (nextFn) => {
37 | overcast('virtualbox boot TEST_NAME', (logs) => {
38 | expectInLog(expect, logs, 'Instance "TEST_NAME" booted');
39 | nextFn();
40 | });
41 | });
42 | });
43 |
44 | describe('reboot', () => {
45 | it('should not allow an invalid instance to be rebooted', (nextFn) => {
46 | overcast('virtualbox reboot INVALID_NAME', (logs) => {
47 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
48 | nextFn();
49 | });
50 | });
51 |
52 | it('should reboot an instance', (nextFn) => {
53 | overcast('virtualbox reboot TEST_NAME', (logs) => {
54 | expectInLog(expect, logs, 'Instance "TEST_NAME" rebooted');
55 | nextFn();
56 | });
57 | });
58 | });
59 |
60 | describe('shutdown', () => {
61 | it('should not allow an invalid instance to be shut down', (nextFn) => {
62 | overcast('virtualbox shutdown INVALID_NAME', (logs) => {
63 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
64 | nextFn();
65 | });
66 | });
67 |
68 | it('should shut down an instance', (nextFn) => {
69 | overcast('virtualbox shutdown TEST_NAME', (logs) => {
70 | expectInLog(expect, logs, 'Instance "TEST_NAME" has been shut down');
71 | nextFn();
72 | });
73 | });
74 | });
75 |
76 | describe('destroy', () => {
77 | it('should not allow an invalid instance to be destroyed', (nextFn) => {
78 | overcast('virtualbox destroy INVALID_NAME', (logs) => {
79 | expectInLog(expect, logs, 'No instance found matching "INVALID_NAME"');
80 | nextFn();
81 | });
82 | });
83 |
84 | it('should destroy an instance', (nextFn) => {
85 | overcast('virtualbox destroy TEST_NAME', (logs) => {
86 | expectInLog(expect, logs, 'Instance "TEST_NAME" destroyed');
87 | overcast('list', (logs) => {
88 | expectNotInLog(expect, logs, '(root@192.168.100.102:22)');
89 | nextFn();
90 | });
91 | });
92 | });
93 | });
94 |
95 | });
96 |
--------------------------------------------------------------------------------