├── .editorconfig
├── .gitignore
├── deploy
│   ├── deploy
│   ├── mailtube
│   │   ├── init.d
│   │   │   └── appserver.conf
│   │   └── nginx
│   │       ├── nginx.conf
│   │       └── site.conf
│   ├── open
│   ├── setup
│   ├── ssh
│   └── templates
│       ├── carnivore
│       ├── carnivore.json
│       ├── primal
│       └── primal.json
├── license
├── package.json
└── readme.md
/.editorconfig:
--------------------------------------------------------------------------------
# editorconfig.org
root = true

[*]
indent_style = space
indent_size = 2
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.md]
trim_trailing_whitespace = false
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
deploy/env/
deploy/keys/
deploy/log/
deploy/mailtube/package.json
node_modules/
npm-debug.log
tmp/
--------------------------------------------------------------------------------
/deploy/deploy:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

: "${NODE_ENV:="staging"}"
# : "${PRIMAL_AMI:="ami-xxxxxxxx"}"
# : "${CARNIVORE_AMI:="ami-xxxxxxxx"}"
: "${CLEANUP:="no"}"

# configurable variables
NAME="baal-$NODE_ENV"

# conventions, do not change
INSTANCE_TYPE="t2.micro"
INSTANCE_USER="admin"
MIN_CAPACITY="1"
MAX_CAPACITY="3"
DESIRED_CAPACITY="1"

STAMP="$(date +"%Y%m%d%H%M%S")"
SG_NAME="standard-$NAME"
ELB_NAME="elb-$NAME"
KEYFILE="deploy/keys/$NODE_ENV"
ASG_NAME="asg-$NAME-$STAMP"
LC_NAME="lc-$NAME-$STAMP"

rm -rf deploy/log
mkdir deploy/log

echo "deploy: build target: $NODE_ENV"
echo "deploy: getting vpc and subnet ids..."
aws ec2 describe-subnets --filters Name=tag:Name,Values=subnet-$NAME > deploy/log/subnet.log
VPC_ID=$(jq -r .Subnets[0].VpcId < deploy/log/subnet.log)
SUBNET_ID=$(jq -r .Subnets[0].SubnetId < deploy/log/subnet.log)

# build images using packer
if [ -z ${PRIMAL_AMI+x} ]
then
  echo "deploy: building primal image with packer..."

  cp package.json deploy/mailtube
  packer build \
    -var VPC_ID=$VPC_ID \
    -var SUBNET_ID=$SUBNET_ID \
    -var NODE_VERSION=$(jq -r .engines.node < package.json) \
    deploy/templates/primal.json | tee deploy/log/packer-primal.log

  PRIMAL_AMI=$(tail -1 < deploy/log/packer-primal.log | cut -d ' ' -f 2)

  echo "deploy: built image $PRIMAL_AMI"
else
  echo "deploy: skipping primal image build, using $PRIMAL_AMI"
fi

if [ -z ${CARNIVORE_AMI+x} ]
then
  echo "deploy: building app..."
  npm run build-$NODE_ENV

  echo "deploy: building carnivore image with packer..."
  packer build \
    -var VPC_ID=$VPC_ID \
    -var SUBNET_ID=$SUBNET_ID \
    -var SOURCE_AMI=$PRIMAL_AMI \
    -var NODE_ENV=$NODE_ENV \
    deploy/templates/carnivore.json | tee deploy/log/packer-carnivore.log

  CARNIVORE_AMI=$(tail -1 < deploy/log/packer-carnivore.log | cut -d ' ' -f 2)

  echo "deploy: built image $CARNIVORE_AMI"
else
  CLEANUP="no"
  echo "deploy: skipping deployment image build, using $CARNIVORE_AMI"
fi

echo "deploy: deploying $CARNIVORE_AMI ($NODE_ENV) to aws."
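
# everything below performs the zero-downtime swap: list what is already
# running, stand up a new ASG behind the same ELB, wait until its instances
# are healthy, and only then retire the old groups. the underscore-cli filters
# used here could also be written in jq, which is already a dependency; a
# rough, untested sketch of the first one:
#   jq -r --arg prefix "asg-$NAME" '.AutoScalingGroups[]
#     | select(.AutoScalingGroupName | startswith($prefix))
#     | .AutoScalingGroupName' < deploy/log/asg-list.log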
echo "deploy: pulling down list of existing autoscaling groups..."
aws autoscaling describe-auto-scaling-groups > deploy/log/asg-list.log

echo "deploy: pulling down list of existing launch configurations..."
aws autoscaling describe-launch-configurations > deploy/log/asg-lc.log

EXISTING_GROUP_NAMES=$(underscore process --outfmt text "data.AutoScalingGroups.filter(function (asg) {
  return asg.AutoScalingGroupName.indexOf(\"asg-$NAME\") === 0
}).map(function (asg) {
  return asg.AutoScalingGroupName
})" < deploy/log/asg-list.log)

EXISTING_LAUNCH_CONFIGURATIONS=$(underscore process --outfmt text "data.LaunchConfigurations.filter(function (lc) {
  return lc.LaunchConfigurationName.indexOf(\"lc-$NAME\") === 0
}).map(function (lc) {
  return lc.LaunchConfigurationName
})" < deploy/log/asg-lc.log)

if [ "$EXISTING_GROUP_NAMES" != "" ]
then
  echo "$EXISTING_GROUP_NAMES" > deploy/log/asg-existing-group-names.log
else
  touch deploy/log/asg-existing-group-names.log
fi

if [ "$EXISTING_LAUNCH_CONFIGURATIONS" != "" ]
then
  echo "$EXISTING_LAUNCH_CONFIGURATIONS" > deploy/log/asg-existing-lc.log
else
  touch deploy/log/asg-existing-lc.log
fi

echo "deploy: querying security group for its id..."
SG_ID=$(aws ec2 describe-security-groups --filters Name=tag:Name,Values=$SG_NAME | jq -r .SecurityGroups[0].GroupId)

echo "deploy: creating $LC_NAME using the latest image..."
aws autoscaling create-launch-configuration \
  --launch-configuration-name "$LC_NAME" \
  --image-id "$CARNIVORE_AMI" \
  --instance-type "$INSTANCE_TYPE" \
  --key-name "$NAME" \
  --associate-public-ip-address \
  --security-groups "$SG_ID" > deploy/log/asg-lc-creation.log

echo "deploy: creating $ASG_NAME autoscaling group..."
aws autoscaling create-auto-scaling-group \
  --auto-scaling-group-name "$ASG_NAME" \
  --launch-configuration-name "$LC_NAME" \
  --health-check-type "ELB" \
  --health-check-grace-period 300 \
  --load-balancer-names "$ELB_NAME" \
  --min-size "$MIN_CAPACITY" \
  --max-size "$MAX_CAPACITY" \
  --desired-capacity "$DESIRED_CAPACITY" \
  --vpc-zone-identifier $SUBNET_ID \
  --tags ResourceId=$ASG_NAME,Key=Name,Value=$NAME ResourceId=$ASG_NAME,Key=Role,Value=web > deploy/log/asg-create-group.log

EC2_HEALTH="0"
while [ "$EC2_HEALTH" != "$DESIRED_CAPACITY" ]
do
  printf "deploy: ensuring new instance(s) are healthy at ec2"
  sleep 2;printf ".";sleep 2;printf "."
  aws autoscaling describe-auto-scaling-groups \
    --auto-scaling-group-names "$ASG_NAME" > deploy/log/asg-description.log

  EC2_HEALTH=$(underscore process --outfmt text "data.AutoScalingGroups[0].Instances.filter(function (i) {
    return i.LifecycleState === 'InService' && i.HealthStatus === 'Healthy'
  }).length" < deploy/log/asg-description.log)

  echo " ($EC2_HEALTH/$DESIRED_CAPACITY are healthy)"
done
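
# the loop above only proves the instances booted and the ASG considers them
# healthy; the loop below additionally waits for the ELB's own health check
# (a TCP probe on port 80, configured in deploy/setup) to pass, i.e. for nginx
# to actually accept connections, before any old instances are touched.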

ELB_INSTANCES=$(jq -r '.AutoScalingGroups[0].Instances[]?.InstanceId' < deploy/log/asg-description.log)
ELB_HEALTH="0"
while [ "$ELB_HEALTH" != "$DESIRED_CAPACITY" ]
do
  printf "deploy: ensuring new instance(s) are healthy at elb"
  sleep 2;printf ".";sleep 2;printf "."
  aws elb describe-instance-health \
    --load-balancer-name "$ELB_NAME" \
    --instances $ELB_INSTANCES > deploy/log/elb-health-description.log

  ELB_HEALTH=$(underscore process --outfmt text "data.InstanceStates.filter(function (s) {
    return s.State === 'InService'
  }).length" < deploy/log/elb-health-description.log)

  echo " ($ELB_HEALTH/$DESIRED_CAPACITY are healthy)"
done

while read EXISTING_GROUP_NAME
do
  ASG_INSTANCES=$(underscore process --outfmt text "data.AutoScalingGroups.filter(function (asg,i) {
    return asg.AutoScalingGroupName === \"$EXISTING_GROUP_NAME\"
  }).shift().Instances.map(function (i) {
    return i.InstanceId
  })" < deploy/log/asg-list.log)

  echo "deploy: removing instances in outdated $EXISTING_GROUP_NAME from $ELB_NAME..."
  aws elb deregister-instances-from-load-balancer \
    --load-balancer-name $ELB_NAME \
    --instances $ASG_INSTANCES > deploy/log/elb-deregister.log

  echo "deploy: downscaling outdated $EXISTING_GROUP_NAME..."
  aws autoscaling update-auto-scaling-group \
    --auto-scaling-group-name $EXISTING_GROUP_NAME \
    --max-size 0 \
    --min-size 0 > deploy/log/asg-downscale.log

  OPERATIONAL="1"
  while [ "$OPERATIONAL" != "0" ]
  do
    printf "deploy: ensuring outdated instance(s) are terminated"
    sleep 2;printf ".";sleep 2;printf "."
    aws autoscaling describe-auto-scaling-groups \
      --auto-scaling-group-names "$EXISTING_GROUP_NAME" > deploy/log/asg-existing-description.log

    OPERATIONAL=$(underscore process --outfmt text "data.AutoScalingGroups.filter(function (asg) {
      return asg.AutoScalingGroupName === \"$EXISTING_GROUP_NAME\"
    }).shift().Instances.length" < deploy/log/asg-existing-description.log)

    echo " ($OPERATIONAL are operational)"
  done

  echo "deploy: deleting outdated $EXISTING_GROUP_NAME..."
  sleep 10;
  aws autoscaling delete-auto-scaling-group \
    --auto-scaling-group-name $EXISTING_GROUP_NAME || echo "deploy: delete failed. maybe it's already deleted."
done < deploy/log/asg-existing-group-names.log

while read EXISTING_LC_NAME
do
  echo "deploy: removing outdated launch configuration $EXISTING_LC_NAME..."
  aws autoscaling delete-launch-configuration \
    --launch-configuration-name "$EXISTING_LC_NAME" >> deploy/log/asg-lc-deletion.log || echo "deploy: delete failed. maybe it's already deleted."
done < deploy/log/asg-existing-lc.log

if [ "$CLEANUP" != "no" ]
then
  SNAPSHOT_ID=$(aws ec2 describe-images \
    --image-ids $CARNIVORE_AMI \
    | jq -r .Images[0].BlockDeviceMappings[0].Ebs.SnapshotId)

  echo "deploy: deregistering deployment image $CARNIVORE_AMI"
  aws ec2 deregister-image --image-id "$CARNIVORE_AMI"

  echo "deploy: deleting snapshot $SNAPSHOT_ID"
  aws ec2 delete-snapshot --snapshot-id "$SNAPSHOT_ID"
fi

echo "deploy: done!"
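
# usage sketches (assuming AWS credentials for the target account are configured):
#   deploy/deploy                            # staging; builds both images
#   NODE_ENV=production deploy/deploy        # deploy to production
#   PRIMAL_AMI=ami-xxxxxxxx deploy/deploy    # reuse a prebuilt base image
#   CLEANUP=yes deploy/deploy                # deregister the AMI and delete its snapshot afterwards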
--------------------------------------------------------------------------------
/deploy/mailtube/init.d/appserver.conf:
--------------------------------------------------------------------------------
#!/bin/sh
### BEGIN INIT INFO
# Provides:          {NAME}
# Required-Start:    $local_fs $network $named $time $syslog
# Required-Stop:     $local_fs $network $named $time $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Description:       {DESCRIPTION}
### END INIT INFO

SCRIPT={COMMAND}
RUNAS={USER}

PIDFILE=/var/run/{NAME}.pid
LOGFILE=/var/log/{NAME}.log
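
# {NAME}, {DESCRIPTION}, {USER} and {COMMAND} are not shell variables but
# placeholders: deploy/templates/carnivore fills them in while baking the
# image, along the lines of:
#   sed -i "s#{COMMAND}#$HOME/app/start#g" appserver.conf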

start() {
  if [ -f "$PIDFILE" ] && kill -0 $(cat "$PIDFILE"); then
    echo '{NAME}: Service already running' >&2
    return 1
  fi
  echo '{NAME}: Starting service…' >&2
  local CMD="$SCRIPT > \"$LOGFILE\" 2>&1 & echo \$!"
  su -c "$CMD" $RUNAS > "$PIDFILE"
  echo '{NAME}: Service started' >&2
}

stop() {
  if [ ! -f "$PIDFILE" ] || ! kill -0 $(cat "$PIDFILE"); then
    echo '{NAME}: Service not running' >&2
    return 1
  fi
  echo '{NAME}: Stopping service…' >&2
  kill -15 $(cat "$PIDFILE") && rm -f "$PIDFILE"
  echo '{NAME}: Service stopped' >&2
}

uninstall() {
  stop
  rm -f "$PIDFILE"
  echo "{NAME}: log file will not be removed: '$LOGFILE'" >&2
  update-rc.d -f {NAME} remove
  rm -fv "$0"
}

case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  uninstall)
    uninstall
    ;;
  restart)
    stop
    start
    ;;
  *)
    echo "Usage: $0 {start|stop|restart|uninstall}"
esac
--------------------------------------------------------------------------------
/deploy/mailtube/nginx/nginx.conf:
--------------------------------------------------------------------------------
user {NGINX_USER};
worker_processes {NGINX_WORKERS};

error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;

events {
  worker_connections 1024;
}

http {
  proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=one:8m max_size=3000m inactive=600m;
  proxy_temp_path /var/tmp;
  include mime.types;
  default_type application/octet-stream;
  sendfile on;
  keepalive_timeout 65;
  server_tokens off;

  gzip on;
  gzip_comp_level 6;
  gzip_vary on;
  gzip_min_length 1000;
  gzip_proxied any;
  gzip_types text/plain text/css application/json application/x-javascript text/javascript text/xml application/xml application/xml+rss image/x-icon;
  gzip_buffers 16 8k;

  log_format main '$remote_addr - $host [$time_local] "$request" '
                  '$status $body_bytes_sent "$http_referer" '
                  '"$http_user_agent"';

  access_log /var/log/nginx/access.log main;

  include /etc/nginx/sites-enabled/*;
}
--------------------------------------------------------------------------------
/deploy/mailtube/nginx/site.conf:
--------------------------------------------------------------------------------
server {
  listen 8080;
  server_name {SERVER_NAME};

  location ~ ^/(images/|js/|css/|fonts/|favicon\.ico|opensearch\.xml|robots\.txt|humans\.txt) {
    root {STATIC_ROOT};
    access_log off;
    expires max;
  }

  location / {
    proxy_redirect off;
    proxy_set_header Connection "";
    proxy_set_header Host $http_host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    proxy_set_header X-Forwarded-Proto $scheme;
    proxy_http_version 1.1;
    proxy_cache one;
    proxy_cache_key nx$request_uri$scheme;
    proxy_pass http://127.0.0.1:3000;
  }
}
--------------------------------------------------------------------------------
/deploy/open:
--------------------------------------------------------------------------------
#!/bin/bash

: "${NODE_ENV:="staging"}"

TLD="baal.com"
HOST_NAME=$NODE_ENV"."

if [ "$HOST_NAME" == "production." ]
then
  HOST_NAME=""
fi

open http://$HOST_NAME$TLD
--------------------------------------------------------------------------------
/deploy/setup:
--------------------------------------------------------------------------------
#!/bin/bash

set -e

: "${NODE_ENV:="staging"}"

# configurable variables
TLD="baal.com"
NAME="baal-$NODE_ENV"

# conventions, do not change
ELB_NAME="elb-$NAME"
ASG_NAME="asg-$NAME"
SG_NAME="standard-$NAME"
LC_NAME="lc-$NAME-initial"
KEYFILE="deploy/keys/$NODE_ENV"
HOST_NAME=$NODE_ENV"."

if [ "$HOST_NAME" == "production." ]
then
  HOST_NAME=""
fi

rm -rf deploy/log
mkdir deploy/log

echo "setup: querying hosted zone..."
aws route53 list-hosted-zones-by-name \
  --dns-name $TLD > deploy/log/route53-hosted-zone.log

JQUERY="if .HostedZones[0].Name == \"$TLD.\" then .HostedZones[0].Id else \"FAIL\" end"
HOSTED_ZONE=$(jq -r "$JQUERY" < deploy/log/route53-hosted-zone.log)

if [ "$HOSTED_ZONE" == "FAIL" ]
then
  echo "setup: couldn't find hosted zone. please register a route53 hosted zone for $TLD"
  exit 1
fi

echo "setup: creating vpc..."
aws ec2 create-vpc \
  --cidr-block 10.0.0.0/16 > deploy/log/ec2-create-vpc.log

VPC_ID=$(jq -r .Vpc.VpcId < deploy/log/ec2-create-vpc.log)

echo "setup: configuring dns for vpc..."
aws ec2 modify-vpc-attribute \
  --vpc-id $VPC_ID \
  --enable-dns-support > deploy/log/ec2-vpc-dns-support.log

aws ec2 modify-vpc-attribute \
  --vpc-id $VPC_ID \
  --enable-dns-hostnames > deploy/log/ec2-vpc-dns-hostnames.log

echo "setup: creating security group for vpc..."
aws ec2 create-security-group \
  --group-name $SG_NAME \
  --vpc-id $VPC_ID \
  --description "Autogenerated security group for $HOST_NAME$TLD" > deploy/log/ec2-vpc-sg-create.log

SG_ID=$(jq -r .GroupId < deploy/log/ec2-vpc-sg-create.log)

echo "setup: tagging security group..."
aws ec2 create-tags \
  --resources $SG_ID \
  --tags Key=Name,Value=standard-$NAME Key=Role,Value=web > deploy/log/ec2-tag-sg.log

echo "setup: opening up http on security group..."
aws ec2 authorize-security-group-ingress \
  --group-id $SG_ID \
  --protocol tcp \
  --port 80 \
  --cidr 0.0.0.0/0

echo "setup: opening up https on security group..."
aws ec2 authorize-security-group-ingress \
  --group-id $SG_ID \
  --protocol tcp \
  --port 443 \
  --cidr 0.0.0.0/0

# todo: remove this step. instead, grant ssh to current IP temporarily for ssh commands
echo "setup: opening up ssh on security group..."
aws ec2 authorize-security-group-ingress \
  --group-id $SG_ID \
  --protocol tcp \
  --port 22 \
  --cidr 0.0.0.0/0
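
# note that the subnet created below spans the entire 10.0.0.0/16 VPC block.
# that is valid, but it means no second subnet will ever fit in this VPC; carve
# out a smaller block (say 10.0.0.0/24) if you plan on adding subnets later.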

echo "setup: creating subnet..."
aws ec2 create-subnet \
  --cidr-block 10.0.0.0/16 \
  --vpc-id $VPC_ID > deploy/log/ec2-create-subnet.log

echo "setup: creating internet gateway..."
aws ec2 create-internet-gateway > deploy/log/ec2-create-internet-gateway.log

SUBNET_ID=$(jq -r .Subnet.SubnetId < deploy/log/ec2-create-subnet.log)
IGW_ID=$(jq -r .InternetGateway.InternetGatewayId < deploy/log/ec2-create-internet-gateway.log)

echo "setup: tagging vpc..."
aws ec2 create-tags \
  --resources $VPC_ID \
  --tags Key=Name,Value=vpc-$NAME Key=Role,Value=web > deploy/log/ec2-tag-vpc.log

echo "setup: tagging subnet..."
aws ec2 create-tags \
  --resources $SUBNET_ID \
  --tags Key=Name,Value=subnet-$NAME Key=Role,Value=web > deploy/log/ec2-tag-subnet.log

echo "setup: tagging internet gateway..."
aws ec2 create-tags \
  --resources $IGW_ID \
  --tags Key=Name,Value=igw-$NAME Key=Role,Value=web > deploy/log/ec2-tag-igw.log

echo "setup: attaching internet gateway to vpc..."
aws ec2 attach-internet-gateway \
  --vpc-id $VPC_ID \
  --internet-gateway-id $IGW_ID > deploy/log/ec2-attach-igw.log

echo "setup: creating custom route table..."
aws ec2 create-route-table \
  --vpc-id $VPC_ID > deploy/log/ec2-create-route-table.log

ROUTE_TABLE_ID=$(jq -r .RouteTable.RouteTableId < deploy/log/ec2-create-route-table.log)

echo "setup: associating route table..."
aws ec2 associate-route-table \
  --route-table-id $ROUTE_TABLE_ID \
  --subnet-id $SUBNET_ID > deploy/log/ec2-associate-route-table.log

echo "setup: adding route to internet gateway..."
aws ec2 create-route \
  --route-table-id $ROUTE_TABLE_ID \
  --gateway-id $IGW_ID \
  --destination-cidr-block 0.0.0.0/0 > deploy/log/ec2-create-igw-route.log

echo "setup: creating $ELB_NAME load balancer..."
aws elb create-load-balancer \
  --load-balancer-name "$ELB_NAME" \
  --security-groups $SG_ID \
  --listeners Protocol=TCP,LoadBalancerPort=80,InstanceProtocol=TCP,InstancePort=80 \
  --subnets $SUBNET_ID > deploy/log/elb-create.log

echo "setup: enabling connection draining on elb..."
aws elb modify-load-balancer-attributes \
  --load-balancer-name "$ELB_NAME" \
  --load-balancer-attributes "{\"ConnectionDraining\":{\"Enabled\":true,\"Timeout\":300}}" > deploy/log/elb-draining.log

echo "setup: configuring health checks on elb..."
aws elb configure-health-check \
  --load-balancer-name "$ELB_NAME" \
  --health-check Target=TCP:80,Interval=30,UnhealthyThreshold=2,HealthyThreshold=2,Timeout=4 > deploy/log/elb-health.log

echo "setup: creating proxy protocol policy on elb..."
aws elb create-load-balancer-policy \
  --load-balancer-name "$ELB_NAME" \
  --policy-name "$ELB_NAME-proxy-protocol" \
  --policy-type-name ProxyProtocolPolicyType \
  --policy-attributes AttributeName=ProxyProtocol,AttributeValue=true > deploy/log/elb-create-pp.log

echo "setup: enabling proxy protocol on elb..."
aws elb set-load-balancer-policies-for-backend-server \
  --load-balancer-name "$ELB_NAME" \
  --policy-names "$ELB_NAME-proxy-protocol" \
  --instance-port 80 > deploy/log/elb-enable-pp.log
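
# with this policy enabled the ELB prepends a PROXY protocol line to every
# backend connection, preserving the client address across plain TCP. whatever
# terminates those connections must expect it; in nginx that means adding the
# proxy_protocol flag to the listen directive and reading $proxy_protocol_addr.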

echo "setup: describing load balancer to create route53 alias recordset..."
aws elb describe-load-balancers \
  --load-balancer-name "$ELB_NAME" > deploy/log/elb-describe-lb.log

ELB_ZONE_ID=$(jq -r '.LoadBalancerDescriptions[0].CanonicalHostedZoneNameID' < deploy/log/elb-describe-lb.log)
ELB_ZONE_NAME=$(jq -r '.LoadBalancerDescriptions[0].CanonicalHostedZoneName' < deploy/log/elb-describe-lb.log)

echo "setup: creating route53 alias recordset on $HOST_NAME$TLD..."
echo "{
  \"Changes\": [{
    \"Action\": \"UPSERT\",
    \"ResourceRecordSet\": {
      \"Type\": \"A\",
      \"Name\": \"$HOST_NAME$TLD.\",
      \"AliasTarget\": {
        \"HostedZoneId\": \"$ELB_ZONE_ID\",
        \"DNSName\": \"$ELB_ZONE_NAME\",
        \"EvaluateTargetHealth\": true
      }
    }
  }]
}" > deploy/log/route53-record-set-changes.log

aws route53 change-resource-record-sets \
  --hosted-zone-id "$HOSTED_ZONE" \
  --change-batch "file://deploy/log/route53-record-set-changes.log" > deploy/log/route53-change-recordset.log
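
# UPSERT keeps this step idempotent: the alias record is created on the first
# run and updated in place on later ones. note the two different zone ids in
# play: $HOSTED_ZONE addresses your domain's zone, while AliasTarget.HostedZoneId
# must be the ELB's canonical hosted zone id.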

if [ -f "$KEYFILE" ]
then
  echo "setup: ssh key file already exists on aws."
else
  echo "setup: ssh key file doesn't exist yet. creating..."
  mkdir -p deploy/keys
  ssh-keygen -t rsa -b 4096 -N "" -f "$KEYFILE"
  aws ec2 import-key-pair \
    --key-name "$NAME" \
    --public-key-material "file://$KEYFILE.pub" > deploy/log/ec2-upload-keypair.log
  echo "setup: ssh key file uploaded to aws."
fi

echo "setup: done."
--------------------------------------------------------------------------------
/deploy/ssh:
--------------------------------------------------------------------------------
#!/bin/bash

: "${NODE_ENV:="staging"}"

NAME="baal-$NODE_ENV"
KEYFILE="deploy/keys/$NODE_ENV"
INSTANCE_USER="admin"
PUBLIC_DNS=$(aws ec2 describe-instances \
  --filters "Name=instance-state-name,Values=running" "Name=tag:Name,Values=$NAME" \
  | jq -r ".Reservations[0].Instances[0].PublicDnsName")

ssh -i "$KEYFILE" -o StrictHostKeyChecking=no $INSTANCE_USER@$PUBLIC_DNS
--------------------------------------------------------------------------------
/deploy/templates/carnivore:
--------------------------------------------------------------------------------
#!/bin/bash

echo "packer: updating nginx configuration"
cp -r $HOME/app/mailtube/nginx $HOME/app/nginx

sed -i "s#{NGINX_USER}#$INSTANCE_USER#g" $HOME/app/nginx/nginx.conf
sed -i "s#{NGINX_WORKERS}#$NGINX_WORKERS#g" $HOME/app/nginx/nginx.conf
sed -i "s#{SERVER_NAME}#$SERVER_NAME#g" $HOME/app/nginx/site.conf
sed -i "s#{STATIC_ROOT}#$HOME/app/server/.bin/public#g" $HOME/app/nginx/site.conf

sudo ln -sfn $HOME/app/nginx/nginx.conf /etc/nginx/nginx.conf
sudo ln -sfn $HOME/app/nginx/site.conf /etc/nginx/sites-enabled/$NAME.conf
sudo rm -f /etc/nginx/sites-enabled/default

sudo service nginx restart || sudo service nginx start || (sudo cat /var/log/nginx/error.log && exit 1)

echo "packer: installing appserver daemon..."
echo "#!/bin/bash" > $HOME/app/start
echo ". $HOME/.nvm/nvm.sh" >> $HOME/app/start
echo "NODE_ENV=$NODE_ENV node $HOME/app/server/cluster.js" >> $HOME/app/start
chmod +x $HOME/app/start
cp $HOME/app/mailtube/init.d/appserver.conf $HOME/app/$NAME.conf
sed -i "s#{NAME}#$NAME#g" $HOME/app/$NAME.conf
sed -i "s#{DESCRIPTION}#Web application daemon service for $NAME#g" $HOME/app/$NAME.conf
sed -i "s#{USER}#$INSTANCE_USER#g" $HOME/app/$NAME.conf
sed -i "s#{COMMAND}#$HOME/app/start#g" $HOME/app/$NAME.conf
sudo mv $HOME/app/$NAME.conf /etc/init.d/$NAME
sudo chmod +x /etc/init.d/$NAME
sudo touch /var/log/$NAME.log
sudo chown $INSTANCE_USER /var/log/$NAME.log
sudo update-rc.d $NAME defaults

echo "packer: sourcing nvm"
. $HOME/.nvm/nvm.sh

echo "packer: moving uploaded server code"
mv /tmp/appserver $HOME/app/server

echo "packer: installing server dependencies"
mv $HOME/app/server/deploy/env/.env.$NODE_ENV.json $HOME/app/server/.env.json
mv $HOME/app/precache/node_modules $HOME/app/server/node_modules
npm install --prefix $HOME/app/server --production

echo "packer: booting appserver daemon..."
sudo service $NAME start
--------------------------------------------------------------------------------
/deploy/templates/carnivore.json:
--------------------------------------------------------------------------------
{
  "variables": {
    "SOURCE_AMI": null,
    "INSTANCE_TYPE": "t2.micro",
    "INSTANCE_USER": "admin",
    "VPC_ID": null,
    "SUBNET_ID": null,
    "SERVER_NAME": "baal.com",
    "NODE_ENV": "staging",
    "NGINX_WORKERS": "4"
  },
  "builders": [{
    "type": "amazon-ebs",
    "region": "us-east-1",
    "vpc_id": "{{user `VPC_ID`}}",
    "subnet_id": "{{user `SUBNET_ID`}}",
    "associate_public_ip_address": true,
    "instance_type": "{{user `INSTANCE_TYPE`}}",
    "ssh_username": "{{user `INSTANCE_USER`}}",
    "ami_name": "baal-carnivore-{{user `NODE_ENV`}} {{timestamp}}",
    "source_ami": "{{user `SOURCE_AMI`}}"
  }],
  "provisioners": [{
    "type": "file",
    "source": "tmp/appserver",
    "destination": "/tmp/appserver"
  }, {
    "type": "shell",
    "environment_vars": [
      "INSTANCE_USER={{user `INSTANCE_USER`}}",
      "NGINX_WORKERS={{user `NGINX_WORKERS`}}",
      "SERVER_NAME={{user `SERVER_NAME`}}",
      "NODE_ENV={{user `NODE_ENV`}}",
      "NAME=baal-{{user `NODE_ENV`}}"
    ],
    "script": "deploy/templates/carnivore"
  }]
}
--------------------------------------------------------------------------------
/deploy/templates/primal:
--------------------------------------------------------------------------------
#!/bin/bash

echo "packer: updating aptitude"
sudo apt-key update
sudo apt-get update
sudo apt-get remove apt-listchanges -y
sudo apt-get install git make g++ graphicsmagick curl python-software-properties software-properties-common -y
sudo add-apt-repository ppa:nginx/stable -y
sudo apt-get update

echo "packer: creating swap space"
sudo mkdir -p /media/fasthdd
sudo dd if=/dev/zero of=/media/fasthdd/swapfile.img bs=1024 count=3M
sudo mkswap /media/fasthdd/swapfile.img
sudo chmod 0600 /media/fasthdd/swapfile.img
echo "/media/fasthdd/swapfile.img swap swap sw 0 0" | sudo tee -a /etc/fstab
sudo swapon /media/fasthdd/swapfile.img
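
# the dd above writes 3M (3 * 1024 * 1024) blocks of 1024 bytes each: a 3GiB
# swap file. on a t2.micro, which only has 1GiB of RAM, this mostly keeps npm
# installs and the provisioning steps from being killed by the OOM reaper.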

echo "packer: nginx"
sudo mkdir -p /var/log/nginx
sudo chown $INSTANCE_USER /var/log/nginx
sudo chmod -R 755 /var/log/nginx
sudo apt-get install nginx -y

echo "packer: nginx as a service"
sudo update-rc.d nginx defaults

echo "packer: tweaking tcp"
sudo sysctl -w net.ipv4.tcp_slow_start_after_idle=0
sudo sysctl -w net.ipv4.tcp_window_scaling=1

echo "packer: ipv4 forwarding"
cp /etc/sysctl.conf /tmp/
echo "net.ipv4.ip_forward = 1" >> /tmp/sysctl.conf
sudo cp /tmp/sysctl.conf /etc/
sudo sysctl -p /etc/sysctl.conf

echo "packer: forward port 80 to 8080"
sudo iptables -A PREROUTING -t nat -i eth0 -p tcp --dport 80 -j REDIRECT --to-port 8080
sudo iptables -A INPUT -p tcp -m tcp --sport 80 -j ACCEPT
sudo iptables -A OUTPUT -p tcp -m tcp --dport 80 -j ACCEPT
sudo iptables-save > /tmp/iptables-store.conf
sudo mv /tmp/iptables-store.conf /etc/iptables-store.conf

echo "packer: remember port forwarding rule across reboots"
echo "#!/bin/sh" > /tmp/iptables-ifupd
echo "iptables-restore < /etc/iptables-store.conf" >> /tmp/iptables-ifupd
chmod +x /tmp/iptables-ifupd
sudo mv /tmp/iptables-ifupd /etc/network/if-up.d/iptables

echo "packer: nvm"
curl https://raw.githubusercontent.com/creationix/nvm/$NVM_VERSION/install.sh | bash
. $HOME/.nvm/nvm.sh

echo '[[ -s $HOME/.nvm/nvm.sh ]] && . $HOME/.nvm/nvm.sh' >> $HOME/.bashrc

echo "packer: nodejs"
nvm install $NODE_VERSION
nvm alias default $NODE_VERSION
npm update -g npm

echo "packer: precaching server dependencies"
mkdir -p $HOME/app/precache
cp -r /tmp/mailtube $HOME/app/mailtube
cp $HOME/app/mailtube/package.json $HOME/app/precache
npm install --prefix $HOME/app/precache --production
--------------------------------------------------------------------------------
/deploy/templates/primal.json:
--------------------------------------------------------------------------------
{
  "variables": {
    "SOURCE_AMI": "ami-116d857a",
    "INSTANCE_TYPE": "t2.micro",
    "INSTANCE_USER": "admin",
    "VPC_ID": null,
    "SUBNET_ID": null,
    "NVM_VERSION": "v0.24.0",
    "NODE_VERSION": "0.10"
  },
  "builders": [{
    "type": "amazon-ebs",
    "region": "us-east-1",
    "vpc_id": "{{user `VPC_ID`}}",
    "subnet_id": "{{user `SUBNET_ID`}}",
    "associate_public_ip_address": true,
    "instance_type": "{{user `INSTANCE_TYPE`}}",
    "ssh_username": "{{user `INSTANCE_USER`}}",
    "ami_name": "baal-primal {{timestamp}}",
    "source_ami": "{{user `SOURCE_AMI`}}"
  }],
  "provisioners": [{
    "type": "file",
    "source": "deploy/mailtube",
    "destination": "/tmp/mailtube"
  }, {
    "type": "shell",
    "environment_vars": [
      "INSTANCE_USER={{user `INSTANCE_USER`}}",
      "NVM_VERSION={{user `NVM_VERSION`}}",
      "NODE_VERSION={{user `NODE_VERSION`}}"
    ],
    "script": "deploy/templates/primal"
  }]
}
--------------------------------------------------------------------------------
/license:
--------------------------------------------------------------------------------
The MIT License (MIT)

Copyright © 2015 Nicolas Bevacqua

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "baal",
  "version": "1.0.0",
  "description": "Automated, autoscaled, zero-downtime, immutable deployments using plain old bash, Packer, nginx, Node.js, and AWS. Made easy.",
  "engines": {
    "node": "0.10"
  },
  "scripts": {
    "setup": "deploy/setup",
    "deploy": "deploy/deploy",
    "ssh": "deploy/ssh",
    "open": "deploy/open",
    "shutdown": "deploy/shutdown"
  },
  "dependencies": {
  },
  "devDependencies": {
    "underscore-cli": "^0.2.18"
  }
}
--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# baal: build. automate. autoscale. launch.

> Automated, autoscaled, zero-downtime, immutable deployments using plain old bash, Packer, nginx, Node.js, and AWS. Made easy.

Read about this deployment strategy on [Pony Foo][1], and then clone this repository. Copy the scripts, and adjust them as needed. A few pointers:

- The application is named `baal` in a few places. Change that to your application's name
- You need to set `TLD` in the `setup` and `open` scripts to a domain that has a Route 53 hosted zone. Read the articles on [Pony Foo][1] to learn more about that
- In the `deploy` script you can set `PRIMAL_AMI` to an existing AMI so that your base image isn't rebuilt on every deploy
- By default, `baal` expects to build your static assets with `npm run build-$NODE_ENV`
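
A typical first run looks something like the sketch below, assuming your AWS credentials are configured and a Route 53 hosted zone exists for your `TLD`:

```bash
npm run setup     # one-time: VPC, subnet, ELB, Route 53 alias, SSH key pair
npm run deploy    # bake the AMIs with Packer and roll the autoscaling group
npm run open      # open the deployed site in a browser

NODE_ENV=production npm run setup
NODE_ENV=production npm run deploy
```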

# reading

Relevant [Pony Foo][1] articles:

- [Immutable Deployments and Packer][2]
- [Leveraging Immutable Deployments][3]

# requirements

You'll need to install all of the following.

```bash
pip install awscli
aws configure
brew install jq
brew install packer
npm install underscore-cli --save-dev
```

# license

MIT

[1]: http://ponyfoo.com
[2]: http://ponyfoo.com/articles/immutable-deployments-packer
[3]: http://ponyfoo.com/articles/leveraging-immutable-deployments
--------------------------------------------------------------------------------