├── .gitignore
├── .ruby-gemset
├── .ruby-version
├── Gemfile
├── Gemfile.lock
├── README.md
├── Rakefile
├── go
├── init_rbenv
├── opsworks-service.template
└── rbenv-vars.example

/.gitignore:
--------------------------------------------------------------------------------
1 | .rbenv-vars
2 | .ruby-version
3 | .ruby-gemset
4 | .bundle
5 | vendor
6 | 
--------------------------------------------------------------------------------
/.ruby-gemset:
--------------------------------------------------------------------------------
1 | es-opsworks
2 | 
--------------------------------------------------------------------------------
/.ruby-version:
--------------------------------------------------------------------------------
1 | jruby-9.1.12.0
2 | 
--------------------------------------------------------------------------------
/Gemfile:
--------------------------------------------------------------------------------
1 | source 'https://rubygems.org'
2 | 
3 | gem 'aws-sdk', '~> 2.1'
4 | gem 'rake'
--------------------------------------------------------------------------------
/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 |   remote: https://rubygems.org/
3 |   specs:
4 |     aws-sdk (2.10.21)
5 |       aws-sdk-resources (= 2.10.21)
6 |     aws-sdk-core (2.10.21)
7 |       aws-sigv4 (~> 1.0)
8 |       jmespath (~> 1.0)
9 |     aws-sdk-resources (2.10.21)
10 |       aws-sdk-core (= 2.10.21)
11 |     aws-sigv4 (1.0.1)
12 |     jmespath (1.3.1)
13 |     rake (10.3.2)
14 | 
15 | PLATFORMS
16 |   java
17 |   ruby
18 | 
19 | DEPENDENCIES
20 |   aws-sdk (~> 2.1.19)
21 |   rake
22 | 
23 | BUNDLED WITH
24 |    1.10.6
25 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ElasticSearch OpsWorks
2 | 
3 | [Built with :yellow_heart: and :coffee: in San Francisco](http://getmingle.io)
4 | 
5 | Deploy an ElasticSearch cluster to AWS OpsWorks using the cookbook at https://github.com/ThoughtWorksStudios/opsworks-elasticsearch-cookbook
6 | 
7 | Take a look at https://github.com/ThoughtWorksStudios/opsworks-elasticsearch-cookbook/blob/0.0.1/Berksfile for all cookbook versions installed.
8 | This has not been tested with other versions. YMMV.
9 | 
10 | ## Before deployment
11 | 
12 | Please set up the following dependencies in your AWS region:
13 | 
14 | * SSL certificate
15 | * An SSH key pair; defaults to a key pair named "elasticsearch" if not specified
16 | * A domain name for accessing the elasticsearch cluster
17 | * A Route53 zone for the domain
18 | * The default `aws-opsworks-service-role` and `aws-opsworks-ec2-role` need to exist before provisioning. OpsWorks should automatically create these roles when you add your first stack through the OpsWorks console. See http://docs.aws.amazon.com/opsworks/latest/userguide/gettingstarted-simple-stack.html and http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-appsrole.html for details.
19 | 
20 | ## Setup environment
21 | 
22 | * Clone this repository
23 | * Run `init_rbenv` to set up the rbenv environment, gems, etc. if you don't have it yet
24 | * This is designed to be used in a clean environment, e.g. build agents
25 | * The `go` script is a wrapper for running rake tasks on such a build agent
26 | * `cp rbenv-vars.example .rbenv-vars`
27 | * Fill in the values in `.rbenv-vars` to suit your deployment
28 | 
29 | ## Usage
30 | 
31 | Provision the environment:
32 | 
33 |     rake provision
34 | 
35 | Open `https://<SEARCH_DOMAIN_NAME>/_plugin/head` (a quick smoke test with curl is sketched right after this README)
36 | 
37 | Destroy the environment:
38 | 
39 |     rake destroy
40 | 
41 | 
42 | ## Infrastructure details
43 | 
44 | Route53 --> ELB --> EC2 attached to EBS volumes
45 | 
46 | * Index data is stored on EBS volumes, mounted at `/mnt/elasticsearch-data`
47 | * A 2-node cluster by default, requiring a minimum of one master-eligible node
48 | * Load balanced by an ELB
49 | * Listens on HTTPS only, protected by a basic auth challenge
50 | * EC2 instance type defaults to `c3.large`
51 | 
--------------------------------------------------------------------------------
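Once `rake provision` completes, a quick way to confirm the cluster is reachable is to hit the HTTPS endpoint with the basic auth credentials you configured. This is only an illustrative sketch: the host, user and password below are placeholders for your own SEARCH_DOMAIN_NAME, SEARCH_USER and SEARCH_PASSWORD values.

    # placeholders: search.example.com, elasticsearch, pass
    curl -u elasticsearch:pass "https://search.example.com/_cluster/health?pretty"

A healthy two-node cluster should report `"number_of_nodes" : 2` and a green or yellow status.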
/Rakefile:
--------------------------------------------------------------------------------
1 | require 'aws-sdk'
2 | 
3 | 
4 | SUCCESS_STATS = [:create_complete, :update_complete, :update_rollback_complete]
5 | FAILED_STATS = [:create_failed, :update_failed]
6 | DEFAULT_RECIPES = [
7 |   "apt",
8 |   "ark",
9 |   "elasticsearch",
10 |   "elasticsearch::aws",
11 |   "elasticsearch::proxy",
12 |   "java",
13 |   "layer-custom::esplugins",
14 |   "layer-custom::allocation-awareness",
15 |   "layer-custom::esmonit",
16 |   "layer-custom::cloudwatch-custom"
17 | ].join(",")
18 | 
19 | def opsworks
20 |   Aws::OpsWorks::Client.new({region: get_required("AWS_OPSWORKS_API_REGION")})
21 | end
22 | 
23 | def wait_for_cf_stack_op_to_finish
24 |   stats = cfm.describe_stacks({stack_name: stack_name}).stacks[0].stack_status.downcase.to_sym
25 |   puts "[Stack: #{stack_name}]: current status: #{stats}"
26 | 
27 |   while !SUCCESS_STATS.include?(stats)
28 |     sleep 3
29 |     stats = cfm.describe_stacks({stack_name: stack_name}).stacks[0].stack_status.downcase.to_sym
30 |     raise "Resource stack update failed!" if FAILED_STATS.include?(stats)
31 |     puts "[Stack: #{stack_name}]: current status: #{stats}"
32 |   end
33 | end
34 | 
35 | def cf_query_output(key)
36 |   output = cf_stack.outputs.find { |o| o.output_key == key }
37 |   output && output.output_value
38 | end
39 | 
40 | def instance_online?(instance_id)
41 |   response = opsworks.describe_instances(instance_ids: [instance_id])
42 |   response[:instances].first[:status] == "online"
43 | end
44 | 
45 | def instance_status(instance_id)
46 |   begin
47 |     response = opsworks.describe_instances(instance_ids: [instance_id])
48 |   rescue Aws::OpsWorks::Errors::ResourceNotFoundException
49 |     return "nonexistent"
50 |   end
51 |   response[:instances].first[:status].tap do |status|
52 |     raise "Instance #{instance_id} has a failed status #{status}" if status =~ /fail|error/i
53 |   end
54 | end
55 | 
56 | def wait_for_instance(instance_id, status)
57 |   while (ins_status = instance_status(instance_id)) != status
58 |     puts "[Instance #{instance_id}] waiting for instance to become #{status}. Current status: #{ins_status}"
59 |     sleep 10
60 |   end
61 | end
62 | 
63 | def all_availability_zones
64 |   ec2 = Aws::EC2::Client.new
65 |   ec2.describe_availability_zones["availability_zones"].map(&:zone_name)
66 | end
67 | 
68 | def get_all_instances(layer_id)
69 |   response = opsworks.describe_instances({layer_id: layer_id})
70 |   response[:instances]
71 | end
72 | 
73 | def attach_ebs_volumes(instance_id, volume_ids)
74 |   volume_ids.each do |volume_id|
75 |     puts "Attaching EBS volume #{volume_id} to instance #{instance_id}"
76 |     opsworks.assign_volume({volume_id: volume_id, instance_id: instance_id})
77 |   end
78 | end
79 | 
80 | def detach_ebs_volumes(instance_id)
81 |   response = opsworks.describe_volumes(instance_id: instance_id)
82 |   volume_ids = response[:volumes].map { |v| v[:volume_id] }
83 |   volume_ids.each do |volume_id|
84 |     puts "Detaching EBS volume #{volume_id} from instance #{instance_id}"
85 |     opsworks.unassign_volume(volume_id: volume_id)
86 |   end
87 | 
88 |   volume_ids
89 | end
90 | 
91 | def create_instance(stack_id, layer_id, az)
92 |   opsworks.create_instance({stack_id: stack_id,
93 |                             layer_ids: [layer_id],
94 |                             instance_type: ENV['INSTANCE_TYPE'] || 'c3.large',
95 |                             install_updates_on_boot: !ENV['SKIP_INSTANCE_PACKAGE_UPDATES'],
96 |                             availability_zone: az})
97 | end
98 | 
99 | def update_instances(stack_id, layer_id, count)
100 |   azs = all_availability_zones
101 |   existing_instances = get_all_instances(layer_id)
102 |   count_to_create = count - existing_instances.size
103 |   new_instances = (1..count_to_create).map do |i|
104 |     instance = create_instance(stack_id, layer_id, azs[(existing_instances.size + i) % azs.size])
105 |     puts "Created instance, id: #{instance[:instance_id]}, starting the instance now."
106 |     opsworks.start_instance(instance_id: instance[:instance_id])
107 |     instance
108 |   end
109 | 
110 |   new_instances.each do |instance|
111 |     wait_for_instance(instance[:instance_id], "online")
112 |   end
113 | 
114 |   puts "Replacing existing instances.." if existing_instances.size > 0
115 | 
116 |   existing_instances.each do |instance|
117 |     puts "Stopping instance #{instance[:hostname]}, id: #{instance[:instance_id]}"
118 |     opsworks.stop_instance({instance_id: instance[:instance_id]})
119 |     wait_for_instance(instance[:instance_id], "stopped")
120 |     ebs_volume_ids = detach_ebs_volumes(instance[:instance_id])
121 | 
122 |     puts "Creating replacement instance"
123 |     replacement = create_instance(stack_id, layer_id, instance[:availability_zone])
124 |     attach_ebs_volumes(replacement[:instance_id], ebs_volume_ids)
125 | 
126 |     puts "Starting new instance, id: #{replacement[:instance_id]}"
127 |     opsworks.start_instance(instance_id: replacement[:instance_id])
128 |     wait_for_instance(replacement[:instance_id], "online")
129 | 
130 |     puts "Deleting old instance #{instance[:hostname]}, #{instance[:instance_id]}"
131 |     opsworks.delete_instance(instance_id: instance[:instance_id])
132 |   end
133 | end
134 | 
135 | def min_master_node_count(instance_count)
136 |   instance_count <= 2 ? 1 : (instance_count / 2 + 1)
137 | end
138 | 
139 | def environment
140 |   ENV["ENVIRONMENT"] || "my"
141 | end
142 | 
143 | def stack_name
144 |   "#{environment}-search"
145 | end
146 | 
147 | def get_required(name)
148 |   ENV[name] || raise("You must provide the environment variable #{name}")
149 | end
150 | 
151 | def add_param_if_set(params, param_name, env_var)
152 |   if ENV[env_var]
153 |     puts "Setting CloudFormation param #{param_name.inspect} => #{ENV[env_var].inspect}"
154 |     params.merge!(param_name => ENV[env_var])
155 |   end
156 | end
157 | 
158 | def cfm
159 |   Aws::CloudFormation::Client.new
160 | end
161 | 
162 | def cf_stack
163 |   cfm.describe_stacks.stacks.detect{|stack| stack.stack_name == stack_name}
164 | end
165 | 
166 | desc "Provisions the ElasticSearch cluster"
167 | task :provision do
168 |   instance_count = (ENV["INSTANCE_COUNT"] || "2").to_i
169 |   data_volume_size = (ENV["DATA_VOLUME_SIZE"] || "100").to_i
170 |   template = File.read("opsworks-service.template")
171 | 
172 |   parameters = {
173 |     "SSLCertificateName" => get_required("SSL_CERTIFICATE_NAME"),
174 |     "Route53ZoneName" => get_required("ROUTE53_ZONE_NAME"),
175 |     "SearchDomainName" => get_required("SEARCH_DOMAIN_NAME"),
176 |     "Jdk7DownloadUrl" => ENV["JDK_7_DOWNLOAD_URL"] || "",
177 | 
178 |     "SshKeyName" => ENV["SSH_KEY_NAME"] || "elasticsearch",
179 |     "SearchUser" => ENV["SEARCH_USER"] || "elasticsearch",
180 |     "SearchPassword" => ENV["SEARCH_PASSWORD"] || "pass",
181 |     "InstanceDefaultOs" => ENV["INSTANCE_DEFAULT_OS"] || "Amazon Linux 2015.03",
182 |     "DataVolumeSize" => data_volume_size.to_s,
183 |     "InstanceCount" => instance_count.to_s,
184 |     "MinMasterNodes" => min_master_node_count(instance_count).to_s,
185 |     "ClusterName" => "#{environment}-search-cluster",
186 |     "RecipeList" => DEFAULT_RECIPES
187 |   }
188 | 
189 |   if ENV["PAPERTRAIL_ENDPOINT"].to_s =~ /[^\:]+:[\d]+/
190 |     papertrail_host, papertrail_port = ENV["PAPERTRAIL_ENDPOINT"].split(":")
191 |     puts "PaperTrail configured: host=#{papertrail_host}, port=#{papertrail_port}"
192 | 
193 |     parameters.merge!(
194 |       "RecipeList" => [DEFAULT_RECIPES, "papertrail"].join(","),
195 |       "PaperTrailHost" => papertrail_host,
196 |       "PaperTrailPort" => papertrail_port
197 |     )
198 |   end
199 | 
200 |   add_param_if_set(parameters, "ElasticSearchVersion", "ELASTICSEARCH_VERSION")
201 |   add_param_if_set(parameters, "ElasticSearchAWSCloudPluginVersion", "ELASTICSEARCH_AWS_PLUGIN_VERSION")
202 | 
203 |   params = parameters.inject([]) do |array, (key, value)|
204 |     array << { parameter_key: key, parameter_value: value }
205 |     array
206 |   end
207 | 
208 |   unless cf_stack.nil?
209 |     begin
210 |       puts "Updating CloudFormation stack #{stack_name}"
211 |       cfm.update_stack({stack_name: stack_name, template_body: template, parameters: params})
212 |     rescue => e
213 |       raise unless e.message =~ /No updates are to be performed/
214 |       puts "Your CloudFormation stack is already up to date"
215 |     end
216 |   else
217 |     puts "Creating CloudFormation stack #{stack_name}"
218 |     cfm.create_stack({stack_name: stack_name, template_body: template, parameters: params})
219 |   end
220 | 
221 |   wait_for_cf_stack_op_to_finish
222 | 
223 |   unless ENV["SKIP_INSTANCE_UPDATE"] == "true"
224 |     stack_id = cf_query_output("StackId")
225 |     layer_id = cf_query_output("LayerId")
226 | 
227 |     update_instances(stack_id, layer_id, instance_count)
228 |   end
229 | end
230 | 
231 | desc "Destroys the ElasticSearch cluster"
232 | task :destroy do
233 |   unless cf_stack.nil?
234 |     puts "Destroying environment #{environment}"
235 | 
236 |     layer_id = cf_query_output("LayerId")
237 | 
238 |     get_all_instances(layer_id).each do |instance|
239 |       puts "Stopping instance #{instance[:hostname]}"
240 |       opsworks.stop_instance(instance_id: instance[:instance_id])
241 |       wait_for_instance(instance[:instance_id], "stopped")
242 | 
243 |       puts "Deleting instance #{instance[:hostname]}"
244 |       opsworks.delete_instance(instance_id: instance[:instance_id], delete_volumes: true)
245 |       wait_for_instance(instance[:instance_id], "nonexistent")
246 |     end
247 | 
248 |     puts "Deleting CloudFormation stack #{stack_name}"
249 |     cfm.delete_stack({stack_name: stack_name})
250 |   else
251 |     puts "Environment #{environment} does not exist"
252 |   end
253 | end
254 | 
--------------------------------------------------------------------------------
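For concreteness, the `min_master_node_count` helper above uses Ruby integer division: a 1- or 2-node cluster gets a minimum of 1 master-eligible node, 3 nodes get 2 (3 / 2 + 1), and 5 nodes get 3 (5 / 2 + 1), which matches the usual N/2 + 1 quorum guidance given in the template's MinMasterNodes description further down.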
/go:
--------------------------------------------------------------------------------
1 | #!/bin/bash -eu
2 | 
3 | DIR=$(cd `dirname ${BASH_SOURCE}` > /dev/null && pwd)
4 | 
5 | $DIR/init_rbenv
6 | 
7 | if ! (type rbenv > /dev/null 2>&1); then
8 |   # initialize rbenv for this shell session
9 |   export PATH="$HOME/.rbenv/bin:$PATH"
10 |   eval "$(rbenv init -)"
11 | fi
12 | 
13 | rake "$@"
--------------------------------------------------------------------------------
/init_rbenv:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | BUNDLER_VERSION="1.11.2"
4 | 
5 | export RBENV_VERSION=$(cat .ruby-version)
6 | export RBENV_ROOT=$HOME/.rbenv
7 | 
8 | unset GEM_PATH
9 | unset GEM_HOME
10 | 
11 | if [ ! -d $RBENV_ROOT ]; then
12 |   echo "Installing rbenv."
13 |   git clone https://github.com/sstephenson/rbenv.git $RBENV_ROOT
14 | fi
15 | 
16 | if [ ! -d $RBENV_ROOT/plugins/rbenv-update ]; then
17 |   echo "Installing rbenv-update plugin."
18 |   git clone https://github.com/rkh/rbenv-update.git $RBENV_ROOT/plugins/rbenv-update
19 | fi
20 | 
21 | if [ ! -d $RBENV_ROOT/plugins/ruby-build ]; then
22 |   echo "Installing ruby-build plugin."
23 |   git clone https://github.com/sstephenson/ruby-build.git $RBENV_ROOT/plugins/ruby-build
24 | fi
25 | 
26 | if [ ! -d $RBENV_ROOT/plugins/rbenv-gemset ]; then
27 |   echo "Installing rbenv-gemset plugin."
28 |   git clone https://github.com/jf/rbenv-gemset.git $RBENV_ROOT/plugins/rbenv-gemset
29 | fi
30 | 
31 | if [ ! -d $RBENV_ROOT/plugins/rbenv-vars ]; then
32 |   echo "Installing rbenv-vars plugin."
33 |   git clone https://github.com/sstephenson/rbenv-vars.git $RBENV_ROOT/plugins/rbenv-vars
34 | fi
35 | 
36 | if [ ! -d $RBENV_ROOT/versions/$RBENV_VERSION ]; then
37 |   echo "Installing ruby ${RBENV_VERSION}."
38 |   $RBENV_ROOT/bin/rbenv update
39 |   $RBENV_ROOT/bin/rbenv install $RBENV_VERSION
40 | 
41 |   if [[ $RBENV_VERSION =~ "jruby" ]]; then
42 |     # should be the same as pristine since this is a new install, but `gem pristine` hits file permissions errors the first time
43 |     $RBENV_ROOT/bin/rbenv exec gem install jruby-launcher
44 |   fi
45 | 
46 |   echo "done"
47 | elif [[ ("true" = "${RBENV_UPDATE:-false}") || ($(uname -a) =~ Darwin) ]]; then
48 |   echo "Updating rbenv..."
49 |   $RBENV_ROOT/bin/rbenv update
50 | else
51 |   echo "Environment up to date, ruby version: $RBENV_VERSION"
52 | fi
53 | 
54 | if ! (echo $PATH | grep -F "$RBENV_ROOT" > /dev/null 2>&1 && grep -F 'export PATH="$HOME/.rbenv' $HOME/.bash_profile 2>&1 > /dev/null); then
55 |   echo "Adding rbenv to PATH"
56 |   echo 'export PATH="$HOME/.rbenv/bin:$PATH"' >> $HOME/.bash_profile
57 |   export PATH="$RBENV_ROOT/bin:$PATH"
58 | fi
59 | 
60 | if ! (type rbenv > /dev/null 2>&1 && grep -F 'eval "$(rbenv init -)"' $HOME/.bash_profile 2>&1 > /dev/null); then
61 |   echo "Initializing rbenv in your .bash_profile"
62 |   echo 'eval "$(rbenv init -)"' >> $HOME/.bash_profile
63 |   eval "$(rbenv init -)"
64 | fi
65 | 
66 | if ! ($RBENV_ROOT/shims/bundle --version 2> /dev/null | grep -F "$BUNDLER_VERSION" > /dev/null 2>&1); then
67 |   echo "Installing bundler $BUNDLER_VERSION"
68 |   cmd="$RBENV_ROOT/bin/rbenv exec gem install --no-ri --no-rdoc bundler -v $BUNDLER_VERSION"
69 |   echo "executing: $cmd"
70 |   $cmd
71 |   echo "done, result: $?"
72 | fi
73 | unset BUNDLER_VERSION
74 | 
75 | $RBENV_ROOT/bin/rbenv exec ruby -S bundle install
76 | $RBENV_ROOT/bin/rbenv rehash
--------------------------------------------------------------------------------
/opsworks-service.template:
--------------------------------------------------------------------------------
1 | {
2 |   "AWSTemplateFormatVersion": "2010-09-09",
3 |   "Parameters": {
4 |     "CookbookRepo": {
5 |       "Description": "GitURL",
6 |       "Type": "String",
7 |       "Default": "https://github.com/ThoughtWorksStudios/opsworks-elasticsearch-cookbook"
8 |     },
9 | 
10 |     "CookbookRepoRevision": {
11 |       "Description": "Git Revision/Tag",
12 |       "Type": "String",
13 |       "Default": "0.0.4"
14 |     },
15 | 
16 |     "ElasticSearchVersion": {
17 |       "Description": "The version of ElasticSearch to install.",
18 |       "Type": "String",
19 |       "Default": "1.3.5"
20 |     },
21 | 
22 |     "ElasticSearchAWSCloudPluginVersion": {
23 |       "Description": "The version of the ElasticSearch AWS Cloud Plugin to install. Note that this version MUST correspond to the targeted version of ElasticSearch. See https://github.com/elasticsearch/elasticsearch-cloud-aws for the version compatibility table.",
24 |       "Type": "String",
25 |       "Default": "2.3.0"
26 |     },
27 | 
28 |     "RecipeList": {
29 |       "Description": "The list of cookbooks to include when setting up the cluster.",
30 |       "Type": "CommaDelimitedList"
31 |     },
32 | 
33 |     "SSLCertificateName": {
34 |       "Description": "The SSL certificate.",
35 |       "Type": "String"
36 |     },
37 | 
38 |     "InstanceCount": {
39 |       "Description": "Number of nodes to spin up in the cluster. This also configures the `expected_nodes` setting in ElasticSearch, which serves as a hint when the cluster considers shard reallocation.",
40 |       "Type": "String"
41 |     },
42 | 
43 |     "InstanceDefaultOs": {
44 |       "Description": "The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance.",
45 |       "Type": "String"
46 |     },
47 | 
48 |     "DataVolumeSize": {
49 |       "Description": "Size (GB) of the mounted EBS data volume where ElasticSearch data will be stored.",
50 |       "Type": "Number",
51 |       "Default": 100
52 |     },
53 | 
54 |     "MinMasterNodes": {
55 |       "Description": "Number of master-eligible nodes that must be visible to a given node before it accepts requests. When this criterion is not satisfied, a node will assume it has split off from the cluster. This setting helps avoid a catastrophic split-brain scenario in the cluster. This is typically set to [N/2 + 1] nodes.",
56 |       "Type": "String"
57 |     },
58 | 
59 |     "SearchUser": {
60 |       "Description": "Username to access the ElasticSearch cluster.",
61 |       "Type": "String"
62 |     },
63 | 
64 |     "SearchPassword": {
65 |       "Description": "Password to access the ElasticSearch cluster.",
66 |       "Type": "String"
67 |     },
68 | 
69 |     "ClusterName": {
70 |       "Description": "The name of the ElasticSearch cluster.",
71 |       "Type": "String"
72 |     },
73 | 
74 |     "Route53ZoneName": {
75 |       "Description": "Route53 zone under which to set up the DNS record.",
76 |       "Type": "String"
77 |     },
78 | 
79 |     "Jdk7DownloadUrl": {
80 |       "Description": "Download URL for JDK 7.",
81 |       "Type": "String"
82 |     },
83 | 
84 |     "SearchDomainName": {
85 |       "Description": "Domain name to register for the cluster under Route53.",
86 |       "Type": "String"
87 |     },
88 | 
89 |     "SshKeyName": {
90 |       "Description": "SSH key name for EC2 instances.",
91 |       "Type": "String"
92 |     },
93 | 
94 |     "PaperTrailHost": {
95 |       "Description": "The PaperTrail endpoint hostname. Only required if you add the papertrail cookbook.",
96 |       "Type": "String",
97 |       "Default": "logs.papertrailapp.com"
98 |     },
99 | 
100 |     "PaperTrailPort": {
101 |       "Description": "The PaperTrail endpoint port. Only required if you add the papertrail cookbook.",
102 |       "Type": "Number",
103 |       "Default": 0,
104 |       "MinValue" : "0",
105 |       "MaxValue" : "65535"
106 |     }
107 | 
108 |   },
109 | 
110 |   "Outputs": {
111 |     "StackId": {
112 |       "Description": "OpsWorks stack id",
113 |       "Value": { "Ref": "SearchStack"}
114 |     },
115 | 
116 |     "LayerId": {
117 |       "Description": "OpsWorks search layer id",
118 |       "Value": { "Ref": "SearchLayer"}
119 |     }
120 | 
121 |   },
122 | 
123 |   "Resources": {
124 | 
125 |     "LoadBalancer" : {
126 |       "Type" : "AWS::ElasticLoadBalancing::LoadBalancer",
127 |       "Properties" : {
128 |         "AvailabilityZones" : { "Fn::GetAZs" : "" },
129 |         "Listeners" : [
130 |           {
131 |             "LoadBalancerPort" : "443",
132 |             "InstancePort" : "80",
133 |             "Protocol" : "HTTPS",
134 |             "SSLCertificateId": {
135 |               "Fn::Join": ["", ["arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":server-certificate/", { "Ref" : "SSLCertificateName" }]]
136 |             }
137 |           }
138 |         ],
139 |         "HealthCheck" : {
140 |           "Target" : "TCP:80",
141 |           "HealthyThreshold" : "2",
142 |           "UnhealthyThreshold" : "8",
143 |           "Interval" : "30",
144 |           "Timeout" : "20"
145 |         }
146 |       }
147 |     },
148 | 
149 | 
150 |     "DNSRecord": {
151 |       "Type" : "AWS::Route53::RecordSet",
152 |       "Properties" : {
153 |         "HostedZoneName" : {"Ref": "Route53ZoneName"},
154 |         "Type" : "CNAME",
155 |         "Name" : { "Ref": "SearchDomainName"},
156 |         "ResourceRecords" : [ {"Fn::GetAtt":["LoadBalancer","DNSName"]}],
157 |         "TTL" : "30"
158 |       }
159 |     },
160 | 
161 |     "ELBAttachment" : {
162 |       "Type" : "AWS::OpsWorks::ElasticLoadBalancerAttachment",
163 |       "Properties" : {
164 |         "ElasticLoadBalancerName" : { "Ref" : "LoadBalancer" },
165 |         "LayerId" : { "Ref" : "SearchLayer" }
166 |       }
167 |     },
168 | 
169 | 
170 |     "SearchStack": {
171 |       "Type": "AWS::OpsWorks::Stack",
172 |       "Properties": {
173 |         "Name": {
174 |           "Ref": "AWS::StackName"
175 |         },
176 |         "CustomJson": {
177 |           "java": {
178 |             "jdk_version": "7",
179 |             "oracle": {
180 |               "accept_oracle_download_terms": "true"
181 |             },
182 |             "jdk" : {
183 |               "7": {
184 |                 "x86_64": {
185 |                   "url" : { "Ref" : "Jdk7DownloadUrl" }
186 |                 }
187 |               }
188 |             },
189 |             "accept_license_agreement": "true",
190 |             "install_flavor": "oracle"
191 |           },
192 |           "papertrail": {
193 |             "remote_host": { "Ref": "PaperTrailHost" },
194 |             "remote_port": { "Ref": "PaperTrailPort" },
"watch_files": [ 196 | { "filename": { "Fn::Join": ["", ["/usr/local/var/log/elasticsearch/", { "Ref": "ClusterName" }, ".log"]] }, "tag": "search" }, 197 | { "filename": { "Fn::Join": ["", ["/usr/local/var/log/elasticsearch/", { "Ref": "ClusterName" }, "_index_indexing_slowlog.log"]] }, "tag": "indexing-slowlog" }, 198 | { "filename": { "Fn::Join": ["", ["/usr/local/var/log/elasticsearch/", { "Ref": "ClusterName" }, "_index_search_slowlog.log"]] }, "tag": "search-slowlog" } 199 | ] 200 | }, 201 | "elasticsearch": { 202 | "version": { "Ref": "ElasticSearchVersion" }, 203 | "plugins" : { 204 | "elasticsearch/elasticsearch-cloud-aws": { 205 | "version": { "Ref": "ElasticSearchAWSCloudPluginVersion" } 206 | } 207 | }, 208 | "nginx": { 209 | "users": [{ 210 | "username": { "Ref": "SearchUser" }, 211 | "password": { "Ref": "SearchPassword" } 212 | }], 213 | "allow_cluster_api": "true", 214 | "port": 80 215 | }, 216 | "cluster": { 217 | "name": { "Ref" : "ClusterName" } 218 | }, 219 | "gateway": { 220 | "expected_nodes": { "Ref": "InstanceCount" } 221 | }, 222 | "discovery": { 223 | "type": "ec2", 224 | "zen": { 225 | "minimum_master_nodes": { "Ref": "MinMasterNodes" }, 226 | "ping": { 227 | "multicast": { 228 | "enabled": false 229 | } 230 | } 231 | }, 232 | "ec2": { 233 | "tag": { 234 | "opsworks:stack": { 235 | "Ref": "AWS::StackName" 236 | } 237 | } 238 | } 239 | }, 240 | "path": { 241 | "data": "/mnt/elasticsearch-data" 242 | }, 243 | "cloud": { 244 | "aws": { 245 | "region": { "Ref": "AWS::Region" } 246 | } 247 | }, 248 | "custom_config": { 249 | "cluster.routing.allocation.awareness.attributes": "rack_id", 250 | "index": "\n analysis:\n analyzer:\n default_index:\n filter:\n - standard\n - lowercase\n - snowball\n tokenizer: standard\n default_search:\n tokenizer: standard\n filter:\n - standard\n - lowercase\n - snowball\n" 251 | } 252 | } 253 | }, 254 | "ServiceRoleArn": { 255 | "Fn::Join": ["", ["arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":role/aws-opsworks-service-role"]] 256 | }, 257 | "DefaultInstanceProfileArn": { 258 | "Fn::Join": ["", ["arn:aws:iam::", { "Ref": "AWS::AccountId" }, ":instance-profile/aws-opsworks-ec2-role"]] 259 | }, 260 | "ConfigurationManager": { 261 | "Name": "Chef", 262 | "Version": "11.10" 263 | }, 264 | "ChefConfiguration": { 265 | "BerkshelfVersion": "3.2.0", 266 | "ManageBerkshelf": true 267 | }, 268 | "DefaultOs": { "Ref": "InstanceDefaultOs" }, 269 | "DefaultRootDeviceType": "ebs", 270 | "DefaultSshKeyName": { "Ref": "SshKeyName" }, 271 | "UseCustomCookbooks": true, 272 | "UseOpsworksSecurityGroups": false, 273 | "CustomCookbooksSource": { 274 | "Type": "git", 275 | "Url": { 276 | "Ref": "CookbookRepo" 277 | }, 278 | "Revision": { "Ref": "CookbookRepoRevision" } 279 | } 280 | } 281 | }, 282 | 283 | "SearchSecurityGroup": { 284 | "Type": "AWS::EC2::SecurityGroup", 285 | "Properties": { 286 | "GroupDescription": "so that ES cluster can find other nodes", 287 | "SecurityGroupIngress": [ 288 | { 289 | "IpProtocol": "tcp", 290 | "FromPort": "80", 291 | "ToPort": "80", 292 | "SourceSecurityGroupOwnerId": { 293 | "Fn::GetAtt": [ 294 | "LoadBalancer", 295 | "SourceSecurityGroup.OwnerAlias" 296 | ] 297 | }, 298 | "SourceSecurityGroupName": { 299 | "Fn::GetAtt": [ 300 | "LoadBalancer", 301 | "SourceSecurityGroup.GroupName" 302 | ] 303 | } 304 | } 305 | ] 306 | } 307 | }, 308 | 309 | "InterConnectingIngress": { 310 | "Type": "AWS::EC2::SecurityGroupIngress", 311 | "Properties": { 312 | "GroupName": { "Ref": "SearchSecurityGroup" }, 313 | "IpProtocol": "tcp", 314 
| "FromPort": "0", 315 | "ToPort": "65535", 316 | "SourceSecurityGroupName": { "Ref": "SearchSecurityGroup" } 317 | } 318 | }, 319 | 320 | "SearchLayer": { 321 | "Type": "AWS::OpsWorks::Layer", 322 | "Properties": { 323 | "StackId": { 324 | "Ref": "SearchStack" 325 | }, 326 | "Name": "Search", 327 | "Type": "custom", 328 | "Shortname": "search", 329 | "CustomRecipes": { 330 | "Setup": { "Ref": "RecipeList" } 331 | }, 332 | "EnableAutoHealing": false, 333 | "AutoAssignElasticIps": false, 334 | "AutoAssignPublicIps": true, 335 | "VolumeConfigurations": [ 336 | { 337 | "MountPoint": "/mnt/elasticsearch-data", 338 | "NumberOfDisks": 1, 339 | "VolumeType": "gp2", 340 | "Size": { "Ref": "DataVolumeSize" } 341 | } 342 | ], 343 | "CustomSecurityGroupIds": [ 344 | { 345 | "Fn::GetAtt": [ 346 | "SearchSecurityGroup", 347 | "GroupId" 348 | ] 349 | } 350 | ] 351 | } 352 | } 353 | 354 | } 355 | } 356 | -------------------------------------------------------------------------------- /rbenv-vars.example: -------------------------------------------------------------------------------- 1 | # copy this file as .rbenv-vars and fill out this template 2 | 3 | AWS_ACCESS_KEY_ID= 4 | AWS_SECRET_ACCESS_KEY= 5 | AWS_REGION= 6 | 7 | # This variable will namespace your AWS resources. 8 | ENVIRONMENT= 9 | 10 | # The ElasticSearch + AWS cloud plugin versions. 11 | # Note that specific versions of the AWS cloud plugin MUST match corresponding versions of ElasticSearch. 12 | # See https://github.com/elasticsearch/elasticsearch-cloud-aws for the version compatibility table. 13 | # If these are not specified, versions default to ES=1.3.5, plugin=2.3.0 14 | ELASTICSEARCH_VERSION=1.4.0 15 | ELASTICSEARCH_AWS_PLUGIN_VERSION=2.4.0 16 | 17 | # The following 2 variables setup elasticsearch server basic auth 18 | SEARCH_USER=elasticsearch 19 | SEARCH_PASSWORD=password 20 | 21 | # Number of nodes in your cluster, defaults to 2 if you do not specify 22 | INSTANCE_COUNT=2 23 | INSTANCE_TYPE=c3.large 24 | 25 | # The stack's default operating system, which is installed on every instance unless you specify a different operating system when you create the instance, defaults to "Amazon Linux 2014.09" 26 | INSTANCE_DEFAULT_OS=Amazon Linux 2014.09 27 | 28 | # Size(GB) of mounted EBS data volume where ElasticSearch data will be stored. 29 | DATA_VOLUME_SIZE=100 30 | 31 | # To access your elastic search server through https instead of http 32 | SSL_CERTIFICATE_NAME= 33 | 34 | # Public download url for jdk 7 because oracle website download mechanism keeps changing 35 | JDK_7_DOWNLOAD_URL= 36 | 37 | # You need to create an SSH key pair in your AWS region 38 | SSH_KEY_NAME=elasticsearch 39 | 40 | # The CloudFormation stack will need the following 2 variables to give the elasticsearch cluster public access 41 | ROUTE53_ZONE_NAME=. 42 | 43 | # example: search.es.com 44 | SEARCH_DOMAIN_NAME= 45 | 46 | AWS_OPSWORKS_API_REGION=us-east-1 47 | 48 | # If you would like to log to PaperTrail, uncomment this setting and configure the endpoint here (hostame:port). 49 | # PAPERTRAIL_ENDPOINT=logs.papertrailapp.com: --------------------------------------------------------------------------------