├── elasticsearch ├── tmp │ └── .gitignore ├── recipes │ ├── curl.rb │ ├── aws.rb │ ├── plugins.rb │ ├── monit.rb │ ├── test.rb │ ├── ebs.rb │ ├── nginx.rb │ ├── proxy.rb │ ├── data.rb │ └── default.rb ├── Gemfile ├── .gitignore ├── templates │ └── default │ │ ├── nginx.monitrc.conf.erb │ │ ├── nginx.conf.erb │ │ ├── logging.yml.erb │ │ ├── elasticsearch-env.sh.erb │ │ ├── elasticsearch.conf.erb │ │ ├── elasticsearch_proxy.conf.erb │ │ ├── elasticsearch.init.erb │ │ └── elasticsearch.yml.erb ├── attributes │ ├── plugins.rb │ ├── nginx.rb │ ├── logging.rb │ ├── proxy.rb │ ├── aws.rb │ ├── data.rb │ └── default.rb ├── tests │ ├── service_test.rb │ ├── monit_test.rb │ ├── plugins_test.rb │ ├── java_test.rb │ ├── data_test.rb │ ├── aws_test.rb │ ├── proxy_test.rb │ ├── cluster_test.rb │ └── installation_test.rb ├── Berksfile ├── Rakefile ├── chefignore ├── metadata.rb ├── libraries │ ├── print_value.rb │ ├── install_plugin.rb │ └── create_ebs.rb ├── Vagrantfile ├── LICENSE.txt └── README.markdown ├── ark ├── files │ └── default │ │ ├── tests │ │ └── minitest │ │ │ ├── default_test.rb │ │ │ ├── support │ │ │ └── helpers.rb │ │ │ └── test_test.rb │ │ ├── foo.tbz │ │ ├── foo.tgz │ │ ├── foo.zip │ │ └── foo.tar.gz ├── test │ └── support │ │ └── Gemfile ├── Vagrantfile ├── .travis.yml ├── .gitignore ├── attributes │ └── default.rb ├── Berksfile ├── Toftfile ├── metadata.rb ├── Gemfile ├── recipes │ ├── default.rb │ └── test.rb ├── .kitchen.yml ├── CHANGELOG.md ├── Rakefile ├── resources │ └── default.rb ├── README.md └── providers │ └── default.rb ├── redis ├── recipes │ ├── default.rb │ ├── init.rb │ └── source.rb ├── templates │ └── default │ │ ├── redis.upstart.erb │ │ └── redis.sysv.erb ├── metadata.rb └── attributes │ └── redis.rb ├── nginx ├── README.md ├── templates │ └── default │ │ ├── nginx-error.rsyslog.conf.erb │ │ ├── default-host.erb │ │ ├── default-host-page.html.erb │ │ ├── nginx.init.erb │ │ └── nginx.conf.erb ├── metadata.rb ├── attributes │ └── default.rb └── recipes │ └── default.rb ├── users ├── README.rdoc ├── templates │ └── default │ │ └── authorized_keys.erb ├── metadata.rb └── recipes │ └── default.rb ├── newrelic ├── attributes │ └── default.rb ├── recipes │ └── default.rb └── templates │ └── default │ └── nrsysmond.cfg.erb ├── rsyslog ├── attributes │ └── default.rb ├── templates │ └── default │ │ ├── chef.conf.erb │ │ └── rsyslog.conf.erb └── recipes │ └── default.rb ├── monit ├── templates │ └── default │ │ ├── monit.rsyslog.conf.erb │ │ └── monitrc.erb ├── metadata.rb └── recipes │ └── default.rb ├── README.md ├── rubies ├── metadata.rb ├── attributes │ └── default.rb └── recipes │ └── default.rb └── sudo ├── templates └── default │ └── sudoers.erb ├── attributes └── default.rb ├── recipes └── default.rb ├── metadata.rb └── README.md /elasticsearch/tmp/.gitignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ark/files/default/tests/minitest/default_test.rb: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ark/files/default/tests/minitest/support/helpers.rb: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /redis/recipes/default.rb: -------------------------------------------------------------------------------- 1 | 
include_recipe "redis::source" 2 | -------------------------------------------------------------------------------- /elasticsearch/recipes/curl.rb: -------------------------------------------------------------------------------- 1 | package 'curl' do 2 | action :install 3 | end 4 | -------------------------------------------------------------------------------- /nginx/README.md: -------------------------------------------------------------------------------- 1 | Installs Nginx from source with the Phusion Passenger module 2 | -------------------------------------------------------------------------------- /users/README.rdoc: -------------------------------------------------------------------------------- 1 | = DESCRIPTION 2 | 3 | = REQUIREMENTS 4 | 5 | = ATTRIBUTES 6 | 7 | = USAGE 8 | -------------------------------------------------------------------------------- /ark/files/default/foo.tbz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machines/cookbooks/master/ark/files/default/foo.tbz -------------------------------------------------------------------------------- /ark/files/default/foo.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machines/cookbooks/master/ark/files/default/foo.tgz -------------------------------------------------------------------------------- /ark/files/default/foo.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machines/cookbooks/master/ark/files/default/foo.zip -------------------------------------------------------------------------------- /ark/files/default/foo.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/machines/cookbooks/master/ark/files/default/foo.tar.gz -------------------------------------------------------------------------------- /ark/test/support/Gemfile: -------------------------------------------------------------------------------- 1 | source "https://rubygems.org" 2 | 3 | gem 'rake' 4 | gem 'foodcritic', :platforms => :ruby_19 5 | -------------------------------------------------------------------------------- /newrelic/attributes/default.rb: -------------------------------------------------------------------------------- 1 | default[:newrelic][:license_key] = "change me in newrelic/attributes/default.rb or in json" 2 | -------------------------------------------------------------------------------- /users/templates/default/authorized_keys.erb: -------------------------------------------------------------------------------- 1 | # Generated by Machines.io 2 | <% @ssh_keys.each do |user, key| %> 3 | <%= key %> 4 | <% end %> 5 | -------------------------------------------------------------------------------- /elasticsearch/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gem 'pry' 4 | gem 'chef' 5 | gem 'vagrant', '1.0.6' 6 | gem 'berkshelf', '1.2.1' 7 | -------------------------------------------------------------------------------- /rsyslog/attributes/default.rb: -------------------------------------------------------------------------------- 1 | default[:rsyslog][:papertrail][:hostname] = "logs.papertrailapp.com" 2 | default[:rsyslog][:papertrail][:port] = 10345 3 | -------------------------------------------------------------------------------- /ark/Vagrantfile: 
-------------------------------------------------------------------------------- 1 | require 'kitchen/vagrant' 2 | require 'berkshelf/vagrant' 3 | 4 | Vagrant::Config.run do |config| 5 | Kitchen::Vagrant.define_vms(config) 6 | end 7 | -------------------------------------------------------------------------------- /ark/.travis.yml: -------------------------------------------------------------------------------- 1 | language: ruby 2 | gemfile: 3 | - test/support/Gemfile 4 | rvm: 5 | - 1.9.3 6 | script: BUNDLE_GEMFILE=test/support/Gemfile bundle exec rake foodcritic -------------------------------------------------------------------------------- /rsyslog/templates/default/chef.conf.erb: -------------------------------------------------------------------------------- 1 | $ModLoad imfile 2 | $InputFileName /var/log/chef/solo.log 3 | $InputFileTag chef: 4 | $InputFileStateFile stat-chef 5 | $InputRunFileMonitor 6 | -------------------------------------------------------------------------------- /monit/templates/default/monit.rsyslog.conf.erb: -------------------------------------------------------------------------------- 1 | $ModLoad imfile 2 | $InputFileName /var/log/monit.log 3 | $InputFileTag monit: 4 | $InputFileStateFile stat-monit 5 | $InputRunFileMonitor 6 | -------------------------------------------------------------------------------- /ark/.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant 2 | Berksfile.lock 3 | Gemfile.lock 4 | *~ 5 | *# 6 | .#* 7 | \#*# 8 | .*.sw[a-z] 9 | *.un~ 10 | .bundle 11 | .cache 12 | .kitchen 13 | bin 14 | .kitchen.local.yml 15 | -------------------------------------------------------------------------------- /nginx/templates/default/nginx-error.rsyslog.conf.erb: -------------------------------------------------------------------------------- 1 | $ModLoad imfile 2 | $InputFileName <%= node.nginx.install_path %>/logs/error.log 3 | $InputFileTag nginx-error: 4 | $InputFileStateFile stat-nginx-error 5 | $InputRunFileMonitor 6 | -------------------------------------------------------------------------------- /ark/attributes/default.rb: -------------------------------------------------------------------------------- 1 | 2 | default['ark']['apache_mirror'] = 'http://apache.mirrors.tds.net' 3 | default['ark']['prefix_root'] = "/usr/local" 4 | default['ark']['prefix_bin'] = "/usr/local/bin" 5 | default['ark']['prefix_home'] = "/usr/local" 6 | -------------------------------------------------------------------------------- /elasticsearch/.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .vagrant 3 | .vagrant-1 4 | .vagrant-2 5 | Berksfile.lock 6 | Gemfile.lock 7 | tmp/* 8 | !tmp/.gitignore 9 | !tmp/cookbooks/.gitignore 10 | !tmp/data_bags/.gitignore 11 | !tmp/data_bags/elasticsearch/settings.json 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Machines.io Cookbooks 2 | 3 | All applications running on the Machines platform have these cookbooks available by default and many are used during the bootstrap and application deployment process. 4 | 5 | ## Copyright 6 | 7 | © 2013 JK Tech, Inc. 
8 | -------------------------------------------------------------------------------- /ark/Berksfile: -------------------------------------------------------------------------------- 1 | site :opscode 2 | 3 | metadata 4 | 5 | cookbook "java" 6 | 7 | group :integration do 8 | cookbook "apt" 9 | cookbook "yum" 10 | 11 | # Future, when/if minitest support for this cookbook is added 12 | cookbook "minitest-handler" 13 | end 14 | -------------------------------------------------------------------------------- /elasticsearch/recipes/aws.rb: -------------------------------------------------------------------------------- 1 | node.default[:elasticsearch][:plugin][:mandatory] = Array(node[:elasticsearch][:plugin][:mandatory] | ['cloud-aws']) 2 | 3 | install_plugin "elasticsearch/elasticsearch-cloud-aws/#{node.elasticsearch['plugins']['elasticsearch-cloud-aws']['version']}" 4 | -------------------------------------------------------------------------------- /ark/Toftfile: -------------------------------------------------------------------------------- 1 | require 'rubygems' 2 | require 'toft' 3 | 4 | include Toft 5 | 6 | n1 = create_node "n1", {:ip => '192.168.20.2'} 7 | 8 | n1.start 9 | 10 | n1.run_chef [ "recipe[chef_handler]", "recipe[minitest-handler::recipes]", "recipe[ark::test]" ] 11 | 12 | n1.stop 13 | 14 | n1.destroy 15 | 16 | -------------------------------------------------------------------------------- /monit/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "Machines.io" 2 | maintainer_email "support@machines.io" 3 | license "Apache 2.0" 4 | description "Installs and configures monit" 5 | version "1.0.0" 6 | 7 | recipe "monit", "Installs monit package with configuration" 8 | 9 | supports "ubuntu" 10 | -------------------------------------------------------------------------------- /rubies/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "Machines.io" 2 | maintainer_email "support@machines.io" 3 | license "MIT" 4 | description "Installs and configures multiple rubies" 5 | version "1.0.0" 6 | 7 | recipe "rubies::default", "Downloads and installs a bunch of rubies" 8 | 9 | supports "ubuntu" 10 | -------------------------------------------------------------------------------- /nginx/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "Machines.io" 2 | maintainer_email "support@machines.io" 3 | license "Apache 2.0" 4 | description "Installs and configures nginx" 5 | version "1.0.15" 6 | 7 | recipe "nginx", "Installs Nginx from source with the Phusion Passenger module" 8 | 9 | supports "ubuntu" 10 | -------------------------------------------------------------------------------- /redis/templates/default/redis.upstart.erb: -------------------------------------------------------------------------------- 1 | # THIS FILE HAS BEEN GENERATED BY CHEF 2 | # ANY MANUAL MODIFICATIONS WILL BE OVERWRITTEN 3 | 4 | description "redis" 5 | 6 | start on runlevel [2345] 7 | stop on runlevel [!2345] 8 | 9 | respawn 10 | respawn limit 10 5 11 | 12 | exec sudo -u redis /usr/local/bin/redis-server <%= node.redis.config %> 13 | -------------------------------------------------------------------------------- /users/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "Machines.io" 2 | maintainer_email "support@machines.io" 3 | license "Apache 2.0" 4 | description "Add deploy users" 5 |
long_description IO.read(File.join(File.dirname(__FILE__), 'README.rdoc')) 6 | version "0.0.1" 7 | 8 | %w{ ubuntu debian }.each do |os| 9 | supports os 10 | end 11 | -------------------------------------------------------------------------------- /redis/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "Gerhard Lazu" 2 | maintainer_email "gerhard@lazu.co.uk" 3 | license "Apache 2.0" 4 | description "Installs and configures Redis 2.4.9" 5 | version "2.4.9" 6 | 7 | recipe "redis::source", "Installs redis from source" 8 | 9 | %w{ ubuntu debian }.each do |os| 10 | supports os 11 | end 12 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/nginx.monitrc.conf.erb: -------------------------------------------------------------------------------- 1 | check process nginx with pidfile /var/run/nginx.pid 2 | start program = "/etc/init.d/nginx start" with timeout 60 seconds 3 | stop program = "/etc/init.d/nginx stop" 4 | if cpu > 90% for 15 cycles then alert 5 | if totalmem > 90% for 15 cycles then alert 6 | if loadavg(15min) greater than 10 for 50 cycles then alert 7 | group nginx 8 | -------------------------------------------------------------------------------- /redis/recipes/init.rb: -------------------------------------------------------------------------------- 1 | case node[:redis][:init] 2 | when "init" 3 | template "/etc/init.d/redis" do 4 | cookbook "redis" 5 | source "redis.sysv.erb" 6 | mode 0755 7 | backup false 8 | end 9 | when "upstart" 10 | template "/etc/init/redis.conf" do 11 | cookbook "redis" 12 | source "redis.upstart.erb" 13 | mode 0644 14 | backup false 15 | end 16 | end 17 | -------------------------------------------------------------------------------- /elasticsearch/attributes/plugins.rb: -------------------------------------------------------------------------------- 1 | Chef::Log.debug "Attempting to load plugin list from the databag..." 
2 | 3 | plugins = Chef::DataBagItem.load('elasticsearch', 'plugins')[node.chef_environment].to_hash['plugins'] rescue {} 4 | 5 | Chef::Log.debug "Plugins list: #{plugins.keys.inspect}" 6 | 7 | node.default.elasticsearch[:plugins] ||= plugins 8 | node.default.elasticsearch[:plugin][:mandatory] = [] 9 | -------------------------------------------------------------------------------- /elasticsearch/tests/service_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::default' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | it "runs as a daemon" do 8 | service("elasticsearch").must_be_running 9 | end 10 | 11 | it "boots on startup" do 12 | service("elasticsearch").must_be_enabled 13 | end 14 | 15 | end 16 | -------------------------------------------------------------------------------- /nginx/templates/default/default-host.erb: -------------------------------------------------------------------------------- 1 | # Nginx Default VirtualHost 2 | # 3 | # Generated by Machines.io with Chef 4 | # Local modifications will be overwritten 5 | 6 | server { 7 | listen 80; 8 | server_name _; 9 | 10 | root /mnt/default-host/public; 11 | access_log <%= node.nginx.log_dir %>/default-access.log; 12 | error_log <%= node.nginx.log_dir %>/default-error.log; 13 | rewrite_log on; 14 | } 15 | -------------------------------------------------------------------------------- /elasticsearch/recipes/plugins.rb: -------------------------------------------------------------------------------- 1 | node[:elasticsearch][:plugins].each do | name, config | 2 | next if name == 'elasticsearch-cloud-aws' && !node.recipe?('aws') 3 | install_plugin name, config 4 | end 5 | 6 | directory "#{node.elasticsearch[:dir]}/elasticsearch-#{node.elasticsearch[:version]}/plugins/" do 7 | owner node.elasticsearch[:user] 8 | group node.elasticsearch[:user] 9 | mode 0755 10 | recursive true 11 | end 12 | -------------------------------------------------------------------------------- /elasticsearch/tests/monit_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::monit' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | it "saves the configuration file in the Monit directory" do 8 | if node.recipes.include?("elasticsearch::monit") 9 | file("/etc/monit/conf.d/elasticsearch.conf").must_exist 10 | end 11 | end 12 | 13 | end 14 | -------------------------------------------------------------------------------- /ark/metadata.rb: -------------------------------------------------------------------------------- 1 | name "ark" 2 | maintainer "Bryan W. 
Berry" 3 | maintainer_email "bryan.berry@gmail.com" 4 | license "Apache 2.0" 5 | description "Installs/Configures ark" 6 | long_description IO.read(File.join(File.dirname(__FILE__), 'README.md')) 7 | version "0.1.0" 8 | 9 | %w{ debian ubuntu centos redhat fedora }.each do |os| 10 | supports os 11 | end 12 | 13 | recipe "ark::default", "Installs and configures ark" 14 | -------------------------------------------------------------------------------- /elasticsearch/tests/plugins_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::plugins' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | it "creates the file in plugins" do 8 | if node.recipes.include?("elasticsearch::plugins") 9 | file("/usr/local/elasticsearch/plugins/paramedic/_site/index.html").must_exist.with(:owner, 'elasticsearch') 10 | end 11 | end if Chef::VERSION > '10.14' 12 | 13 | end 14 | -------------------------------------------------------------------------------- /elasticsearch/Berksfile: -------------------------------------------------------------------------------- 1 | metadata 2 | 3 | cookbook 'apt', git: 'git://github.com/opscode-cookbooks/apt.git' 4 | cookbook 'yum', git: 'git://github.com/opscode-cookbooks/yum.git' 5 | 6 | cookbook 'ark', git: 'git://github.com/bryanwb/chef-ark.git' 7 | cookbook 'java', git: 'git://github.com/opscode-cookbooks/java.git' 8 | 9 | cookbook 'monit', git: 'git://github.com/apsoto/monit.git' 10 | 11 | cookbook 'vim' 12 | cookbook 'minitest-handler', git: 'git://github.com/btm/minitest-handler-cookbook.git' 13 | -------------------------------------------------------------------------------- /rsyslog/recipes/default.rb: -------------------------------------------------------------------------------- 1 | template "/etc/rsyslog.d/chef.conf" do 2 | source "chef.conf.erb" 3 | owner "root" 4 | group "root" 5 | mode "0644" 6 | notifies :restart, "service[rsyslog]", :delayed 7 | end 8 | 9 | template "/etc/rsyslog.conf" do 10 | source "rsyslog.conf.erb" 11 | owner "root" 12 | group "root" 13 | mode "0644" 14 | notifies :restart, "service[rsyslog]", :immediately 15 | end 16 | 17 | service "rsyslog" do 18 | supports restart: true 19 | action :enable 20 | end 21 | -------------------------------------------------------------------------------- /monit/recipes/default.rb: -------------------------------------------------------------------------------- 1 | package "monit" 2 | 3 | template "/etc/rsyslog.d/monit.conf" do 4 | source "monit.rsyslog.conf.erb" 5 | owner "root" 6 | group "root" 7 | mode "0644" 8 | notifies :restart, "service[rsyslog]", :delayed 9 | end 10 | 11 | template "monitrc" do 12 | path "/etc/monit/monitrc" 13 | source "monitrc.erb" 14 | owner "root" 15 | group "root" 16 | mode "0600" 17 | notifies :restart, "service[monit]", :immediately 18 | end 19 | 20 | service "monit" do 21 | supports :status => true, :restart => true 22 | action :enable 23 | end 24 | -------------------------------------------------------------------------------- /ark/Gemfile: -------------------------------------------------------------------------------- 1 | source 'https://rubygems.org' 2 | 3 | gem 'rake' 4 | gem 'foodcritic' 5 | gem 'berkshelf' 6 | gem 'thor-foodcritic' 7 | gem 'vagrant', '~> 1.0.6' 8 | 9 | group :integration do 10 | gem 'test-kitchen', :git => "git://github.com/opscode/test-kitchen.git", :branch => '1.0' 11 | gem 'kitchen-vagrant', :git =>
"git://github.com/opscode/kitchen-vagrant.git" 12 | 13 | # Change .kitchen.yml's driver_plugin to ec2 and populate 14 | # .kitchen.local.yml's driver_config with aws auth data 15 | gem 'kitchen-ec2', :git => "git://github.com/opscode/kitchen-ec2.git" 16 | end 17 | -------------------------------------------------------------------------------- /elasticsearch/tests/java_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::default' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | describe "Java installation" do 8 | 9 | it "installs the correct version and makes it default" do 10 | if node[:java] and node[:java][:jdk_version] == '7' 11 | java_version = `java -version 2>&1` 12 | assert_match /1\.7\.0/, java_version, "Java version #{node.java.jdk_version} does not match: #{java_version}" 13 | end 14 | end 15 | 16 | end 17 | 18 | end 19 | -------------------------------------------------------------------------------- /elasticsearch/Rakefile: -------------------------------------------------------------------------------- 1 | desc "Create a release tag and push everything to Github" 2 | task :release do 3 | unless system("git status --porcelain").to_s =~ /^\s*$/ 4 | puts "[!] Error, repository dirty, please commit or stash your changes.", "" 5 | exit(1) 6 | end 7 | if version = File.read('metadata.rb')[/^version\s*"(.*)"$/, 1] 8 | sh <<-COMMAND.gsub(/ /, ' ') 9 | git tag #{version} && \ 10 | git push origin master --verbose && \ 11 | git push origin --tags --verbose && \ 12 | knife cookbook site share "elasticsearch" "Databases" 13 | COMMAND 14 | end 15 | end 16 | -------------------------------------------------------------------------------- /elasticsearch/attributes/nginx.rb: -------------------------------------------------------------------------------- 1 | default[:nginx][:dir] = "/etc/nginx" 2 | default[:nginx][:log_dir] = "/var/log/nginx" 3 | default[:nginx][:binary] = "/usr/sbin/nginx" 4 | default[:nginx][:root] = "/var/www/nginx" 5 | 6 | default[:nginx][:user] = case node[:platform] 7 | when 'debian', 'ubuntu' then 'www-data' 8 | when 'redhat', 'centos', 'scientific', 'amazon', 'oracle', 'fedora' then 'nginx' 9 | else 'nginx' 10 | end 11 | 12 | default[:nginx][:keepalive] = "on" 13 | default[:nginx][:keepalive_timeout] = 65 14 | default[:nginx][:worker_processes] = node[:cpu][:total] rescue 1 15 | default[:nginx][:worker_connections] = 2048 16 | -------------------------------------------------------------------------------- /elasticsearch/recipes/monit.rb: -------------------------------------------------------------------------------- 1 | # Add Monit configuration file via the `monitrc` definition 2 | # 3 | begin 4 | monitrc "elasticsearch" do 5 | template_cookbook "elasticsearch" 6 | source "elasticsearch.conf.erb" 7 | end 8 | rescue Exception => e 9 | Chef::Log.error "The 'monit' recipe is not included in the node run_list or the 'monitrc' resource is not defined" 10 | raise e 11 | end 12 | 13 | # NOTE: On some systems, notably Amazon Linux, Monit installed from packages 14 | # has a different configuration file than expected by the Monit 15 | # cookbook.
In such case: 16 | # 17 | # sudo cp /etc/monit/monitrc /etc/monit.conf 18 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/nginx.conf.erb: -------------------------------------------------------------------------------- 1 | user <%= node[:nginx][:user] %>; 2 | worker_processes <%= node[:nginx][:worker_processes] %>; 3 | 4 | error_log <%= node[:nginx][:log_dir] %>/error.log; 5 | pid /var/run/nginx.pid; 6 | 7 | events { 8 | worker_connections <%= node[:nginx][:worker_connections] %>; 9 | } 10 | 11 | http { 12 | access_log <%= node[:nginx][:log_dir] %>/access.log; 13 | 14 | sendfile on; 15 | tcp_nopush on; 16 | tcp_nodelay on; 17 | 18 | gzip on; 19 | gzip_types application/javascript application/x-javascript text/css text/xml application/xml; 20 | 21 | include <%= node[:nginx][:dir] %>/conf.d/*.conf; 22 | } 23 | -------------------------------------------------------------------------------- /elasticsearch/tests/data_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::data' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | it "mounts the secondary disk" do 8 | mount("/usr/local/var/data/elasticsearch/disk1", :device => "/dev/sdb"). 9 | must_be_mounted \ 10 | if node.recipes.include?("elasticsearch::data") 11 | end 12 | 13 | it "correctly creates the data directory" do 14 | directory("/usr/local/var/data/elasticsearch/disk1"). 15 | must_exist. 16 | with(:owner, 'elasticsearch') \ 17 | if node.recipes.include?("elasticsearch::data") 18 | end 19 | 20 | end 21 | -------------------------------------------------------------------------------- /sudo/templates/default/sudoers.erb: -------------------------------------------------------------------------------- 1 | # 2 | # /etc/sudoers 3 | # 4 | # Generated by Chef for <%= node[:fqdn] %> 5 | # 6 | 7 | Defaults !lecture,tty_tickets,!fqdn,env_reset,!secure_path 8 | 9 | # User privilege specification 10 | root ALL=(ALL) ALL 11 | 12 | # Members of the sysadmin group may gain root privileges 13 | %sysadmin ALL=(ALL) ALL 14 | 15 | <% @sudoers_groups.each do |group| -%> 16 | # Members of the group '<%= group %>' may gain root privileges 17 | %<%= group %> ALL=(ALL) ALL 18 | <% end -%> 19 | 20 | # Sudo users who can gain root privileges without a password 21 | 22 | <% @sudoers_users.each do |user| -%> 23 | <%= user %> ALL=(ALL) NOPASSWD:ALL 24 | <% end -%> 25 | -------------------------------------------------------------------------------- /elasticsearch/chefignore: -------------------------------------------------------------------------------- 1 | # Put files/directories that should be ignored in this file. 2 | # Lines that start with '# ' are comments. 3 | 4 | ## OS 5 | .DS_Store 6 | Icon? 
7 | nohup.out 8 | 9 | ## EDITORS 10 | \#* 11 | .#* 12 | *~ 13 | *.sw[a-z] 14 | *.bak 15 | REVISION 16 | TAGS* 17 | tmtags 18 | *_flymake.* 19 | *_flymake 20 | *.tmproj 21 | .project 22 | .settings 23 | mkmf.log 24 | 25 | ## COMPILED 26 | a.out 27 | *.o 28 | *.pyc 29 | *.so 30 | 31 | ## OTHER SCM 32 | */.bzr/* 33 | */.hg/* 34 | */.svn/* 35 | 36 | ## Don't send rspecs up in cookbook 37 | .watchr 38 | .rspec 39 | spec/* 40 | spec/fixtures/* 41 | features/* 42 | 43 | ## SCM 44 | .gitignore 45 | 46 | # Berkshelf 47 | Berksfile 48 | Berksfile.lock 49 | -------------------------------------------------------------------------------- /elasticsearch/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "karmi" 2 | maintainer_email "karmi@karmi.cz" 3 | license "Apache" 4 | description "Installs and configures elasticsearch" 5 | long_description IO.read(File.join(File.dirname(__FILE__), 'README.markdown')) 6 | version "0.2.7" 7 | name "elasticsearch" 8 | 9 | depends 'ark' 10 | 11 | recommends 'build-essential' 12 | recommends 'xml' 13 | recommends 'java' 14 | recommends 'monit' 15 | 16 | provides 'elasticsearch' 17 | provides 'elasticsearch::data' 18 | provides 'elasticsearch::ebs' 19 | provides 'elasticsearch::aws' 20 | provides 'elasticsearch::nginx' 21 | provides 'elasticsearch::proxy' 22 | provides 'elasticsearch::plugins' 23 | provides 'elasticsearch::monit' 24 | -------------------------------------------------------------------------------- /rubies/attributes/default.rb: -------------------------------------------------------------------------------- 1 | default[:rubies][:versions] = [ 2 | "2.1.2", 3 | "2.0.0-p481", 4 | "1.9.3-p545", 5 | "1.8.7-p371", 6 | "1.8.7-p358", 7 | "jruby-1.7.11" 8 | ] 9 | default[:rubies][:source] = "http://packages.machines.io/rubies" 10 | default[:rubies][:install_path] = "/opt/rubies" 11 | default[:rubies][:system_ruby_version] = "2.1.2" 12 | default[:rubies][:rbenv_path] = "/usr/local/rbenv" 13 | 14 | # Gems installed on all rubies 15 | default[:rubies][:gems] = [ 16 | ["bundler", "1.6.3"] 17 | ] 18 | 19 | # Gems installed to the default system ruby 20 | default[:rubies][:system_ruby_gems] = [ 21 | ["chef", "11.10.4"], 22 | ["backup", "3.10.0"], 23 | ["remote_syslog", "1.6.14"] 24 | ] 25 | -------------------------------------------------------------------------------- /elasticsearch/recipes/test.rb: -------------------------------------------------------------------------------- 1 | Chef::Log.debug "Installing and configuring minitest and minitest-chef-handler" 2 | 3 | chef_gem "minitest" 4 | chef_gem "minitest-chef-handler" 5 | 6 | require "minitest-chef-handler" 7 | 8 | test_pattern = './**/*elasticsearch*/tests/**/*_test.rb' 9 | test_files = Dir[test_pattern].entries.inject([]) do |result,item| 10 | result << item unless result.any? { |i| i.include? item.split('/').last } 11 | result 12 | end 13 | 14 | Chef::Log.debug "Will run these tests: #{test_files.inspect}" 15 | 16 | handler = MiniTest::Chef::Handler.new({ 17 | :path => test_files, 18 | :verbose => true 19 | }) 20 | 21 | Chef::Log.info("Enabling minitest-chef-handler as a report handler") 22 | Chef::Config.send("report_handlers") << handler 23 | -------------------------------------------------------------------------------- /sudo/attributes/default.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: sudo 3 | # Attribute File:: sudoers 4 | # 5 | # Copyright 2008-2009, Opscode, Inc. 
6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # 19 | 20 | default[:authorization][:sudo][:groups] = Array.new 21 | default[:authorization][:sudo][:users] = Array.new 22 | -------------------------------------------------------------------------------- /users/recipes/default.rb: -------------------------------------------------------------------------------- 1 | sysadmin_group = Array.new 2 | 3 | data_bag("users").each do |user| 4 | 5 | u = data_bag_item("users", user) 6 | 7 | sysadmin_group << u['id'] 8 | 9 | home_dir = "/home/#{u['id']}" 10 | 11 | user u['id'] do 12 | uid u['uid'] 13 | gid u['gid'] 14 | shell u['shell'] 15 | comment u['comment'] 16 | supports :manage_home => true 17 | home home_dir 18 | end 19 | 20 | directory "#{home_dir}/.ssh" do 21 | owner u['id'] 22 | group u['gid'] || u['id'] 23 | mode "0700" 24 | end 25 | 26 | template "#{home_dir}/.ssh/authorized_keys" do 27 | source "authorized_keys.erb" 28 | owner u['id'] 29 | group u['gid'] || u['id'] 30 | mode "0600" 31 | variables :ssh_keys => u['ssh_keys'] 32 | end 33 | end 34 | 35 | group "sysadmin" do 36 | gid 2300 37 | members sysadmin_group 38 | end 39 | -------------------------------------------------------------------------------- /elasticsearch/recipes/ebs.rb: -------------------------------------------------------------------------------- 1 | [Chef::Recipe, Chef::Resource].each { |l| l.send :include, ::Extensions } 2 | 3 | # Install the Fog gem dependencies 4 | # 5 | value_for_platform_family( 6 | [:ubuntu, :debian] => %w| build-essential libxslt1-dev libxml2-dev |, 7 | [:rhel, :centos, :suse, :amazon] => %w| gcc gcc-c++ make libxslt-devel libxml2-devel | 8 | ).each do |pkg| 9 | package(pkg) { action :nothing }.run_action(:upgrade) 10 | end 11 | 12 | # Install the Fog gem for Chef run 13 | # 14 | chef_gem("fog") do 15 | version '1.10.1' 16 | action :install 17 | end 18 | 19 | # Create EBS for each device with proper configuration 20 | # 21 | # See the `attributes/data` file for instructions. 22 | # 23 | node.elasticsearch[:data][:devices].each do |device, params| 24 | if params[:ebs] && !params[:ebs].keys.empty? 
25 | create_ebs device, params 26 | end 27 | end 28 | -------------------------------------------------------------------------------- /newrelic/recipes/default.rb: -------------------------------------------------------------------------------- 1 | # Install newrelic server monitoring 2 | 3 | execute "Add newrelic apt source" do 4 | command "wget -O /etc/apt/sources.list.d/newrelic.list http://download.newrelic.com/debian/newrelic.list" 5 | not_if { File.exist?('/etc/apt/sources.list.d/newrelic.list') } 6 | end 7 | 8 | execute "Add newrelic key to apt keyring" do 9 | command "wget -O - http://download.newrelic.com/548C16BF.gpg | apt-key add - && apt-get update" 10 | not_if "gpg --keyring /etc/apt/trusted.gpg --list-keys | grep '1024D/548C16BF'" 11 | end 12 | 13 | package "newrelic-sysmond" do 14 | action :install 15 | end 16 | 17 | service "newrelic-sysmond" do 18 | action :nothing 19 | end 20 | 21 | template "/etc/newrelic/nrsysmond.cfg" do 22 | source "nrsysmond.cfg.erb" 23 | owner 'root' 24 | group 'newrelic' 25 | mode '0640' 26 | notifies :restart, resources(:service => "newrelic-sysmond") 27 | end 28 | -------------------------------------------------------------------------------- /ark/recipes/default.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: ark 3 | # Recipe:: default 4 | # 5 | # Author:: Bryan W. Berry 6 | # Copyright 2012, Bryan W. Berry 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 
19 | # 20 | 21 | 22 | package "unzip" 23 | package "libtool" 24 | package "autoconf" 25 | package "autogen" unless platform_family?("rhel", "fedora") 26 | 27 | if platform?("freebsd") 28 | package "gtar" 29 | end 30 | -------------------------------------------------------------------------------- /ark/.kitchen.yml: -------------------------------------------------------------------------------- 1 | --- 2 | driver_plugin: vagrant 3 | platforms: 4 | - name: ubuntu-12.04 5 | driver_config: 6 | box: opscode-ubuntu-12.04 7 | box_url: http://cloud-images.ubuntu.com/vagrant/precise/current/precise-server-cloudimg-amd64-vagrant-disk1.box 8 | require_chef_omnibus: 11.4.0 9 | run_list: 10 | - recipe[apt] 11 | - name: ubuntu-10.04 12 | driver_config: 13 | box: opscode-ubuntu-10.04 14 | box_url: http://opscode-vm.s3.amazonaws.com/vagrant/opscode_ubuntu-10.04_chef-11.2.0.box 15 | require_chef_omnibus: 11.4.0 16 | run_list: 17 | - recipe[apt] 18 | - name: centos-6.3 19 | driver_config: 20 | box: opscode-centos-6.3 21 | box_url: http://opscode-vm.s3.amazonaws.com/vagrant/opscode_centos-6.3_chef-11.2.0.box 22 | require_chef_omnibus: 11.4.0 23 | run_list: 24 | - recipe[yum::epel] 25 | suites: 26 | - name: default 27 | run_list: 28 | - recipe[minitest-handler] 29 | - recipe[ark] 30 | - recipe[ark::test] 31 | attributes: {} 32 | -------------------------------------------------------------------------------- /ark/CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 0.1.0: 2 | 3 | * [COOK-2335] - ark resource broken on Chef 11 4 | 5 | ## 0.0.17 6 | 7 | * [COOK-2026] - Allow cherry_pick action to be used for directories as 8 | well as files 9 | 10 | ## 0.0.16 11 | 12 | * [COOK-1593] - README formatting updates for better display on 13 | Community Site 14 | 15 | ## 0.0.15 16 | 17 | New features 18 | * add `setup_py_*` actions 19 | * add vagrantfile 20 | * add foodcritic test 21 | * travis.ci support 22 | 23 | Bug fixes 24 | * dangling "unless" 25 | 26 | ## 0.0.10 (May 23, 2012) 27 | 28 | New features 29 | * use autogen.sh to generate configure script for configure action 30 | https://github.com/bryanwb/chef-ark/issues/16 31 | * support more file extensions https://github.com/bryanwb/chef-ark/pull/18 32 | * add extension attribute which allows you to download files which do 33 | not have the file extension as part of the URL 34 | 35 | Bug fixes 36 | * strip_leading_dir not working for zip files 37 | https://github.com/bryanwb/chef-ark/issues/19 38 | -------------------------------------------------------------------------------- /ark/Rakefile: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env rake 2 | 3 | @cookbook = "ark" 4 | 5 | desc "Runs foodcritic linter" 6 | task :foodcritic do 7 | if Gem::Version.new("1.9.2") <= Gem::Version.new(RUBY_VERSION.dup) 8 | sandbox = File.join(File.dirname(__FILE__), %w{tmp foodcritic}, @cookbook) 9 | prepare_foodcritic_sandbox(sandbox) 10 | 11 | sh "foodcritic --epic-fail any #{File.dirname(sandbox)}" 12 | else 13 | puts "WARN: foodcritic run is skipped as Ruby #{RUBY_VERSION} is < 1.9.2."
14 | end 15 | end 16 | 17 | task :default => 'foodcritic' 18 | 19 | private 20 | 21 | def prepare_foodcritic_sandbox(sandbox) 22 | files = %w{*.md *.rb attributes definitions files providers 23 | recipes resources templates} 24 | 25 | rm_rf sandbox 26 | mkdir_p sandbox 27 | cp_r Dir.glob("{#{files.join(',')}}"), sandbox 28 | puts "\n\n" 29 | end 30 | 31 | begin 32 | require 'kitchen/rake_tasks' 33 | Kitchen::RakeTasks.new 34 | rescue LoadError 35 | puts ">>>>> Kitchen gem not loaded, omitting tasks" unless ENV['CI'] 36 | end 37 | -------------------------------------------------------------------------------- /elasticsearch/attributes/logging.rb: -------------------------------------------------------------------------------- 1 | default.elasticsearch[:logging]['action'] = 'DEBUG' 2 | default.elasticsearch[:logging]['com.amazonaws'] = 'WARN' 3 | default.elasticsearch[:logging]['index.search.slowlog'] = 'TRACE, index_search_slow_log_file' 4 | default.elasticsearch[:logging]['index.indexing.slowlog'] = 'TRACE, index_indexing_slow_log_file' 5 | 6 | # -------------------------------------------- 7 | # NOTE: Setting the attributes for logging.yml 8 | # -------------------------------------------- 9 | # 10 | # The template iterates over all values set in the `node.elasticsearch.logging` 11 | # namespaces, and prints all settings which have been configured. 12 | # This file only configures the minimal default set. 13 | # 14 | # To configure logging, simply set the corresponding attribute, eg.: 15 | # 16 | # node.elasticsearch.logging['discovery'] = 'TRACE' 17 | # 18 | # Use the same notation for deeply nested attributes: 19 | # 20 | # node.elasticsearch.logging['index.search.slowlog'] = 'DEBUG, index_search_slow_log_file' 21 | # 22 | -------------------------------------------------------------------------------- /sudo/recipes/default.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: sudo 3 | # Recipe:: default 4 | # 5 | # Copyright 2008-2009, Opscode, Inc. 6 | # 7 | # Licensed under the Apache License, Version 2.0 (the "License"); 8 | # you may not use this file except in compliance with the License. 9 | # You may obtain a copy of the License at 10 | # 11 | # http://www.apache.org/licenses/LICENSE-2.0 12 | # 13 | # Unless required by applicable law or agreed to in writing, software 14 | # distributed under the License is distributed on an "AS IS" BASIS, 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 16 | # See the License for the specific language governing permissions and 17 | # limitations under the License. 18 | # 19 | 20 | package "sudo" do 21 | action :upgrade 22 | end 23 | 24 | template "/etc/sudoers" do 25 | source "sudoers.erb" 26 | mode 0440 27 | owner "root" 28 | group "root" 29 | variables( 30 | :sudoers_groups => node[:authorization][:sudo][:groups], 31 | :sudoers_users => node[:authorization][:sudo][:users] 32 | ) 33 | end 34 | -------------------------------------------------------------------------------- /sudo/metadata.rb: -------------------------------------------------------------------------------- 1 | maintainer "Opscode, Inc." 
2 | maintainer_email "cookbooks@opscode.com" 3 | license "Apache 2.0" 4 | description "Installs sudo and configures /etc/sudoers" 5 | version "0.9.1" 6 | 7 | recipe "sudo", "Installs sudo and configures /etc/sudoers" 8 | 9 | %w{redhat centos fedora ubuntu debian freebsd}.each do |os| 10 | supports os 11 | end 12 | 13 | attribute "authorization", 14 | :display_name => "Authorization", 15 | :description => "Hash of Authorization attributes", 16 | :type => "hash" 17 | 18 | attribute "authorization/sudoers", 19 | :display_name => "Authorization Sudoers", 20 | :description => "Hash of Authorization/Sudoers attributes", 21 | :type => "hash" 22 | 23 | attribute "authorization/sudoers/users", 24 | :display_name => "Sudo Users", 25 | :description => "Users who are allowed sudo ALL", 26 | :type => "array", 27 | :default => "" 28 | 29 | attribute "authorization/sudoers/groups", 30 | :display_name => "Sudo Groups", 31 | :description => "Groups who are allowed sudo ALL", 32 | :type => "array", 33 | :default => "" 34 | -------------------------------------------------------------------------------- /elasticsearch/tests/aws_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::aws' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | cluster_url = 'http://localhost:9200' 8 | health_url = "#{cluster_url}/_cluster/health" 9 | 10 | proxy_url = "http://USERNAME:PASSWORD@localhost:8080" 11 | 12 | 13 | it "creates the directory" do 14 | if node.recipes.include?("elasticsearch::aws") 15 | directory("/usr/local/elasticsearch/plugins/cloud-aws/").must_exist.with(:owner, 'elasticsearch') 16 | end 17 | end 18 | 19 | # Pending... 20 | # it "loads the plugin" do 21 | # if node.recipes.include?("elasticsearch::aws") 22 | # system("service elasticsearch restart") 23 | # timeout = 120 24 | # until system("curl --silent --show-error '#{health_url}' > /dev/null 2>&1") or timeout == 0 25 | # sleep 1 26 | # timeout -= 1 27 | # end 28 | # file('/usr/local/var/log/elasticsearch/elasticsearch_vagrant.log').must_match /loaded.*\[cloud\-aws\]$/ 29 | # end 30 | # end 31 | 32 | end 33 | -------------------------------------------------------------------------------- /elasticsearch/tests/proxy_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::proxy' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | cluster_url = 'http://localhost:9200' 8 | health_url = "#{cluster_url}/_cluster/health" 9 | 10 | proxy_url = "http://USERNAME:PASSWORD@localhost:8080" 11 | 12 | it "runs as a daemon" do 13 | service("nginx").must_be_running 14 | end 15 | 16 | it "has a username in passwords file" do 17 | file("/usr/local/etc/elasticsearch/passwords").must_exist.must_include("USERNAME") 18 | end 19 | 20 | it "proxies request to elasticsearch" do 21 | timeout = 120 22 | until system("curl --silent --show-error '#{health_url}?wait_for_status=yellow&timeout=1m' > /dev/null") or timeout == 0 23 | sleep 1 24 | timeout -= 1 25 | end 26 | 27 | Net::HTTP.start('localhost', 8080) do |http| 28 | req = Net::HTTP::Get.new('/') 29 | req.basic_auth 'USERNAME', 'PASSWORD' 30 | response = http.request(req) 31 | status = JSON.parse(response.body)['ok'] 32 | assert_equal status, true 33 | end 34 | end 35 | 36 | end 37 | 
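The `monitrc` calls in elasticsearch/recipes/monit.rb (above) and elasticsearch/recipes/nginx.rb (below) rely on a definition supplied by the external monit cookbook referenced in the elasticsearch Berksfile (apsoto/monit); that cookbook is not vendored in this tree. The following is a minimal sketch of what such a definition is assumed to provide -- the parameter names mirror those call sites and the output path matches what tests/monit_test.rb asserts on; the real definition may differ:

# Sketch only, not the actual apsoto/monit definition: renders a
# per-service Monit configuration into /etc/monit/conf.d/<name>.conf
# and schedules a delayed Monit restart.
define :monitrc, :template_cookbook => nil, :source => nil do
  template "/etc/monit/conf.d/#{params[:name]}.conf" do
    cookbook params[:template_cookbook] if params[:template_cookbook]
    source   params[:source] || "#{params[:name]}.conf.erb"
    owner    "root"
    group    "root"
    mode     0644
    notifies :restart, "service[monit]", :delayed
  end
end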
-------------------------------------------------------------------------------- /elasticsearch/recipes/nginx.rb: -------------------------------------------------------------------------------- 1 | # Install Nginx via packages 2 | # 3 | package "nginx" 4 | 5 | # Create user and group for Nginx 6 | # 7 | user node[:nginx][:user] do 8 | comment "Nginx User" 9 | system true 10 | shell "/bin/false" 11 | action :create 12 | end 13 | group node[:nginx][:user] do 14 | members node[:nginx][:user] 15 | action :create 16 | end 17 | 18 | # Create service for Nginx (/sbin/service nginx) 19 | # 20 | service "nginx" do 21 | supports :status => true, :restart => true, :reload => true 22 | action [ :enable, :start ] 23 | end 24 | 25 | # Create log directory 26 | # 27 | directory node[:nginx][:log_dir] do 28 | mode 0755 29 | owner 'root' 30 | action :create 31 | recursive true 32 | end 33 | 34 | # Create Nginx main configuration file 35 | # 36 | template "nginx.conf.erb" do 37 | path "#{node[:nginx][:dir]}/nginx.conf" 38 | source "nginx.conf.erb" 39 | owner "root" 40 | mode 0644 41 | notifies :restart, 'service[nginx]', :immediately 42 | end 43 | 44 | if node.recipes.include?('monit') and respond_to?(:monitrc) 45 | monitrc "nginx.monitrc" do 46 | template_cookbook 'elasticsearch' 47 | source 'nginx.monitrc.conf.erb' 48 | end 49 | end 50 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/logging.yml.erb: -------------------------------------------------------------------------------- 1 | rootLogger: INFO, console, file 2 | 3 | <% node.elasticsearch[:logging].sort.each do |key, value| %> 4 | logger.<%= key %>: <%= value %> 5 | <% end %> 6 | 7 | additivity: 8 | index.search.slowlog: false 9 | index.indexing.slowlog: false 10 | 11 | appender: 12 | console: 13 | type: console 14 | layout: 15 | type: consolePattern 16 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 17 | 18 | file: 19 | type: dailyRollingFile 20 | file: ${path.logs}/${cluster.name}.log 21 | datePattern: "'.'yyyy-MM-dd" 22 | layout: 23 | type: pattern 24 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 25 | 26 | index_search_slow_log_file: 27 | type: dailyRollingFile 28 | file: ${path.logs}/${cluster.name}_index_search_slowlog.log 29 | datePattern: "'.'yyyy-MM-dd" 30 | layout: 31 | type: pattern 32 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 33 | 34 | index_indexing_slow_log_file: 35 | type: dailyRollingFile 36 | file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log 37 | datePattern: "'.'yyyy-MM-dd" 38 | layout: 39 | type: pattern 40 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n" 41 | -------------------------------------------------------------------------------- /elasticsearch/recipes/proxy.rb: -------------------------------------------------------------------------------- 1 | include_recipe "elasticsearch::nginx" 2 | 3 | # Create proxy with HTTP authentication via Nginx 4 | # 5 | template "#{node.elasticsearch[:nginx][:dir]}/conf.d/elasticsearch_proxy.conf" do 6 | source "elasticsearch_proxy.conf.erb" 7 | owner node.elasticsearch[:nginx][:user] and group node.elasticsearch[:nginx][:user] and mode 0755 8 | notifies :reload, 'service[nginx]' 9 | end 10 | 11 | ruby_block "add users to passwords file" do 12 | block do 13 | require 'webrick/httpauth/htpasswd' 14 | @htpasswd = WEBrick::HTTPAuth::Htpasswd.new(node.elasticsearch[:nginx][:passwords_file]) 15 | 16 | node.elasticsearch[:nginx][:users].each do |u| 17 | Chef::Log.debug "Adding user 
'#{u['username']}' to #{node.elasticsearch[:nginx][:passwords_file]}\n" 18 | @htpasswd.set_passwd( 'Elasticsearch', u['username'], u['password'] ) 19 | end 20 | 21 | @htpasswd.flush 22 | end 23 | 24 | not_if { node.elasticsearch[:nginx][:users].empty? } 25 | end 26 | 27 | # Ensure proper permissions and existence of the passwords file 28 | # 29 | file node.elasticsearch[:nginx][:passwords_file] do 30 | owner node.elasticsearch[:nginx][:user] and group node.elasticsearch[:nginx][:user] and mode 0755 31 | action :touch 32 | end 33 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/elasticsearch-env.sh.erb: -------------------------------------------------------------------------------- 1 | # JVM Configuration for ElasticSearch 2 | # =================================== 3 | # See 4 | # 5 | 6 | <%= "JAVA_HOME='#{node.elasticsearch[:java_home]}'\n" if node.elasticsearch[:java_home] -%> 7 | ES_HOME='<%= "#{node.elasticsearch[:dir]}/elasticsearch" %>' 8 | ES_CLASSPATH=$ES_CLASSPATH:$ES_HOME/lib/*:$ES_HOME/lib/sigar/* 9 | ES_HEAP_SIZE=<%= node.elasticsearch[:allocated_memory] %> 10 | 11 | ES_JAVA_OPTS=" 12 | -server 13 | -Djava.net.preferIPv4Stack=true 14 | -Des.config=<%= node.elasticsearch[:path][:conf] %>/elasticsearch.yml 15 | -Xms<%= node.elasticsearch[:allocated_memory] %> 16 | -Xmx<%= node.elasticsearch[:allocated_memory] %> 17 | -Xss<%= node.elasticsearch[:thread_stack_size] %> 18 | -XX:+UseParNewGC 19 | -XX:+UseConcMarkSweepGC 20 | -XX:CMSInitiatingOccupancyFraction=75 21 | -XX:+UseCMSInitiatingOccupancyOnly 22 | -XX:+HeapDumpOnOutOfMemoryError 23 | <% if node.elasticsearch[:jmx] %> 24 | -Dcom.sun.management.jmxremote.ssl=false 25 | -Dcom.sun.management.jmxremote.authenticate=false 26 | -Dcom.sun.management.jmxremote.port=3333 27 | -Djava.rmi.server.hostname=<%= node[:ipaddress] %> 28 | <% end %> 29 | " 30 | -------------------------------------------------------------------------------- /rubies/recipes/default.rb: -------------------------------------------------------------------------------- 1 | # Install all rubies and their gems 2 | node.rubies.versions.each do |ruby| 3 | 4 | remote_file "/opt/rubies/#{ruby}.tgz" do 5 | source "#{node.rubies.source}/#{ruby}.tgz" 6 | action :create_if_missing 7 | end 8 | 9 | bash "unarchive #{ruby}" do 10 | cwd node.rubies.install_path 11 | code %{ 12 | if [ ! 
-d #{ruby} ]; then 13 | tar zxf #{ruby}.tgz 14 | fi 15 | } 16 | end 17 | 18 | # Rehash 19 | bash "rbenv rehash" do 20 | code "#{node.rubies.rbenv_path}/bin/rbenv rehash" 21 | end 22 | 23 | gems = node.rubies.gems 24 | gems += node.rubies.system_ruby_gems if ruby == node.rubies.system_ruby_version 25 | 26 | gems.each do |g| 27 | bash "install #{g[0]} #{g[1]} to #{ruby}" do 28 | cwd "/opt/rubies/#{ruby}/bin" 29 | code "RBENV_VERSION=#{ruby} ./gem install #{g[0]} -v #{g[1]} --no-rdoc --no-ri" 30 | not_if %(RBENV_VERSION=#{ruby} ./gem list --local | grep -E "(#{g[0]})(.+)(#{g[1]})"), cwd: "/opt/rubies/#{ruby}/bin" 31 | end 32 | end 33 | 34 | end 35 | 36 | # Set the default system ruby 37 | bash "set default ruby to #{node.rubies.system_ruby_version}" do 38 | code "#{node.rubies.rbenv_path}/bin/rbenv global #{node.rubies.system_ruby_version}" 39 | end 40 | 41 | # Rehash (again) 42 | bash "rbenv rehash" do 43 | code "#{node.rubies.rbenv_path}/bin/rbenv rehash" 44 | end 45 | -------------------------------------------------------------------------------- /nginx/templates/default/default-host-page.html.erb: -------------------------------------------------------------------------------- 3 | Powered By Machines.io
50 | Machines.io 52 | If you were expecting to see your site or application here, check your configuration settings and redeploy. 60 | This server is powered by Machines.io, a product of JK Tech, Inc.
-------------------------------------------------------------------------------- /redis/templates/default/redis.sysv.erb: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # Simple Redis init.d script conceived to work on Linux systems 4 | # as it does use of the /proc filesystem. 5 | # 6 | # THIS FILE HAS BEEN GENERATED BY CHEF 7 | # ANY MANUAL MODIFICATIONS WILL BE OVERWRITTEN 8 | 9 | REDISPORT=<%= node[:redis][:port] %> 10 | EXEC=/usr/local/bin/redis-server 11 | CLIEXEC=/usr/local/bin/redis-cli 12 | 13 | PIDFILE=<%= node[:redis][:pidfile] %> 14 | CONF=<%= node[:redis][:config] %> 15 | 16 | case "$1" in 17 | start) 18 | if [ -f $PIDFILE ] 19 | then 20 | echo "$PIDFILE exists, process is already running or crashed" 21 | else 22 | echo "Starting Redis server..." 23 | $EXEC $CONF 24 | fi 25 | ;; 26 | stop) 27 | if [ ! -f $PIDFILE ] 28 | then 29 | echo "$PIDFILE does not exist, process is not running" 30 | else 31 | PID=$(cat $PIDFILE) 32 | echo "Stopping ..." 33 | $CLIEXEC -p $REDISPORT shutdown 34 | while [ -x /proc/${PID} ] 35 | do 36 | echo "Waiting for Redis to shutdown ..." 37 | sleep 1 38 | done 39 | echo "Redis stopped" 40 | fi 41 | ;; 42 | *) 43 | echo "Please use start or stop as first argument" 44 | ;; 45 | esac 46 | -------------------------------------------------------------------------------- /elasticsearch/recipes/data.rb: -------------------------------------------------------------------------------- 1 | node.elasticsearch[:data][:devices].each do |device, params| 2 | # Format volume if format command is provided and volume is unformatted 3 | # 4 | bash "Format device: #{device}" do 5 | __command = "#{params[:format_command]} #{device}" 6 | __fs_check = params[:fs_check_command] || 'dumpe2fs' 7 | 8 | code __command 9 | 10 | only_if { params[:format_command] } 11 | not_if "#{__fs_check} #{device}" 12 | end 13 | 14 | # Create directory with proper permissions 15 | # 16 | directory params[:mount_path] do 17 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0775 18 | recursive true 19 | end 20 | 21 | # Mount device to elasticsearch data path 22 | # 23 | mount "#{device}-to-#{params[:mount_path]}" do 24 | mount_point params[:mount_path] 25 | device device 26 | fstype params[:file_system] 27 | options params[:mount_options] 28 | action [:mount, :enable] 29 | 30 | only_if { File.exists?(device) } 31 | if node.elasticsearch[:path][:data].include?(params[:mount_path]) 32 | Chef::Log.debug "Schedule Elasticsearch service restart..." 33 | notifies :restart, 'service[elasticsearch]' 34 | end 35 | end 36 | 37 | # Ensure proper permissions 38 | # 39 | directory params[:mount_path] do 40 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0775 41 | recursive true 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /elasticsearch/attributes/proxy.rb: -------------------------------------------------------------------------------- 1 | include_attribute "elasticsearch::default" 2 | include_attribute "elasticsearch::nginx" 3 | 4 | # Try to load data bag item 'elasticsearch/users' ------------------ 5 | # 6 | users = Chef::DataBagItem.load('elasticsearch', 'users')[node.chef_environment]['users'] rescue [] 7 | # ---------------------------------------------------------------- 8 | 9 | # === NGINX === 10 | # Allowed users are set based on data bag values, when it exists.
11 | # 12 | # It's possible to define the credentials directly in your node configuration, if you wish. 13 | # 14 | default.elasticsearch[:nginx][:port] = "8080" 15 | default.elasticsearch[:nginx][:dir] = ( node.nginx[:dir] rescue '/etc/nginx' ) 16 | default.elasticsearch[:nginx][:user] = ( node.nginx[:user] rescue 'nginx' ) 17 | default.elasticsearch[:nginx][:log_dir] = ( node.nginx[:log_dir] rescue '/var/log/nginx' ) 18 | default.elasticsearch[:nginx][:users] = users 19 | default.elasticsearch[:nginx][:passwords_file] = "#{node.elasticsearch[:path][:conf]}/passwords" 20 | 21 | # Deny or allow authenticated access to the cluster API. 22 | # 23 | # Set this to `true` if you want to use a tool like BigDesk. 24 | # 25 | default.elasticsearch[:nginx][:allow_cluster_api] = false 26 | 27 | # Allow responding to unauthenticated requests for `/status`, 28 | # returning the equivalent of `curl -I localhost:9200` 29 | # 30 | default.elasticsearch[:nginx][:allow_status] = false 31 | 32 | # Other Nginx proxy settings 33 | # 34 | default.elasticsearch[:nginx][:client_max_body_size] = "50M" 35 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/elasticsearch.conf.erb: -------------------------------------------------------------------------------- 1 | # ------------------------------------------ 2 | # Monit configuration file for ElasticSearch 3 | # ------------------------------------------ 4 | 5 | check process elasticsearch with pidfile <%= node.elasticsearch[:pid_file] %> 6 | start program = "/etc/init.d/elasticsearch restart" with timeout 60 seconds 7 | stop program = "/etc/init.d/elasticsearch stop" 8 | if cpu > 90% for 15 cycles then alert 9 | if totalmem > 90% for 15 cycles then alert 10 | if loadavg(15min) greater than 10 for 50 cycles then alert 11 | group elasticsearch 12 | 13 | <% if node.monit[:notify_email] %> 14 | check host elasticsearch_connection with address 0.0.0.0 15 | if failed url http://0.0.0.0:9200/ with timeout 15 seconds then alert 16 | group elasticsearch 17 | 18 | check host elasticsearch_cluster_health with address 0.0.0.0 19 | if failed url http://0.0.0.0:9200/_cluster/health 20 | and content == 'green' 21 | with timeout 60 seconds 22 | then alert 23 | alert <%= node.monit[:notify_email] %> with mail-format { 24 | subject: [monit] elasticsearch: CLUSTER HEALTH PROBLEM at <%= node.hostname %> 25 | message: [<%= node.hostname %>] $SERVICE $ACTION 26 | <% if node.monit[:http_auth] && node.cloud %> 27 | -- 28 | http://<%= node.monit[:http_auth]['username'] %>:<%= node.monit[:http_auth]['password_encoded'] %>@<%= node.cloud.public_hostname %>:2812/elasticsearch_cluster_health 29 | <% end %> 30 | } 31 | group elasticsearch 32 | <% end %> 33 | -------------------------------------------------------------------------------- /rsyslog/templates/default/rsyslog.conf.erb: -------------------------------------------------------------------------------- 1 | # /etc/rsyslog.conf Configuration file for rsyslog.
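#
# Note: a single "@" in the forwarding rule at the end of this file sends
# logs over UDP; use "@@" for TCP delivery instead, e.g. with a placeholder
# destination:
#
#     *.* @@logs.example.com:514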
2 | # 3 | # For more information see 4 | # /usr/share/doc/rsyslog-doc/html/rsyslog_conf.html 5 | # 6 | # Default logging rules can be found in /etc/rsyslog.d/50-default.conf 7 | 8 | 9 | ################# 10 | #### MODULES #### 11 | ################# 12 | 13 | $ModLoad imfile 14 | $ModLoad imuxsock # provides support for local system logging 15 | $ModLoad imklog # provides kernel logging support (previously done by rklogd) 16 | #$ModLoad immark # provides --MARK-- message capability 17 | 18 | # provides UDP syslog reception 19 | #$ModLoad imudp 20 | #$UDPServerRun 514 21 | 22 | # provides TCP syslog reception 23 | #$ModLoad imtcp 24 | #$InputTCPServerRun 514 25 | 26 | 27 | ########################### 28 | #### GLOBAL DIRECTIVES #### 29 | ########################### 30 | 31 | # 32 | # Use traditional timestamp format. 33 | # To enable high precision timestamps, comment out the following line. 34 | # 35 | $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat 36 | 37 | # Filter duplicated messages 38 | $RepeatedMsgReduction on 39 | 40 | # 41 | # Set the default permissions for all log files. 42 | # 43 | $FileOwner syslog 44 | $FileGroup adm 45 | $FileCreateMode 0640 46 | $DirCreateMode 0755 47 | $Umask 0022 48 | $PrivDropToUser syslog 49 | $PrivDropToGroup syslog 50 | 51 | # 52 | # Where to place spool files 53 | # 54 | $WorkDirectory /var/spool/rsyslog 55 | 56 | # 57 | # Include all config files in /etc/rsyslog.d/ 58 | # 59 | $IncludeConfig /etc/rsyslog.d/*.conf 60 | 61 | *.* @<%= node.rsyslog.papertrail.hostname %>:<%= node.rsyslog.papertrail.port %> 62 | -------------------------------------------------------------------------------- /elasticsearch/libraries/print_value.rb: -------------------------------------------------------------------------------- 1 | module Extensions 2 | module Templates 3 | 4 | # An extension method for convenient printing of values in ERB templates. 5 | # 6 | # The method provides several ways how to evaluate the value: 7 | # 8 | # 1. Using the key as a node attribute: 9 | # 10 | # <%= print_value 'bar' -%> is evaluated as: `node[:bar]` 11 | # 12 | # You may use a dot-separated key for nested attributes: 13 | # 14 | # <%= print_value 'foo.bar' -%> is evaluated in multiple ways in this order: 15 | # 16 | # a) as `node['foo.bar']`, 17 | # b) as `node['foo_bar']`, 18 | # c) as `node.foo.bar` (ie. `node[:foo][:bar]`) 19 | # 20 | # 2. You may also provide an explicit value for the method, which is then used: 21 | # 22 | # <%= print_value 'bar', node[:foo] -%> 23 | # 24 | # You may pass a specific separator to the method: 25 | # 26 | # <%= print_value 'bar', separator: '=' -%> 27 | # 28 | # Do not forget to use an ending dash (`-`) in the ERB block, so lines for missing values are not printed! 29 | # 30 | def print_value key, value=nil, options={} 31 | separator = options[:separator] || ': ' 32 | existing_value = value 33 | 34 | # NOTE: A value of `false` is valid, we need to check for `nil` explicitely 35 | existing_value = node.elasticsearch[key] if existing_value.nil? and not node.elasticsearch[key].nil? 36 | existing_value = node.elasticsearch[key.tr('.', '_')] if existing_value.nil? and not node.elasticsearch[key.tr('.', '_')].nil? 37 | existing_value = key.to_s.split('.').inject(node.elasticsearch) { |result, attr| result[attr] } rescue nil if existing_value.nil? 38 | 39 | [key, separator, existing_value.to_s, "\n"].join unless existing_value.nil? 
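      # For example, `print_value 'cluster.name'` returns "cluster.name: elasticsearch\n"
      # when that attribute is set, and nil (so nothing is printed) when it is not.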
40 | end 41 | 42 | end 43 | end 44 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/elasticsearch_proxy.conf.erb: -------------------------------------------------------------------------------- 1 | server { 2 | listen <%= node.elasticsearch[:nginx][:port] %>; 3 | server_name elasticsearch; 4 | client_max_body_size <%= node.elasticsearch[:nginx][:client_max_body_size] %>; 5 | 6 | error_log <%= node.elasticsearch[:nginx][:log_dir] %>/elasticsearch-errors.log; 7 | access_log <%= node.elasticsearch[:nginx][:log_dir] %>/elasticsearch.log; 8 | 9 | location / { 10 | 11 | # Deny Nodes Shutdown API 12 | if ($request_filename ~ "_shutdown") { 13 | return 403; 14 | break; 15 | } 16 | 17 | <% unless node.elasticsearch[:nginx][:allow_cluster_api] %> 18 | # Deny access to Cluster API 19 | if ($request_filename ~ "_cluster") { 20 | return 403; 21 | break; 22 | } 23 | <% end %> 24 | 25 | # Pass requests to ElasticSearch 26 | proxy_pass http://<%= node.elasticsearch.network.host rescue 'localhost' %>:<%= node.elasticsearch.http.port rescue 9200 %>; 27 | proxy_redirect off; 28 | 29 | proxy_set_header X-Real-IP $remote_addr; 30 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 31 | proxy_set_header Host $http_host; 32 | 33 | # For CORS Ajax 34 | proxy_pass_header Access-Control-Allow-Origin; 35 | proxy_pass_header Access-Control-Allow-Methods; 36 | proxy_hide_header Access-Control-Allow-Headers; 37 | add_header Access-Control-Allow-Headers 'X-Requested-With, Content-Type'; 38 | add_header Access-Control-Allow-Credentials true; 39 | 40 | # Authorize access 41 | auth_basic "ElasticSearch"; 42 | auth_basic_user_file <%= node.elasticsearch[:nginx][:passwords_file] %>; 43 | 44 | } 45 | 46 | <% if node.elasticsearch[:nginx][:allow_status] %> 47 | location /status { 48 | proxy_method HEAD; 49 | proxy_intercept_errors on; 50 | proxy_pass http://localhost:9200/; 51 | } 52 | <% end %> 53 | 54 | } 55 | -------------------------------------------------------------------------------- /sudo/README.md: -------------------------------------------------------------------------------- 1 | DESCRIPTION 2 | =========== 3 | 4 | This cookbook installs sudo and configures the /etc/sudoers file. 5 | 6 | REQUIREMENTS 7 | ============ 8 | 9 | Requires that the platform has a package named sudo and the sudoers file is /etc/sudoers. 10 | 11 | ATTRIBUTES 12 | ========== 13 | 14 | The following attributes are set to blank arrays: 15 | 16 | node[:authorization][:sudo][:groups] 17 | node[:authorization][:sudo][:users] 18 | 19 | They are passed into the sudoers template which iterates over the values to add sudo permission to the specified users and groups. 20 | 21 | USAGE 22 | ===== 23 | 24 | To use this cookbook, set the attributes above on the node via a role or the node object itself. In a role.rb: 25 | 26 | "authorization" => { 27 | "sudo" => { 28 | "groups" => ["admin", "wheel", "sysadmin"], 29 | "users" => ["jerry", "greg"] 30 | } 31 | } 32 | 33 | In JSON (role.json or on the node object): 34 | 35 | "authorization": { 36 | "sudo": { 37 | "groups": [ 38 | "admin", 39 | "wheel", 40 | "sysadmin" 41 | ], 42 | "users": [ 43 | "jerry", 44 | "greg" 45 | ] 46 | } 47 | } 48 | 49 | Note that the template for the sudoers file has the group "sysadmin" with ALL:ALL permission, though the group by default does not exist. 50 | 51 | LICENSE AND AUTHOR 52 | ================== 53 | 54 | Author:: Adam Jacob 55 | 56 | Copyright 2009-2010, Opscode, Inc. 
57 | 58 | Licensed under the Apache License, Version 2.0 (the "License"); 59 | you may not use this file except in compliance with the License. 60 | You may obtain a copy of the License at 61 | 62 | http://www.apache.org/licenses/LICENSE-2.0 63 | 64 | Unless required by applicable law or agreed to in writing, software 65 | distributed under the License is distributed on an "AS IS" BASIS, 66 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 67 | See the License for the specific language governing permissions and 68 | limitations under the License. 69 | -------------------------------------------------------------------------------- /redis/recipes/source.rb: -------------------------------------------------------------------------------- 1 | user "redis" do 2 | comment "Redis Administrator" 3 | system true 4 | shell "/bin/false" 5 | end 6 | 7 | directory node[:redis][:datadir] do 8 | owner "redis" 9 | group "redis" 10 | mode 0755 11 | recursive true 12 | end 13 | 14 | include_recipe "redis::init" 15 | 16 | service "redis" do 17 | supports :start => true, :stop => true, :restart => true 18 | provider Chef::Provider::Service::Upstart if node.redis.init == "upstart" 19 | end 20 | 21 | # download and install a new redis version if not running the latest 22 | # 23 | unless `redis-server -v 2>/dev/null`.include?(node.redis.version) 24 | 25 | # stop if a previous redis version has been installed 26 | # 27 | service "redis" do 28 | action :stop 29 | only_if "[ -e /usr/local/bin/redis-server ]" 30 | end 31 | 32 | # Ensure we have the source directory 33 | directory node.redis.srcdir 34 | 35 | remote_file "#{node.redis.srcdir}/#{node.redis.dir}.tar.gz" do 36 | source node.redis.source 37 | checksum node.redis.checksum 38 | action :create_if_missing 39 | end 40 | 41 | bash "Compiling Redis v#{node[:redis][:version]} from source" do 42 | cwd node.redis.srcdir 43 | code %{ 44 | if [ ! 
-d #{node.redis.dir} ]; then 45 | tar zxf #{node.redis.dir}.tar.gz 46 | fi 47 | cd #{node.redis.dir} && make install 48 | } 49 | end 50 | end 51 | 52 | file node.redis.logfile do 53 | owner "redis" 54 | group "redis" 55 | mode 0644 56 | action :create_if_missing 57 | backup false 58 | end 59 | 60 | # Ensure we have the config directory 61 | directory node.redis.configdir 62 | 63 | template node.redis.config do 64 | source "redis.conf.erb" 65 | owner "redis" 66 | group "redis" 67 | mode 0644 68 | backup false 69 | notifies :restart, resources(:service => "redis"), :delayed 70 | end 71 | 72 | execute "echo 1 > /proc/sys/vm/overcommit_memory" do 73 | not_if "[ $(cat /proc/sys/vm/overcommit_memory) -eq 1 ]" 74 | notifies :restart, resources(:service => "redis"), :delayed 75 | end 76 | 77 | service "redis" do 78 | action [:enable, :start] 79 | end 80 | -------------------------------------------------------------------------------- /elasticsearch/attributes/aws.rb: -------------------------------------------------------------------------------- 1 | include_attribute 'elasticsearch::default' 2 | include_attribute 'elasticsearch::plugins' 3 | 4 | # Load configuration and credentials from data bag 'elasticsearch/aws' - 5 | # 6 | aws = Chef::DataBagItem.load('elasticsearch', 'aws')[node.chef_environment] rescue {} 7 | # ---------------------------------------------------------------------- 8 | 9 | # To use the AWS discovery, you have to properly set up the configuration, 10 | # either with the data bag, role or environment overrides, or directly 11 | # on the node itself: 12 | # 13 | # cloud: 14 | # aws: 15 | # access_key: 16 | # secret_key: 17 | # region: 18 | # discovery: 19 | # type: ec2 20 | # ec2: 21 | # groups: 22 | # 23 | # Instead of using AWS access tokens, you can create the instance with a IAM role. 24 | # See: http://aws.amazon.com/iam/faqs/#How_do_i_get_started_with_IAM_roles_for_EC2_instances 25 | 26 | default.elasticsearch['plugins']['elasticsearch-cloud-aws']['version'] = '1.11.0' 27 | 28 | # === AWS === 29 | # AWS configuration is set based on data bag values. 30 | # You may choose to configure them in your node configuration instead. 
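#
# For example, in a role (a sketch only; the keys mirror the data bag layout
# above and the values are placeholders):
#
#     default_attributes "elasticsearch" => {
#       "discovery" => { "type" => "ec2", "ec2" => { "groups" => ["elasticsearch"] } },
#       "cloud"     => { "aws"  => { "region" => "us-east-1" } }
#     }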
31 | # 32 | default.elasticsearch[:gateway][:type] = ( aws['gateway']['type'] rescue nil ) 33 | default.elasticsearch[:discovery][:type] = ( aws['discovery']['type'] rescue nil ) 34 | default.elasticsearch[:discovery][:ec2][:groups] = ( aws['discovery']['ec2']['group'] rescue nil ) 35 | default.elasticsearch[:discovery][:ec2][:tag] = ( aws['discovery']['ec2']['tag'] rescue {} ) 36 | 37 | default.elasticsearch[:cloud][:aws][:access_key] = ( aws['cloud']['aws']['access_key'] rescue nil ) 38 | default.elasticsearch[:cloud][:aws][:secret_key] = ( aws['cloud']['aws']['secret_key'] rescue nil ) 39 | default.elasticsearch[:cloud][:aws][:region] = ( aws['cloud']['aws']['region'] rescue nil ) 40 | default.elasticsearch[:cloud][:ec2][:endpoint] = ( aws['cloud']['ec2']['endpoint'] rescue nil ) 41 | 42 | default.elasticsearch[:cloud][:node][:auto_attributes] = true 43 | -------------------------------------------------------------------------------- /elasticsearch/tests/cluster_test.rb: -------------------------------------------------------------------------------- 1 | require 'minitest/spec' 2 | 3 | describe_recipe 'elasticsearch::default' do 4 | 5 | include MiniTest::Chef::Assertions 6 | include MiniTest::Chef::Context 7 | include MiniTest::Chef::Resources 8 | require 'net/http' 9 | require 'json' 10 | 11 | cluster_url = 'http://localhost:9200' 12 | health_url = "#{cluster_url}/_cluster/health" 13 | 14 | describe "Cluster health" do 15 | 16 | it "is not red" do 17 | # Let's wait until the service is alive 18 | timeout = 120 19 | until system("curl --silent --show-error '#{health_url}?wait_for_status=yellow&timeout=1m'") or timeout == 0 20 | sleep 1 21 | timeout -= 1 22 | end 23 | 24 | resp = Net::HTTP.get_response URI.parse(health_url) 25 | status = JSON.parse(resp.read_body)['status'] 26 | assert status != "red" 27 | end 28 | 29 | end 30 | 31 | describe "Indexing and searching" do 32 | 33 | it "writes test data and retrieves them" do 34 | # Let's wait until the service is alive 35 | timeout = 120 36 | until system("curl --silent --show-error '#{health_url}?wait_for_status=yellow&timeout=1m'") or timeout == 0 37 | sleep 1 38 | timeout -= 1 39 | end 40 | 41 | # Let's clean up first 42 | system("curl --silent --show-error -X DELETE #{cluster_url}/test_chef_cookbook") 43 | 44 | # Insert test data 45 | system(%Q|curl --silent --show-error -X PUT #{cluster_url}/test_chef_cookbook -d '{"index":{"number_of_shards":1,"number_of_replicas":0}}'|) 46 | (1..5).each do |num| 47 | test_uri = URI.parse "#{cluster_url}/test_chef_cookbook/document/#{num}" 48 | system(%Q|curl --silent --show-error -X PUT #{cluster_url}/test_chef_cookbook/document/#{num} -d '{ "title": "Test #{num}", "time": "#{Time.now.utc}", "enabled": true }'|) 49 | end 50 | system("curl --silent --show-error -X POST #{cluster_url}/test_chef_cookbook/_refresh") 51 | 52 | resp = Net::HTTP.get_response URI.parse("#{cluster_url}/test_chef_cookbook/_search?q=Test") 53 | total_hits = JSON.parse(resp.read_body)['hits']['total'] 54 | 55 | assert total_hits == 5 56 | end 57 | 58 | end 59 | 60 | end 61 | -------------------------------------------------------------------------------- /elasticsearch/libraries/install_plugin.rb: -------------------------------------------------------------------------------- 1 | module Extensions 2 | 3 | # Install an Elasticsearch plugin 4 | # 5 | # In the simplest form, just pass a plugin name in the GitHub / format: 6 | # 7 | # install_plugin 'karmi/elasticsearch-paramedic' 8 | # 9 | # You may also optionally pass a 
version: 10 | # 11 | # install_plugin 'elasticsearch/elasticsearch-mapper-attachments', 'version' => '1.6.0' 12 | # 13 | # ... as well as the URL: 14 | # 15 | # install_plugin 'hunspell', 'url' => 'https://github.com/downloads/.../elasticsearch-analysis-hunspell-1.1.1.zip' 16 | # 17 | # The "elasticsearch::plugins" recipe will install all plugins listed in 18 | # the role/node attributes or in the data bag (`node.elasticsearch.plugins`). 19 | # 20 | # Example: 21 | # 22 | # { elasticsearch: { 23 | # plugins: { 24 | # 'karmi/elasticsearch-paramedic' => {}, 25 | # 'lukas-vlcek/bigdesk' => { 'version' => '1.0.0' }, 26 | # 'hunspell' => { 'url' => 'https://github.com/downloads/...' } 27 | # } 28 | # } 29 | # } 30 | # 31 | # See for more info. 32 | # 33 | def install_plugin name, params={} 34 | 35 | ruby_block "Install plugin: #{name}" do 36 | block do 37 | version = params['version'] ? "/#{params['version']}" : nil 38 | url = params['url'] ? " -url #{params['url']}" : nil 39 | 40 | command = "/usr/local/bin/plugin -install #{name}#{version}#{url}" 41 | Chef::Log.debug command 42 | 43 | system command 44 | 45 | # Ensure proper permissions 46 | system "chown -R #{node.elasticsearch[:user]}:#{node.elasticsearch[:user]} #{node.elasticsearch[:dir]}/elasticsearch-#{node.elasticsearch[:version]}/plugins/" 47 | end 48 | 49 | notifies :restart, 'service[elasticsearch]' 50 | 51 | not_if do 52 | Dir.entries("#{node.elasticsearch[:dir]}/elasticsearch-#{node.elasticsearch[:version]}/plugins/").any? do |plugin| 53 | next if plugin =~ /^\./ 54 | name.include? plugin 55 | end rescue false 56 | end 57 | 58 | end 59 | 60 | end 61 | 62 | end 63 | -------------------------------------------------------------------------------- /elasticsearch/tests/installation_test.rb: -------------------------------------------------------------------------------- 1 | describe_recipe 'elasticsearch::default' do 2 | 3 | include MiniTest::Chef::Assertions 4 | include MiniTest::Chef::Context 5 | include MiniTest::Chef::Resources 6 | 7 | describe "Installation" do 8 | 9 | it "installs libraries to versioned directory" do 10 | version = node[:elasticsearch][:version] 11 | 12 | directory("/usr/local/elasticsearch-#{node[:elasticsearch][:version]}"). 13 | must_exist. 14 | with(:owner, 'elasticsearch') 15 | end 16 | 17 | it "installs elasticsearch jar" do 18 | version = node[:elasticsearch][:version] 19 | 20 | file("/usr/local/elasticsearch-#{version}/lib/elasticsearch-#{version}.jar"). 21 | must_exist. 22 | with(:owner, 'elasticsearch') 23 | end if Chef::VERSION > '10.14' 24 | 25 | it "has a link to versioned directory" do 26 | version = node[:elasticsearch][:version] 27 | 28 | link("/usr/local/elasticsearch"). 29 | must_exist. 30 | with(:link_type, :symbolic). 31 | and(:to, "/usr/local/elasticsearch-#{version}") 32 | end 33 | 34 | it "creates configuration files" do 35 | file("/usr/local/etc/elasticsearch/elasticsearch.yml"). 36 | must_exist 37 | 38 | file("/usr/local/etc/elasticsearch/elasticsearch-env.sh"). 39 | must_exist. 40 | must_include("ES_HOME='/usr/local/elasticsearch'") 41 | end 42 | 43 | it "creates the configuration file with proper content" do 44 | file("/usr/local/etc/elasticsearch/elasticsearch.yml"). 45 | must_include("cluster.name: elasticsearch_vagrant"). 46 | must_include("path.data: /usr/local/var/data/elasticsearch/disk1"). 47 | must_include("bootstrap.mlockall: false"). 48 | must_include("index.search.slowlog.threshold.query.trace: 1ms"). 49 | must_include("discovery.zen.ping.timeout: 9s"). 
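        # (the expected values are presumably seeded through the test node's
        # attributes, e.g. in the cookbook's Vagrantfile)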
50 | must_include("threadpool.index.size: 2") 51 | 52 | if node.name == 'precise64' 53 | file("/usr/local/etc/elasticsearch/elasticsearch.yml"). 54 | must_include("node.name: precise64") 55 | end 56 | end 57 | 58 | it "creates logging file" do 59 | file("/usr/local/etc/elasticsearch/logging.yml"). 60 | must_exist. 61 | must_include("logger.action: DEBUG"). 62 | must_include("logger.discovery: TRACE") 63 | end 64 | 65 | end 66 | 67 | end 68 | -------------------------------------------------------------------------------- /nginx/templates/default/nginx.init.erb: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | ### BEGIN INIT INFO 4 | # Provides: nginx 5 | # Required-Start: $local_fs $remote_fs $network $syslog 6 | # Required-Stop: $local_fs $remote_fs $network $syslog 7 | # Default-Start: 2 3 4 5 8 | # Default-Stop: 0 1 6 9 | # Short-Description: starts the nginx web server 10 | # Description: starts nginx using start-stop-daemon 11 | ### END INIT INFO 12 | 13 | PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin 14 | DAEMON=<%= node.nginx.binary %> 15 | NAME=nginx 16 | DESC=nginx 17 | 18 | # Include nginx defaults if available 19 | if [ -f /etc/default/nginx ]; then 20 | . /etc/default/nginx 21 | fi 22 | 23 | test -x $DAEMON || exit 0 24 | 25 | set -e 26 | 27 | . /lib/lsb/init-functions 28 | 29 | test_nginx_config() { 30 | if $DAEMON -t $DAEMON_OPTS >/dev/null 2>&1; then 31 | return 0 32 | else 33 | $DAEMON -t $DAEMON_OPTS 34 | return $? 35 | fi 36 | } 37 | 38 | case "$1" in 39 | start) 40 | echo -n "Starting $DESC: " 41 | test_nginx_config 42 | # Check if the ULIMIT is set in /etc/default/nginx 43 | if [ -n "$ULIMIT" ]; then 44 | # Set the ulimits 45 | ulimit $ULIMIT 46 | fi 47 | start-stop-daemon --start --quiet --pidfile <%= node.nginx.pid %> \ 48 | --exec $DAEMON -- $DAEMON_OPTS || true 49 | echo "$NAME." 50 | ;; 51 | 52 | stop) 53 | echo -n "Stopping $DESC: " 54 | start-stop-daemon --stop --quiet --pidfile <%= node.nginx.pid %> \ 55 | --exec $DAEMON || true 56 | echo "$NAME." 57 | ;; 58 | 59 | restart|force-reload) 60 | echo -n "Restarting $DESC: " 61 | start-stop-daemon --stop --quiet --pidfile \ 62 | <%= node.nginx.pid %> --exec $DAEMON || true 63 | sleep 1 64 | test_nginx_config 65 | start-stop-daemon --start --quiet --pidfile \ 66 | <%= node.nginx.pid %> --exec $DAEMON -- $DAEMON_OPTS || true 67 | echo "$NAME." 68 | ;; 69 | 70 | reload) 71 | echo -n "Reloading $DESC configuration: " 72 | test_nginx_config 73 | start-stop-daemon --stop --signal HUP --quiet --pidfile <%= node.nginx.pid %> \ 74 | --exec $DAEMON || true 75 | echo "$NAME." 76 | ;; 77 | 78 | configtest|testconfig) 79 | echo -n "Testing $DESC configuration: " 80 | if test_nginx_config; then 81 | echo "$NAME." 82 | else 83 | exit $? 84 | fi 85 | ;; 86 | 87 | status) 88 | status_of_proc -p <%= node.nginx.pid %> "$DAEMON" nginx && exit 0 || exit $? 89 | ;; 90 | *) 91 | echo "Usage: $NAME {start|stop|restart|reload|force-reload|status|configtest}" >&2 92 | exit 1 93 | ;; 94 | esac 95 | 96 | exit 0 97 | -------------------------------------------------------------------------------- /ark/resources/default.rb: -------------------------------------------------------------------------------- 1 | # 2 | # Cookbook Name:: ark 3 | # Resource:: Ark 4 | # 5 | # Author:: Bryan W. Berry 6 | # Copyright 2012, Bryan W. Berry 7 | # 8 | # Licensed under the Apache License, Version 2.0 (the "License"); 9 | # you may not use this file except in compliance with the License. 
10 | # You may obtain a copy of the License at 11 | # 12 | # http://www.apache.org/licenses/LICENSE-2.0 13 | # 14 | # Unless required by applicable law or agreed to in writing, software 15 | # distributed under the License is distributed on an "AS IS" BASIS, 16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 17 | # See the License for the specific language governing permissions and 18 | # limitations under the License. 19 | # 20 | 21 | def initialize(name, run_context=nil) 22 | super 23 | @resource_name = :ark 24 | @allowed_actions.push(:install, :dump, :cherry_pick, :put, :install_with_make, :configure, :setup_py_build, :setup_py_install, :setup_py) 25 | @action = :install 26 | @provider = Chef::Provider::Ark 27 | end 28 | 29 | attr_accessor :path, :release_file, :prefix_bin, :prefix_root, :home_dir, :extension, :version 30 | 31 | attribute :owner, :kind_of => String, :default => 'root' 32 | attribute :group, :kind_of => [String, Fixnum], :default => 0 33 | attribute :url, :kind_of => String, :required => true 34 | attribute :path, :kind_of => String, :default => nil 35 | attribute :full_path, :kind_of => String, :default => nil 36 | attribute :append_env_path, :kind_of => [TrueClass, FalseClass], :default => false 37 | attribute :checksum, :regex => /^[a-zA-Z0-9]{64}$/, :default => nil 38 | attribute :has_binaries, :kind_of => Array, :default => [] 39 | attribute :creates, :kind_of => String, :default => nil 40 | attribute :release_file, :kind_of => String, :default => '' 41 | attribute :strip_leading_dir, :kind_of => [TrueClass, FalseClass], :default => true 42 | attribute :mode, :kind_of => Fixnum, :default => 0755 43 | attribute :prefix_root, :kind_of => String, :default => nil 44 | attribute :prefix_home, :kind_of => String, :default => nil 45 | attribute :prefix_bin, :kind_of => String, :default => nil 46 | attribute :version, :kind_of => String, :default => nil 47 | attribute :home_dir, :kind_of => String, :default => nil 48 | attribute :environment, :kind_of => Hash, :default => {} 49 | attribute :autoconf_opts, :kind_of => Array, :default => [] 50 | attribute :make_opts, :kind_of => Array, :default => [] 51 | attribute :home_dir, :kind_of => String, :default => nil 52 | attribute :autoconf_opts, :kind_of => Array, :default => [] 53 | attribute :extension, :kind_of => String 54 | 55 | -------------------------------------------------------------------------------- /redis/attributes/redis.rb: -------------------------------------------------------------------------------- 1 | default[:redis][:version] = "2.6.16" 2 | default[:redis][:checksum] = "81490918dcf82d124b36e48b0a9911bfba3f13abba04d8c89820324eff7df03a" 3 | default[:redis][:dir] = "redis-#{redis.version}" 4 | default[:redis][:source] = "http://download.redis.io/releases/#{redis.dir}.tar.gz" 5 | default[:redis][:srcdir] = "/usr/local/src" 6 | default[:redis][:configdir] = "/etc/redis" 7 | 8 | default[:redis][:port] = 6379 9 | default[:redis][:datadir] = "/var/db/redis/#{redis.port}" 10 | default[:redis][:config] = "#{redis.configdir}/#{redis.port}.conf" 11 | default[:redis][:logfile] = "/var/log/redis_#{redis.port}.log" 12 | default[:redis][:pidfile] = "/var/run/redis_#{redis.port}.pid" 13 | default[:redis][:init] = "upstart" 14 | default[:redis][:daemonize] = "no" 15 | 16 | default[:redis][:timeout] = 300 17 | default[:redis][:databases] = 16 18 | default[:redis][:maxmemory] = 0 19 | default[:redis][:snapshots] = { 20 | 900 => 1, 21 | 300 => 10, 22 | 60 => 10000 23 | } 24 | 25 | 
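# Each snapshot pair above presumably renders as a `save <seconds> <changes>`
# directive in redis.conf (e.g. `save 900 1` snapshots after 1 change within
# 900 seconds); the resulting dump is written to the RDB file named below.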
default[:redis][:dbfilename] = "dump.rdb" 26 | default[:redis][:stop_writes_on_bgsave_error] = "yes" 27 | default[:redis][:rdbcompression] = "yes" 28 | default[:redis][:rdbchecksum] = "yes" 29 | default[:redis][:bind] = false 30 | default[:redis][:unixsocket] = false 31 | default[:redis][:loglevel] = "notice" 32 | 33 | default[:redis][:syslog_enabled] = false 34 | default[:redis][:syslog_ident] = "redis" 35 | default[:redis][:syslog_facility] = "local0" 36 | 37 | default[:redis][:slaveof] = false 38 | default[:redis][:password] = false 39 | default[:redis][:slave_serve_stale_data] = "yes" 40 | default[:redis][:slave_read_only] = "yes" 41 | 42 | default[:redis][:maxmemory_policy] = "volatile-lru" 43 | default[:redis][:maxmemory_samples] = 3 44 | 45 | default[:redis][:appendonly] = "no" 46 | default[:redis][:appendfilename] = "appendonly.aof" 47 | default[:redis][:appendfsync] = "everysec" 48 | default[:redis][:no_appendfsync_on_rewrite] = "no" 49 | 50 | default[:redis][:command_renames] = {} 51 | 52 | default[:redis][:hash_max_ziplist_entries] = 512 53 | default[:redis][:hash_max_ziplist_value] = 64 54 | default[:redis][:list_max_ziplist_entries] = 512 55 | default[:redis][:list_max_ziplist_value] = 64 56 | default[:redis][:set_max_intset_entries] = 512 57 | default[:redis][:zset_max_ziplist_entries] = 128 58 | default[:redis][:zset_max_ziplist_value] = 64 59 | default[:redis][:activerehashing] = "yes" 60 | -------------------------------------------------------------------------------- /nginx/templates/default/nginx.conf.erb: -------------------------------------------------------------------------------- 1 | user <%= node.nginx.user %>; 2 | worker_processes <%= node.nginx.worker_processes %>; 3 | worker_rlimit_nofile <%= node.nginx.worker_rlimit_nofile %>; 4 | pid <%= node.nginx.pid %>; 5 | 6 | events { 7 | use epoll; 8 | worker_connections <%= node.nginx.worker_connections %>; 9 | multi_accept <%= node.nginx.multi_accept %>; 10 | } 11 | 12 | http { 13 | ## 14 | # Basic Settings 15 | ## 16 | sendfile on; 17 | tcp_nopush on; 18 | tcp_nodelay on; 19 | 20 | include <%= node.nginx.dir %>/mime.types; 21 | default_type application/octet-stream; 22 | 23 | ## 24 | # Timeouts 25 | ## 26 | client_body_timeout <%= node.nginx.client_body_timeout %>; 27 | client_header_timeout <%= node.nginx.client_header_timeout %>; 28 | keepalive_timeout <%= node.nginx.keepalive_timeout %>; 29 | send_timeout <%= node.nginx.send_timeout %>; 30 | 31 | types_hash_max_size <%= node.nginx.types_hash_max_size %>; 32 | server_tokens <%= node.nginx.server_tokens %>; 33 | client_max_body_size <%= node.nginx.client_max_body_size %>; 34 | 35 | server_names_hash_bucket_size <%= node.nginx.server_names_hash_bucket_size %>; 36 | server_name_in_redirect <%= node.nginx.server_name_in_redirect %>; 37 | 38 | <% if node.nginx.passenger.enabled %> 39 | ## 40 | # Passenger 41 | ## 42 | passenger_root <%= node.nginx.passenger.root %>; 43 | passenger_max_pool_size <%= node.nginx.passenger.max_pool_size %>; 44 | passenger_show_version_in_header off; 45 | passenger_friendly_error_pages off; 46 | passenger_log_level <%= node.nginx.passenger.log_level %>; 47 | passenger_spawn_method <%= node.nginx.passenger.spawn_method %>; 48 | passenger_pool_idle_time <%= node.nginx.passenger.pool_idle_time %>; 49 | passenger_max_instances_per_app <%= node.nginx.passenger.max_instances_per_app %>; 50 | <% end %> 51 | 52 | ## 53 | # Logging Settings 54 | ## 55 | access_log <%= node.nginx.log_dir %>/access.log; 56 | error_log <%= node.nginx.log_dir 
%>/error.log; 57 | 58 | ## 59 | # Gzip Settings 60 | ## 61 | gzip <%= node.nginx.gzip %>; 62 | gzip_disable <%= node.nginx.gzip_disable %>; 63 | 64 | gzip_min_length <%= node.nginx.gzip_min_length %>; 65 | gzip_vary <%= node.nginx.gzip_vary %>; 66 | gzip_proxied <%= node.nginx.gzip_proxied %>; 67 | gzip_comp_level <%= node.nginx.gzip_comp_level %>; 68 | gzip_buffers <%= node.nginx.gzip_buffers %>; 69 | gzip_http_version <%= node.nginx.gzip_http_version %>; 70 | gzip_types <%= node.nginx.gzip_types.join(" ") %>; 71 | 72 | ## 73 | # Virtual Host Configs 74 | ## 75 | include <%= node.nginx.dir %>/default-host.conf; 76 | include <%= node.nginx.dir %>/sites/*; 77 | } 78 | -------------------------------------------------------------------------------- /nginx/attributes/default.rb: -------------------------------------------------------------------------------- 1 | default[:nginx][:version] = "1.4.7" 2 | default[:nginx][:checksum] = "23b8ff4a76817090678f91b0efbfcef59a93492f6612dc8370c44c1f1ce1b626" 3 | 4 | default[:nginx][:dir] = "/etc/nginx" 5 | default[:nginx][:log_dir] = "/var/log/nginx" 6 | default[:nginx][:install_path] = "/opt/nginx-#{node.nginx.version}" 7 | default[:nginx][:binary] = "#{node.nginx.install_path}/sbin/nginx" 8 | 9 | default[:nginx][:user] = "www-data" 10 | default[:nginx][:worker_processes] = cpu[:total] * 3 11 | default[:nginx][:worker_rlimit_nofile] = 1024 12 | default[:nginx][:pid] = "/var/run/nginx.pid" 13 | 14 | default[:nginx][:worker_connections] = 1024 15 | default[:nginx][:multi_accept] = "off" 16 | 17 | default[:nginx][:client_body_timeout] = 60 18 | default[:nginx][:client_header_timeout] = 60 19 | default[:nginx][:keepalive_timeout] = 75 20 | default[:nginx][:send_timeout] = 60 21 | default[:nginx][:types_hash_max_size] = 2048 22 | default[:nginx][:server_tokens] = "off" 23 | default[:nginx][:server_names_hash_bucket_size] = 64 24 | default[:nginx][:server_name_in_redirect] = "off" 25 | default[:nginx][:client_max_body_size] = "100M" 26 | 27 | default[:nginx][:gzip] = "on" 28 | default[:nginx][:gzip_disable] = "msie6" 29 | 30 | default[:nginx][:gzip_vary] = "on" 31 | default[:nginx][:gzip_proxied] = "any" 32 | default[:nginx][:gzip_comp_level] = 2 33 | default[:nginx][:gzip_min_length] = "1024" 34 | default[:nginx][:gzip_buffers] = "16 8k" 35 | default[:nginx][:gzip_http_version] = "1.1" 36 | default[:nginx][:gzip_types] = [ 37 | "text/plain", 38 | "text/css", 39 | "application/json", 40 | "application/x-javascript", 41 | "text/xml", 42 | "application/xml", 43 | "application/xml+rss", 44 | "text/javascript" 45 | ] 46 | 47 | # Optional Phusion Passenger module 48 | default[:nginx][:passenger][:enabled] = true 49 | default[:nginx][:passenger][:git_revision] = "3cefff4db65fac3d1d55ab0f6d231ff0567fee02" 50 | default[:nginx][:passenger][:root] = "/usr/local/src/passenger-#{nginx.passenger.git_revision}" 51 | default[:nginx][:passenger][:nginx_module_path] = File.join(nginx.passenger.root, 'ext/nginx') 52 | default[:nginx][:passenger][:log_level] = 0 53 | default[:nginx][:passenger][:spawn_method] = "smart" 54 | default[:nginx][:passenger][:pool_idle_time] = 300 55 | default[:nginx][:passenger][:max_instances_per_app] = 0 56 | 57 | # Set a max process count - assumes that each app process takes up 150MB real memory 58 | default[:nginx][:passenger][:max_pool_size] = node.memory.total.to_i / 1024 / 150 59 | 60 | # Custom memory management of Passenger instances 61 | default[:nginx][:passenger][:memory_management_enabled] = false 62 | 
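# When memory management is enabled, the cron job defined in nginx/recipes/default.rb
# kills any Passenger instance whose reported memory exceeds the per-instance
# limit below (the value appears to be interpreted as MB).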
default[:nginx][:passenger][:max_memory_per_instance] = 400 63 | -------------------------------------------------------------------------------- /ark/files/default/tests/minitest/test_test.rb: -------------------------------------------------------------------------------- 1 | require 'minitest/spec' 2 | 3 | describe_recipe 'ark::test' do 4 | 5 | # It's often convenient to load these includes in a separate 6 | # helper along with 7 | # your own helper methods, but here we just include them directly: 8 | include MiniTest::Chef::Assertions 9 | include MiniTest::Chef::Context 10 | include MiniTest::Chef::Resources 11 | 12 | it "installed the unzip package" do 13 | package("unzip").must_be_installed 14 | end 15 | 16 | if RUBY_PLATFORM =~ /freebsd/ 17 | it "installs the gnu tar package on freebsc" do 18 | package("gtar").must_be_installed 19 | end 20 | end 21 | 22 | it "puts an ark in the desired directory w/out symlinks" do 23 | directory("/usr/local/test_put").must_exist 24 | end 25 | 26 | it "dumps the correct files into place with correct owner and group" do 27 | file("/usr/local/foo_dump/foo1.txt").must_have(:owner, "foobarbaz").and(:group, "foobarbaz") 28 | end 29 | 30 | it "cherrypicks the mysql connector and set the correct owner and group" do 31 | file("/usr/local/foo_cherry_pick/foo_sub/foo1.txt").must_have(:owner, "foobarbaz").and(:group, "foobarbaz") 32 | end 33 | 34 | it "creates directory and symlink properly for the full ark install" do 35 | directory("/usr/local/foo-2").must_have(:owner, "foobarbaz").and(:group, "foobarbaz") 36 | link("/usr/local/foo").must_exist.with(:link_type, :symbolic).and(:to, "/usr/local/foo-2") 37 | end 38 | 39 | it "symlinks multiple binary commands" do 40 | link("/usr/local/bin/do_foo").must_exist.with(:link_type, :symbolic).and(:to, "/usr/local/foo-2/bin/do_foo") 41 | link("/usr/local/bin/do_more_foo").must_exist.with(:link_type, :symbolic).and(:to, "/usr/local/foo-2/bin/do_more_foo") 42 | end 43 | 44 | it "appends to the environment PATH" do 45 | unless RUBY_PLATFORM =~ /freebsd/ 46 | file("/etc/profile.d/foo_append_env.sh").must_include '/usr/local/foo_append_env-7.0.26/bin' 47 | 48 | bin_path_present = !ENV['PATH'].scan( '/usr/local/foo_append_env-7.0.26/bin').empty? 
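      # the provider is expected to have appended the bin directory to the PATH
      # of the converging process itself, not only to /etc/profile.d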
49 | assert bin_path_present 50 | end 51 | end 52 | 53 | it "doesn't strip top-level directory if specified" do 54 | directory( "/usr/local/foo_dont_strip/foo_sub").must_exist 55 | end 56 | 57 | it "does strip for zip file" do 58 | file("/usr/local/foo_zip_strip/foo1.txt").must_exist 59 | end 60 | 61 | 62 | it "successfully compiles haproxy" do 63 | file("/usr/local/haproxy-1.5/haproxy").must_exist 64 | end 65 | 66 | unless RUBY_PLATFORM =~ /freebsd/ 67 | it "installs haproxy binary" do 68 | file("/usr/local/sbin/haproxy").must_exist 69 | directory("/usr/local/doc/haproxy").must_exist 70 | end 71 | end 72 | 73 | it "creates an alternate prefix_bin" do 74 | link("/opt/bin/do_foo").must_exist.with(:link_type, :symbolic).and(:to, "/opt/foo_alt_bin-3/bin/do_foo") 75 | end 76 | 77 | it "properly unpacks .tbz and .tgz archives" do 78 | file("/usr/local/foo_tbz/foo1.txt").must_exist 79 | file("/usr/local/foo_tgz/foo1.txt").must_exist 80 | end 81 | 82 | it "sends notification when resource updated" do 83 | file("/tmp/foobarbaz/notification_successful.txt").must_exist 84 | end 85 | 86 | it "uses autogen.sh to generate configure script" do 87 | file("/usr/local/test_autogen-1/configure").must_exist 88 | end 89 | 90 | end 91 | -------------------------------------------------------------------------------- /nginx/recipes/default.rb: -------------------------------------------------------------------------------- 1 | configure_flags = [ 2 | "--prefix=#{node.nginx.install_path}", 3 | "--conf-path=#{node.nginx.dir}/nginx.conf", 4 | "--with-http_ssl_module", 5 | "--with-http_gzip_static_module", 6 | ] 7 | 8 | if node.nginx.passenger.enabled 9 | configure_flags << "--add-module=#{node.nginx.passenger.nginx_module_path}" 10 | 11 | git node.nginx.passenger.root do 12 | repository "git://github.com/FooBarWidget/passenger.git" 13 | reference node.nginx.passenger.git_revision 14 | action :sync 15 | end 16 | 17 | if node.nginx.passenger.memory_management_enabled 18 | cron "passenger_memory_management" do 19 | command %(for i in `#{node.nginx.passenger.root}/bin/passenger-memory-stats | grep "Passenger RackApp" | awk '{if ($4>#{node.nginx.passenger.max_memory_per_instance}) print $1}'`; do kill -9 $i; done &> /dev/null) 20 | path "/usr/local/rbenv/shims:/usr/local/rbenv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games" 21 | only_if { File.exist?("#{node.nginx.passenger.root}/bin/passenger-memory-stats") } 22 | end 23 | end 24 | end 25 | 26 | remote_file "/usr/local/src/nginx-#{node.nginx.version}.tar.gz" do 27 | source "http://nginx.org/download/nginx-#{node.nginx.version}.tar.gz" 28 | checksum node.nginx.checksum 29 | action :create_if_missing 30 | end 31 | 32 | bash "compile_nginx_source" do 33 | cwd "/usr/local/src" 34 | code <<-EOH 35 | tar zxf nginx-#{node.nginx.version}.tar.gz 36 | cd nginx-#{node.nginx.version} && ./configure #{configure_flags.join(" ")} 37 | make && make install 38 | EOH 39 | not_if do 40 | if node.nginx.passenger.enabled 41 | File.exists?("#{node.nginx.passenger.root}/libout") && File.exists?("#{node.nginx.install_path}/sbin/nginx") 42 | else 43 | File.exists?("#{node.nginx.install_path}/sbin/nginx") 44 | end 45 | end 46 | notifies :restart, "service[nginx]" 47 | end 48 | 49 | directory node.nginx.log_dir do 50 | mode 0755 51 | owner node.nginx.user 52 | end 53 | 54 | directory node.nginx.dir do 55 | owner "root" 56 | group "root" 57 | mode "0755" 58 | end 59 | 60 | directory "/mnt/default-host/public" do 61 | owner node.deploy_user 62 | group node.deploy_user 
63 | mode "0755" 64 | recursive true 65 | end 66 | 67 | template "/mnt/default-host/public/index.html" do 68 | source "default-host-page.html.erb" 69 | owner node.deploy_user 70 | group node.deploy_user 71 | mode "0755" 72 | end 73 | 74 | template "/etc/init.d/nginx" do 75 | source "nginx.init.erb" 76 | owner "root" 77 | group "root" 78 | mode "0755" 79 | end 80 | 81 | directory "#{node.nginx.dir}/sites" do 82 | owner node.deploy_user 83 | group node.deploy_user 84 | mode "0755" 85 | end 86 | 87 | template "default-host.conf" do 88 | path "#{node.nginx.dir}/default-host.conf" 89 | source "default-host.erb" 90 | owner node.deploy_user 91 | group node.deploy_user 92 | mode "0644" 93 | notifies :reload, "service[nginx]", :delayed 94 | end 95 | 96 | template "/etc/rsyslog.d/nginx-error.conf" do 97 | source "nginx-error.rsyslog.conf.erb" 98 | owner "root" 99 | group "root" 100 | mode "0644" 101 | notifies :restart, "service[rsyslog]", :delayed 102 | end 103 | 104 | template "nginx.conf" do 105 | path "#{node.nginx.dir}/nginx.conf" 106 | source "nginx.conf.erb" 107 | owner "root" 108 | group "root" 109 | mode "0644" 110 | notifies :reload, "service[nginx]", :immediately 111 | end 112 | 113 | service "nginx" do 114 | supports :status => true, :restart => true, :reload => true 115 | action :enable 116 | end 117 | -------------------------------------------------------------------------------- /elasticsearch/attributes/data.rb: -------------------------------------------------------------------------------- 1 | [Chef::Recipe, Chef::Resource].each { |l| l.send :include, ::Extensions } 2 | 3 | # Loads configuration settings from data bag 'elasticsearch/data' or from node attributes. 4 | # 5 | # In a data bag, you can define multiple devices to be formatted and/or mounted: 6 | # 7 | # { 8 | # "elasticsearch": { 9 | # "data" : { 10 | # "devices" : { 11 | # "/dev/sdb" : { 12 | # "file_system" : "ext3", 13 | # "mount_options" : "rw,user", 14 | # "mount_path" : "/usr/local/var/data/elasticsearch/disk1", 15 | # "format_command" : "mkfs.ext3", 16 | # "fs_check_command" : "dumpe2fs", 17 | # } 18 | # } 19 | # } 20 | # } 21 | # } 22 | # 23 | # To set the configuration with nodes attributes (eg. for Chef Solo), see the Vagrantfile. 24 | # See for more info. 25 | # 26 | # You have to add the `mount_path` of each defined device to `default.elasticsearch[:path][:data]`, 27 | # either as a comma-delimited string or as a Ruby/JSON array, so it is used in the Elasticsearch 28 | # configuration. 29 | # 30 | # For EC2, you can define additional parameters for creating and attaching EBS volumes: 31 | # 32 | # { 33 | # "elasticsearch": { 34 | # "data" : { 35 | # "devices" : { 36 | # "/dev/sda2" : { 37 | # "file_system" : "ext3", 38 | # "mount_options" : "rw,user", 39 | # "mount_path" : "/usr/local/var/data/elasticsearch/disk1", 40 | # "format_command" : "mkfs.ext3", 41 | # "fs_check_command" : "dumpe2fs", 42 | # "ebs" : { 43 | # "region" : "us-east-1", // Optional: instance region is used by default 44 | # "size" : 250, // In GB 45 | # "delete_on_termination" : true, 46 | # "type" : "io1", 47 | # "iops" : 2000 48 | # } 49 | # } 50 | # } 51 | # } 52 | # } 53 | # } 54 | # 55 | # 56 | # Some kernels attach EBS devices to `/dev/xvd*` instead of `/dev/sd*`. You can set a specific name 57 | # with the `ebs.device` property: 58 | # 59 | # { 60 | # "elasticsearch": { 61 | # "data" : { 62 | # "devices" : { 63 | # "/dev/xvda2" : { 64 | # # ... 65 | # "ebs" : { 66 | # # ... 
67 | # "device" : "/dev/sda2" 68 | # } 69 | # } 70 | # } 71 | # } 72 | # } 73 | # } 74 | # 75 | # 76 | # When you define a `snapshot_id` property for an EBS device, it will be created from that snapshot, 77 | # having all the data available in the snapshot: 78 | # 79 | # { 80 | # "elasticsearch": { 81 | # "data" : { 82 | # "devices" : { 83 | # "/dev/sda2" : { 84 | # # ... 85 | # "ebs" : { 86 | # # ... 87 | # "snapshot_id" : "snap-123abc4d" 88 | # } 89 | # } 90 | # } 91 | # } 92 | # } 93 | # } 94 | # 95 | # Note, that you have to verify the path to the device file: in some environments, these will 96 | # have the format of `/dev/sd*`, on others `/dev/xvd*`, etc. 97 | # 98 | data = Chef::DataBagItem.load('elasticsearch', 'data')[node.chef_environment] rescue {} 99 | 100 | default.elasticsearch[:data][:devices] = data['devices'] || {} 101 | 102 | # Perform package update (https://github.com/opscode-cookbooks/build-essential#usage) 103 | # 104 | node.default.build_essential.compiletime = true if node.recipes.any? { |r| r =~ /elasticsearch::ebs|build-essential/ } 105 | -------------------------------------------------------------------------------- /elasticsearch/libraries/create_ebs.rb: -------------------------------------------------------------------------------- 1 | module Extensions 2 | 3 | # Creates an EBS volume based on passed parameters and attaches it to the instance 4 | # via the [Fog](http://rubydoc.info/gems/fog/Fog/Compute/AWS/Volume) library. 5 | # 6 | # The credentials for accessing AWS API are loaded from `node.elasticsearch.cloud`. 7 | # Instead of using AWS access tokens, you can create the instance with a IAM role. 8 | # 9 | # You need to provide volume properties such as _size_ in the `params[:ebs]` hash. 10 | # 11 | # If `params[:snapshot_id]` is passed, the volume will be created from 12 | # the corresponding snapshot. 13 | # 14 | def create_ebs device, params={} 15 | 16 | ruby_block "Create EBS volume on #{device} (size: #{params[:ebs][:size]}GB)" do 17 | 18 | block do 19 | require 'fog' 20 | require 'open-uri' 21 | 22 | region = params[:region] || node.elasticsearch[:cloud][:aws][:region] 23 | instance_id = open('http://169.254.169.254/latest/meta-data/instance-id'){|f| f.gets} 24 | raise "[!] 
Cannot get instance id from AWS meta-data API" unless instance_id 25 | 26 | Chef::Log.debug("Region: #{region}, instance ID: #{instance_id}") 27 | 28 | fog_options = { :provider => 'AWS', :region => region } 29 | if (access_key = node.elasticsearch[:cloud][:aws][:access_key]) && 30 | (secret_key = node.elasticsearch[:cloud][:aws][:secret_key]) 31 | fog_options.merge!(:aws_access_key_id => access_key, :aws_secret_access_key => secret_key) 32 | else # Lack of credentials implies a IAM role will provide keys 33 | fog_options.merge!(:use_iam_profile => true) 34 | end 35 | aws = Fog::Compute.new(fog_options) 36 | 37 | server = aws.servers.get instance_id 38 | 39 | # Create EBS volume if the device is free 40 | ebs_device = params[:ebs][:device] || device 41 | unless server.volumes.map(&:device).include?(ebs_device) 42 | options = { :device => ebs_device, 43 | :size => params[:ebs][:size], 44 | :delete_on_termination => params[:ebs][:delete_on_termination], 45 | :availability_zone => server.availability_zone, 46 | :server => server } 47 | 48 | options[:type] = params[:ebs][:type] if params[:ebs][:type] 49 | options[:iops] = params[:ebs][:iops] if params[:ebs][:iops] and params[:ebs][:type] == "io1" 50 | 51 | if params[:ebs][:snapshot_id] 52 | if snapshot = aws.snapshots.get(params[:ebs][:snapshot_id]) 53 | Chef::Log.info "Creating EBS from snapshot: #{snapshot.id} (" + 54 | "Tags: #{snapshot.tags.inspect}, " + 55 | "Description: #{snapshot.description})" 56 | options[:snapshot_id] = snapshot.id 57 | else 58 | __message = "[!] Cannot find snapshot: #{params[:ebs][:snapshot_id]}" 59 | Chef::Log.fatal __message 60 | raise __message 61 | end 62 | end 63 | 64 | volume = aws.volumes.new options 65 | volume.save 66 | 67 | # Create tags 68 | aws.tags.new(:key => "Name", :value => node.name, :resource_id => volume.id, :resource_type => "volume").save 69 | aws.tags.new(:key => "ClusterName", :value => node.elasticsearch[:cluster][:name], :resource_id => volume.id, :resource_type => "volume").save 70 | 71 | # Checking if block device is attached 72 | Chef::Log.info("Attaching volume: #{volume.id} ") 73 | loop do 74 | `ls #{device} > /dev/null 2>&1` 75 | break if $?.success? 76 | print '.' 77 | sleep 1 78 | end 79 | 80 | Chef::Log.debug("Volume #{volume.id} is attached to #{instance_id} on #{device}") 81 | end 82 | 83 | end 84 | 85 | end 86 | 87 | end 88 | 89 | end 90 | -------------------------------------------------------------------------------- /ark/recipes/test.rb: -------------------------------------------------------------------------------- 1 | require 'fileutils' 2 | 3 | # remove file so we can test sending notification on its creation 4 | if ::File.exist? "/tmp/foobarbaz/foo1.txt" 5 | FileUtils.rm_f "/tmp/foobarbaz/foo1.txt" 6 | end 7 | 8 | ruby_block "test_notification" do 9 | block do 10 | if ::File.exist? 
"/tmp/foobarbaz/foo1.txt" 11 | FileUtils.touch "/tmp/foobarbaz/notification_successful.txt" 12 | end 13 | end 14 | action :nothing 15 | end 16 | 17 | 18 | user 'foobarbaz' 19 | 20 | directory "/opt/bin" do 21 | recursive true 22 | end 23 | 24 | ark 'test_put' do 25 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tar.gz' 26 | checksum '5996e676f17457c823d86f1605eaa44ca8a81e70d6a0e5f8e45b51e62e0c52e8' 27 | owner 'foobarbaz' 28 | group 'foobarbaz' 29 | action :put 30 | end 31 | 32 | ark "test_dump" do 33 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.zip' 34 | checksum 'deea3a324115c9ca0f3078362f807250080bf1b27516f7eca9d34aad863a11e0' 35 | path '/usr/local/foo_dump' 36 | creates 'foo1.txt' 37 | action :dump 38 | owner 'foobarbaz' 39 | group 'foobarbaz' 40 | end 41 | 42 | ark 'cherry_pick_test' do 43 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tar.gz' 44 | checksum '5996e676f17457c823d86f1605eaa44ca8a81e70d6a0e5f8e45b51e62e0c52e8' 45 | path '/usr/local/foo_cherry_pick' 46 | owner 'foobarbaz' 47 | group 'foobarbaz' 48 | creates "foo_sub/foo1.txt" 49 | action :cherry_pick 50 | end 51 | 52 | 53 | ark "foo" do 54 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tar.gz' 55 | checksum '5996e676f17457c823d86f1605eaa44ca8a81e70d6a0e5f8e45b51e62e0c52e8' 56 | version '2' 57 | prefix_root "/usr/local" 58 | owner "foobarbaz" 59 | group 'foobarbaz' 60 | has_binaries [ 'bin/do_foo', 'bin/do_more_foo' ] 61 | action :install 62 | end 63 | 64 | ark "foo_append_env" do 65 | version "7.0.26" 66 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tar.gz' 67 | checksum '5996e676f17457c823d86f1605eaa44ca8a81e70d6a0e5f8e45b51e62e0c52e8' 68 | append_env_path true 69 | action :install 70 | end 71 | 72 | ark "foo_dont_strip" do 73 | version "2" 74 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tar.gz' 75 | checksum '5996e676f17457c823d86f1605eaa44ca8a81e70d6a0e5f8e45b51e62e0c52e8' 76 | strip_leading_dir false 77 | action :install 78 | end 79 | 80 | ark "foo_zip_strip" do 81 | version "2" 82 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.zip' 83 | checksum 'deea3a324115c9ca0f3078362f807250080bf1b27516f7eca9d34aad863a11e0' 84 | action :install 85 | end 86 | 87 | 88 | ark "haproxy" do 89 | url "http://haproxy.1wt.eu/download/1.5/src/snapshot/haproxy-ss-20120403.tar.gz" 90 | version "1.5" 91 | checksum 'ba0424bf7d23b3a607ee24bbb855bb0ea347d7ffde0bec0cb12a89623cbaf911' 92 | make_opts [ 'TARGET=linux26' ] 93 | action :install_with_make 94 | end unless platform?("freebsd") 95 | 96 | ark "foo_alt_bin" do 97 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tar.gz' 98 | checksum '5996e676f17457c823d86f1605eaa44ca8a81e70d6a0e5f8e45b51e62e0c52e8' 99 | version '3' 100 | prefix_root "/opt" 101 | prefix_home "/opt" 102 | prefix_bin "/opt/bin" 103 | owner "foobarbaz" 104 | group 'foobarbaz' 105 | has_binaries [ 'bin/do_foo' ] 106 | action :install 107 | end 108 | 109 | ark "foo_tbz" do 110 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tbz' 111 | version '3' 112 | end 113 | 114 | ark "foo_tgz" do 115 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.tgz' 116 | version '3' 117 | end 118 | 119 | ark "test notification" do 120 | url 'https://github.com/bryanwb/chef-ark/raw/master/files/default/foo.zip' 121 | path "/tmp/foobarbaz" 122 | creates "foo1.txt" 123 | action :dump 124 | notifies :create, 
"ruby_block[test_notification]", :immediately 125 | end 126 | 127 | ark "test_autogen" do 128 | url 'https://github.com/zeromq/libzmq/tarball/master' 129 | extension "tar.gz" 130 | action :configure 131 | # autoconf in RHEL < 6 is too old 132 | not_if { platform_family?('rhel') && node['platform_version'].to_f < 6.0 } 133 | end 134 | -------------------------------------------------------------------------------- /elasticsearch/recipes/default.rb: -------------------------------------------------------------------------------- 1 | [Chef::Recipe, Chef::Resource].each { |l| l.send :include, ::Extensions } 2 | 3 | Erubis::Context.send(:include, Extensions::Templates) 4 | 5 | elasticsearch = "elasticsearch-#{node.elasticsearch[:version]}" 6 | 7 | include_recipe "elasticsearch::curl" 8 | include_recipe "ark" 9 | 10 | # Create user and group 11 | # 12 | group node.elasticsearch[:user] do 13 | action :create 14 | end 15 | 16 | user node.elasticsearch[:user] do 17 | comment "ElasticSearch User" 18 | home "#{node.elasticsearch[:dir]}/elasticsearch" 19 | shell "/bin/bash" 20 | gid node.elasticsearch[:user] 21 | supports :manage_home => false 22 | action :create 23 | end 24 | 25 | # FIX: Work around the fact that Chef creates the directory even for `manage_home: false` 26 | bash "remove the elasticsearch user home" do 27 | user 'root' 28 | code "rm -rf #{node.elasticsearch[:dir]}/elasticsearch" 29 | only_if "test -d #{node.elasticsearch[:dir]}/elasticsearch" 30 | end 31 | 32 | # Create ES directories 33 | # 34 | [ node.elasticsearch[:path][:conf], node.elasticsearch[:path][:logs], node.elasticsearch[:pid_path] ].each do |path| 35 | directory path do 36 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0755 37 | recursive true 38 | action :create 39 | end 40 | end 41 | 42 | # Create data path directories 43 | # 44 | data_paths = node.elasticsearch[:path][:data].is_a?(Array) ? 
node.elasticsearch[:path][:data] : node.elasticsearch[:path][:data].split(',') 45 | 46 | data_paths.each do |path| 47 | directory path.strip do 48 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0755 49 | recursive true 50 | action :create 51 | end 52 | end 53 | 54 | # Create service 55 | # 56 | template "/etc/init.d/elasticsearch" do 57 | source "elasticsearch.init.erb" 58 | owner 'root' and mode 0755 59 | end 60 | 61 | service "elasticsearch" do 62 | supports :status => true, :restart => true 63 | action [ :enable ] 64 | end 65 | 66 | # Download, extract, symlink the elasticsearch libraries and binaries 67 | # 68 | ark "elasticsearch" do 69 | url node.elasticsearch[:download_url] 70 | owner node.elasticsearch[:user] 71 | group node.elasticsearch[:user] 72 | version node.elasticsearch[:version] 73 | has_binaries ['bin/elasticsearch', 'bin/plugin'] 74 | checksum node.elasticsearch[:checksum] 75 | 76 | notifies :start, 'service[elasticsearch]' 77 | notifies :restart, 'service[elasticsearch]' 78 | end 79 | 80 | # Increase open file limits 81 | # 82 | bash "enable user limits" do 83 | user 'root' 84 | 85 | code <<-END.gsub(/^ /, '') 86 | echo 'session required pam_limits.so' >> /etc/pam.d/su 87 | END 88 | 89 | not_if { ::File.read("/etc/pam.d/su").match(/^session required pam_limits\.so/) } 90 | end 91 | 92 | bash "increase limits for the elasticsearch user" do 93 | user 'root' 94 | 95 | code <<-END.gsub(/^ /, '') 96 | echo '#{node.elasticsearch.fetch(:user, "elasticsearch")} - nofile #{node.elasticsearch[:limits][:nofile]}' >> /etc/security/limits.conf 97 | echo '#{node.elasticsearch.fetch(:user, "elasticsearch")} - memlock #{node.elasticsearch[:limits][:memlock]}' >> /etc/security/limits.conf 98 | END 99 | 100 | not_if do 101 | file = ::File.read("/etc/security/limits.conf") 102 | file.include?("#{node.elasticsearch.fetch(:user, "elasticsearch")} - nofile #{node.elasticsearch[:limits][:nofile]}") \ 103 | && \ 104 | file.include?("#{node.elasticsearch.fetch(:user, "elasticsearch")} - memlock #{node.elasticsearch[:limits][:memlock]}") 105 | end 106 | end 107 | 108 | # Create file with ES environment variables 109 | # 110 | template "elasticsearch-env.sh" do 111 | path "#{node.elasticsearch[:path][:conf]}/elasticsearch-env.sh" 112 | source "elasticsearch-env.sh.erb" 113 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0755 114 | 115 | notifies :restart, 'service[elasticsearch]' 116 | end 117 | 118 | # Create ES config file 119 | # 120 | template "elasticsearch.yml" do 121 | path "#{node.elasticsearch[:path][:conf]}/elasticsearch.yml" 122 | source "elasticsearch.yml.erb" 123 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0755 124 | 125 | notifies :restart, 'service[elasticsearch]' 126 | end 127 | 128 | # Create ES logging file 129 | # 130 | template "logging.yml" do 131 | path "#{node.elasticsearch[:path][:conf]}/logging.yml" 132 | source "logging.yml.erb" 133 | owner node.elasticsearch[:user] and group node.elasticsearch[:user] and mode 0755 134 | 135 | notifies :restart, 'service[elasticsearch]' 136 | end 137 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/elasticsearch.init.erb: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # 3 | # elasticsearch 4 | # 5 | # chkconfig: - 57 47 6 | # description: elasticsearch 7 | # processname: elasticsearch 8 | # config: <%= 
node[:elasticsearch][:path][:conf] %>/elasticsearch.yml 9 | 10 | # Source networking configuration 11 | if [ -f /etc/sysconfig/network ]; then source /etc/sysconfig/network; fi 12 | 13 | # Exit if networking is not up 14 | [ "$NETWORKING" = "no" ] && exit 15 | 16 | PIDFILE='<%= node.elasticsearch[:pid_file] %>' 17 | ES_INCLUDE='<%= node.elasticsearch[:path][:conf] %>/elasticsearch-env.sh' 18 | CHECK_PID_RUNNING=$(ps ax | grep 'java' | grep -e "es.pidfile=$PIDFILE" | sed 's/^\s*\([0-9]*\)\s.*/\1/') 19 | 20 | start() { 21 | if [ -f $PIDFILE ]; then 22 | # PIDFILE EXISTS -- ES RUNNING? 23 | echo -e "\033[31;1mPID file found in $PIDFILE, elasticsearch already running?\033[0m" 24 | es_pid="$(cat $PIDFILE)" 25 | pid_running="$( ps ax | grep 'java' | grep $es_pid )" 26 | 27 | if [ ! -z "$pid_running" ] ; then 28 | # EXIT IF ES IS ALREADY RUNNING 29 | echo -e "\033[31;1mPID $es_pid still alive, already running...\033[0m" 30 | return 1 31 | fi 32 | fi 33 | 34 | echo -e "\033[1mStarting elasticsearch...\033[0m" 35 | su <%= node[:elasticsearch][:user] %> -c "ES_INCLUDE=$ES_INCLUDE /usr/local/bin/elasticsearch -p $PIDFILE" 36 | 37 | return $? 38 | } 39 | 40 | stop() { 41 | if [[ -f $PIDFILE ]]; then 42 | echo -n -e "\033[1mStopping elasticsearch...\033[0m" 43 | 44 | # REMOVE PIDFILE AND EXIT IF PROCESS NOT RUNNING 45 | if [ ! $CHECK_PID_RUNNING ]; then 46 | echo -e "\033[1mPID file found, but no matching process running?\033[0m" 47 | echo "Removing PID file..." 48 | su <%= node[:elasticsearch][:user] %> -m -c "rm $PIDFILE" 49 | exit 0 50 | fi 51 | 52 | # KILL PROCESS 53 | su <%= node[:elasticsearch][:user] %> -m -c "kill $(cat $PIDFILE)" 54 | r=$? 55 | timeout=0 56 | while [ -f $PIDFILE ]; do 57 | echo -n '.' 58 | (( timeout++ )) 59 | if [ $timeout -gt '15' ]; then return; fi 60 | sleep 1 61 | done; echo 62 | return $r 63 | else 64 | echo -e "\033[1mNo PID file found -- elasticsearch not running?\033[0m" 65 | fi 66 | } 67 | 68 | restart() { 69 | stop 70 | timeout=30 71 | while ps aux | grep 'java' | grep -e "es.pidfile"; do 72 | echo -n '.' 73 | (( timeout-- )) 74 | if [ $timeout -lt '1' ]; then return; fi 75 | sleep 1 76 | done; 77 | start 78 | } 79 | 80 | status() { 81 | # GOT PIDFILE? 82 | [ -f $PIDFILE ] && pid=$(cat $PIDFILE) 83 | 84 | # RUNNING 85 | if [[ $pid && -d "/proc/$pid" ]]; then 86 | version=$(curl -s 'http://localhost:9200' | ruby -rubygems -e 'require "json"; print JSON.parse(STDIN.read)["version"]["number"]') 87 | echo -e "\033[1;37;46melasticsearch $version running with PID $pid\033[0m" 88 | # VERBOSE 89 | if [[ $pid && $1 == '-v' || $1 == '--verbose' ]]; then 90 | curl -s 'http://localhost:9200/_cluster/nodes/<%= node.elasticsearch[:node][:name] %>?os&process&jvm&network&transport&settings&pretty' | \ 91 | ruby -rubygems -e ' 92 | begin 93 | require "json"; h = JSON.parse(STDIN.read); id, node = h["nodes"].first; 94 | def e(name, value); puts %Q|\e[1;36m#{(name.to_s+":").ljust(20)}\e[0m #{value || "N/A" rescue "N/A"}|; end 95 | e "HTTP Address", node["http_address"] 96 | e "Node Name", node["name"] 97 | e "Cluster Name", h["cluster_name"] 98 | e "Started", Time.at(node["jvm"]["start_time"].to_i/1000) 99 | e "JVM", "#{node["jvm"]["vm_name"]} (#{node["jvm"]["version"]})" 100 | e "Memory Total", node["os"]["mem"]["total"] 101 | e "Open Files", node["process"]["max_file_descriptors"] 102 | e "Configuration", node["settings"]["config"] 103 | rescue 104 | puts "Metadata cannot be retrieved." 105 | end 106 | ' 107 | fi 108 | # INCORRECT PID? 
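# (i.e. the PID stored in $PIDFILE differs from the PID of the running java
# process located via "es.pidfile=$PIDFILE"; see CHECK_PID_RUNNING above)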
109 | if [ $pid != $CHECK_PID_RUNNING ]; then 110 | echo -e "\033[1;31;40m[!] Incorrect PID found in $PIDFILE: $pid\033[0m" 111 | return 1 112 | fi 113 | return 0 114 | fi 115 | 116 | # STALE PID FOUND 117 | if [[ ! -d "/proc/$pid" && -f $PIDFILE ]]; then 118 | echo -e "\033[1;31;40m[!] Stale PID found in $PIDFILE\033[0m" 119 | return 1 120 | fi 121 | 122 | # NOT RUNNING 123 | if [[ ! $pid || ! -d "/proc/$pid" ]]; then 124 | echo -e "\033[1;33;40melasticsearch not running\033[0m" 125 | return 3 126 | fi 127 | } 128 | 129 | 130 | case "$1" in 131 | start) 132 | start 133 | ;; 134 | stop) 135 | stop 136 | ;; 137 | restart) 138 | restart 139 | ;; 140 | status) 141 | status $2 142 | ;; 143 | *) 144 | echo $"Usage: $0 {start|stop|restart|status [-v]}" 145 | exit 1 146 | esac 147 | 148 | exit $? 149 | -------------------------------------------------------------------------------- /elasticsearch/attributes/default.rb: -------------------------------------------------------------------------------- 1 | # Load settings from data bag 'elasticsearch/settings' 2 | # 3 | settings = Chef::DataBagItem.load('elasticsearch', 'settings')[node.chef_environment] rescue {} 4 | Chef::Log.debug "Loaded settings: #{settings.inspect}" 5 | 6 | # Initialize the node attributes by merging the default attributes with the data bag settings 7 | # 8 | node.default[:elasticsearch] ||= {} 9 | node.normal[:elasticsearch] ||= {} 10 | node.normal[:elasticsearch] = DeepMerge.merge(node.default[:elasticsearch].to_hash, node.normal[:elasticsearch].to_hash) 11 | node.normal[:elasticsearch] = DeepMerge.merge(node.normal[:elasticsearch].to_hash, settings.to_hash) 12 | 13 | # === VERSION AND LOCATION 14 | # 15 | default.elasticsearch[:version] = "0.90.5" 16 | default.elasticsearch[:checksum] = "f14ff217039b5c398a9256b68f46a90093e0a1e54e89f94ee6a2ee7de557bd6d" 17 | default.elasticsearch[:host] = "http://download.elasticsearch.org" 18 | default.elasticsearch[:repository] = "elasticsearch/elasticsearch" 19 | default.elasticsearch[:filename] = "elasticsearch-#{node.elasticsearch[:version]}.tar.gz" 20 | default.elasticsearch[:download_url] = [node.elasticsearch[:host], node.elasticsearch[:repository], node.elasticsearch[:filename]].join('/') 21 | 22 | # === NAMING 23 | # 24 | default.elasticsearch[:cluster][:name] = 'elasticsearch' 25 | default.elasticsearch[:node][:name] = node.name 26 | 27 | # === USER & PATHS 28 | # 29 | default.elasticsearch[:dir] = "/usr/local" 30 | default.elasticsearch[:user] = "elasticsearch" 31 | 32 | default.elasticsearch[:path][:conf] = "/usr/local/etc/elasticsearch" 33 | default.elasticsearch[:path][:data] = "/mnt/data/elasticsearch" 34 | default.elasticsearch[:path][:logs] = "/mnt/log/elasticsearch" 35 | 36 | default.elasticsearch[:pid_path] = "/mnt/run/elasticsearch" 37 | default.elasticsearch[:pid_file] = "#{node.elasticsearch[:pid_path]}/#{node.elasticsearch[:node][:name].to_s.gsub(/\W/, '_')}.pid" 38 | 39 | # Deprecation notice for legacy path configuration 40 | Chef::Log.warn "DEPRECATION WARNING! The 'conf_path', 'data_path' and 'log_path' attributes have changed, and will be removed in the next release. Please review your attributes." 41 | default.elasticsearch[:conf_path] = default.elasticsearch[:path][:conf] 42 | default.elasticsearch[:data_path] = default.elasticsearch[:path][:data] 43 | default.elasticsearch[:log_path] = default.elasticsearch[:path][:logs] 44 | 45 | # === MEMORY 46 | # 47 | # The maximum amount of memory to use is automatically computed as 40% of the total available memory on the machine.
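# (Worked example, assuming Ohai reports `node.memory.total` in kilobytes,
# e.g. "8058172kB": (8058172 * 0.4).floor / 1024 => 3147, i.e. "3147m".)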
48 | # You may choose to set it in your node/role configuration instead. 49 | # 50 | allocated_memory = "#{(node.memory.total.to_i * 0.4).floor / 1024}m" 51 | default.elasticsearch[:allocated_memory] = allocated_memory 52 | 53 | # === LIMITS 54 | # 55 | # By default, `mlockall` is set to true: on weak machines and Vagrant boxes, 56 | # you may want to disable it. 57 | # 58 | default.elasticsearch[:bootstrap][:mlockall] = true 59 | default.elasticsearch[:limits][:memlock] = 'unlimited' 60 | default.elasticsearch[:limits][:nofile] = '64000' 61 | 62 | # === PRODUCTION SETTINGS 63 | # 64 | default.elasticsearch[:index][:mapper][:dynamic] = true 65 | default.elasticsearch[:action][:auto_create_index] = true 66 | default.elasticsearch[:action][:disable_delete_all_indices] = true 67 | default.elasticsearch[:node][:max_local_storage_nodes] = 1 68 | 69 | default.elasticsearch[:discovery][:zen][:ping][:multicast][:enabled] = true 70 | default.elasticsearch[:discovery][:zen][:minimum_master_nodes] = 1 71 | default.elasticsearch[:gateway][:type] = 'local' 72 | default.elasticsearch[:gateway][:expected_nodes] = 1 73 | 74 | default.elasticsearch[:thread_stack_size] = "256k" 75 | 76 | # === CUSTOM CONFIGURATION 77 | # 78 | default.elasticsearch[:custom_config] = {} 79 | 80 | # -------------------------------------------------- 81 | # NOTE: Setting the attributes for elasticsearch.yml 82 | # -------------------------------------------------- 83 | # 84 | # The template uses the `print_value` extension method to print attributes with a "truthy" 85 | # value, set either in data bags, node attributes, role override attributes, etc. 86 | # 87 | # It is possible to set *any* configuration value exposed by the Elasticsearch configuration file. 88 | # 89 | # For example: 90 | # 91 | # <%= print_value 'cluster.routing.allocation.node_concurrent_recoveries' -%> 92 | # 93 | # will print a line such as: 94 | # 95 | # cluster.routing.allocation.node_concurrent_recoveries: <value> 96 | # 97 | # if either of the following node attributes is set: 98 | # 99 | # * `node.cluster.routing.allocation.node_concurrent_recoveries` 100 | # * `node['cluster.routing.allocation.node_concurrent_recoveries']` 101 | # 102 | # The default attributes set by the cookbook configure a minimal set inferred from the environment 103 | # (e.g. memory settings, node name), or reasonable defaults for production. 104 | # 105 | # The template is based on the elasticsearch.yml file from the Elasticsearch distribution; 106 | # to set other configurations, set the `node.elasticsearch[:custom_config]` attribute in the 107 | # node configuration, `elasticsearch/settings` data bag, role/environment definition, etc: 108 | # 109 | # // ... 110 | # 'threadpool.index.type' => 'fixed', 111 | # 'threadpool.index.size' => '2' 112 | # // ... 113 | # 114 | -------------------------------------------------------------------------------- /newrelic/templates/default/nrsysmond.cfg.erb: -------------------------------------------------------------------------------- 1 | # 2 | # New Relic Server Monitor configuration file. 3 | # 4 | # Lines that begin with a # are comment lines and are ignored by the server 5 | # monitor. For those options that have command line equivalents, if the 6 | # option is specified on the command line it will override any value set 7 | # in this file. 8 | # 9 | 10 | # 11 | # Option : license_key 12 | # Value : 40-character hexadecimal string provided by New Relic. This is 13 | # required in order for the server monitor to start.
14 | # Default: none 15 | # 16 | license_key=<%= node[:newrelic][:license_key] %> 17 | 18 | # 19 | # Option : loglevel 20 | # Value : Level of detail you want in the log file (as defined by the logfile 21 | # setting below). Valid values are (in increasing levels of verbosity): 22 | # error - show errors only 23 | # warning - show errors and warnings 24 | # info - show minimal additional information messages 25 | # verbose - show more detailed information messages 26 | # debug - show debug messages 27 | # verbosedebug - show very detailed debug messages 28 | # Default: error 29 | # Note : Can also be set with the -d command line option. 30 | # 31 | loglevel=info 32 | 33 | # 34 | # Option : logfile 35 | # Value : Name of the file where the server monitor will store its log 36 | # messages. The amount of detail stored in this file is controlled 37 | # by the loglevel option (above). 38 | # Default: none. However it is highly recommended you set a value for this. 39 | # Note : Can also be set with the -l command line option. 40 | # 41 | logfile=/var/log/newrelic/nrsysmond.log 42 | 43 | # 44 | # Option : proxy 45 | # Value : The name and optional login credentials of the proxy server to use 46 | # for all communication with the New Relic collector. In its simplest 47 | # form this setting is just a hostname[:port] setting. The default 48 | # port if none is specified is 1080. If your proxy requires a user 49 | # name, use the syntax user@host[:port]. If it also requires a 50 | # password use the format user:password@host[:port]. For example: 51 | # fred:secret@proxy.mydomain.com:8181 52 | # Default: none (use a direct connection) 53 | # 54 | #proxy= 55 | 56 | # 57 | # Option : ssl 58 | # Value : Whether or not to use the Secure Sockets Layer (SSL) for all 59 | # communication with the New Relic collector. Possible values are 60 | # true/on or false/off. In certain rare cases you may need to modify 61 | # the SSL certificates settings below. 62 | # Default: false 63 | # 64 | #ssl=false 65 | 66 | # 67 | # Option : ssl_ca_bundle 68 | # Value : The name of a PEM-encoded Certificate Authority (CA) bundle to use 69 | # for SSL connections. This very rarely needs to be set. The monitor 70 | # will attempt to find the bundle in the most common locations. If 71 | # you need to use SSL and the monitor is unable to locate a CA bundle 72 | # then either set this value or the ssl_ca_path option below. 73 | # Default: /etc/ssl/certs/ca-certificates.crt or 74 | # /etc/pki/tls/certs/ca-bundle.crt 75 | # Note : Can also be set with the -b command line option. 76 | # 77 | #ssl_ca_bundle=/path/to/your/bundle.crt 78 | 79 | # 80 | # Option : ssl_ca_path 81 | # Value : If your SSL installation does not use CA bundles, but rather has a 82 | # directory with PEM-encoded Certificate Authority files, set this 83 | # option to the name of the directory that contains all the CA files. 84 | # Default: /etc/ssl/certs 85 | # Note : Can also be set with the -S command line option. 86 | # 87 | #ssl_ca_path=/etc/ssl/certs 88 | 89 | # 90 | # Option : pidfile 91 | # Value : Name of a file where the server monitoring daemon will store its 92 | # process ID (PID). This is used by the startup and shutdown script 93 | # to determine if the monitor is already running, and to start it up 94 | # or shut it down. 95 | # Default: /tmp/nrsysmond.pid 96 | # Note : Can also be set with the -p command line option.
97 | # 98 | #pidfile=/var/run/newrelic/nrsysmond.pid 99 | 100 | # 101 | # Option : collector_host 102 | # Value : The name of the New Relic collector to connect to. This should only 103 | # ever be changed on advice from a New Relic support staff member. 104 | # The format is host[:port]. Using a port number of 0 means the default 105 | # port, which is 80 (if not using the ssl option - see below) or 443 106 | # if SSL is enabled. If the port is omitted the default value is used. 107 | # Default: collector.newrelic.com 108 | # 109 | #collector_host=collector.newrelic.com 110 | 111 | # 112 | # Option : timeout 113 | # Value : How long the monitor should wait to contact the collector host. If 114 | # the connection cannot be established in this period of time, the 115 | # monitor will progressively back off in 15-second increments, up to 116 | # a maximum of 300 seconds. Once the initial connection has been 117 | # established, this value is reset back to the value specified here 118 | # (or the default). This then sets the maximum time to wait for 119 | # a connection to the collector to report data. There is no back-off 120 | # once the original connection has been made. The value is in seconds. 121 | # Default: 30 122 | # 123 | #timeout=30 124 | -------------------------------------------------------------------------------- /elasticsearch/templates/default/elasticsearch.yml.erb: -------------------------------------------------------------------------------- 1 | ######################### ElasticSearch Configuration ######################## 2 | 3 | # This file is managed by Chef. Do not edit it manually: your changes *will* be overwritten! 4 | # 5 | # Please see the source file for context and more information: 6 | # 7 | # https://github.com/elasticsearch/elasticsearch/blob/master/config/elasticsearch.yml 8 | # 9 | # To set configurations not exposed by this template, set the 10 | # `node.elasticsearch[:custom_config]` attribute in your node configuration, 11 | # `elasticsearch/settings` data bag, role/environment definition, etc: 12 | # 13 | # // ... 14 | # 'threadpool.index.type' => 'fixed', 15 | # 'threadpool.index.size' => '2' 16 | # // ... 17 | 18 | ################################### Cluster ################################### 19 | 20 | <%= print_value 'cluster.name' -%> 21 | 22 | #################################### Node ##################################### 23 | 24 | <%= print_value 'node.name' -%> 25 | <%= print_value 'node.master' -%> 26 | <%= print_value 'node.data' -%> 27 | <%= print_value 'node.max_local_storage_nodes' -%> 28 | 29 | #################################### Index #################################### 30 | 31 | <%= print_value 'index.number_of_shards' -%> 32 | <%= print_value 'index.number_of_replicas' -%> 33 | <%= print_value 'index.mapper.dynamic' -%> 34 | <%= print_value 'action.auto_create_index' -%> 35 | <%= print_value 'action.disable_delete_all_indices' -%> 36 | 37 | #################################### Paths #################################### 38 | 39 | <%= print_value 'path.conf' -%> 40 | <%= print_value 'path.data', (node.elasticsearch.path.data.is_a?(Array) ?
node.elasticsearch.path.data.join(",") : node.elasticsearch.path.data) -%> 41 | <%= print_value 'path.work' -%> 42 | <%= print_value 'path.logs' -%> 43 | <%= print_value 'path.plugins' -%> 44 | 45 | #################################### Plugin ################################### 46 | 47 | <%= print_value 'plugin.mandatory', node[:elasticsearch][:plugin][:mandatory].join(',') \ 48 | unless node[:elasticsearch][:plugin][:mandatory].empty? -%> 49 | 50 | ################################### Memory #################################### 51 | 52 | <%= print_value 'bootstrap.mlockall' -%> 53 | 54 | ############################## Network And HTTP ############################### 55 | 56 | <%= print_value 'network.bind_host' -%> 57 | <%= print_value 'network.publish_host' -%> 58 | <%= print_value 'network.host' -%> 59 | <%= print_value 'transport.tcp.port' -%> 60 | <%= print_value 'transport.tcp.compress' -%> 61 | <%= print_value 'http.port' -%> 62 | <%= print_value 'http.max_content_length' -%> 63 | <%= print_value 'http.enabled' -%> 64 | 65 | ################################### Gateway ################################### 66 | 67 | <%= print_value 'gateway.type', node.elasticsearch[:gateway][:type] -%> 68 | <%= print_value 'gateway.recover_after_nodes' -%> 69 | <%= print_value 'gateway.recover_after_time' -%> 70 | <%= print_value 'gateway.expected_nodes' -%> 71 | 72 | ############################# Recovery Throttling ############################# 73 | 74 | <%= print_value 'cluster.routing.allocation.node_initial_primaries_recoveries' -%> 75 | <%= print_value 'cluster.routing.allocation.node_concurrent_recoveries' -%> 76 | <%= print_value 'indices.recovery.max_size_per_sec' -%> 77 | <%= print_value 'indices.recovery.concurrent_streams' -%> 78 | 79 | ################################## Discovery ################################## 80 | 81 | <%= print_value 'discovery.type', node.elasticsearch[:discovery][:type] -%> 82 | 83 | <%= print_value 'discovery.zen.minimum_master_nodes' -%> 84 | <%= print_value 'discovery.zen.ping.timeout' -%> 85 | <%= print_value 'discovery.zen.ping.multicast.enabled' -%> 86 | <%= print_value 'discovery.zen.ping.unicast.hosts' -%> 87 | 88 | <%- if node.elasticsearch[:cloud] -%> 89 | <%= print_value 'cloud.node.auto_attributes' -%> 90 | <%= print_value 'cloud.aws.access_key' -%> 91 | <%= print_value 'cloud.aws.secret_key' -%> 92 | <%= print_value 'cloud.aws.region' -%> 93 | <%= print_value 'cloud.aws.ec2.endpoint' -%> 94 | <%= print_value 'discovery.ec2.groups' -%> 95 | <%= print_value 'discovery.ec2.host_type' -%> 96 | <%= print_value 'discovery.ec2.availability_zones' -%> 97 | <%= print_value 'discovery.ec2.any_group' -%> 98 | <%= print_value 'discovery.ec2.ping_timeout' -%> 99 | <%- node.elasticsearch[:discovery][:ec2][:tag].sort.each do |key, value| -%> 100 | discovery.ec2.tag.<%= key %>: <%= value %> 101 | <%- end unless node.elasticsearch[:discovery][:ec2][:tag].keys.empty? 
rescue false -%> 102 | <%- end -%> 103 | 104 | ################################## Slow Log ################################### 105 | 106 | <%= print_value 'index.search.slowlog.threshold.query.warn' -%> 107 | <%= print_value 'index.search.slowlog.threshold.query.info' -%> 108 | <%= print_value 'index.search.slowlog.threshold.query.debug' -%> 109 | <%= print_value 'index.search.slowlog.threshold.query.trace' -%> 110 | <%= -%> 111 | <%= print_value 'index.search.slowlog.threshold.fetch.warn' -%> 112 | <%= print_value 'index.search.slowlog.threshold.fetch.info' -%> 113 | <%= print_value 'index.search.slowlog.threshold.fetch.debug' -%> 114 | <%= print_value 'index.search.slowlog.threshold.fetch.trace' -%> 115 | <%= -%> 116 | <%= print_value 'index.indexing.slowlog.threshold.index.warn' -%> 117 | <%= print_value 'index.indexing.slowlog.threshold.index.info' -%> 118 | <%= print_value 'index.indexing.slowlog.threshold.index.debug' -%> 119 | <%= print_value 'index.indexing.slowlog.threshold.index.trace' -%> 120 | 121 | ################################## GC Logging ################################# 122 | 123 | <%= print_value 'monitor.jvm.gc.ParNew.warn' -%> 124 | <%= print_value 'monitor.jvm.gc.ParNew.info' -%> 125 | <%= print_value 'monitor.jvm.gc.ParNew.debug' -%> 126 | <%= -%> 127 | <%= print_value 'monitor.jvm.gc.ConcurrentMarkSweep.warn' -%> 128 | <%= print_value 'monitor.jvm.gc.ConcurrentMarkSweep.info' -%> 129 | <%= print_value 'monitor.jvm.gc.ConcurrentMarkSweep.debug' -%> 130 | 131 | ################################## JMX ######################################## 132 | 133 | <%- if node.elasticsearch[:jmx] -%> 134 | jmx.create_connector: true 135 | jmx.port: 9400-9500 136 | jmx.domain: elasticsearch 137 | <%- end -%> 138 | 139 | ################################## Custom ##################################### 140 | 141 | <% node.elasticsearch[:custom_config].sort.each do |key, value| %> 142 | <%= key %>: <%= value %> 143 | <% end %> 144 | -------------------------------------------------------------------------------- /elasticsearch/Vagrantfile: -------------------------------------------------------------------------------- 1 | # Launch and provision multiple Linux distributions with Vagrant 2 | # 3 | # Support: 4 | # 5 | # * precise64: Ubuntu 12.04 (Precise) 64 bit (primary box) 6 | # * lucid32: Ubuntu 10.04 (Lucid) 32 bit 7 | # * lucid64: Ubuntu 10.04 (Lucid) 64 bit 8 | # * centos6: CentOS 6 32 bit 9 | # 10 | # See: 11 | # 12 | # $ vagrant status 13 | # 14 | # The virtual machines are automatically provisioned upon startup with Chef-Solo. 15 | # 16 | # 17 | 18 | # Lifted from 19 | # 20 | class Hash 21 | def deep_merge!(other_hash) 22 | self.merge(other_hash) do |key, oldval, newval| 23 | oldval = oldval.to_hash if oldval.respond_to?(:to_hash) 24 | newval = newval.to_hash if newval.respond_to?(:to_hash) 25 | oldval.class.to_s == 'Hash' && newval.class.to_s == 'Hash' ? oldval.dup.deep_merge!(newval) : newval 26 | end 27 | end unless respond_to?(:deep_merge!)
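# Example (hypothetical values):
#
#   { :a => { :b => 1 } }.deep_merge!(:a => { :c => 2 })
#   # => { :a => { :b => 1, :c => 2 } }
#
# Note that despite the bang, this implementation returns a new, merged hash
# rather than mutating the receiver (it calls `merge`, not `merge!`), which is
# why it is invoked below as `node_config.dup.deep_merge!(options[:node])`.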
28 | end 29 | 30 | puts "[Vagrant ] #{Vagrant::VERSION}" 31 | 32 | # Automatically install and mount cookbooks from Berksfile 33 | # 34 | require 'berkshelf/vagrant' if Vagrant::VERSION < '1.1' 35 | 36 | distributions = { 37 | :precise64 => { 38 | :url => 'http://files.vagrantup.com/precise64.box', 39 | :run_list => %w| apt build-essential vim java monit elasticsearch elasticsearch::plugins elasticsearch::proxy elasticsearch::aws elasticsearch::data elasticsearch::monit elasticsearch::test |, 40 | :ip => '33.33.33.10', 41 | :primary => true, 42 | :node => { 43 | :elasticsearch => { 44 | :path => { 45 | :data => %w| /usr/local/var/data/elasticsearch/disk1 /usr/local/var/data/elasticsearch/disk2 | 46 | }, 47 | :data => { 48 | :devices => { 49 | "/dev/sdb" => { 50 | :file_system => "ext3", 51 | :mount_options => "rw,user", 52 | :mount_path => "/usr/local/var/data/elasticsearch/disk1", 53 | :format_command => "mkfs.ext3 -F", 54 | :fs_check_command => "dumpe2fs" 55 | }, 56 | "/dev/sdc" => { 57 | :file_system => "ext3", 58 | :mount_options => "rw,user", 59 | :mount_path => "/usr/local/var/data/elasticsearch/disk2", 60 | :format_command => "mkfs.ext3 -F", 61 | :fs_check_command => "dumpe2fs" 62 | } 63 | } 64 | } 65 | } 66 | } 67 | }, 68 | 69 | :precise32 => { 70 | :url => 'http://files.vagrantup.com/precise32.box', 71 | :run_list => %w| apt vim java monit elasticsearch elasticsearch::proxy elasticsearch::monit |, 72 | :ip => '33.33.33.10', 73 | :primary => false, 74 | :node => {} 75 | }, 76 | 77 | :lucid64 => { 78 | :url => 'http://files.vagrantup.com/lucid64.box', 79 | :run_list => %w| apt vim java monit elasticsearch elasticsearch::proxy elasticsearch::monit |, 80 | :ip => '33.33.33.10', 81 | :primary => false, 82 | :node => {} 83 | }, 84 | 85 | :lucid32 => { 86 | :url => 'http://files.vagrantup.com/lucid32.box', 87 | :run_list => %w| apt vim java monit elasticsearch elasticsearch::proxy elasticsearch::monit |, 88 | :ip => '33.33.33.11', 89 | :primary => false, 90 | :node => {} 91 | }, 92 | 93 | :centos6 => { 94 | # Note: Monit cookbook broken on CentOS 95 | :url => 'https://opscode-vm.s3.amazonaws.com/vagrant/boxes/opscode-centos-6.3.box', 96 | :run_list => %w| yum::epel build-essential vim java elasticsearch elasticsearch::proxy elasticsearch::data elasticsearch::test |, 97 | :ip => '33.33.33.12', 98 | :primary => false, 99 | :node => { 100 | :java => { 101 | :install_flavor => "openjdk", 102 | :jdk_version => "7" 103 | }, 104 | :elasticsearch => { 105 | :path => { 106 | :data => "/usr/local/var/data/elasticsearch/disk1" 107 | }, 108 | :data => { 109 | :devices => { 110 | "/dev/sdb" => { 111 | :file_system => "ext3", 112 | :mount_options => "rw,user", 113 | :mount_path => "/usr/local/var/data/elasticsearch/disk1", 114 | :format_command => "mkfs.ext3 -F", 115 | :fs_check_command => "dumpe2fs" 116 | } 117 | } 118 | }, 119 | 120 | :nginx => { 121 | :user => 'nginx' 122 | } 123 | } 124 | } 125 | } 126 | } 127 | 128 | node_config = { 129 | :elasticsearch => { 130 | :cluster => { :name => "elasticsearch_vagrant" }, 131 | 132 | :plugins => { 133 | 'karmi/elasticsearch-paramedic' => {} 134 | }, 135 | 136 | :limits => { 137 | :nofile => 1024, 138 | :memlock => 512 139 | }, 140 | :bootstrap => { 141 | :mlockall => false 142 | }, 143 | 144 | :logging => { 145 | :discovery => 'TRACE', 146 | 'index.indexing.slowlog' => 'INFO, index_indexing_slow_log_file' 147 | }, 148 | 149 | :nginx => { 150 | :user => 'www-data', 151 | :users => [{ username: 'USERNAME', password: 'PASSWORD' }] 152 | }, 153 | # For 
testing flat attributes: 154 | "index.search.slowlog.threshold.query.trace" => "1ms", 155 | # For testing deep attributes: 156 | :discovery => { :zen => { :ping => { :timeout => "9s" } } }, 157 | # For testing custom configuration 158 | :custom_config => { 159 | 'threadpool.index.type' => 'fixed', 160 | 'threadpool.index.size' => '2' 161 | } 162 | } 163 | } 164 | 165 | Vagrant::Config.run do |config| 166 | 167 | distributions.each_pair do |name, options| 168 | 169 | config.vagrant.dotfile_name = Vagrant::VERSION < '1.1' ? '.vagrant-1' : '.vagrant-2' 170 | 171 | config.vm.define name, :options => options[:primary] do |box_config| 172 | 173 | box_config.vm.box = name.to_s 174 | box_config.vm.box_url = options[:url] 175 | 176 | box_config.vm.host_name = name.to_s 177 | 178 | box_config.vm.network :hostonly, options[:ip] 179 | 180 | box_config.berkshelf.enabled = true if Vagrant::VERSION > '1.1' 181 | 182 | # Box customizations 183 | # 184 | # 1. Limit memory to 512 MB 185 | # 186 | box_config.vm.customize ["modifyvm", :id, "--memory", 512] 187 | # 188 | # 2. Create additional disks 189 | # 190 | if name == :precise64 or name == :centos6 191 | disk1, disk2 = "tmp/disk-#{Time.now.to_f}.vdi", "tmp/disk-#{Time.now.to_f}.vdi" 192 | box_config.vm.customize ["createhd", "--filename", disk1, "--size", 250] 193 | box_config.vm.customize ["storageattach", :id, "--storagectl", "SATA Controller", "--port", 1,"--type", "hdd", "--medium", disk1] 194 | box_config.vm.customize ["createhd", "--filename", disk2, "--size", 250] 195 | box_config.vm.customize ["storageattach", :id, "--storagectl", "SATA Controller", "--port", 2,"--type", "hdd", "--medium", disk2] 196 | end 197 | 198 | # Update packages on the machine 199 | # 200 | config.vm.provision :shell do |shell| 201 | shell.inline = %Q{ 202 | which apt-get > /dev/null 2>&1 && apt-get update --quiet --yes && apt-get install curl --quiet --yes 203 | which yum > /dev/null 2>&1 && yum update -y && yum install curl -y 204 | } 205 | end if ENV['UPDATE'] 206 | 207 | # Install latest Chef on the machine 208 | # 209 | config.vm.provision :shell do |shell| 210 | version = ENV['CHEF'].match(/^\d+/) ? ENV['CHEF'] : nil 211 | shell.inline = %Q{ 212 | which apt-get > /dev/null 2>&1 && apt-get install curl --quiet --yes 213 | which yum > /dev/null 2>&1 && yum install curl -y 214 | test -d "/opt/chef" || curl -# -L http://www.opscode.com/chef/install.sh | sudo bash -s -- #{version ? "-v #{version}" : ''} 215 | /opt/chef/embedded/bin/gem list pry | grep pry || /opt/chef/embedded/bin/gem install pry --no-ri --no-rdoc 216 | } 217 | end if ENV['CHEF'] 218 | 219 | # Provision the machine with Chef Solo 220 | # 221 | box_config.vm.provision :chef_solo do |chef| 222 | chef.data_bags_path = './tmp/data_bags' 223 | chef.provisioning_path = '/etc/vagrant-chef' 224 | chef.log_level = :debug 225 | 226 | chef.run_list = options[:run_list] 227 | chef.json = node_config.dup.deep_merge!(options[:node]) 228 | end 229 | end 230 | 231 | end 232 | 233 | end 234 | -------------------------------------------------------------------------------- /monit/templates/default/monitrc.erb: -------------------------------------------------------------------------------- 1 | ############################################################################### 2 | ## Monit control file 3 | ############################################################################### 4 | ## 5 | ## Comments begin with a '#' and extend through the end of the line. Keywords 6 | ## are case insensitive. 
All paths MUST BE FULLY QUALIFIED, starting with '/'. 7 | ## 8 | ## Below you will find examples of some frequently used statements. For 9 | ## information about the control file and a complete list of statements and 10 | ## options, please have a look in the Monit manual. 11 | ## 12 | ## 13 | ############################################################################### 14 | ## Global section 15 | ############################################################################### 16 | ## 17 | ## Start Monit in the background (run as a daemon): 18 | # 19 | set daemon 120 # check services at 2-minute intervals 20 | # with start delay 240 # optional: delay the first check by 4 minutes (by 21 | # # default Monit checks immediately after Monit starts) 22 | # 23 | # 24 | ## Set syslog logging with the 'daemon' facility. If the FACILITY option is 25 | ## omitted, Monit will use the 'user' facility by default. If you want to log to 26 | ## a standalone log file instead, specify the full path to the log file. 27 | # 28 | # set logfile syslog facility log_daemon 29 | set logfile /var/log/monit.log 30 | # 31 | # 32 | ### Set the location of the Monit id file which stores the unique id for the 33 | ### Monit instance. The id is generated and stored on first Monit start. By 34 | ### default the file is placed in $HOME/.monit.id. 35 | # 36 | set idfile /var/lib/monit/id 37 | # 38 | ### Set the location of the Monit state file which saves monitoring states 39 | ### on each cycle. By default the file is placed in $HOME/.monit.state. If 40 | ### the state file is stored on a persistent filesystem, Monit will recover 41 | ### the monitoring state across reboots. If it is on a temporary filesystem, the 42 | ### state will be lost on reboot which may be convenient in some situations. 43 | # 44 | set statefile /var/lib/monit/state 45 | # 46 | ## Set the list of mail servers for alert delivery. Multiple servers may be 47 | ## specified using a comma separator. By default Monit uses port 25 - it is 48 | ## possible to override this with the PORT option. 49 | # 50 | # set mailserver mail.bar.baz, # primary mailserver 51 | # backup.bar.baz port 10025, # backup mailserver on port 10025 52 | # localhost # fallback relay 53 | # 54 | # 55 | ## By default Monit will drop alert events if no mail servers are available. 56 | ## If you want to keep the alerts for later delivery retry, you can use the 57 | ## EVENTQUEUE statement. The base directory where undelivered alerts will be 58 | ## stored is specified by the BASEDIR option. You can limit the maximum queue 59 | ## size using the SLOTS option (if omitted, the queue is limited by space 60 | ## available in the back end filesystem). 61 | # 62 | set eventqueue 63 | basedir /var/lib/monit/events # set the base directory where events will be stored 64 | slots 100 # optionally limit the queue size 65 | # 66 | # 67 | ## Send status and events to M/Monit (for more information about M/Monit 68 | ## see http://mmonit.com/). By default Monit registers credentials with 69 | ## M/Monit so M/Monit can smoothly communicate back to Monit and you don't 70 | ## have to register Monit credentials manually in M/Monit. It is possible to 71 | ## disable credential registration using the commented-out option below. 72 | ## Though, if safety is a concern we recommend instead using https when 73 | ## communicating with M/Monit and sending credentials encrypted.
74 | # 75 | # set mmonit http://monit:monit@192.168.1.10:8080/collector 76 | # # and register without credentials # Don't register credentials 77 | # 78 | # 79 | # 80 | ## Monit by default uses the following alert mail format: 81 | ## 82 | ## --8<-- 83 | ## From: monit@$HOST # sender 84 | ## Subject: monit alert -- $EVENT $SERVICE # subject 85 | ## 86 | ## $EVENT Service $SERVICE # 87 | ## # 88 | ## Date: $DATE # 89 | ## Action: $ACTION # 90 | ## Host: $HOST # body 91 | ## Description: $DESCRIPTION # 92 | ## # 93 | ## Your faithful employee, # 94 | ## Monit # 95 | ## --8<-- 96 | ## 97 | ## You can override this message format or parts of it, such as the subject 98 | ## or sender, using the MAIL-FORMAT statement. Macros such as $DATE, etc. 99 | ## are expanded at runtime. For example, to override the sender, use: 100 | # 101 | # set mail-format { from: monit@foo.bar } 102 | # 103 | # 104 | ## You can set alert recipients who will receive alerts if/when a 105 | ## service defined in this file has errors. Alerts may be restricted on 106 | ## events by using a filter as in the second example below. 107 | # 108 | # set alert sysadm@foo.bar # receive all alerts 109 | # set alert manager@foo.bar only on { timeout } # receive just service- 110 | # # timeout alert 111 | # 112 | # 113 | ## Monit has an embedded web server which can be used to view the status of 114 | ## monitored services and manage services from a web interface. See the 115 | ## Monit Wiki if you want to enable SSL for the web server. 116 | # 117 | set httpd port 2812 and 118 | allow 0.0.0.0/0 119 | # use address localhost # only accept connection from localhost 120 | # allow localhost # allow localhost to connect to the server and 121 | # allow admin:monit # require user 'admin' with password 'monit' 122 | # allow @monit # allow users of group 'monit' to connect (rw) 123 | # allow @users readonly # allow users of group 'users' to connect readonly 124 | # 125 | # 126 | ############################################################################### 127 | ## Services 128 | ############################################################################### 129 | ## 130 | ## Check general system resources such as load average, cpu and memory 131 | ## usage. Each test specifies a resource, conditions and the action to be 132 | ## performed should a test fail. 133 | # 134 | # check system myhost.mydomain.tld 135 | # if loadavg (1min) > 4 then alert 136 | # if loadavg (5min) > 2 then alert 137 | # if memory usage > 75% then alert 138 | # if swap usage > 25% then alert 139 | # if cpu usage (user) > 70% then alert 140 | # if cpu usage (system) > 30% then alert 141 | # if cpu usage (wait) > 20% then alert 142 | # 143 | # 144 | ## Check if a file exists, checksum, permissions, uid and gid. In addition 145 | ## to alert recipients in the global section, a customized alert can be sent to 146 | ## additional recipients by specifying a local alert handler. The service may 147 | ## be grouped using the GROUP option. More than one group can be specified by 148 | ## repeating the 'group name' statement. 149 | # 150 | # check file apache_bin with path /usr/local/apache/bin/httpd 151 | # if failed checksum and 152 | # expect the sum 8f7f419955cefa0b33a2ba316cba3659 then unmonitor 153 | # if failed permission 755 then unmonitor 154 | # if failed uid root then unmonitor 155 | # if failed gid root then unmonitor 156 | # alert security@foo.bar on { 157 | # checksum, permission, uid, gid, unmonitor 158 | # } with the mail-format { subject: Alarm!
} 159 | # group server 160 | # 161 | # 162 | ## Check that a process is running, in this case Apache, and that it responds 163 | ## to HTTP and HTTPS requests. Check its resource usage such as cpu and memory, 164 | ## and number of children. If the process is not running, Monit will restart 165 | ## it by default. In case the service is restarted very often and the 166 | ## problem remains, it is possible to disable monitoring using the TIMEOUT 167 | ## statement. This service depends on another service (apache_bin) which 168 | ## is defined above. 169 | # 170 | # check process apache with pidfile /usr/local/apache/logs/httpd.pid 171 | # start program = "/etc/init.d/httpd start" with timeout 60 seconds 172 | # stop program = "/etc/init.d/httpd stop" 173 | # if cpu > 60% for 2 cycles then alert 174 | # if cpu > 80% for 5 cycles then restart 175 | # if totalmem > 200.0 MB for 5 cycles then restart 176 | # if children > 250 then restart 177 | # if loadavg(5min) greater than 10 for 8 cycles then stop 178 | # if failed host www.tildeslash.com port 80 protocol http 179 | # and request "/somefile.html" 180 | # then restart 181 | # if failed port 443 type tcpssl protocol http 182 | # with timeout 15 seconds 183 | # then restart 184 | # if 3 restarts within 5 cycles then timeout 185 | # depends on apache_bin 186 | # group server 187 | # 188 | # 189 | ## Check filesystem permissions, uid, gid, space and inode usage. Other services, 190 | ## such as databases, may depend on this resource and an automatic, graceful 191 | ## stop may be cascaded to them before the filesystem becomes full and data 192 | ## is lost. 193 | # 194 | # check filesystem datafs with path /dev/sdb1 195 | # start program = "/bin/mount /data" 196 | # stop program = "/bin/umount /data" 197 | # if failed permission 660 then unmonitor 198 | # if failed uid root then unmonitor 199 | # if failed gid disk then unmonitor 200 | # if space usage > 80% for 5 times within 15 cycles then alert 201 | # if space usage > 99% then stop 202 | # if inode usage > 30000 then alert 203 | # if inode usage > 99% then stop 204 | # group server 205 | # 206 | # 207 | ## Check a file's timestamp. In this example, we test if a file is older 208 | ## than 15 minutes and assume something is wrong if it's not updated. Also, 209 | ## if the file size exceeds a given limit, execute a script. 210 | # 211 | # check file database with path /data/mydatabase.db 212 | # if failed permission 700 then alert 213 | # if failed uid data then alert 214 | # if failed gid data then alert 215 | # if timestamp > 15 minutes then alert 216 | # if size > 100 MB then exec "/my/cleanup/script" as uid dba and gid dba 217 | # 218 | # 219 | ## Check directory permission, uid and gid. An event is triggered if the 220 | ## directory does not belong to the user with uid 0 and gid 0. In addition, 221 | ## the permissions have to match the octal description of 755 (see chmod(1)). 222 | # 223 | # check directory bin with path /bin 224 | # if failed permission 755 then unmonitor 225 | # if failed uid 0 then unmonitor 226 | # if failed gid 0 then unmonitor 227 | # 228 | # 229 | ## Check a remote host's availability by issuing a ping test and check the 230 | ## content of a response from a web server. Up to three pings are sent and 231 | ## connection to a port and an application-level network check is performed.
232 | # 233 | # check host myserver with address 192.168.1.1 234 | # if failed icmp type echo count 3 with timeout 3 seconds then alert 235 | # if failed port 3306 protocol mysql with timeout 15 seconds then alert 236 | # if failed url http://user:password@www.foo.bar:8080/?querystring 237 | # and content == 'action="j_security_check"' 238 | # then alert 239 | # 240 | # 241 | ############################################################################### 242 | ## Includes 243 | ############################################################################### 244 | ## 245 | ## It is possible to include additional configuration parts from other files or 246 | ## directories. 247 | # 248 | include /etc/monit/conf.d/* 249 | # 250 | # 251 | -------------------------------------------------------------------------------- /elasticsearch/LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /ark/README.md: -------------------------------------------------------------------------------- 1 | # chef-ark [![Build Status](https://secure.travis-ci.org/bryanwb/chef-ark.png?branch=master)](http://travis-ci.org/bryanwb/chef-ark) 2 | 3 | Overview 4 | ======== 5 | 6 | An ''ark'' is like an archive but ''Kewler'' 7 | 8 | Does the fetch-unpack-configure-build-install dance. 
This is a 9 | modified version of Infochimps' awesome install_from cookbook 10 | [http://github.com/infochimps-cookbooks/install_from]. It has been 11 | heavily refactored and extended to meet different use cases. 12 | 13 | Given a simple project archive available at a URL: 14 | 15 | ark 'pig' do 16 | url 'http://apache.org/pig/pig-0.8.0.tar.gz' 17 | end 18 | 19 | this provider will 20 | 21 | * fetch it to `/var/cache/chef/` 22 | * unpack it to the default path (`/usr/local/pig-0.8.0`) 23 | * create a symlink for `:home_dir` (`/usr/local/pig`) pointing to the path 24 | * add specified binary commands to the environment `PATH` variable 25 | 26 | By default, the ark will not run again if the `:path` is not 27 | empty. Ark provides many actions to accommodate different use cases, 28 | such as `:dump`, `:cherry_pick`, `:put`, and `:install_with_make`. 29 | 30 | At this time ark only handles files available from URLs. It does not 31 | handle local files. 32 | 33 | Attributes 34 | ========== 35 | 36 | Customize the attributes to suit site-specific conventions and 37 | defaults. 38 | 39 | * `node['ark']['apache_mirror']` - if the URL is an apache mirror, use 40 | the attribute as the default. 41 | * `node['ark']['prefix_root']` - default base location if the `prefix_root` 42 | is not passed into the resource. 43 | * `node['ark']['prefix_bin']` - default binary location if the 44 | `prefix_bin` is not passed into the resource. 45 | * `node['ark']['prefix_home']` - default home location if the 46 | `prefix_home` is not passed into the resource. 47 | 48 | Resources/Providers 49 | =================== 50 | 51 | * `ark` - does the extract/build/configure dance 52 | 53 | Actions 54 | ------- 55 | 56 | - `:install`: extracts the file and creates a 'friendly' symbolic link 57 | to the extracted directory path 58 | - `:configure`: runs configure ahead of the install action 59 | - `:install_with_make`: extracts the archive to a path, runs `make`, and 60 | `make install`. It does _not_ run the configure step at this time 61 | - `:dump`: strips all directories from the archive and dumps the 62 | contained files into a specified path 63 | - `:cherry_pick`: extracts a specified file from an archive and places 64 | it in a specified path 65 | - `:put`: extracts the archive to a specified path; does not create any 66 | symbolic links 67 | - `:remove`: removes the extracted directory and related symlink #TODO 68 | - `:setup_py_build`: runs the command "python setup.py build" in the 69 | extracted directory 70 | - `:setup_py_install`: runs the command "python setup.py install" in the 71 | extracted directory 72 | 73 | ## :put 74 | 75 | Extracts the archive to a specified path; does not create any symbolic links. 76 | 77 | ### Attribute Parameters for :put 78 | 79 | - `path`: path to extract to. 80 | - Default: `/usr/local` 81 | - `has_binaries`: array of binary commands to symlink into `/usr/local/bin/`; 82 | you must specify the relative path. 83 | - Example: `[ 'bin/java', 'bin/javaws' ]` 84 | - `append_env_path`: boolean; if true, append the `./bin` directory of the 85 | extracted directory to the global `PATH` variable for all users. 86 | 87 | ## :dump 88 | 89 | Strips all directories from the archive and dumps the contained files into a specified path. 90 | 91 | NOTE: This currently only works for zip archives. 92 | 93 | ### Attribute Parameters for :dump 94 | 95 | - `path`: path to dump files to. 96 | - `mode`: file mode for `app_home`, as an integer.
97 | - Example: `0775` 98 | - `creates`: if you are appending files to a given directory, ark 99 | needs a condition to test whether the file has already been 100 | extracted. You can specify, with `creates`, a file whose existence 101 | indicates the ark has previously been extracted and does not need to 102 | be extracted again. 103 | 104 | ## :cherry_pick 105 | 106 | Extracts a specified file from an archive and places it in a specified path. 107 | 108 | ### Relevant Attribute Parameters for :cherry_pick 109 | 110 | - `path`: directory to place the file in. 111 | - `creates`: specific file to cherry-pick. 112 | 113 | Attribute Parameters 114 | -------------------- 115 | 116 | - `name`: name of the package, defaults to the resource name. 117 | - `url`: URL for the tarball; `.tar.gz`, `.bin` (oracle-specific), `.war`, 118 | and `.zip` are currently supported. Also supports the special syntax 119 | `:name:version:apache_mirror:` that will auto-magically construct the 120 | download URL from the apache mirrors site. 121 | - `version`: software version, defaults to `1`. 122 | - `checksum`: sha256 checksum, used for security. 123 | - `mode`: file mode for `app_home`, as an integer. 124 | - `prefix_root`: default `prefix_root`, for use with `:install*` 125 | actions. 126 | - `prefix_home`: default directory prefix for a friendly symlink to 127 | the path. 128 | - Example: `/usr/local/maven` -> `/usr/local/maven-2.2.1` 129 | - `prefix_bin`: default directory to place a symlink to a binary 130 | command. 131 | - Example: `/opt/bin/mvn` -> `/opt/maven-2.2.1/bin/mvn`, where the 132 | `prefix_bin` is `/opt/bin` 133 | - `path`: path to extract the ark to. The `:install*` actions 134 | overwrite any user-provided values for `:path`. 135 | - Default: `/usr/local/:name-:version` for the `:install`, 136 | `:install_with_make` actions 137 | - `home_dir`: symbolic link to the path `:prefix_root/:name-:version`, 138 | does not apply to `:dump`, `:put`, or `:cherry_pick` actions. 139 | - Default: `:prefix_root/:name` 140 | - `has_binaries`: array of binary commands to symlink into 141 | `/usr/local/bin/`; you must specify the relative path. 142 | - Example: `[ 'bin/java', 'bin/javaws' ]` 143 | - `append_env_path`: boolean, similar to `has_binaries` but less 144 | granular. If true, append the `./bin` directory of the extracted 145 | directory to the `PATH` environment variable for all users, by 146 | placing a file in `/etc/profile.d/`. The commands are symbolically 147 | linked into `/usr/bin/*`; `has_binaries` provides more granularity 148 | than this boolean option. 149 | - Example: `mvn`, `java`, `javac`, etc. 150 | - `environment`: hash of environment variables to pass to invoked 151 | shell commands like `tar`, `unzip`, `configure`, and `make`. 152 | - `strip_leading_dir`: by default, ark strips the leading directory 153 | from an archive; this is the default for both `unzip` and `tar` 154 | archives. 155 | - `autoconf_opts`: an array of command line options for use with the 156 | GNU `autoconf` script. 157 | - Example: `[ '--include=/opt/local/include', '--force' ]` 158 | - `make_opts`: an array of command line options for use with `make`. 159 | - Example: `[ '--warn-undefined-variables', '--load-average=2' ]` 160 | - `owner`: owner of the extracted directory.
162 | ### Examples
163 | 
164 |     # install Apache Ivy dependency resolution tool
165 |     ark "ivy" do
166 |       url 'http://someurl.example.com/ivy.tar.gz'
167 |       version '2.2.0'
168 |       checksum '89ba5fde0c596db388c3bbd265b63007a9cc3df3a8e6d79a46780c1a39408cb5'
169 |       action :install
170 |     end
171 | 
172 | This example copies `ivy.tar.gz` to
173 | `/var/cache/chef/ivy-2.2.0.tar.gz`, unpacks its contents to
174 | `/usr/local/ivy-2.2.0/` -- stripping the leading directory, and
175 | symlinks `/usr/local/ivy` to `/usr/local/ivy-2.2.0`.
176 | 
177 |     ark 'jdk' do
178 |       url 'http://download.example.com/jdk-7u2-linux-x64.tar.gz'
179 |       version '7.2'
180 |       path "/usr/local/jvm/"
181 |       home_dir "/usr/local/jvm/default"
182 |       checksum '89ba5fde0c596db388c3bbd265b63007a9cc3df3a8e6d79a46780c1a39408cb5'
183 |       append_env_path true
184 |       owner 'foobar'
185 |     end
186 | 
187 | This example copies `jdk-7u2-linux-x64.tar.gz` to
188 | `/var/cache/chef/jdk-7.2.tar.gz`, unpacks its contents to
189 | `/usr/local/jvm/jdk-7.2/` -- stripping the leading directory, symlinks
190 | `/usr/local/jvm/default` to `/usr/local/jvm/jdk-7.2`, and adds
191 | `/usr/local/jvm/jdk-7.2/bin/` to the global `PATH` for all users. The
192 | user 'foobar' is the owner of the `/usr/local/jvm/jdk-7.2` directory.
193 | 
194 |     # install Apache Ivy dependency resolution tool
195 |     # in /resource_name in this case
196 |     # /usr/local/ivy, no symlink created
197 |     # it strips any leading directory if one exists in the tarball
198 | 
199 |     ark "ivy" do
200 |       url 'http://someurl.example.com/ivy.tar.gz'
201 |       checksum '89ba5fde0c596db388c3bbd265b63007a9cc3df3a8e6d79a46780c1a39408cb5'
202 |       action :put
203 |     end
204 | 
205 |     # install Apache Ivy dependency resolution tool
206 |     # in /home/foobar/ivy
207 |     # it does strip any leading directory if one exists
208 | 
209 |     ark "ivy" do
210 |       path "/home/foobar"
211 |       url 'http://someurl.example.com/ivy.tar.gz'
212 |       checksum '89ba5fde0c596db388c3bbd265b63007a9cc3df3a8e6d79a46780c1a39408cb5'
213 |       action :put
214 |     end
215 | 
216 |     # strip all directories and dump files into the path specified by
217 |     # the path attribute; you must specify the `creates` attribute
218 |     # in order to keep the extraction from running every time
219 |     # the directory path will be created if it doesn't already exist
220 | 
221 |     ark "my_jars" do
222 |       url "http://example.com/bunch_of_jars.zip"
223 |       path "/usr/local/tomcat/lib"
224 |       creates "mysql.jar"
225 |       owner "tomcat"
226 |       action :dump
227 |     end
228 | 
229 |     # extract specific files from a tarball; currently only handles
230 |     # one named file
231 | 
232 |     ark 'mysql-connector-java' do
233 |       url 'http://oracle.com/mysql-connector.zip'
234 |       creates 'mysql-connector-java-5.0.8-bin.jar'
235 |       path '/usr/local/tomcat/lib'
236 |       action :cherry_pick
237 |     end
238 | 
239 |     # build and install haproxy and use alternative values for
240 |     # prefix_root, prefix_home, and prefix_bin
241 | 
242 |     ark "haproxy" do
243 |       url "http://haproxy.1wt.eu/download/1.5/src/snapshot/haproxy-ss-20120403.tar.gz"
244 |       version "1.5"
245 |       checksum 'ba0424bf7d23b3a607ee24bbb855bb0ea347d7ffde0bec0cb12a89623cbaf911'
246 |       make_opts [ 'TARGET=linux26' ]
247 |       prefix_root '/opt'
248 |       prefix_home '/opt'
249 |       prefix_bin '/opt/bin'
250 |       action :install_with_make
251 |     end
252 | 
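The `environment` attribute can be combined with the build actions like this (a sketch; the URL, variable values, and options are hypothetical):

    # pass environment variables to the commands ark invokes
    # (tar, configure, make)

    ark "mylib" do
      url 'http://example.com/mylib.tar.gz'
      environment 'CFLAGS' => '-O2 -pipe'
      autoconf_opts [ '--prefix=/usr/local' ]
      action [ :configure, :install_with_make ]
    end
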
253 |     # you can also pass multiple actions to ark and supply the file extension
254 |     # in case the file extension can not be determined by the URL
255 | 
256 |     ark "test_autogen" do
257 |       url 'https://github.com/zeromq/libzmq/tarball/master'
258 |       extension "tar.gz"
259 |       action [ :configure, :build_with_make ]
260 |     end
261 | 
262 | License and Author
263 | ==================
264 | 
265 | - Author: Philip (flip) Kromer - Infochimps, Inc.
266 | - Author: Bryan W. Berry
267 | - Author: Denis Barishev
268 | - Copyright: 2011, Philip (flip) Kromer - Infochimps, Inc
269 | - Copyright: 2012, Bryan W. Berry
270 | - Copyright: 2012, Denis Barishev
271 | 
272 | Licensed under the Apache License, Version 2.0 (the "License");
273 | you may not use this file except in compliance with the License.
274 | You may obtain a copy of the License at
275 | 
276 | http://www.apache.org/licenses/LICENSE-2.0
277 | 
278 | Unless required by applicable law or agreed to in writing, software
279 | distributed under the License is distributed on an "AS IS" BASIS,
280 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
281 | See the License for the specific language governing permissions and
282 | limitations under the License.
--------------------------------------------------------------------------------
/elasticsearch/README.markdown:
--------------------------------------------------------------------------------
1 | Description
2 | -----------
3 | 
4 | This _Chef_ cookbook installs and configures the [_Elasticsearch_](http://www.elasticsearch.org)
5 | search engine on a Linux-compatible operating system.
6 | 
7 | -----
8 | ### Important Upgrade Notice ###
9 | 
10 | As of version 0.2.0, the Elasticsearch cookbook available from the Opscode community site is no longer
11 | compatible with the previous version. If you are a user of the previous cookbook, please be aware that
12 | there is no recommended upgrade process and you have to actively test the upgrade in your environment.
13 | If you have questions, please leave a message in the comments section on the community site. Thanks!
14 | 
15 | -----
16 | 
17 | It requires a working _Java_ installation on the target node; add your preferred `java` cookbook to the node `run_list`.
18 | 
19 | The cookbook downloads the _elasticsearch_ tarball (via the [`ark`](http://github.com/bryanwb/chef-ark) provider),
20 | unpacks and moves it to the directory you have specified in the node configuration (`/usr/local/elasticsearch` by default).
21 | 
22 | It installs a service which enables you to start, stop, restart and check the status of the _elasticsearch_ process.
23 | 
24 | If you include the `elasticsearch::monit` recipe, it will create a configuration file for _Monit_,
25 | which will check whether _elasticsearch_ is running, reachable by HTTP and the cluster is in the "green" state.
26 | (Assuming you have included a compatible ["monit" cookbook](http://community.opscode.com/cookbooks/monit)
27 | in your run list first.)
28 | 
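A minimal role sketch for the _Monit_ integration might look like this (the role name is hypothetical; note the "monit" cookbook comes first in the run list):

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ruby
name "elasticsearch-monitored"
description "Elasticsearch node watched by Monit"
run_list "recipe[monit]", "recipe[elasticsearch]", "recipe[elasticsearch::monit]"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
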
29 | If you include the `elasticsearch::aws` recipe, the
30 | [AWS Cloud Plugin](http://github.com/elasticsearch/elasticsearch-cloud-aws) will be installed on the node,
31 | allowing you to use the _Amazon_ AWS-related features (node auto-discovery, etc.).
32 | Set your AWS credentials either in the "elasticsearch/aws" data bag, or directly in the role/node configuration.
33 | Instead of using AWS access tokens, you can create the instance with an
34 | [IAM role](http://aws.amazon.com/iam/faqs/#How_do_i_get_started_with_IAM_roles_for_EC2_instances).
35 | 
36 | If you include the `elasticsearch::data` and `elasticsearch::ebs` recipes, an EBS volume will be automatically
37 | created, formatted and mounted so you can use it as a local gateway for _Elasticsearch_.
38 | When the EBS configuration contains a `snapshot_id` value, it will be created with data from the corresponding snapshot. See the `attributes/data` file for more information.
39 | 
40 | If you include the `elasticsearch::proxy` recipe, it will configure the _Nginx_ server as
41 | a reverse proxy for _Elasticsearch_, so you may access it remotely with HTTP authentication.
42 | Set the credentials either in an "elasticsearch/users" data bag, or directly in the role/node configuration.
43 | 
44 | 
45 | Usage
46 | -----
47 | 
48 | For an overview, please read the tutorial on
49 | [deploying elasticsearch with _Chef Solo_](http://www.elasticsearch.org/tutorials/deploying-elasticsearch-with-chef-solo/)
50 | which uses this cookbook.
51 | 
52 | For _Chef Server_ based deployment, include the recipes you want to be executed in a
53 | dedicated `elasticsearch` role, or in the node `run_list`.
54 | 
55 | Then, upload the cookbook to the _Chef_ server:
56 | 
57 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
58 | knife cookbook upload elasticsearch
59 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
60 | 
61 | To enable the _Amazon_ AWS related features, include the `elasticsearch::aws` recipe.
62 | You will need to configure the AWS credentials.
63 | 
64 | You may do that in the node configuration (with `knife node edit MYNODE` or in the _Chef Server_ console),
65 | in a role with an `override_attributes` declaration, but it is arguably most convenient to store
66 | the information in an "elasticsearch" _data bag_:
67 | 
68 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
69 | mkdir -p ./data_bags/elasticsearch
70 | echo '{
71 |   "id" : "aws",
72 |   "_default" : {
73 |     "discovery" : { "type": "ec2" },
74 | 
75 |     "cloud" : {
76 |       "aws" : { "access_key": "YOUR ACCESS KEY", "secret_key": "YOUR SECRET ACCESS KEY" },
77 |       "ec2" : { "security_group": "elasticsearch" }
78 |     }
79 |   }
80 | }' >> ./data_bags/elasticsearch/aws.json
81 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
82 | 
83 | Do not forget to upload the data bag to the _Chef_ server:
84 | 
85 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
86 | knife data bag from file elasticsearch aws.json
87 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
88 | 
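If you prefer the role-based approach mentioned above, the same settings might live in a role along these lines (a sketch; the role name is hypothetical and the attribute layout mirrors the data bag):

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ruby
name "elasticsearch-aws"
override_attributes(
  "elasticsearch" => {
    "discovery" => { "type" => "ec2" },
    "cloud" => {
      "aws" => { "access_key" => "YOUR ACCESS KEY", "secret_key" => "YOUR SECRET ACCESS KEY" },
      "ec2" => { "security_group" => "elasticsearch" }
    }
  }
)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
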
89 | To use the EBS related features, use your preferred method of configuring node attributes,
90 | or store the configuration in a data bag called `elasticsearch/data`:
91 | 
92 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~json
93 | {
94 |   "elasticsearch": {
95 |     // ...
96 |     "data" : {
97 |       "devices" : {
98 |         "/dev/sda2" : {
99 |           "file_system"      : "ext3",
100 |           "mount_options"    : "rw,user",
101 |           "mount_path"       : "/usr/local/var/data/elasticsearch/disk1",
102 |           "format_command"   : "mkfs.ext3",
103 |           "fs_check_command" : "dumpe2fs",
104 |           "ebs" : {
105 |             "size"                  : 250, // In GB
106 |             "delete_on_termination" : true,
107 |             "type"                  : "io1",
108 |             "iops"                  : 2000
109 |           }
110 |         }
111 |       }
112 |     }
113 |   }
114 | }
115 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
116 | 
117 | Usually, you will restrict access to _Elasticsearch_ with firewall rules. However, it's convenient
118 | to be able to connect to the _Elasticsearch_ cluster from `curl` or an HTTP client, or to use a
119 | management tool such as [_BigDesk_](http://github.com/lukas-vlcek/bigdesk) or
120 | [_Paramedic_](http://github.com/karmi/elasticsearch-paramedic).
121 | (Don't forget to set the `node.elasticsearch[:nginx][:allow_cluster_api]` attribute to _true_
122 | if you want to access these tools via the proxy.)
123 | 
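For example, a role sketch that switches this attribute on (the role name is hypothetical):

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ruby
name "elasticsearch-proxy-tools"
override_attributes(
  "elasticsearch" => {
    "nginx" => { "allow_cluster_api" => true }
  }
)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
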
124 | To enable authorized access to _elasticsearch_, you need to include the `elasticsearch::proxy` recipe,
125 | which will install, configure and run [_Nginx_](http://nginx.org) as a reverse proxy, allowing users with proper
126 | credentials to connect.
127 | 
128 | Usernames and passwords may be stored in a data bag `elasticsearch/users`:
129 | 
130 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
131 | mkdir -p ./data_bags/elasticsearch
132 | echo '{
133 |   "id" : "users",
134 |   "_default" : {
135 |     "users" : [
136 |       {"username" : "USERNAME", "password" : "PASSWORD"},
137 |       {"username" : "USERNAME", "password" : "PASSWORD"}
138 |     ]
139 |   }
140 | }
141 | ' >> ./data_bags/elasticsearch/users.json
142 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
143 | 
144 | Again, do not forget to upload the data bag to the _Chef_ server:
145 | 
146 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
147 | knife data bag from file elasticsearch users.json
148 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
149 | 
150 | After you have configured the node and uploaded all the information to the _Chef_ server,
151 | run `chef-client` on the node(s):
152 | 
153 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
154 | knife ssh name:elasticsearch* 'sudo chef-client'
155 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
156 | 
157 | Please note that all data bags _must_ have their attributes enclosed in an environment
158 | (use the `_default` environment), as suggested by the Chef
159 | [documentation](http://docs.opscode.com/chef/essentials_data_bags.html#use-data-bags-with-environments).
160 | 
161 | Testing with Vagrant
162 | --------------------
163 | 
164 | The cookbook comes with a [`Vagrantfile`](https://github.com/elasticsearch/cookbook-elasticsearch/blob/master/Vagrantfile), which allows you to test-drive the installation and configuration with
165 | [_Vagrant_](http://vagrantup.com/), a tool for building virtualized infrastructures.
166 | 
167 | First, make sure you have both _VirtualBox_ and _Vagrant_
168 | [installed](http://docs.vagrantup.com/v1/docs/getting-started/index.html).
169 | 
170 | Then, clone this repository into an `elasticsearch` directory on your development machine:
171 | 
172 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
173 | git clone git://github.com/elasticsearch/cookbook-elasticsearch.git elasticsearch
174 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
175 | 
176 | Switch to the cloned repository:
177 | 
178 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
179 | cd elasticsearch
180 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
181 | 
182 | Install the necessary gems with [Bundler](http://gembundler.com):
183 | 
184 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
185 | gem install bundler
186 | bundle install
187 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
188 | 
189 | All the required third-party cookbooks will be automatically installed via the
190 | [_Berkshelf_](http://berkshelf.com) integration. If you want to install them
191 | locally (e.g. to inspect them), use the `berks` command:
192 | 
193 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
194 | berks install --path ./tmp/cookbooks
195 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
196 | 
197 | The `Vagrantfile` supports four Linux distributions:
198 | 
199 | * Ubuntu Precise 64 bit
200 | * Ubuntu Lucid 32 bit
201 | * Ubuntu Lucid 64 bit
202 | * CentOS 6 32 bit
203 | 
204 | Use the `vagrant status` command for more information.
205 | 
206 | We will use the [_Ubuntu Precise 64_](http://vagrantup.com/v1/docs/boxes.html) box for the purpose of this demo.
207 | You may want to test-drive this cookbook on a different distribution; check out the available boxes,
208 | or build a custom one with [_veewee_](https://github.com/jedi4ever/veewee/tree/master/templates).
209 | 
210 | Launch the virtual machine (it will download the box unless you already have it):
211 | 
212 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
213 | time CHEF=latest bundle exec vagrant up precise64
214 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
215 | 
216 | The machine will be started and automatically provisioned with
217 | [_chef-solo_](http://vagrantup.com/v1/docs/provisioners/chef_solo.html).
218 | (Note: You may substitute _latest_ with a specific Chef version.
219 | Set the `UPDATE` environment variable to update packages on the machine as well.)
220 | 
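For instance, to pin the Chef version and also update the machine's packages (the version number and the `UPDATE` value are only examples):

~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
UPDATE=yes CHEF=11.4.4 bundle exec vagrant up precise64
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
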
221 | You'll see _Chef_ debug messages flying by in your terminal, downloading, installing and configuring _Java_,
222 | _Nginx_, _Elasticsearch_, and all the other components.
223 | The process should take less than 10 minutes on a reasonable machine and internet connection.
224 | 
225 | After the process is done, you may connect to _elasticsearch_ via the _Nginx_ proxy from the outside:
226 | 
227 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
228 | curl 'http://USERNAME:PASSWORD@33.33.33.10:8080/test_chef_cookbook/_search?pretty&q=*'
229 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
230 | 
231 | Of course, you should connect to the box with SSH and check things out:
232 | 
233 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~bash
234 | bundle exec vagrant ssh precise64
235 | 
236 | ps aux | grep elasticsearch
237 | service elasticsearch status --verbose
238 | curl http://localhost:9200/_cluster/health?pretty
239 | sudo monit status elasticsearch
240 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
241 | 
242 | The cookbook provides test cases in the `files/default/tests/minitest/` directory,
243 | which are executed as a part of the _Chef_ run in _Vagrant_
244 | (via the [Minitest Chef Handler](https://github.com/calavera/minitest-chef-handler) support).
245 | They check the basic installation mechanics, populate the `test_chef_cookbook` index
246 | with some sample data, perform a simple search, etc.
247 | 
248 | 
249 | Repository
250 | ----------
251 | 
252 | http://github.com/elasticsearch/cookbook-elasticsearch
253 | 
254 | License
255 | -------
256 | 
257 | Author: Karel Minarik and [contributors](http://github.com/elasticsearch/cookbook-elasticsearch/graphs/contributors)
258 | 
259 | License: Apache
--------------------------------------------------------------------------------
/ark/providers/default.rb:
--------------------------------------------------------------------------------
1 | #
2 | # Cookbook Name:: ark
3 | # Provider:: Ark
4 | #
5 | # Author:: Bryan W. Berry
6 | # Copyright 2012, Bryan W. Berry
7 | #
8 | # Licensed under the Apache License, Version 2.0 (the "License");
9 | # you may not use this file except in compliance with the License.
10 | # You may obtain a copy of the License at
11 | #
12 | # http://www.apache.org/licenses/LICENSE-2.0
13 | #
14 | # Unless required by applicable law or agreed to in writing, software
15 | # distributed under the License is distributed on an "AS IS" BASIS,
16 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 | # See the License for the specific language governing permissions and
18 | # limitations under the License.
19 | #
20 | 
21 | def load_current_resource
22 |   @current_resource = Chef::Resource::Ark.new(@new_resource.name)
23 | end
24 | 
25 | def action_download
26 |   unless new_resource.url =~ /^(http|ftp).*$/
27 |     new_resource.url = set_apache_url(new_resource.url)
28 |   end
29 |   unless unpacked? new_resource.path
30 |     f = Chef::Resource::RemoteFile.new(new_resource.release_file, run_context)
31 |     f.source new_resource.url
32 |     if new_resource.checksum
33 |       f.checksum new_resource.checksum
34 |     end
35 |     f.run_action(:create)
36 |   end
37 | end
38 | 
39 | def action_dump
40 |   set_dump_paths
41 |   action_download
42 |   action_dump_contents
43 |   action_set_owner new_resource.path
44 | end
45 | 
46 | def action_install
47 |   set_paths
48 |   action_download
49 |   action_unpack
50 |   action_set_owner new_resource.path
51 |   action_install_binaries
52 |   action_link_paths
53 | end
54 | 
55 | def action_autogen
56 |   b = Chef::Resource::Script::Bash.new("autogen.sh to generate configure", run_context)
57 |   b.cwd new_resource.path
58 |   b.environment new_resource.environment
59 |   b.code "./autogen.sh"
60 |   b.run_action(:run)
61 | end
62 | 
63 | def action_configure
64 |   set_paths
65 |   action_download
66 |   action_unpack
67 |   unless ::File.exists?(::File.join(new_resource.path, 'configure'))
68 |     action_autogen
69 |   end
70 |   unless ::File.exists?(::File.join(new_resource.path, 'config.status'))
71 |     b = Chef::Resource::Execute.new("configure with autoconf", run_context)
72 |     b.cwd new_resource.path
73 |     b.environment new_resource.environment
74 |     b.command "./configure #{new_resource.autoconf_opts.join(' ')}"
75 |     b.run_action(:run)
76 |   end
77 | end
78 | 
79 | def action_build_with_make
80 |   unless new_resource.creates and ::File.exists? new_resource.creates
81 |     set_paths
82 |     action_download
83 |     action_unpack
84 |     b = Chef::Resource::Script::Bash.new("build with make", run_context)
85 |     b.cwd new_resource.path
86 |     b.environment new_resource.environment
87 |     b.code "make #{new_resource.make_opts.join(' ')}"
88 |     b.run_action(:run)
89 |     action_set_owner new_resource.path
90 |     action_link_paths
91 |     action_install_binaries
92 |   end
93 | end
94 | 
95 | def action_install_with_make
96 |   unless new_resource.creates and ::File.exists? new_resource.creates
97 |     action_build_with_make
98 |     b = Chef::Resource::Script::Bash.new("make install", run_context)
99 |     b.cwd new_resource.path
100 |     b.environment new_resource.environment
101 |     b.code "make install"
102 |     b.run_action(:run)
103 |   end
104 | end
105 | 
106 | 
107 | #TODO needs a test, start here http://guide.python-distribute.org/quickstart.html
108 | def action_setup_py_build
109 |   unless new_resource.creates and ::File.exists? new_resource.creates
110 |     set_paths
111 |     action_download
112 |     action_unpack
113 |     b = Chef::Resource::Script::Bash.new("setup.py build", run_context)
114 |     b.cwd new_resource.path
115 |     b.environment new_resource.environment
116 |     b.code "python setup.py build"
117 |     b.run_action(:run)
118 |   end
119 | end
120 | 
121 | #TODO needs a test, start here http://guide.python-distribute.org/quickstart.html
122 | def action_setup_py_install
123 |   unless new_resource.creates and ::File.exists? new_resource.creates
124 |     set_paths
125 |     action_download
126 |     action_unpack
127 |     b = Chef::Resource::Script::Bash.new("setup.py install", run_context)
128 |     b.cwd new_resource.path
129 |     b.environment new_resource.environment
130 |     b.code "python setup.py install"
131 |     b.run_action(:run)
132 |   end
133 | end
134 | 
135 | alias action_setup_py action_setup_py_install
136 | 
137 | def action_link_paths
138 |   l = Chef::Resource::Link.new(new_resource.home_dir, run_context)
139 |   l.to new_resource.path
140 |   l.run_action(:create)
141 | end
142 | 
143 | def action_cherry_pick
144 |   full_path = ::File.join(new_resource.path, new_resource.creates)
145 |   set_dump_paths
146 |   action_download
147 |   action_cherry_pick_contents full_path
148 |   action_set_owner full_path
149 | end
150 | 
151 | def action_put
152 |   set_put_paths
153 |   action_download
154 |   action_unpack
155 |   action_set_owner new_resource.path
156 | end
157 | 
158 | def action_cherry_pick_contents(full_path)
159 |   chef_mkdir_p new_resource.path
160 |   cmd = expand_cmd
161 |   unless unpacked? new_resource.path
162 |     eval("#{cmd}_cherry_pick")
163 |     new_resource.updated_by_last_action(true)
164 |   end
165 | end
166 | 
167 | def action_dump_contents
168 |   full_path = ::File.join(new_resource.path, new_resource.creates)
169 |   chef_mkdir_p new_resource.path
170 |   cmd = expand_cmd
171 |   unless unpacked? full_path
172 |     eval("#{cmd}_dump")
173 |     new_resource.updated_by_last_action(true)
174 |   end
175 | end
176 | 
177 | def action_unpack
178 |   chef_mkdir_p new_resource.path
179 |   cmd = expand_cmd
180 |   unless unpacked? new_resource.path
181 |     eval(cmd)
182 |     new_resource.updated_by_last_action(true)
183 |   end
184 | end
185 | 
186 | def action_set_owner(path)
187 |   require 'fileutils'
188 |   Chef::Log.debug("Setting owner/group on #{path} to #{new_resource.owner}:#{new_resource.group}")
189 |   FileUtils.chown_R new_resource.owner, new_resource.group, path
190 |   Chef::Log.debug("Setting mode on #{path} to #{new_resource.mode}")
191 |   FileUtils.chmod_R new_resource.mode, path
192 | end
193 | 
194 | def action_install_binaries
195 |   unless new_resource.has_binaries.empty?
196 |     new_resource.has_binaries.each do |bin|
197 |       file_name = ::File.join(new_resource.prefix_bin, ::File.basename(bin))
198 |       l = Chef::Resource::Link.new(file_name, run_context)
199 |       l.to ::File.join(new_resource.path, bin)
200 |       l.run_action(:create)
201 |     end
202 |   end
203 |   if new_resource.append_env_path
204 |     append_to_env_path
205 |   end
206 | end
207 | 
208 | private
209 | 
210 | def unpacked?(path)
211 |   if new_resource.creates
212 |     full_path = ::File.join(new_resource.path, new_resource.creates)
213 |   else
214 |     full_path = path
215 |   end
216 |   if ::File.directory? full_path
217 |     if ::File.stat(full_path).nlink == 2
218 |       false
219 |     else
220 |       true
221 |     end
222 |   elsif ::File.exists? full_path
223 |     true
224 |   else
225 |     false
226 |   end
227 | end
228 | 
229 | def expand_cmd
230 |   case parse_file_extension
231 |   when /tar.gz|tgz/ then "tar_xzf"
232 |   when /tar.bz2|tbz/ then "tar_xjf"
233 |   when /zip|war|jar/ then "unzip"
234 |   else raise "Don't know how to expand #{new_resource.url}"
235 |   end
236 | end
237 | 
238 | def set_paths
239 |   release_ext = parse_file_extension
240 |   prefix_bin = new_resource.prefix_bin.nil? ? new_resource.run_context.node['ark']['prefix_bin'] : new_resource.prefix_bin
241 |   prefix_root = new_resource.prefix_root.nil? ? new_resource.run_context.node['ark']['prefix_root'] : new_resource.prefix_root
242 |   if new_resource.prefix_home.nil?
243 |     default_home_dir = ::File.join(new_resource.run_context.node['ark']['prefix_home'], "#{new_resource.name}")
244 |   else
245 |     default_home_dir = ::File.join(new_resource.prefix_home, "#{new_resource.name}")
246 |   end
247 |   # set effective paths
248 |   new_resource.prefix_bin = prefix_bin
249 |   new_resource.version ||= "1" # initialize to one if nil
250 |   new_resource.path = ::File.join(prefix_root, "#{new_resource.name}-#{new_resource.version}")
251 |   new_resource.home_dir ||= default_home_dir
252 |   Chef::Log.debug("path is #{new_resource.path}")
253 |   new_resource.release_file = ::File.join(Chef::Config[:file_cache_path], "#{new_resource.name}.#{release_ext}")
254 | end
255 | 
256 | def set_put_paths
257 |   release_ext = parse_file_extension
258 |   path = new_resource.path.nil? ? new_resource.run_context.node['ark']['prefix_root'] : new_resource.path
259 |   new_resource.path = ::File.join(path, "#{new_resource.name}")
260 |   Chef::Log.debug("path is #{new_resource.path}")
261 |   new_resource.release_file = ::File.join(Chef::Config[:file_cache_path], "#{new_resource.name}.#{release_ext}")
262 | end
263 | 
264 | def set_dump_paths
265 |   release_ext = parse_file_extension
266 |   new_resource.release_file = ::File.join(Chef::Config[:file_cache_path], "#{new_resource.name}.#{release_ext}")
267 | end
268 | 
269 | def parse_file_extension
270 |   if new_resource.extension.nil?
271 |     # purge any trailing redirect
272 |     url = new_resource.url.clone
273 |     url =~ /^https?:\/\/.*(.gz|bz2|bin|zip|jar|tgz|tbz)(\/.*\/)/
274 |     url.gsub!($2, '') unless $2.nil?
275 |     # remove trailing query string
276 |     release_basename = ::File.basename(url.gsub(/\?.*\z/, '')).gsub(/-bin\b/, '')
277 |     # (\?.*)? accounts for a trailing querystring
278 |     Chef::Log.debug("release_basename is #{release_basename}")
279 |     release_basename =~ %r{^(.+?)\.(tar\.gz|tar\.bz2|zip|war|jar|tgz|tbz)(\?.*)?}
280 |     Chef::Log.debug("file_extension is #{$2}")
281 |     new_resource.extension = $2
282 |   end
283 |   new_resource.extension
284 | end
285 | 
286 | def set_apache_url(url_ref)
287 |   raise "Missing required resource attribute url" unless url_ref
288 |   url_ref.gsub!(/:name:/, new_resource.name.to_s)
289 |   url_ref.gsub!(/:version:/, new_resource.version.to_s)
290 |   url_ref.gsub!(/:apache_mirror:/, node['ark']['apache_mirror'])
291 |   url_ref
292 | end
293 | 
294 | 
295 | def unzip
296 |   FileUtils.mkdir_p new_resource.path
297 |   if new_resource.strip_leading_dir
298 |     require 'tmpdir'
299 |     tmpdir = Dir.mktmpdir
300 |     cmd = Mixlib::ShellOut.new("unzip -q -u -o '#{new_resource.release_file}' -d '#{tmpdir}'")
301 |     cmd.run_command
302 |     cmd.error!
303 |     subdirectory_children = Dir.glob("#{tmpdir}/**")
304 |     if subdirectory_children.length == 1
305 |       subdir = subdirectory_children[0]
306 |       subdirectory_children = Dir.glob("#{subdir}/**")
307 |     end
308 |     FileUtils.mv subdirectory_children, new_resource.path
309 |     FileUtils.rm_rf tmpdir
310 |   else
311 |     cmd = Mixlib::ShellOut.new("unzip -q -u -o #{new_resource.release_file} -d #{new_resource.path}")
312 |     cmd.run_command
313 |     cmd.error!
314 |   end
315 | end
316 | 
317 | def unzip_dump
318 |   cmd = Mixlib::ShellOut.new(
319 |     %Q{unzip -j -q -u -o '#{new_resource.release_file}' -d '#{new_resource.path}'}
320 |   )
321 |   cmd.run_command
322 |   cmd.error!
323 | end
324 | 
325 | def unzip_cherry_pick
326 |   b = Chef::Resource::Script::Bash.new(new_resource.name, run_context)
327 |   b.code <<-EOS
328 |     unzip -t #{new_resource.release_file} "*/#{new_resource.creates}"
329 |     if [ $? -eq 11 ] ; then
330 |       unzip -j -o #{new_resource.release_file} "#{new_resource.creates}" -d #{new_resource.path}
331 |     else
332 |       unzip -j -o #{new_resource.release_file} "*/#{new_resource.creates}" -d #{new_resource.path}
333 |     fi
334 |   EOS
335 |   b.run_action(:run)
336 | end
337 | 
338 | def tar_xjf
339 |   untar_cmd("xjf")
340 | end
341 | 
342 | def tar_xzf
343 |   untar_cmd("xzf")
344 | end
345 | 
346 | def tar_xjf_dump
347 |   Chef::Application.fatal!("Cannot yet dump paths for tar archives")
348 | end
349 | 
350 | def tar_xzf_dump
351 |   Chef::Application.fatal!("Cannot yet dump paths for tar archives")
352 | end
353 | 
354 | def tar_xjf_cherry_pick
355 |   untar_cmd_cherry_pick("xjf")
356 | end
357 | 
358 | def tar_xzf_cherry_pick
359 |   untar_cmd_cherry_pick("xzf")
360 | end
361 | 
362 | def untar_cmd(sub_cmd)
363 |   if new_resource.strip_leading_dir
364 |     strip_argument = "--strip-components=1"
365 |   else
366 |     strip_argument = ""
367 |   end
368 | 
369 |   b = Chef::Resource::Script::Bash.new(new_resource.name, run_context)
370 |   b.flags "-x"
371 |   b.code <<-EOH
372 |     tar -#{sub_cmd} #{new_resource.release_file} #{strip_argument} -C #{new_resource.path}
373 |   EOH
374 |   b.run_action(:run)
375 | end
376 | 
377 | def untar_cmd_cherry_pick(sub_cmd)
378 |   dest = ::File.join(new_resource.path, new_resource.creates)
379 |   cmd = Mixlib::ShellOut.new(%Q{#{tar_cmd} -#{sub_cmd} '#{new_resource.release_file}' -C '#{new_resource.path}' #{new_resource.creates};})
380 |   cmd.run_command
381 |   cmd.error!
382 | end
383 | 
384 | def chef_mkdir_p(dir)
385 |   d = Chef::Resource::Directory.new(dir, run_context)
386 |   d.mode '0755'
387 |   d.recursive true
388 |   d.run_action(:create)
389 | end
390 | 
391 | def append_to_env_path
392 |   if platform?("freebsd")
393 |     if new_resource.has_binaries.empty?
394 |       Chef::Log.warn "#{new_resource} specifies append_env_path but that is unimplemented on FreeBSD; " +
395 |         "consider using has_binaries"
396 |     else
397 |       Chef::Log.info "#{new_resource} specifies both has_binaries and append_env_path; " +
398 |         "the latter is a noop on FreeBSD."
399 |     end
400 |     return
401 |   end
402 | 
403 |   new_path = ::File.join(new_resource.path, 'bin')
404 |   Chef::Log.debug("new_path is #{new_path}")
405 |   path = "/etc/profile.d/#{new_resource.name}.sh"
406 |   f = Chef::Resource::File.new(path, run_context)
407 |   f.content <<-EOF
408 |     export PATH=$PATH:#{new_path}
409 |   EOF
410 |   f.mode 0755
411 |   f.owner 'root'
412 |   f.group 'root'
413 |   f.run_action(:create)
414 | 
415 |   bin_path = ::File.join(new_resource.path, 'bin')
416 |   if ENV['PATH'].scan(bin_path).empty?
417 |     ENV['PATH'] = ENV['PATH'] + ':' + bin_path
418 |   end
419 |   Chef::Log.debug("PATH after setting_path is #{ENV['PATH']}")
420 | end
421 | 
422 | def tar_cmd
423 |   platform?("freebsd") ? "gtar" : "tar"
424 | end
--------------------------------------------------------------------------------