├── dev
├── .gitkeep
├── development
│ ├── files
│ │ ├── home
│ │ │ └── vagrant
│ │ │ │ ├── .zlogin
│ │ │ │ ├── .zsh_prompt
│ │ │ │ ├── .zshrc
│ │ │ │ ├── oh-my-zsh
│ │ │ │ └── custom
│ │ │ │ │ └── plugins
│ │ │ │ │ └── spryker
│ │ │ │ │ └── spryker.plugin.zsh
│ │ │ │ └── bin
│ │ │ │ └── xdebug
│ │ └── etc
│ │ │ └── cron.d
│ │ │ └── vagrant-ntpdate
│ └── init.sls
└── mailcatcher
│ ├── files
│ └── etc
│ │ ├── systemd
│ │ └── system
│ │ │ └── mailcatcher.service
│ │ └── init.d
│ │ └── mailcatcher
│ └── init.sls
├── prod
└── .gitkeep
├── qa
└── .gitkeep
├── stag
└── .gitkeep
├── base
├── spryker
│ ├── files
│ │ ├── etc
│ │ │ ├── deploy
│ │ │ │ ├── deploy.key
│ │ │ │ ├── ssh_wrapper.sh
│ │ │ │ ├── config.rb
│ │ │ │ └── functions.rb
│ │ │ ├── nginx
│ │ │ │ ├── htpasswd-zed
│ │ │ │ ├── htpasswd-staging
│ │ │ │ ├── conf.d
│ │ │ │ │ └── backend.conf
│ │ │ │ ├── sites-available
│ │ │ │ │ ├── static.conf
│ │ │ │ │ ├── XX-zed.conf
│ │ │ │ │ └── XX-yves.conf
│ │ │ │ └── spryker
│ │ │ │ │ ├── static.conf
│ │ │ │ │ ├── zed.conf
│ │ │ │ │ └── yves.conf
│ │ │ ├── cron.d
│ │ │ │ └── yves-zed-cleanup
│ │ │ └── php
│ │ │ │ └── 7.1
│ │ │ │ └── fpm
│ │ │ │ └── pool.d
│ │ │ │ ├── yves.conf
│ │ │ │ └── zed.conf
│ │ ├── config
│ │ │ ├── console_env_local.php
│ │ │ ├── config_local.php
│ │ │ └── config_local_XX.php
│ │ └── jenkins_instance
│ │ │ ├── config.xml
│ │ │ └── etc
│ │ │ ├── default
│ │ │ └── jenkins
│ │ │ └── init.d
│ │ │ └── jenkins
│ ├── cleanup-cronjobs.sls
│ ├── init.sls
│ ├── nginx.sls
│ ├── tools.sls
│ ├── install.sls
│ ├── htpasswd.sls
│ ├── macros
│ │ └── jenkins_instance.sls
│ ├── stores.sls
│ ├── deployment.sls
│ └── environments.sls
├── pound
│ ├── files
│ │ └── etc
│ │ │ ├── default
│ │ │ └── pound
│ │ │ └── pound
│ │ │ ├── pound.cfg
│ │ │ └── certs
│ │ │ ├── 1star_local
│ │ │ ├── 2star_local
│ │ │ ├── 3star_local
│ │ │ ├── 4star_local
│ │ │ └── star_spryker_dev
│ └── init.sls
├── newrelic
│ ├── init.sls
│ └── php.sls
├── postfix
│ ├── files
│ │ └── etc
│ │ │ ├── mailname
│ │ │ └── postfix
│ │ │ ├── sasl_passwd
│ │ │ └── main.cf
│ ├── init.sls
│ ├── install.sls
│ └── config.sls
├── elk
│ ├── init.sls
│ ├── files
│ │ ├── etc
│ │ │ ├── systemd
│ │ │ │ └── system
│ │ │ │ │ └── kibana.service
│ │ │ └── filebeat
│ │ │ │ └── filebeat.yml
│ │ └── opt
│ │ │ └── kibana
│ │ │ └── config
│ │ │ └── kibana.yml
│ ├── filebeat.sls
│ └── kibana.sls
├── docker
│ ├── init.sls
│ └── install.sls
├── system
│ ├── files
│ │ └── etc
│ │ │ ├── profile.d
│ │ │ └── fix_charset.sh
│ │ │ ├── salt
│ │ │ └── minion.d
│ │ │ │ └── mine.conf
│ │ │ ├── motd
│ │ │ ├── apt
│ │ │ └── sources.list
│ │ │ └── sudoers
│ ├── systemd.sls
│ ├── motd.sls
│ ├── charset.sls
│ ├── sudoers.sls
│ ├── sysctl.sls
│ ├── time.sls
│ ├── minion.sls
│ ├── utils.sls
│ ├── init.sls
│ ├── filesystems.sls
│ └── repositories.sls
├── hosting
│ ├── files
│ │ ├── simple
│ │ │ └── etc
│ │ │ │ ├── resolv.conf
│ │ │ │ └── hosts
│ │ ├── vagrant
│ │ │ └── etc
│ │ │ │ ├── resolv.conf
│ │ │ │ └── hosts
│ │ └── rackspace
│ │ │ └── etc
│ │ │ └── sudoers.d
│ │ │ └── rackspace-support
│ ├── macros
│ │ └── firewall
│ │ │ └── ufw.sls
│ ├── init.sls
│ ├── simple.sls
│ ├── vagrant.sls
│ ├── firewall.sls
│ ├── claranet.sls
│ ├── filesystem.sls
│ └── rackspace.sls
├── mysql-server
│ ├── dependencies.sls
│ ├── update.sls
│ ├── files
│ │ └── etc
│ │ │ └── mysql
│ │ │ ├── conf.d
│ │ │ ├── strict.cnf
│ │ │ └── binlog.cnf
│ │ │ └── my.cnf
│ ├── init.sls
│ ├── setup.sls
│ └── credentials.sls
├── nodejs
│ ├── update.sls
│ └── init.sls
├── jenkins
│ ├── update.sls
│ ├── init.sls
│ └── install.sls
├── serverspec
│ └── init.sls
├── postgresql
│ ├── update.sls
│ ├── files
│ │ └── etc
│ │ │ └── postgresql
│ │ │ ├── pg_hba.conf
│ │ │ └── postgresql.conf
│ ├── init.sls
│ ├── setup.sls
│ └── credentials.sls
├── rabbitmq
│ ├── update.sls
│ ├── init.sls
│ ├── setup.sls
│ └── credentials.sls
├── elasticsearch
│ ├── update.sls
│ ├── files
│ │ ├── etc
│ │ │ └── logrotate.d
│ │ │ │ └── elasticsearch-instances
│ │ └── elasticsearch_instance
│ │ │ └── etc
│ │ │ ├── default
│ │ │ └── elasticsearch
│ │ │ ├── elasticsearch
│ │ │ ├── logging.yml
│ │ │ └── elasticsearch.yml
│ │ │ └── init.d
│ │ │ └── elasticsearch
│ ├── init.sls
│ ├── environments.sls
│ ├── install.sls
│ └── macros
│ │ └── elasticsearch_instance.sls
├── redis
│ ├── files
│ │ └── etc
│ │ │ ├── logrotate.d
│ │ │ └── redis-instances
│ │ │ ├── systemd
│ │ │ └── system
│ │ │ │ └── redis-server.service
│ │ │ └── init.d
│ │ │ └── redis-server
│ ├── init.sls
│ ├── environments.sls
│ ├── install.sls
│ └── macros
│ │ └── redis_instance.sls
├── php
│ ├── files
│ │ └── etc
│ │ │ └── php
│ │ │ └── 7.1
│ │ │ ├── fpm
│ │ │ └── php-fpm.conf
│ │ │ ├── mods-available
│ │ │ ├── xdebug.ini
│ │ │ └── opcache.ini
│ │ │ └── php.ini
│ ├── init.sls
│ ├── config.sls
│ ├── dependencies.sls
│ ├── update.sls
│ ├── fpm.sls
│ ├── macros
│ │ └── php_module.sls
│ ├── install.sls
│ └── extensions.sls
├── nginx
│ ├── files
│ │ └── etc
│ │ │ └── nginx
│ │ │ ├── conf.d
│ │ │ ├── logformat.conf
│ │ │ ├── real-ip.conf
│ │ │ └── allow-ip.conf
│ │ │ ├── fastcgi_params
│ │ │ └── nginx.conf
│ └── init.sls
├── java
│ └── init.sls
├── samba
│ ├── init.sls
│ └── files
│ │ └── etc
│ │ └── samba
│ │ └── smb.conf
├── settings
│ ├── init.sls
│ ├── port_numbering.sls
│ ├── hosts.sls
│ └── environments.sls
├── user
│ └── init.sls
├── ruby
│ └── init.sls
└── top.sls
├── test
├── .rspec
├── Gemfile
├── spec
│ ├── spec_helper.rb
│ └── server
│ │ ├── pound_spec.rb
│ │ ├── filesystem_spec.rb
│ │ ├── nodejs_spec.rb
│ │ ├── kibana_spec.rb
│ │ ├── packages_spec.rb
│ │ ├── jenkins_spec.rb
│ │ ├── nginx_spec.rb
│ │ ├── mysql_spec.rb
│ │ ├── pgsql_spec.rb
│ │ ├── services_spec.rb
│ │ └── php_spec.rb
├── README.md
├── Rakefile
└── Gemfile.lock
└── LICENSE.txt
/dev/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/prod/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/qa/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/stag/.gitkeep:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/deploy/deploy.key:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/base/pound/files/etc/default/pound:
--------------------------------------------------------------------------------
1 | startup=1
2 |
--------------------------------------------------------------------------------
/base/newrelic/init.sls:
--------------------------------------------------------------------------------
1 | # Todo implement me
2 |
3 |
--------------------------------------------------------------------------------
/base/newrelic/php.sls:
--------------------------------------------------------------------------------
1 | # Todo: implement me
2 |
3 |
--------------------------------------------------------------------------------
/test/.rspec:
--------------------------------------------------------------------------------
1 | --color
2 | --format documentation
3 |
--------------------------------------------------------------------------------
/base/postfix/files/etc/mailname:
--------------------------------------------------------------------------------
1 | {{ grains.nodename }}
2 |
--------------------------------------------------------------------------------
/base/elk/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - .filebeat
3 | - .kibana
4 |
--------------------------------------------------------------------------------
/dev/development/files/home/vagrant/.zlogin:
--------------------------------------------------------------------------------
1 | cd /data/shop/development/current
2 |
--------------------------------------------------------------------------------
/base/docker/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup docker
3 | #
4 |
5 | include:
6 | - .install
7 |
--------------------------------------------------------------------------------
/dev/development/files/home/vagrant/.zsh_prompt:
--------------------------------------------------------------------------------
1 | prompt_hostname="spryker-vagrant"
2 |
--------------------------------------------------------------------------------
/test/Gemfile:
--------------------------------------------------------------------------------
1 | gem 'rake'
2 | gem 'serverspec'
3 | gem 'serverspec-extended-types'
4 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/htpasswd-zed:
--------------------------------------------------------------------------------
1 | spryker:$apr1$QeNfhIRs$HyfdlKMTU./oA9nEGzYmv0
2 |
--------------------------------------------------------------------------------
/base/spryker/files/config/console_env_local.php:
--------------------------------------------------------------------------------
1 | 'ext4' ) }
6 | end
7 |
8 | describe file('/data/shop/development/current') do
9 | it { should be_mounted }
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/base/nodejs/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install NodeJS and Yarn package manager
3 | #
4 |
5 | nodejs:
6 | pkg.installed
7 |
8 | yarn:
9 | pkg.installed
10 |
11 | # Include autoupdate if configured to do so
12 | {% if salt['pillar.get']('autoupdate:nodejs', False) %}
13 | include:
14 | - .update
15 | {% endif %}
16 |
--------------------------------------------------------------------------------
/base/spryker/nginx.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Populate NginX configuration includes, used in VHost definitions.
3 | #
4 |
5 | {% if 'web' in grains.roles %}
6 | /etc/nginx/spryker:
7 | file.recurse:
8 | - source: salt://spryker/files/etc/nginx/spryker
9 | - watch_in:
10 | - cmd: reload-nginx
11 | {% endif %}
12 |
--------------------------------------------------------------------------------
/test/spec/server/nodejs_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'nodejs' do
4 | describe command('/usr/bin/node -v') do
5 | its(:stdout) { should include('v6.') }
6 | end
7 |
8 | describe command('/usr/bin/yarn --version') do
9 | its(:stdout) { should include('1.') }
10 | end
11 | end
12 |
--------------------------------------------------------------------------------
/base/php/files/etc/php/7.1/mods-available/xdebug.ini:
--------------------------------------------------------------------------------
1 | zend_extension=xdebug.so
2 | xdebug.remote_enable=0
3 | xdebug.remote_host=10.10.0.1
4 | xdebug.remote_autostart=0
5 | xdebug.remote_port=9000
6 | # This is needed to prevent max recursion exeception when Twig templates are very complicated
7 | xdebug.max_nesting_level=1000
8 |
--------------------------------------------------------------------------------
/base/spryker/files/jenkins_instance/config.xml:
--------------------------------------------------------------------------------
1 | {%- if environment == "production" -%}
2 | {%- set number_of_processes = grains.num_cpus * 4 -%}
3 | {%- else -%}
4 | {%- set number_of_processes = 2 -%}
5 | {%- endif -%}
6 |
7 |
8 | {{ number_of_processes }}
9 |
--------------------------------------------------------------------------------
/base/system/files/etc/motd:
--------------------------------------------------------------------------------
1 | Welcome to
2 | ______ _
3 | / _____) | |
4 | ( (____ ____ ____ _ _| | _ _____ ____
5 | \____ \| _ \ / ___) | | | |_/ ) ___ |/ ___)
6 | _____) ) |_| | | | |_| | _ (| ____| |
7 | (______/| __/|_| \__ |_| \_)_____)_|
8 | |_| (____/
9 |
10 |
--------------------------------------------------------------------------------
/base/elk/files/etc/systemd/system/kibana.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Kibana 4
3 |
4 | [Service]
5 | Type=simple
6 | User=www-data
7 | Environment=NODE_ENV=production
8 | Environment=CONFIG_PATH=/opt/kibana/config/kibana.yml
9 | ExecStart=/opt/kibana/node/bin/node /opt/kibana/src/cli
10 |
11 | [Install]
12 | WantedBy=multi-user.target
13 |
--------------------------------------------------------------------------------
/base/mysql-server/files/etc/mysql/conf.d/strict.cnf:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | ## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
3 | ###############################################################################
4 |
5 | [mysqld]
6 | sql_mode = STRICT_ALL_TABLES
7 |
--------------------------------------------------------------------------------
/base/nginx/files/etc/nginx/conf.d/logformat.conf:
--------------------------------------------------------------------------------
1 | # This file is managed by Salt!
2 | # Custom logging format with X-Forwarded-For header handling
3 |
4 | log_format extended '$remote_addr $http_x_forwarded_for $http_host $remote_user [$time_local] '
5 | '"$request" $status $body_bytes_sent '
6 | '"$http_referer" "$http_user_agent"';
7 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/sites-available/static.conf:
--------------------------------------------------------------------------------
1 | server {
2 |
3 | listen {{ settings.environments[environment].static.port }};
4 | listen 80;
5 |
6 | server_name {{ settings.environments[environment].static.hostname }};
7 | access_log off;
8 |
9 | root /data/storage/{{ environment }}/static;
10 |
11 | include "spryker/static.conf";
12 | }
13 |
--------------------------------------------------------------------------------
/test/spec/server/kibana_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'Kibana' do
4 | describe service('kibana') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe http_get(5601, 'localhost', '/app/kibana') do
10 | its(:body) { should match /Kibana/ }
11 | its(:body) { should match /good stuff/ }
12 | end
13 | end
14 |
--------------------------------------------------------------------------------
/base/nginx/files/etc/nginx/conf.d/real-ip.conf:
--------------------------------------------------------------------------------
1 | # This file is managed by Salt!
2 | # For requsts coming from loadbalancers (IP's below),
3 | # use X-Forwarded-For header value to determine real client's IP
4 |
5 | set_real_ip_from 127.0.0.1;
6 | set_real_ip_from 10.0.0.0/8;
7 | set_real_ip_from 172.16.0.0/12;
8 | set_real_ip_from 192.168.0.0/16;
9 | real_ip_header "X-Forwarded-For";
10 |
--------------------------------------------------------------------------------
/base/redis/files/etc/systemd/system/redis-server.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Spryker key-value store: {{ environment }}
3 | After=network.target
4 |
5 | [Service]
6 | Type=forking
7 | ExecStart=/usr/bin/redis-server /etc/redis/redis_{{ environment }}.conf
8 | TimeoutStopSec=0
9 | Restart=always
10 | User=redis
11 | Group=redis
12 |
13 | [Install]
14 | WantedBy=multi-user.target
15 |
--------------------------------------------------------------------------------
/base/php/config.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Set php.ini configuration files
3 | #
4 |
5 | # Web apps (FPM)
6 | /etc/php/7.1/fpm/php.ini:
7 | file.managed:
8 | - source: salt://php/files/etc/php/7.1/php.ini
9 | - require:
10 | - pkg: php
11 |
12 | # CLI
13 | /etc/php/7.1/cli/php.ini:
14 | file.managed:
15 | - source: salt://php/files/etc/php/7.1/php.ini
16 | - require:
17 | - pkg: php
18 |
--------------------------------------------------------------------------------
/base/rabbitmq/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install RabbitMQ (Message Queue)
3 | #
4 |
5 | include:
6 | - .setup
7 |
8 | # Create users only if service is enabled
9 | {% if salt['pillar.get']('rabbitmq:enabled', False) %}
10 | - .credentials
11 | {% endif %}
12 |
13 | # Include autoupdate if configured to do so
14 | {% if salt['pillar.get']('autoupdate:rabbitmq', False) %}
15 | - .update
16 | {% endif %}
17 |
--------------------------------------------------------------------------------
/dev/mailcatcher/files/etc/systemd/system/mailcatcher.service:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Mailcatcher
3 | After=network.target
4 |
5 | [Service]
6 | Type=simple
7 | ExecStart=/usr/local/bin/mailcatcher --http-ip 0.0.0.0 --http-port 1080 --smtp-ip 0.0.0.0 --smtp-port 1025 -f
8 | TimeoutStopSec=0
9 | Restart=always
10 | User=www-data
11 | Group=www-data
12 |
13 | [Install]
14 | WantedBy=multi-user.target
15 |
--------------------------------------------------------------------------------
/test/spec/server/packages_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe "Packages" do
4 | packages_absent = [
5 | 'exim4',
6 | 'apache2-bin',
7 | 'apache2.2-bin',
8 | 'php5-fpm',
9 | 'php5-cli',
10 | 'php5-common',
11 | ]
12 |
13 | packages_absent.each do |package|
14 | describe package(package) do
15 | it { should_not be_installed }
16 | end
17 | end
18 | end
19 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/deploy/ssh_wrapper.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # SSH wrapper script
3 | # The path to this script is passed as the GIT_SSH environment variable during deployment.
4 | # It forces ssh to use a custom private key, in this case /etc/deploy/deploy.key.
5 | # The appropriate public key has to be allowed in the git repository.
6 |
7 |
8 | [ -O /tmp/ssh_agent ] && eval `cat /tmp/ssh_agent` &> /dev/null
9 | ssh -i /etc/deploy/deploy.key $1 $2
--------------------------------------------------------------------------------
/base/system/time.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup time-related parameters
3 | #
4 |
5 | # Set system timezone - we always run operating system in ETC
6 | # regardless of location and application settings
7 | Etc/UTC:
8 | timezone.system:
9 | - utc: True
10 |
11 | # NTP for time synchronization
12 | ntp:
13 | pkg:
14 | - installed
15 | service:
16 | - running
17 | - enable: True
18 | - require:
19 | - pkg: ntp
20 |
--------------------------------------------------------------------------------
/base/elk/filebeat.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install and configure filebeat log shipper
3 | #
4 |
5 | filebeat-install:
6 | pkg.installed:
7 | - name: filebeat
8 |
9 | filebeat-service:
10 | service.running:
11 | - name: filebeat
12 | - enable: True
13 |
14 | /etc/filebeat/filebeat.yml:
15 | file.managed:
16 | - source: salt://elk/files/etc/filebeat/filebeat.yml
17 | - template: jinja
18 | - watch_in:
19 | - service: filebeat-service
20 |
--------------------------------------------------------------------------------
/base/spryker/tools.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Define service reload commands here, so that the spryker state does not depend
3 | # on the other states.
4 | #
5 | # The commands here are defined as "cmd.wait", so they only get called if they are
6 | # included in watch_in element and change is triggered.
7 |
8 |
9 | reload-php-fpm:
10 | cmd.wait:
11 | - name: service php7.1-fpm restart
12 |
13 | reload-nginx:
14 | cmd.wait:
15 | - name: service nginx restart
16 |
--------------------------------------------------------------------------------
/base/system/minion.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Set up salt minion parameters to enable the mine mechanism
3 | #
4 |
5 | salt-minion:
6 | service.running:
7 | - enable: True
8 | - watch:
9 | - file: /etc/salt/minion.d/mine.conf
10 | file.managed:
11 | - name: /etc/salt/minion.d/mine.conf
12 | - source: salt://system/files/etc/salt/minion.d/mine.conf
13 |
14 | sync-grains:
15 | module.run:
16 | - name: saltutil.sync_grains
17 | - refresh: True
18 |
--------------------------------------------------------------------------------
/base/php/dependencies.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Dependency packages for PHP extensions
3 | #
4 |
5 | {% set mysql_client_libs_package_name = {
6 | 'stretch': 'libmysqlclient20',
7 | 'wheezy': 'libmysqlclient18',
8 | 'jessie': 'libmysqlclient18',
9 | }.get(grains.lsb_distrib_codename) %}
10 |
11 | php-extension-dependencies:
12 | pkg.installed:
13 | - pkgs:
14 | - pkg-config
15 | - mysql-common
16 | - {{ mysql_client_libs_package_name }}
17 |
18 |
--------------------------------------------------------------------------------
/base/mysql-server/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Prepare local development MySQL server
3 | #
4 |
5 | include:
6 | {%- if salt['pillar.get']('hosting:external_mysql', '') == '' %}
7 | - .setup
8 | {%- endif %}
9 | - .dependencies
10 | - .credentials
11 | # Include autoupdate if configured to do so
12 | {%- if salt['pillar.get']('hosting:external_mysql', '') == '' %}
13 | {%- if salt['pillar.get']('autoupdate:mysql', False) %}
14 | - .update
15 | {%- endif %}
16 | {%- endif %}
17 |
--------------------------------------------------------------------------------
/base/elasticsearch/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Elasticsearch - install
3 | #
4 | # This state performs elasticsearch installation and prepares instances for
5 | # spryker environments.
6 | #
7 |
8 | {%- if salt['pillar.get']('hosting:external_elasticsearch', '') == '' %}
9 | include:
10 | - .install
11 | - .environments
12 | # Include autoupdate if configured to do so
13 | {%- if salt['pillar.get']('autoupdate:elasticsearch', False) %}
14 | - .update
15 | {%- endif %}
16 | {%- endif %}
17 |
--------------------------------------------------------------------------------
/base/nginx/files/etc/nginx/conf.d/allow-ip.conf:
--------------------------------------------------------------------------------
1 | # This file is managed by Salt!
2 | #
3 | # The list of IP addresses allowed to access services directly (without http auth).
4 | # This is required for internal Yves -> Zed communication and Load Balancers health checks.
5 | # Please include all server IP's or networks.
6 |
7 | satisfy any;
8 |
9 | {%- for network in salt['pillar.get']("hosting:http_auth_whitelist", ["127.0.0.1/32"]) %}
10 | allow {{ network }};
11 | {%- endfor %}
12 |
--------------------------------------------------------------------------------
/base/postgresql/files/etc/postgresql/pg_hba.conf:
--------------------------------------------------------------------------------
1 | # TYPE DATABASE USER ADDRESS METHOD
2 | local all postgres trust
3 | local all all peer
4 | host all all 127.0.0.1/32 md5
5 | {%- if 'postgresql_network' in pillar.hosting %}
6 | host all all {{ pillar.hosting.postgresql_network }} md5
7 | {%- endif %}
8 |
--------------------------------------------------------------------------------
/test/spec/server/jenkins_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'Jenkins' do
4 | describe service('jenkins') do
5 | it { should_not be_running }
6 | end
7 |
8 | describe service('jenkins-development') do
9 | it { should be_enabled }
10 | it { should be_running }
11 | end
12 |
13 | describe http_get(10007, 'localhost', '/') do
14 | its(:body) { should match /Jenkins ver. 1/ }
15 | its(:body) { should match /Manage Jenkins/ }
16 | end
17 | end
18 |
--------------------------------------------------------------------------------
/base/php/update.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Update PHP package
3 | #
4 | # Note: this state is included only if pillar setting autoupdate:php is true
5 |
6 | # Update php packages. We have to specify here php7.1-fpm (to work around the
7 | # debian package system installing libapache2-mod-php7.1), php7.1-common (to
8 | # force upgrading php extensions installed via debian packages) and php7.1-dev.
9 | update-php:
10 | pkg.latest:
11 | - pkgs:
12 | - php7.1-fpm
13 | - php7.1-common
14 | - php7.1-dev
15 |
--------------------------------------------------------------------------------
/base/system/utils.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install utility debian packages
3 | #
4 |
5 | base-utilities:
6 | pkg.installed:
7 | - pkgs:
8 | - git
9 | - unzip
10 | - pbzip2
11 | - zsh
12 | - screen
13 | - mc
14 | - curl
15 | - lsof
16 | - htop
17 | - iotop
18 | - dstat
19 | - telnet
20 | - make
21 | - python-apt
22 | - vim
23 | - require:
24 | - cmd: apt-get-update
25 |
26 | git:
27 | pkg.installed:
28 | - fromrepo: git-repo
29 |
--------------------------------------------------------------------------------
/base/java/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install Java Runtime Environment - OpenJDK version 8
3 | #
4 |
5 | ca-certificates-java:
6 | pkg.latest:
7 | - fromrepo: {{ grains.lsb_distrib_codename }}-backports
8 | - refresh: False
9 |
10 | java:
11 | pkg.installed:
12 | - name: openjdk-8-jre-headless
13 | - require:
14 | - pkg: ca-certificates-java
15 | alternatives.set:
16 | - name: java
17 | - path: /usr/lib/jvm/java-8-openjdk-amd64/jre/bin/java
18 | - require:
19 | - pkg: openjdk-8-jre-headless
20 |
--------------------------------------------------------------------------------
/base/samba/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Set up samba for the reversed sharing option. This is optional.
3 | #
4 |
5 | install-smb-server:
6 | pkg.installed:
7 | - name: samba
8 |
9 | /etc/samba/smb.conf:
10 | file.managed:
11 | - source: salt://samba/files/etc/samba/smb.conf
12 | - require:
13 | - pkg: install-smb-server
14 |
15 | samba:
16 | service.dead:
17 | - enable: False
18 | - require:
19 | - pkg: install-smb-server
20 | - file: /etc/samba/smb.conf
21 | - watch:
22 | - file: /etc/samba/smb.conf
23 |
--------------------------------------------------------------------------------
/test/spec/server/nginx_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'nginx' do
4 | describe service('nginx') do
5 | it { should be_enabled }
6 | it { should be_running }
7 | end
8 |
9 | describe port(80) do
10 | it { should be_listening }
11 | end
12 |
13 | describe command('/usr/sbin/nginx -T') do
14 | its(:stderr) { should include('test is successful') }
15 | its(:stdout) { should match(/server_name.*www.*de.*local/) }
16 | its(:stdout) { should match(/server_name.*zed.*de.*local/) }
17 | end
18 | end
19 |
--------------------------------------------------------------------------------
/base/php/fpm.sls:
--------------------------------------------------------------------------------
1 | #
2 | # General PHP-FPM configuration
3 | #
4 |
5 | # FPM global configuration file
6 | /etc/php/7.1/fpm/php-fpm.conf:
7 | file.managed:
8 | - source: salt://php/files/etc/php/7.1/fpm/php-fpm.conf
9 |
10 | # Remove the default pool
11 | /etc/php/7.1/fpm/pool.d/www.conf:
12 | file.absent
13 |
14 | # Enable or disable FPM service
15 | php7.1-fpm:
16 | service:
17 | {#% if 'web' in grains.roles %#}
18 | - running
19 | - enable: True
20 | {#% else %#}
21 | # - dead
22 | # - enable: False
23 | {#% endif %#}
24 |
--------------------------------------------------------------------------------
/base/system/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # This is the first, base state, which is preparing basic operating system
3 | # setup, like debian repositories, time configuration, sudoers, install basic
4 | # utility packages/editors, configure salt minion.
5 | #
6 | # No spryker-specific logic should be included here.
7 | #
8 |
9 | include:
10 | - .filesystems
11 | {%- if 'systemd' in grains %}
12 | - .systemd
13 | {%- endif %}
14 | - .repositories
15 | - .minion
16 | - .utils
17 | - .sudoers
18 | - .time
19 | - .sysctl
20 | - .motd
21 | #- .charset
22 |
--------------------------------------------------------------------------------
/base/redis/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # This state downloads and prepares to run Redis-server
3 | #
4 | # Note that this state should be used only in non-production environments,
5 | # as we do not set up any replication/failover mechanism via salt.
6 | # Production environments should run either master-slave replication with failover,
7 | # redis cluster or managed redis (e.g. ObjectRocket at Rackspace or ElastiCache at AWS)
8 |
9 | {%- if salt['pillar.get']('hosting:external_redis', '') == '' %}
10 | include:
11 | - .install
12 | - .environments
13 | {%- endif %}
14 |
--------------------------------------------------------------------------------
/base/elasticsearch/files/elasticsearch_instance/etc/default/elasticsearch:
--------------------------------------------------------------------------------
1 | ES_USER=elasticsearch
2 | ES_GROUP=elasticsearch
3 | ES_HEAP_SIZE={{ settings.environments[environment].elasticsearch.heap_size }}
4 | MAX_OPEN_FILES=65535
5 | MAX_LOCKED_MEMORY=unlimited
6 | LOG_DIR=/data/logs/{{ environment }}/elasticsearch
7 | DATA_DIR=/data/shop/{{ environment }}/shared/elasticsearch
8 | WORK_DIR=/tmp/elasticsearch-{{ environment }}
9 | CONF_DIR=/etc/elasticsearch-{{ environment }}
10 | CONF_FILE=/etc/elasticsearch-{{ environment }}/elasticsearch.yml
11 | RESTART_ON_UPGRADE=true
12 |
--------------------------------------------------------------------------------
/test/spec/server/mysql_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'MySQL' do
4 | credentials="-uroot --password=''"
5 |
6 | describe command("/usr/sbin/mysqld --version") do
7 | its(:stdout) { should include('Ver 5.7') }
8 | end
9 |
10 | describe command("mysql #{credentials} -e 'show databases'") do
11 | its(:stdout) { should include('DE_development_zed') }
12 | its(:stdout) { should include('DE_devtest_zed') }
13 | its(:stdout) { should include('US_development_zed') }
14 | its(:stdout) { should include('US_devtest_zed') }
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/base/hosting/files/simple/etc/hosts:
--------------------------------------------------------------------------------
1 | # This file is managed by Salt
2 |
3 | # IPv4 loopback
4 | 127.0.0.1 localhost
5 |
6 | {%- for environment, environment_details in pillar.environments.items() %}{%- if loop.first %}
7 | {%- for store in pillar.stores %}
8 | 127.0.0.1 {{ settings.environments[environment].stores[store].zed.hostname }}
9 | {%- endfor %}
10 | {%- endif %}{%- endfor %}
11 |
12 | # The following lines are desirable for IPv6 capable hosts
13 | ::1 localhost ip6-localhost ip6-loopback
14 | ff02::1 ip6-allnodes
15 | ff02::2 ip6-allrouters
--------------------------------------------------------------------------------
/base/mysql-server/files/etc/mysql/conf.d/binlog.cnf:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | ## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
3 | ###############################################################################
4 |
5 | [mysqld]
6 | # log_bin = /data/mysql/logs/binlogs
7 | expire_logs_days = 3
8 | sync_binlog = 1
9 | max_binlog_size = 512M
10 | binlog_ignore_db = mysql
11 | log_slave_updates
12 | binlog_format = mixed
13 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/cron.d/yves-zed-cleanup:
--------------------------------------------------------------------------------
1 | PATH=/usr/bin:/usr/sbin:/bin:/sbin
2 |
3 | # Remove all but last 5 shop releases
4 | */30 * * * * root cd /data/shop; for ENV in *; do nice rm -rf `find $ENV/releases -mindepth 1 -maxdepth 1 2>/dev/null | sort -r | tail -n+6`; done
5 |
6 | # Remove tomcat logs older than 4 days
7 | 9 1 * * * root nice rm -rf `find /data/logs/*/tomcat/ -maxdepth 1 -mindepth 1 -mtime +4 2>/dev/null`
8 |
9 | # Remove jenkins builds older than 7 days
10 | 10 1 * * * root nice rm -rf `find /data/shop/*/shared/data/jenkins/jobs/*/builds/ -maxdepth 1 -mindepth 1 -ctime +7 2>/dev/null`
11 |
--------------------------------------------------------------------------------
/base/rabbitmq/setup.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install RabbitMQ (message queue broker)
3 | #
4 |
5 | rabbitmq-server:
6 | pkg.installed:
7 | - name: rabbitmq-server
8 |
9 | rabbitmq-service:
10 | service.running:
11 | - name: rabbitmq-server
12 | - enable: {{ salt['pillar.get']('rabbitmq:enabled', True) }}
13 | - require:
14 | - pkg: rabbitmq-server
15 |
16 | enable-rabbitmq-management:
17 | cmd.run:
18 | - name: rabbitmq-plugins enable rabbitmq_management
19 | - unless: rabbitmq-plugins list | grep '\[[eE]\*\] rabbitmq_management '
20 | - require:
21 | - service: rabbitmq-server
22 |
--------------------------------------------------------------------------------
/base/docker/install.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install docker-engine, docker-compose
3 | #
4 |
5 | docker.io:
6 | pkg.removed
7 |
8 | docker-engine:
9 | pkg.installed
10 |
11 | docker:
12 | service.running:
13 | - enable: True
14 |
15 | # At the moment we need to get docker compose directly from github. If the release version is changed here,
16 | # the source_hash value must be updated as well.
17 | /usr/local/bin/docker-compose:
18 | file.managed:
19 | - source: https://github.com/docker/compose/releases/download/1.8.0/docker-compose-Linux-x86_64
20 | - source_hash: md5=6a598739bda87a591efbcdc9ab734da1
21 | - mode: 755
22 |
--------------------------------------------------------------------------------
/base/hosting/simple.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Provider-specific configuration for hoster: simple
3 | #
4 | # This provider provides a basic setup for a single-machine installation which comes with a pre-configured image as provided by
5 | # managed servers
6 |
7 | include:
8 | - .firewall
9 |
10 | # Networking configuration: setup /etc/hosts, dns configuration
11 | /etc/resolv.conf:
12 | file.managed:
13 | - source: salt://hosting/files/simple/etc/resolv.conf
14 |
15 | /etc/hosts:
16 | file.managed:
17 | - source: salt://hosting/files/simple/etc/hosts
18 | - template: jinja
19 |
20 | # Monitoring: nothing to do
21 |
22 | # Backup: nothing to do
23 |
--------------------------------------------------------------------------------
/test/README.md:
--------------------------------------------------------------------------------
1 | # ServerSpec tests
2 | These tests check server provisioning - running services, etc. - everything
3 | that can be checked without depending on any application code or data
4 |
5 | ## Running it
6 | To execute the test suite: inside the VM, go to the directory with the test files
7 | (i.e. `/srv/salt/test`) and run the complete test suite:
8 | ```
9 | cd /srv/salt/test
10 | sudo rake2.1 spec:server
11 | ```
12 |
13 | ## Prerequisites
14 | Packages required to run ServerSpec tests are installed by Saltstack. If you want
15 | to install them manually, make sure that the system has Ruby installed with the gems
16 | `serverspec`, `serverspec-extended-types` and `rake`.
17 |
--------------------------------------------------------------------------------
/base/php/macros/php_module.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Macro: Enable or disable PHP module
3 | #
4 |
5 | {% macro php_module(name, enable, sapi) -%}
6 | {% if enable %}
7 | enable-php-module-{{ name }}-for-{{ sapi }}:
8 | cmd.run:
9 | - name: phpenmod -v 7.1 -s {{ sapi }} {{ name }}
10 | - unless: phpquery -v 7.1 -s {{ sapi }} -m {{ name }}
11 | - require:
12 | - file: /etc/php/7.1/mods-available/{{ name }}.ini
13 | {% else %}
14 | disable-php-module-{{ name }}-for-{{ sapi }}:
15 | cmd.run:
16 | - name: phpdismod -v 7.1 -s {{ sapi }} {{ name }}
17 | - onlyif: phpquery -v 7.1 -s {{ sapi }} -m {{ name }}
18 | {% endif %}
19 |
20 | {% endmacro %}
21 |
--------------------------------------------------------------------------------
/base/system/files/etc/apt/sources.list:
--------------------------------------------------------------------------------
1 | # This file is managed by Salt!
2 | {%- set mirror = salt['pillar.get']('hosting:debian_mirror_host', 'cloudfront.debian.net') %}
3 | {%- set distro = grains.lsb_distrib_codename %}
4 |
5 | deb http://{{ mirror }}/debian {{ distro }} main contrib non-free
6 | deb-src http://{{ mirror }}/debian {{ distro }} main contrib non-free
7 |
8 | deb http://security.debian.org/ {{ distro }}/updates main contrib non-free
9 | deb-src http://security.debian.org/ {{ distro }}/updates main contrib non-free
10 |
11 | deb http://{{ mirror }}/debian {{ distro }}-updates main
12 | deb-src http://{{ mirror }}/debian {{ distro }}-updates main
13 |
--------------------------------------------------------------------------------
/base/settings/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # This state holds centrally parsed settings, which are taken from grains, pillars and mine
3 | # This state does not perform any operations, but is included in many other states
4 | #
5 |
6 | {% from 'settings/hosts.sls' import host, hosts, elasticsearch, publish_ip with context %}
7 | {% from 'settings/environments.sls' import environments with context %}
8 |
9 | {%- set settings = {} %}
10 | {%- do settings.update ({
11 | 'environments' : environments,
12 | 'host' : host,
13 | 'hosts' : hosts,
14 | 'publish_ip' : publish_ip,
15 | 'elasticsearch' : elasticsearch,
16 | }) %}
17 |
--------------------------------------------------------------------------------
/base/hosting/files/vagrant/etc/hosts:
--------------------------------------------------------------------------------
1 | {% from 'settings/init.sls' import settings with context %}
2 | # This file is managed by Salt
3 |
4 | # IPv4 loopback
5 | 127.0.0.1 localhost
6 | 127.0.0.2 spryker-vagrant
7 |
8 | {%- for environment, environment_details in pillar.environments.items() %}{%- if loop.first %}
9 | {%- for store in pillar.stores %}
10 | 127.0.0.1 {{ settings.environments[environment].stores[store].zed.hostname }}
11 | {%- endfor %}
12 | {%- endif %}{%- endfor %}
13 |
14 | # The following lines are desirable for IPv6 capable hosts
15 | ::1 localhost ip6-localhost ip6-loopback
16 | ff02::1 ip6-allnodes
17 | ff02::2 ip6-allrouters
18 |
--------------------------------------------------------------------------------
/base/php/install.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install PHP and modules available from operating system distribution
3 | #
4 |
5 | php:
6 | pkg.installed:
7 | - pkgs:
8 | - php7.1-dev
9 | - php7.1-bcmath
10 | - php7.1-bz2
11 | - php7.1-cli
12 | - php7.1-fpm
13 | - php7.1-curl
14 | - php7.1-gd
15 | - php7.1-gmp
16 | - php7.1-intl
17 | - php7.1-mbstring
18 | - php7.1-mcrypt
19 | - php7.1-mysql
20 | - php7.1-pgsql
21 | - php7.1-sqlite3
22 | - php7.1-xml
23 | - php7.1-opcache
24 | - php-igbinary
25 | - php-imagick
26 | - php-memcached
27 | - php-msgpack
28 | - php-redis
29 | - php-ssh2
30 |
31 |
--------------------------------------------------------------------------------
/base/elk/files/opt/kibana/config/kibana.yml:
--------------------------------------------------------------------------------
1 | #
2 | # This file is maintained by salt!
3 | #
4 |
5 | port: 5601
6 | host: "0.0.0.0"
7 | elasticsearch_url: "http://{{ pillar.elk.elasticsearch.host }}:{{ pillar.elk.elasticsearch.port }}"
8 | elasticsearch_preserve_host: true
9 | kibana_index: ".kibana"
10 | default_app_id: "discover"
11 | request_timeout: 300000
12 | shard_timeout: 0
13 | verify_ssl: false
14 |
15 | bundled_plugin_ids:
16 | - plugins/dashboard/index
17 | - plugins/discover/index
18 | - plugins/doc/index
19 | - plugins/kibana/index
20 | - plugins/markdown_vis/index
21 | - plugins/metric_vis/index
22 | - plugins/settings/index
23 | - plugins/table_vis/index
24 | - plugins/vis_types/index
25 | - plugins/visualize/index
26 |
--------------------------------------------------------------------------------
/base/spryker/files/jenkins_instance/etc/default/jenkins:
--------------------------------------------------------------------------------
1 | NAME=jenkins-{{ environment }}
2 | JAVA=/usr/bin/java
3 | JAVA_ARGS="-Djava.awt.headless=true"
4 | PIDFILE=/var/run/jenkins/jenkins-{{ environment }}.pid
5 | JENKINS_USER=www-data
6 | JENKINS_GROUP=www-data
7 | JENKINS_WAR=/usr/share/jenkins/jenkins.war
8 | JENKINS_HOME=/data/shop/{{ environment }}/shared/data/common/jenkins
9 | RUN_STANDALONE=true
10 | JENKINS_LOG=/data/logs/{{ environment }}/jenkins.log
11 | MAXOPENFILES=8192
12 | HTTP_PORT={{ settings.environments[environment].jenkins.port }}
13 | AJP_PORT=-1
14 | # FIXME: the PREFIX setting below seems to be ignored by Jenkins /marek
15 | PREFIX=/jenkins
16 | JENKINS_ARGS="--webroot=/var/cache/jenkins/war --httpPort=$HTTP_PORT --ajp13Port=$AJP_PORT"
17 |
--------------------------------------------------------------------------------
/test/spec/server/pgsql_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'PostgreSQL' do
4 | ENV['PGPASSWORD'] = 'mate20mg'
5 |
6 | describe command("psql --user development --host 127.0.0.1 DE_development_zed -c \"SELECT * FROM pg_extension WHERE extname='citext'\"") do
7 | its(:stdout) { should include('1 row') }
8 | end
9 |
10 | describe command("psql --user development --host 127.0.0.1 DE_development_zed -c \"SELECT datname FROM pg_database WHERE datistemplate = false\"") do
11 | its(:stdout) { should include('DE_development_zed') }
12 | its(:stdout) { should include('DE_devtest_zed') }
13 | its(:stdout) { should include('US_development_zed') }
14 | its(:stdout) { should include('US_devtest_zed') }
15 | end
16 | end
17 |
--------------------------------------------------------------------------------
/base/hosting/vagrant.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Provider-specific configuration for hoster: Vagrant
3 | #
4 | # Vagrant actually does not need any hoster-specific configuration.
5 | # This file can be used as an empty placeholder for creating other hoster
6 | # configurations.
7 |
8 | # Firewall: we don't use it, but let's install UFW package
9 | ufw:
10 | pkg.installed
11 |
12 | # Networking configuration: setup /etc/hosts, dns configuration
13 | /etc/resolv.conf:
14 | file.managed:
15 | - source: salt://hosting/files/vagrant/etc/resolv.conf
16 |
17 | # Hosts file: nothing to do, we rely on vagrant-hostmanager
18 |
19 | # Disk drives: nothing to do, we're just using easy vagrant setup
20 |
21 | # Monitoring: nothing to do
22 |
23 | # Backup: nothing to do
24 |
--------------------------------------------------------------------------------
/base/php/files/etc/php/7.1/mods-available/opcache.ini:
--------------------------------------------------------------------------------
1 | ; configuration for php opcache module
2 | ; priority=10
3 | zend_extension=opcache.so
4 | {%- if salt['pillar.get']('php:enable_opcache', True) %}
5 | opcache.enable=1
6 | {%- else %}
7 | opcache.enable=0
8 | {%- endif %}
9 | opcache.enable_cli=1
10 | opcache.max_accelerated_files=8192
11 | opcache.memory_consumption=256
12 | opcache.interned_strings_buffer=16
13 | opcache.fast_shutdown=1
14 | {% if 'dev' in grains.roles %}
15 | ; Check if file updated on each request - for development
16 | opcache.revalidate_freq=0
17 | {% else -%}
18 | ; Check if file updated each 60 seconds - for production
19 | ; To force file reload (e.g. on deployment) - restart php7.1-fpm
20 | opcache.revalidate_freq=60
21 | {%- endif -%}
22 |
--------------------------------------------------------------------------------
/base/redis/environments.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup for multiple environments of Spryker
3 | #
4 | # This implementation is Spryker-specific and it takes data from Spryker pillars
5 | # Instances created here are used by Spryker and are required for production use.
6 | # Each environment works on a separate redis instance.
7 | #
8 | {% from 'settings/init.sls' import settings with context %}
9 | {% from 'redis/macros/redis_instance.sls' import redis_instance with context %}
10 |
11 | /etc/logrotate.d/redis-instances:
12 | file.managed:
13 | - source: salt://redis/files/etc/logrotate.d/redis-instances
14 |
15 | {%- for environment, environment_details in pillar.environments.items() %}
16 | {{ redis_instance(environment, environment_details, settings) }}
17 | {%- endfor %}
18 |
--------------------------------------------------------------------------------
/base/jenkins/install.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install package, remove default service
3 | #
4 |
5 | # Here we use specific version of the package to avoid auth issues with Jenkins 2.0
6 | jenkins:
7 | pkg.installed:
8 | - hold: True
9 | - sources:
10 | - jenkins: http://pkg.jenkins-ci.org/debian-stable/binary/jenkins_1.651.3_all.deb
11 |
12 | disable-jenkins-service:
13 | service.dead:
14 | - name: jenkins
15 | - enable: False
16 | - require:
17 | - pkg: jenkins
18 |
19 | # Make sure that www-data can unpack jenkins war file
20 | /var/cache/jenkins:
21 | file.directory:
22 | - user: www-data
23 | - group: www-data
24 | - mode: 775
25 | - recurse:
26 | - user
27 | - group
28 | - require:
29 | - pkg: jenkins
30 |
--------------------------------------------------------------------------------
/base/mysql-server/setup.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install and configure local MySQL server for development / QA
3 | # For production setup, a high-available solution or DBaaS (database-as-a-service) should be used
4 | #
5 |
6 | mysql-server:
7 | pkg.installed:
8 | - pkgs:
9 | - mysql-community-server
10 | - mysql-community-client
11 | - libmysqlclient20
12 |
13 | mysql:
14 | service.running:
15 | - enable: True
16 | - watch:
17 | - pkg: mysql-server
18 | - file: /etc/mysql/my.cnf
19 |
20 | /etc/mysql/my.cnf:
21 | file.managed:
22 | - source: salt://mysql-server/files/etc/mysql/my.cnf
23 | - template: jinja
24 |
25 | /etc/mysql/conf.d/strict.cnf:
26 | file.managed:
27 | - source: salt://mysql-server/files/etc/mysql/conf.d/strict.cnf
28 |
--------------------------------------------------------------------------------
/base/user/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Create operating system users and group
3 | #
4 |
5 | # Group for developers, to allow deployment with sudo
6 | dev:
7 | group.present:
8 | - system: true
9 |
10 | {% for username, user in pillar.get('user', {}).items() %}
11 |
12 | {{ username }}:
13 | user.present:
14 | - fullname: {{ user.fullname }}
15 | - groups:
16 | - dev
17 | {% if (user.admin is defined) and user.admin %}
18 | - adm
19 | {% endif %}
20 | - shell: {% if user.shell is defined %}{{ user.shell }}{% else %}/bin/bash{% endif %}
21 |
22 | {% if user.ssh_key is defined %}
23 | ssh_auth:
24 | - present
25 | - user: {{ username }}
26 | - name: {{ user.ssh_key }}
27 | - require:
28 | - user: {{ username }}
29 | {% endif %}
30 | {% endfor %}
31 |
--------------------------------------------------------------------------------
/base/hosting/firewall.sls:
--------------------------------------------------------------------------------
1 | ufw:
2 | pkg.installed
3 |
4 | {% from 'hosting/macros/firewall/ufw.sls' import ufw_rule with context %}
5 |
6 | ufw enable:
7 | cmd.run:
8 | - unless: "ufw status| grep 'Status: active'"
9 |
10 | ufw default deny:
11 | cmd.run:
12 | - name: "ufw default deny"
13 | # firewall rules
14 | {{ ufw_rule('allow proto tcp from any to any port 2200', '2200/tcp')}}
15 | {{ ufw_rule('allow 4505/tcp', '4505/tcp') }}
16 | {{ ufw_rule('allow 4506/tcp', '4506/tcp') }}
17 | {{ ufw_rule('allow from 127.0.0.1', '127.0.0.1') }}
18 | {{ ufw_rule('allow 443/tcp', '443/tcp') }}
19 | {{ ufw_rule('allow 80/tcp', '80/tcp') }}
20 | {{ ufw_rule('allow proto tcp from any to any port 22', '22/tcp')}}
21 |
22 | force --force enable:
23 | cmd.run:
24 | - name: "ufw --force enable"
--------------------------------------------------------------------------------
/base/postgresql/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install and configure PostgreSQL database
3 | #
4 | # This state manages the configuration of PostgreSQL database, creates
5 | # data directory in /data and sets up default cluster (main).
6 | # Note that this configuration does not include any failover and/or replication.
7 | # It is suitable to run on development and QA environments.
8 | #
9 | # To deploy Spryker in production, a stable and secure PostgreSQL setup is
10 | # recommended, which includes:
11 | # - backup
12 | # - replication
13 | # - hot-standby slave
14 | # - failover mechanism
15 | # - appropriate hardware
16 |
17 | include:
18 | - .setup
19 | - .credentials
20 | # Include autoupdate if configured to do so
21 | {% if salt['pillar.get']('autoupdate:postgresql', False) %}
22 | - .update
23 | {% endif %}
24 |
--------------------------------------------------------------------------------
/base/elk/files/etc/filebeat/filebeat.yml:
--------------------------------------------------------------------------------
1 | filebeat:
2 | registry_file: /var/lib/filebeat/registry
3 | prospectors:
4 | -
5 | paths:
6 | - "/data/shop/development/current/data/*/logs/YVES/*.log"
7 | input_type: "log"
8 | fields:
9 | application: "YVES"
10 | -
11 | paths:
12 | - "/data/shop/development/current/data/*/logs/ZED/*.log"
13 | input_type: "log"
14 | fields:
15 | application: "ZED"
16 | -
17 | paths:
18 | - "/data/shop/development/current/data/*/logs/application.log"
19 | input_type: "log"
20 |
21 | output:
22 | elasticsearch:
23 | hosts: ["localhost:10005"]
24 | protocol: "http"
25 | index: "logstash"
26 |
27 | shipper:
28 |
29 | logging:
30 | files:
31 | rotateeverybytes: 10485760 # = 10MB
32 |
--------------------------------------------------------------------------------
/base/hosting/claranet.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Provider-specific configuration for hoster: Claranet
3 | #
4 | # Claranet does not need much hoster-specific configuration.
5 | # This file can also be used as a template for creating other hoster
6 | # configurations.
7 |
8 | # Firewall: we don't use it, but let's install UFW package
9 | ufw:
10 | pkg.installed
11 |
12 | # Networking configuration: setup /etc/hosts, dns configuration
13 | /etc/resolv.conf:
14 | file.managed:
15 | - source: salt://hosting/files/vagrant/etc/resolv.conf
16 |
17 | #/etc/hosts:
18 | # file.managed:
19 | # - source: salt://hosting/files/vagrant/etc/hosts
20 | # - template: jinja
21 |
22 | # Disk drives: nothing to do, we're just using easy vagrant setup
23 |
24 | # Monitoring: nothing to do
25 |
26 | # Backup: nothing to do
27 |
--------------------------------------------------------------------------------
/test/spec/server/services_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | services = [
4 | # System Services
5 | 'vboxadd-service',
6 | 'cron',
7 | 'ntp',
8 | 'docker',
9 | # App services
10 | 'postgresql',
11 | 'mysql',
12 | 'rabbitmq-server',
13 | 'redis-server-development',
14 | 'elasticsearch-development',
15 | ]
16 |
17 | describe 'Active services' do
18 | services.each do |service|
19 | describe service(service) do
20 | it { should be_enabled }
21 | it { should be_running }
22 | end
23 | end
24 | end
25 |
26 | dead_services = [
27 | 'redis-server',
28 | 'elasticsearch',
29 | 'jenkins',
30 | ]
31 |
32 | describe 'Inactive services' do
33 | dead_services.each do |service|
34 | describe service(service) do
35 | it { should_not be_running }
36 | end
37 | end
38 | end
39 |
--------------------------------------------------------------------------------
/base/elasticsearch/environments.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup for multiple environments of Spryker
3 | #
4 | # This implementation is Spryker-specific and it takes data from Spryker pillars
5 | # Instances created here are used by Spryker and are required for production use.
6 | # Each environment works on a separate elasticsearch instance.
7 | #
8 | {% from 'settings/init.sls' import settings with context %}
9 | {% from 'elasticsearch/macros/elasticsearch_instance.sls' import elasticsearch_instance with context %}
10 |
11 | /etc/logrotate.d/elasticsearch-instances:
12 | file.managed:
13 | - source: salt://elasticsearch/files/etc/logrotate.d/elasticsearch-instances
14 |
15 | {%- for environment, environment_details in pillar.environments.items() %}
16 | {{ elasticsearch_instance(environment, environment_details, settings) }}
17 | {%- endfor %}
18 |
--------------------------------------------------------------------------------
/base/ruby/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install Ruby and used gems
3 | #
4 |
5 | {% set ruby_package_name = {
6 | 'stretch': 'ruby',
7 | 'wheezy': 'ruby1.9.1',
8 | 'jessie': 'ruby',
9 | }.get(grains.lsb_distrib_codename) %}
10 |
11 | ruby:
12 | pkg.installed:
13 | - pkgs:
14 | - {{ ruby_package_name }}
15 | - ruby-dev
16 | - libncurses5-dev
17 | - build-essential
18 |
19 | compass:
20 | gem.installed
21 |
22 | psych:
23 | gem.installed
24 |
25 | highline:
26 | gem.installed:
27 | - require:
28 | - gem: psych
29 |
30 |
31 | # Install fixed versions, as the 2.8.0+ had problems with changed packet sizes
32 | net-ssh:
33 | gem.installed:
34 | - version: 2.7.0
35 |
36 | net-scp:
37 | gem.installed:
38 | - version: 1.1.2
39 |
40 | net-ssh-multi:
41 | gem.installed:
42 | - version: 1.2.0
43 |
--------------------------------------------------------------------------------
/base/spryker/install.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Installation of spryker-specific packages
3 | # Setup of basic directory structure
4 | #
5 |
6 |
7 | install helper app utilities:
8 | pkg.installed:
9 | - pkgs:
10 | - graphviz
11 | - libjpeg-progs
12 | - libavahi-compat-libdnssd-dev
13 |
14 | /data/shop:
15 | file.directory:
16 | - makedirs: true
17 | - user: www-data
18 | - group: www-data
19 | - dir_mode: 755
20 | - requires:
21 | - file: /data
22 |
23 | /data/logs:
24 | file.directory:
25 | - makedirs: true
26 | - user: www-data
27 | - group: www-data
28 | - dir_mode: 755
29 | - requires:
30 | - file: /data
31 |
32 | /data/storage:
33 | file.directory:
34 | - makedirs: true
35 | - user: www-data
36 | - group: www-data
37 | - dir_mode: 755
38 | - requires:
39 | - file: /data
40 |
--------------------------------------------------------------------------------
/dev/development/files/home/vagrant/.zshrc:
--------------------------------------------------------------------------------
1 | # This file is maintained by Salt!
2 | # local modifications to this file will be preserved - if this file exists,
3 | # salt will not overwrite it.
4 |
5 | ZSH=$HOME/.oh-my-zsh
6 | ZSH_THEME="robbyrussell"
7 | plugins=(gitfast redis-cli spryker sudo)
8 | source $ZSH/oh-my-zsh.sh
9 | [ -f $HOME/.zsh_prompt ] && source $HOME/.zsh_prompt
10 | export PATH=/usr/local/bin:/usr/bin:/bin:/usr/local/sbin:/usr/sbin:/sbin:/home/vagrant/.composer/vendor/bin:/home/vagrant/bin
11 | export PS1='%n@${prompt_hostname} ${ret_status}%{$fg_bold[green]%}%p %{$fg[cyan]%}%c %{$fg_bold[blue]%}$(git_prompt_info)%{$fg_bold[blue]%} % %{$reset_color%}'
12 | export LC_ALL="en_US.UTF-8"
13 | if [ "$LC_CTYPE" = "UTF-8" ]; then export LC_CTYPE=C; fi
14 |
15 | set-vm-name() {
16 | echo "prompt_hostname=\"$1\"" > $HOME/.zsh_prompt
17 | echo "OK, changes will be visible after next login"
18 | }
19 |
--------------------------------------------------------------------------------
/base/samba/files/etc/samba/smb.conf:
--------------------------------------------------------------------------------
1 | [global]
2 | workgroup = WORKGROUP
3 | dns proxy = no
4 | interfaces = 127.0.0.0/8 eth0 eth1
5 | log file = /var/log/samba/log.%m
6 | max log size = 1000
7 | syslog = 0
8 | panic action = /usr/share/samba/panic-action %d
9 | server role = standalone server
10 | passdb backend = tdbsam
11 | obey pam restrictions = yes
12 | unix password sync = yes
13 | passwd program = /usr/bin/passwd %u
14 | passwd chat = *Enter\snew\s*\spassword:* %n\n *Retype\snew\s*\spassword:* %n\n *password\supdated\ssuccessfully* .
15 | pam password change = yes
16 | map to guest = bad user
17 | usershare allow guests = yes
18 |
19 | [project]
20 | comment = Project directory
21 | path = /data/shop/development/
22 | public = yes
23 | browsable = yes
24 | read only = no
25 | create mask = 0775
26 | directory mask = 0775
27 | guest ok = yes
28 |
29 |
--------------------------------------------------------------------------------
/base/pound/files/etc/pound/pound.cfg:
--------------------------------------------------------------------------------
1 | User "www-data"
2 | Group "www-data"
3 | LogLevel 2
4 | Alive 30
5 | Control "/var/run/pound/poundctl.socket"
6 |
7 | ListenHTTPS
8 | Address 0.0.0.0
9 | Port 443
10 | RewriteLocation 0
11 |
12 | xHTTP 1
13 | Service
14 | BackEnd
15 | Address 0.0.0.0
16 | Port 80
17 | End
18 | End
19 |
20 | # Certs must contain file with key, cert and complete ca-bundle of CA
21 | Cert "/etc/pound/certs/star_spryker_dev"
22 | Cert "/etc/pound/certs/1star_local"
23 | Cert "/etc/pound/certs/2star_local"
24 | Cert "/etc/pound/certs/3star_local"
25 | Cert "/etc/pound/certs/4star_local"
26 |
27 | Ciphers "ECDHE-RSA-AES128-SHA256:AES128-GCM-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH"
28 | AddHeader "X-Forwarded-Proto: https"
29 | End
30 |
--------------------------------------------------------------------------------
/test/Rakefile:
--------------------------------------------------------------------------------
1 | require 'rake'
2 | require 'rspec/core/rake_task'
3 |
4 | SPEC_SUITES = [
5 | { id: 'server', pattern: 'spec/server/*_spec.rb' },
6 | ]
7 |
8 | task :spec => 'spec:all'
9 | task :default => :spec
10 |
11 | namespace :spec do
12 | targets = ['localhost']
13 |
14 | targets.each do |target|
15 | original_target = target == "_default" ? target[1..-1] : target
16 | ENV['TARGET_HOST'] = original_target
17 |
18 | desc "Run all tests to #{original_target}"
19 | RSpec::Core::RakeTask.new('all') do |t|
20 | t.pattern = "spec/*/*_spec.rb"
21 | t.verbose = false
22 | t.fail_on_error = true
23 | end
24 |
25 | SPEC_SUITES.each do |suite|
26 | desc "Run #{suite[:id]} tests to #{original_target}"
27 | RSpec::Core::RakeTask.new("#{suite[:id]}") do |t|
28 | t.pattern = suite[:pattern]
29 | t.verbose = true
30 | t.fail_on_error = true
31 | end
32 | end
33 | end
34 | end
35 |
--------------------------------------------------------------------------------
/base/spryker/htpasswd.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Create initial .htpasswd files
3 | # Note - the paths here should be same as paths defined in pillar app config
4 | # Files have replace: False, which means that the contents of the files will
5 | # not be overwritten if the files are changed manually on the servers. This
6 | # state will only create the files if they don't exist (to set up the initial password).
7 | #
8 |
9 | {% if 'web' in grains.roles %}
10 | # The default password for production-zed (yves remains open)
11 | /etc/nginx/htpasswd-zed:
12 | file.managed:
13 | - source: salt://spryker/files/etc/nginx/htpasswd-zed
14 | - user: www-data
15 | - group: www-data
16 | - mode: 640
17 | - replace: False
18 |
19 | # The default password for staging (both yves and zed)
20 | /etc/nginx/htpasswd-staging:
21 | file.managed:
22 | - source: salt://spryker/files/etc/nginx/htpasswd-staging
23 | - user: www-data
24 | - group: www-data
25 | - mode: 640
26 | - replace: False
27 | {% endif %}
28 |
--------------------------------------------------------------------------------
/dev/mailcatcher/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install mailcatcher - http://mailcatcher.me/
3 | #
4 | # MailCatcher runs a super simple SMTP server which catches any message sent to it to display in a web interface.
5 | # Mails delivered via smtp to 127.0.0.1:1025 will be visible in web browser on http://127.0.0.1:1080
6 |
7 | libsqlite3-dev:
8 | pkg.installed:
9 | - require_in:
10 | - gem: mailcatcher
11 |
12 | mailcatcher:
13 | gem.installed
14 |
15 | mailcatcher-systemd-script:
16 | file.managed:
17 | - name: /etc/systemd/system/mailcatcher.service
18 | - mode: 0755
19 | - source: salt://mailcatcher/files/etc/systemd/system/mailcatcher.service
20 | - watch_in:
21 | - cmd: mailcatcher-systemd-reload
22 |
23 | mailcatcher-systemd-reload:
24 | cmd.wait:
25 | - name: systemctl daemon-reload
26 |
27 | mailcatcher-service:
28 | service.running:
29 | - name: mailcatcher
30 | - enable: True
31 | - require:
32 | - file: mailcatcher-systemd-script
33 | - gem: mailcatcher
34 | - cmd: mailcatcher-systemd-reload
35 |
--------------------------------------------------------------------------------
/base/elasticsearch/install.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install Elasticsearch and plugins configured in pillar
3 | #
4 |
5 | elasticsearch-requirements:
6 | pkg.installed:
7 | - pkgs:
8 | - openjdk-8-jre-headless
9 |
10 | elasticsearch:
11 | pkg.installed:
12 | - version: {{ pillar.elasticsearch.version }}
13 | - require:
14 | - pkg: elasticsearch-requirements
15 |
16 | # For each plugin - we need to restart Elasticsearch service on each environment
17 | {%- for shortname, plugin in pillar.elasticsearch.plugins.items() %}
18 | /usr/share/elasticsearch/bin/plugin install {% if plugin.url is defined %}{{ plugin.url }}{% else %}{{ plugin.name }}{% endif %}:
19 | cmd.run:
20 | - unless: test -d /usr/share/elasticsearch/plugins/{{ shortname }}
21 | - require:
22 | - pkg: elasticsearch
23 | - watch_in:
24 | {%- for environment, environment_details in pillar.environments.items() %}
25 | {%- if 'skip_instance_setup' not in environment_details.elasticsearch %}
26 | - service: elasticsearch-{{ environment }}
27 | {%- endif %}
28 | {%- endfor %}
29 | {%- endfor %}
30 |
--------------------------------------------------------------------------------
/base/postfix/config.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Configuratiuon files for local postfix server
3 | #
4 |
5 | # Main configuration file
6 | /etc/postfix/main.cf:
7 | file.managed:
8 | - source: salt://postfix/files/etc/postfix/main.cf
9 | - template: jinja
10 | - user: root
11 | - group: root
12 | - mode: 644
13 | - require:
14 | - pkg: postfix
15 | - watch_in:
16 | - service: postfix
17 |
18 | # Hostname for outgoing mails
19 | /etc/mailname:
20 | file.managed:
21 | - source: salt://postfix/files/etc/mailname
22 | - template: jinja
23 | - user: root
24 | - group: root
25 | - mode: 644
26 |
27 | # SASL authentication for using third-party relays with authentication
28 | /etc/postfix/sasl_passwd:
29 | file.managed:
30 | - source: salt://postfix/files/etc/postfix/sasl_passwd
31 | - template: jinja
32 | - user: root
33 | - group: root
34 | - mode: 644
35 |
36 | run-postmap:
37 | cmd.wait:
38 | - name: /usr/sbin/postmap /etc/postfix/sasl_passwd
39 | - cwd: /
40 | - watch:
41 | - file: /etc/postfix/sasl_passwd
42 | - require:
43 | - file: /etc/postfix/sasl_passwd
44 |
--------------------------------------------------------------------------------
/base/postfix/files/etc/postfix/main.cf:
--------------------------------------------------------------------------------
# This file is managed by salt!

smtpd_banner = $myhostname ESMTP $mail_name (unix)
biff = no
append_dot_mydomain = no
readme_directory = no

# TLS parameters (dummy cert)
# NOTE(review): the self-signed snakeoil cert is fine for internal relaying,
# but should be replaced if any external peer is expected to verify TLS.
smtpd_tls_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem
smtpd_tls_key_file=/etc/ssl/private/ssl-cert-snakeoil.key
smtpd_use_tls=yes
smtpd_tls_session_cache_database = btree:${data_directory}/smtpd_scache
smtp_tls_session_cache_database = btree:${data_directory}/smtp_scache

# Local settings
myhostname = {{ grains.nodename }}
alias_maps = hash:/etc/aliases
alias_database = hash:/etc/aliases
myorigin = /etc/mailname
mydestination = {{ grains.nodename }}, localhost
# Relay from pillar; empty value means direct delivery
relayhost = {{ salt['pillar.get']('postfix:relay:host', '') }}
# Trust loopback only, so this box is not an open relay
mynetworks = 127.0.0.0/8 [::ffff:127.0.0.0]/104 [::1]/128
mailbox_command = procmail -a "$EXTENSION"
mailbox_size_limit = 0
recipient_delimiter = +
inet_interfaces = all

# SASL auth
# Credentials come from /etc/postfix/sasl_passwd (hashed via postmap by salt)
smtp_sasl_auth_enable = yes
smtp_sasl_password_maps = hash:/etc/postfix/sasl_passwd
# Empty options permit plaintext mechanisms - acceptable only over TLS
smtp_sasl_security_options =
32 |
--------------------------------------------------------------------------------
/test/Gemfile.lock:
--------------------------------------------------------------------------------
1 | GEM
2 | specs:
3 | diff-lcs (1.2.5)
4 | multi_json (1.12.1)
5 | net-scp (1.1.2)
6 | net-ssh (>= 2.6.5)
7 | net-ssh (2.7.0)
8 | net-telnet (0.1.1)
9 | rake (11.3.0)
10 | rspec (3.6.0.beta1)
11 | rspec-core (= 3.6.0.beta1)
12 | rspec-expectations (= 3.6.0.beta1)
13 | rspec-mocks (= 3.6.0.beta1)
14 | rspec-core (3.6.0.beta1)
15 | rspec-support (= 3.6.0.beta1)
16 | rspec-expectations (3.6.0.beta1)
17 | diff-lcs (>= 1.2.0, < 2.0)
18 | rspec-support (= 3.6.0.beta1)
19 | rspec-its (1.2.0)
20 | rspec-core (>= 3.0.0)
21 | rspec-expectations (>= 3.0.0)
22 | rspec-mocks (3.6.0.beta1)
23 | diff-lcs (>= 1.2.0, < 2.0)
24 | rspec-support (= 3.6.0.beta1)
25 | rspec-support (3.6.0.beta1)
26 | serverspec (2.37.2)
27 | multi_json
28 | rspec (~> 3.0)
29 | rspec-its
30 | specinfra (~> 2.53)
31 | sfl (2.3)
32 | specinfra (2.63.3)
33 | net-scp
34 | net-ssh (>= 2.7, < 4.0)
35 | net-telnet
36 | sfl
37 |
38 | PLATFORMS
39 | ruby
40 |
41 | DEPENDENCIES
42 | rake
43 | serverspec
44 |
45 | BUNDLED WITH
46 | 1.13.4
47 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2016, Spryker Systems GmbH in cooperation with Kore Poland sp. z o.o.
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in
13 | all copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
21 | THE SOFTWARE.
22 |
--------------------------------------------------------------------------------
/base/elk/kibana.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install Kibana
3 | #
4 |
5 | install-kibana:
6 | cmd.run:
7 | - name: cd /opt && wget -q https://download.elastic.co/kibana/kibana/kibana-{{ pillar.elk.kibana.version }}-linux-x64.tar.gz && tar zxf kibana-{{ pillar.elk.kibana.version }}-*.tar.gz && rm -f kibana-{{ pillar.elk.kibana.version }}-*.tar.gz && chown -R www-data. /opt/kibana-{{ pillar.elk.kibana.version }}-linux-x64
8 | - unless: test -d /opt/kibana-{{ pillar.elk.kibana.version }}-linux-x64
9 |
10 | /opt/kibana:
11 | file.symlink:
12 | - target: /opt/kibana-{{ pillar.elk.kibana.version }}-linux-x64
13 | - require:
14 | - cmd: install-kibana
15 |
16 | /opt/kibana/config/kibana.yml:
17 | file.managed:
18 | - source: salt://elk/files/opt/kibana/config/kibana.yml
19 | - template: jinja
20 | - require:
21 | - file: /opt/kibana
22 | - watch_in:
23 | - service: kibana
24 |
25 | /etc/systemd/system/kibana.service:
26 | file.managed:
27 | - source: salt://elk/files/etc/systemd/system/kibana.service
28 | - template: jinja
29 |
30 | kibana:
31 | service.running:
32 | - enable: True
33 | - require:
34 | - file: /etc/systemd/system/kibana.service
35 | - file: /opt/kibana/config/kibana.yml
36 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/php/7.1/fpm/pool.d/yves.conf:
--------------------------------------------------------------------------------
{#- Size the worker pool: scale with CPU count in production, keep others small -#}
{%- if environment == "production" -%}
{%- set number_of_processes = grains.num_cpus * 3 -%}
{%- else -%}
{%- set number_of_processes = 2 -%}
{%- endif -%}
[{{ environment }}-yves]

; One unix socket per pool; 0666 so the nginx worker can connect
listen = /tmp/.fpm.$pool.sock
listen.backlog = 1000
listen.allowed_clients = 127.0.0.1
listen.mode=0666
user = www-data
group = www-data
; All pm.* values pinned to the same number: effectively a fixed-size pool
pm = dynamic
pm.max_children = {{ number_of_processes }}
pm.start_servers = {{ number_of_processes }}
pm.min_spare_servers = {{ number_of_processes }}
pm.max_spare_servers = {{ number_of_processes }}
; Avoid PHP memory leaks
pm.max_requests = 500

pm.status_path = /php-fpm-status-{{ environment }}-yves
ping.path = /fpm-ping.php
ping.response = OK

; Kill requests that run longer than 30 minutes
request_terminate_timeout = 1800

chdir = /

php_admin_value[memory_limit] = 256M
php_admin_value[expose_php] = off
{%- if environment == "production" %}
php_admin_value[display_errors] = no
{%- endif %}
php_admin_value[error_log] = /data/logs/{{ environment }}/yves-php-errors.log
php_admin_value[newrelic.appname] = "Yves({{ environment }})"
php_admin_value[newrelic.framework] = "symfony2"
--------------------------------------------------------------------------------
/base/spryker/files/etc/php/7.1/fpm/pool.d/zed.conf:
--------------------------------------------------------------------------------
{#- Size the worker pool: scale with CPU count in production, keep others small -#}
{%- if environment == "production" -%}
{%- set number_of_processes = grains.num_cpus * 3 -%}
{%- else -%}
{%- set number_of_processes = 2 -%}
{%- endif -%}
[{{ environment }}-zed]

; One unix socket per pool; 0666 so the nginx worker can connect
listen = /tmp/.fpm.$pool.sock
listen.backlog = 1000
listen.allowed_clients = 127.0.0.1
listen.mode=0666
user = www-data
group = www-data
; All pm.* values pinned to the same number: effectively a fixed-size pool
pm = dynamic
pm.max_children = {{ number_of_processes }}
pm.start_servers = {{ number_of_processes }}
pm.min_spare_servers = {{ number_of_processes }}
pm.max_spare_servers = {{ number_of_processes }}
; Avoid PHP memory leaks
pm.max_requests = 50

pm.status_path = /php-fpm-status-{{ environment }}-zed
ping.path = /fpm-ping.php
ping.response = OK

; Kill requests that run longer than 30 minutes
request_terminate_timeout = 1800

chdir = /

php_admin_value[memory_limit] = 2048M
; Hide the PHP version header - consistent with the Yves pool
php_admin_value[expose_php] = off
{%- if environment == "production" %}
php_admin_value[display_errors] = no
{%- endif %}
; Fixed: this pool previously logged to yves-php-errors.log (copy/paste error)
php_admin_value[error_log] = /data/logs/{{ environment }}/zed-php-errors.log
php_admin_value[newrelic.appname] = "Zed({{ environment }})"
php_admin_value[newrelic.framework] = "symfony2"
php_admin_value[max_execution_time] = 600
38 |
--------------------------------------------------------------------------------
/dev/development/files/home/vagrant/oh-my-zsh/custom/plugins/spryker/spryker.plugin.zsh:
--------------------------------------------------------------------------------
# Composer wrappers with xdebug disabled (xdebug slows composer down massively)
alias composer='php -d xdebug.remote_enable=0 composer.phar'
alias ci='php -d xdebug.remote_enable=0 composer.phar install'
alias cu='php -d xdebug.remote_enable=0 composer.phar update'

# Prefix any command with "debug" to point xdebug at the host IDE
alias debug='XDEBUG_CONFIG="remote_host=10.10.0.1" PHP_IDE_CONFIG="serverName=zed.de.spryker.dev"'

# Run codeception for the development shop (DE store).
# Fixed: use "$@" instead of $* so quoted arguments keep their word boundaries.
codecept () {
  APPLICATION_ENV=development APPLICATION_STORE=DE /data/shop/development/current/vendor/bin/codecept "$@"
}

# Run the console with xdebug attached to the host IDE
debug-console () {
  XDEBUG_CONFIG="remote_host=10.10.0.1" PHP_IDE_CONFIG="serverName=zed.spryker.dev" /data/shop/development/current/vendor/bin/console "$@"
}

console () {
  /data/shop/development/current/vendor/bin/console "$@"
}

# Composer aliases
# NOTE: 'ci' and 'cu' below shadow the definitions at the top of this file;
# behaviour is equivalent because 'composer' itself disables xdebug.
alias c='composer'
alias csu='composer self-update'
alias cu='composer update'
alias cr='composer require'
alias crm='composer remove'
alias ci='composer install'
alias ccp='composer create-project'
alias cdu='composer dump-autoload'
alias cdo='composer dump-autoload --optimize-autoloader'
alias cgu='composer global update'
alias cgr='composer global require'
alias cgrm='composer global remove'
32 |
--------------------------------------------------------------------------------
/base/system/files/etc/sudoers:
--------------------------------------------------------------------------------
# This file is managed by Salt!

# Sanitize the environment for sudo commands; members of "adm" are exempt.
Defaults env_reset
Defaults exempt_group=adm
Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Preserve the ssh agent socket so sudo'd deploys can use agent forwarding
Defaults env_keep+=SSH_AUTH_SOCK

# Host alias specification

# User alias specification

# Cmnd alias specification
# Spryker application specific - allow: deployment, remove deployment locks, restart php
Cmnd_Alias APP = /etc/deploy/deploy.rb *, \
  /bin/rm -f /data/deploy/.lock.production, \
  /bin/rm -f /data/deploy/.lock.staging, \
  /bin/rm -f /data/deploy/.lock.testing, \
  /etc/init.d/php7.1-fpm restart

# User privilege specification
root ALL=(ALL) ALL

# Allow members of group adm to execute any command, without password authentication
%adm ALL=(ALL) NOPASSWD:ALL

# Allow members of group dev to execute specified APP commands, without password authentication
%dev ALL=(root) NOPASSWD:APP

# Allow members of group sudo to execute any command, with password authentication
%sudo ALL=(ALL) ALL

# Include further sudo settings - note that "#includedir" is a statement, not comment
#includedir /etc/sudoers.d
--------------------------------------------------------------------------------
/base/postgresql/files/etc/postgresql/postgresql.conf:
--------------------------------------------------------------------------------
# server
data_directory = '/data/pgsql'
# Listen on all IPv4 interfaces - access control is left to pg_hba.conf
listen_addresses = '0.0.0.0'
port = 5432
max_connections = {{ salt['pillar.get']('postgresql:max_connections', '1024') }}

# memory - every tunable is overridable via pillar with a conservative default
shared_buffers = {{ salt['pillar.get']('postgresql:shared_buffers', '64MB') }}
temp_buffers = {{ salt['pillar.get']('postgresql:temp_buffers', '8MB') }}
work_mem = {{ salt['pillar.get']('postgresql:work_mem', '8MB') }}
maintenance_work_mem = {{ salt['pillar.get']('postgresql:maintenance_work_mem', '128MB') }}

# planner
seq_page_cost = 1.0
random_page_cost = 4.0
effective_cache_size = {{ salt['pillar.get']('postgresql:effective_cache_size', '64MB') }}

# io
# NOTE(review): wal_level=minimal precludes WAL archiving and streaming
# replication; synchronous_commit=off and full_page_writes=off trade a small
# crash-loss/corruption window for speed - confirm this is acceptable for
# every environment this template is rolled out to.
wal_level = minimal
fsync = on
synchronous_commit = off
full_page_writes = off
wal_buffers = -1

# query log
logging_collector = on
log_directory = '/var/log/postgresql/'
log_filename = 'query.log'

# Log every statement slower than 200 ms
log_min_duration_statement = 200
debug_pretty_print = on


# l10n
lc_messages = 'C'
lc_monetary = 'C'
lc_numeric = 'C'
lc_time = 'C'
timezone = 'Etc/UTC'

# vacuum
track_counts = on
autovacuum = on
autovacuum_max_workers = 3
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/sites-available/XX-zed.conf:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | ## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
3 | ###############################################################################
4 |
5 | server {
6 | # Listener for production/staging - requires external LoadBalancer directing traffic to this port
7 | listen {{ settings.environments[environment].stores[store].zed.port }};
8 |
9 | # Listener for testing/development - one host only, doesn't require external LoadBalancer
10 | listen 80;
11 |
12 | server_name {{ settings.environments[environment].stores[store].zed.hostname }};
13 |
14 | keepalive_timeout 0;
15 | access_log /data/logs/{{ environment }}/zed-access.log extended;
16 |
17 | root /data/shop/{{ environment }}/current/public/Zed;
18 |
19 | {%- if settings.environments[environment].stores[store].zed.htpasswd_file is defined %}
20 | auth_basic "Restricted Files";
21 | auth_basic_user_file {{ settings.environments[environment].stores[store].zed.htpasswd_file }};
22 | {%- endif %}
23 |
24 | set $application_env {{ environment }};
25 | set $application_store {{ store }};
26 | include "spryker/zed.conf";
27 | }
28 |
--------------------------------------------------------------------------------
/base/settings/port_numbering.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Constants for port numbering scheme (see README.md file)
3 | #
4 |
5 | environment:
6 | production:
7 | port: '5'
8 | staging:
9 | port: '3'
10 | testing:
11 | port: '1'
12 | devtest:
13 | port: '0'
14 | development:
15 | port: '0'
16 |
17 | store:
18 | DE:
19 | locale: de_DE
20 | appdomain: '00'
21 | PL:
22 | locale: pl_PL
23 | appdomain: '01'
24 | FR:
25 | locale: fr_FR
26 | appdomain: '02'
27 | AT:
28 | locale: de_AT
29 | appdomain: '03'
30 | NL:
31 | locale: nl_NL
32 | appdomain: '04'
33 | CH:
34 | locale: de_CH
35 | appdomain: '05'
36 | BR:
37 | locale: pt_BR
38 | appdomain: '06'
39 | UK:
40 | locale: en_UK
41 | appdomain: '07'
42 | SE:
43 | locale: sv_SE
44 | appdomain: '08'
45 | BE:
46 | locale: nl_BE
47 | appdomain: '09'
48 | US:
49 | locale: en_US
50 | appdomain: '10'
51 | MX:
52 | locale: es_MX
53 | appdomain: '11'
54 | AR:
55 | locale: es_AR
56 | appdomain: '12'
57 | CL:
58 | locale: es_CL
59 | appdomain: '13'
60 | CO:
61 | locale: es_CO
62 | appdomain: '14'
63 | COM:
64 | locale: en_US
65 | appdomain: '98'
66 | EU:
67 | locale: en_UK
68 | appdomain: '99'
69 |
--------------------------------------------------------------------------------
/base/redis/install.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install package, remove default service
3 | #
4 |
5 | redis-server:
6 | pkg:
7 | - installed
8 |
9 | # Make sure that redis bgsave can overcommit virtual memory
10 | vm.overcommit_memory:
11 | sysctl.present:
12 | - value: 1
13 |
14 | # Setup init script(s) for OS without systemd
15 | {%- if "systemd" not in grains %}
16 | # Init script for multiple instances
17 | /etc/init.d/redis-server:
18 | file.managed:
19 | - source: salt://redis/files/etc/init.d/redis-server
20 | - watch_in:
21 | - service: redis-services
22 |
23 | # Remove default redis instance
24 | /etc/redis/redis.conf:
25 | file.absent:
26 | - watch_in:
27 | - service: redis-services
28 |
29 | redis-services:
30 | service.running:
31 | - name: redis-server
32 | - enable: True
33 | - require:
34 | - pkg: redis-server
35 | - file: /etc/init.d/redis-server
36 | - file: /etc/redis/redis.conf
37 |
38 | {%- else %}
39 | redis-service-disable:
40 | service.disabled:
41 | - name: redis-server
42 |
43 | redis-service-dead:
44 | service.dead:
45 | - name: redis-server
46 |
47 | # Try harder, because service.disabled doesn't seem to be good enough
48 | redis-service-disable-really:
49 | cmd.run:
50 | - name: systemctl disable redis-server
51 | - onlyif: systemctl is-enabled redis-server
52 | {%- endif %}
53 |
--------------------------------------------------------------------------------
/base/rabbitmq/credentials.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Manage RabbitMQ credentials
3 | #
4 |
5 | # Delete default guest user
6 | rabbitmq_user_guest:
7 | rabbitmq_user.absent:
8 | - name: guest
9 |
10 | # Create rabbitmq user and vhost for each environment/store
11 | {%- from 'settings/init.sls' import settings with context %}
12 | {%- for environment, environment_details in settings.environments.items() %}
13 | {%- for store in pillar.stores %}
14 |
15 | rabbitmq_vhost_{{ store }}_{{ environment }}_zed:
16 | rabbitmq_vhost.present:
17 | - name: {{ settings.environments[environment].stores[store].rabbitmq.vhost }}
18 |
19 | rabbitmq_user_{{ store }}_{{ environment }}_zed:
20 | rabbitmq_user.present:
21 | - name: {{ settings.environments[environment].stores[store].rabbitmq.username }}
22 | - password: {{ settings.environments[environment].stores[store].rabbitmq.password }}
23 | - perms:
24 | - {{ settings.environments[environment].stores[store].rabbitmq.vhost }}:
25 | - '.*'
26 | - '.*'
27 | - '.*'
28 | - require:
29 | - rabbitmq_vhost: rabbitmq_vhost_{{ store }}_{{ environment }}_zed
30 |
31 | {% endfor %}
32 | {% endfor %}
33 |
34 | # Create admin username for GUI
35 | {%- set admin_user = salt['pillar.get']('rabbitmq:admin_user', False) %}
36 | {%- if admin_user %}
37 | rabbitmq_admin_user:
38 | rabbitmq_user.present:
39 | - name: {{ pillar.rabbitmq.admin_user.username }}
40 | - password: {{ pillar.rabbitmq.admin_user.password }}
41 | - tags:
42 | - administrator
43 | {%- endif %}
44 |
--------------------------------------------------------------------------------
/base/php/extensions.sls:
--------------------------------------------------------------------------------
1 | #
2 | # PHP Extensions:
3 | # - install extensions not provided by deb repositories
4 | # - configure extensions
5 | #
6 | {% from 'php/macros/php_module.sls' import php_module with context %}
7 |
8 | # If pillar enables xdebug - install and configure it
9 | {% if salt['pillar.get']('php:install_xdebug', False) %}
10 | xdebug:
11 | pkg.installed:
12 | - name: php-xdebug
13 |
14 | /etc/php/7.1/mods-available/xdebug.ini:
15 | file.managed:
16 | - source: salt://php/files/etc/php/7.1/mods-available/xdebug.ini
17 | - user: root
18 | - group: root
19 | - mode: 644
20 | - require:
21 | - pkg: xdebug
22 |
23 | {{ php_module('xdebug', salt['pillar.get']('php:enable_xdebug', False), 'fpm') }}
24 | {{ php_module('xdebug', salt['pillar.get']('php:enable_xdebug', False), 'cli') }}
25 | {% endif %}
26 |
27 |
28 | # Configure Zend OpCache extension
29 | /etc/php/7.1/mods-available/opcache.ini:
30 | file.managed:
31 | - source: salt://php/files/etc/php/7.1/mods-available/opcache.ini
32 | - template: jinja
33 | - user: root
34 | - group: root
35 | - mode: 644
36 |
37 | /etc/php/7.1/cli/conf.d/05-opcache.ini:
38 | file.absent
39 |
40 | /etc/php/7.1/fpm/conf.d/05-opcache.ini:
41 | file.absent
42 |
43 | /var/lib/php/modules/7.1/cli/enabled_by_maint/opcache:
44 | file.absent
45 |
46 | /var/lib/php/modules/7.1/fpm/enabled_by_maint/opcache:
47 | file.absent
48 |
49 | {{ php_module('opcache', salt['pillar.get']('php:enable_opcache', True), 'fpm') }}
50 | {{ php_module('opcache', salt['pillar.get']('php:enable_opcache', True), 'cli') }}
51 |
--------------------------------------------------------------------------------
/base/nginx/files/etc/nginx/fastcgi_params:
--------------------------------------------------------------------------------
1 | ###
2 | ### This file is maintained by salt
3 | ###
4 |
5 | fastcgi_param QUERY_STRING $query_string;
6 | fastcgi_param REQUEST_METHOD $request_method;
7 | fastcgi_param CONTENT_TYPE $content_type;
8 | fastcgi_param CONTENT_LENGTH $content_length;
9 |
10 | fastcgi_param SCRIPT_FILENAME $request_filename;
11 | fastcgi_param SCRIPT_NAME $fastcgi_script_name;
12 | fastcgi_param REQUEST_URI $request_uri;
13 | fastcgi_param DOCUMENT_URI $document_uri;
14 | fastcgi_param DOCUMENT_ROOT $document_root;
15 | fastcgi_param SERVER_PROTOCOL $server_protocol;
16 |
17 | fastcgi_param GATEWAY_INTERFACE CGI/1.1;
18 | fastcgi_param SERVER_SOFTWARE nginx/$nginx_version;
19 |
20 | fastcgi_param REMOTE_ADDR $remote_addr;
21 | fastcgi_param REMOTE_PORT $remote_port;
22 | fastcgi_param SERVER_ADDR $server_addr;
23 | fastcgi_param SERVER_PORT $server_port;
24 | fastcgi_param SERVER_NAME $server_name;
25 |
26 | # PHP only, required if PHP was built with --enable-force-cgi-redirect
27 | fastcgi_param REDIRECT_STATUS 200;
28 |
29 | # Are we using ssl? Backward compatibility env, to emulate Apache.
30 | # According to RFC, app should take a look at "X-Forwarded-Proto" header to deterimine if SSL is on.
31 | if ($http_x_forwarded_proto = "https") {
32 | set $have_https on;
33 | }
34 | fastcgi_param HTTPS $have_https;
35 |
36 | # Pass request start time to CGI script - NewRelic uses this to monitor queue wait time
37 | fastcgi_param HTTP_X_REQUEST_START "t=${msec}";
38 |
--------------------------------------------------------------------------------
/base/nginx/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install NginX webserver, setup global configuration
3 | #
4 |
5 | # Install package and setup service
6 | install-nginx:
7 | pkg.installed:
8 | - pkgs:
9 | - nginx-extras
10 | - libnginx-mod-http-headers-more-filter
11 |
12 | nginx:
13 | service.running:
14 | - require:
15 | - pkg: install-nginx
16 | - watch:
17 | - file: /etc/nginx/nginx.conf
18 |
19 | # Apache Utilities - for tools like ab, htpasswd
20 | apache2-utils:
21 | pkg.installed
22 |
23 | # Main nginx configurationf file
24 | /etc/nginx/nginx.conf:
25 | file.managed:
26 | - source: salt://nginx/files/etc/nginx/nginx.conf
27 | - template: jinja
28 | - require:
29 | - pkg: install-nginx
30 |
31 | # Global includes
32 | /etc/nginx/conf.d:
33 | file.recurse:
34 | - source: salt://nginx/files/etc/nginx/conf.d
35 | - template: jinja
36 | - require:
37 | - pkg: install-nginx
38 | - watch_in:
39 | - service: nginx
40 |
41 | # FastCGI parameters
42 | /etc/nginx/fastcgi_params:
43 | file.managed:
44 | - source: salt://nginx/files/etc/nginx/fastcgi_params
45 | - require:
46 | - pkg: install-nginx
47 | - watch_in:
48 | - service: nginx
49 |
50 | # Create directory for SSL certificates
51 | /etc/nginx/ssl:
52 | file.directory:
53 | - user: root
54 | - group: www-data
55 | - mode: 640
56 | - require:
57 | - pkg: install-nginx
58 |
59 | # Delete default vhost
60 | /etc/nginx/sites-enabled/default:
61 | file.absent:
62 | - require:
63 | - pkg: install-nginx
64 | - watch_in:
65 | - service: nginx
66 |
--------------------------------------------------------------------------------
/base/spryker/files/config/config_local.php:
--------------------------------------------------------------------------------
1 | On => On') }
31 | its(:stderr) { should_not include('NOTICE') }
32 | its(:stderr) { should_not include('WARNING') }
33 | its(:stderr) { should_not include('ERROR') }
34 | end
35 |
36 | # Commands from README.md for enabling / disabling xdebug
37 | describe command("phpenmod -v #{PHP_VERSION} -s cli xdebug; phpenmod -v #{PHP_VERSION} -s fpm xdebug && service php#{PHP_VERSION}-fpm restart && php -v") do
38 | its(:stdout) { should include('with Xdebug') }
39 | end
40 |
41 | describe command("phpdismod -v #{PHP_VERSION} -s cli xdebug; phpdismod -v #{PHP_VERSION} -s fpm xdebug; service php#{PHP_VERSION}-fpm restart; php -v") do
42 | its(:stdout) { should_not include('with Xdebug') }
43 | end
44 |
45 | end
46 |
--------------------------------------------------------------------------------
/base/system/filesystems.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Automatically format and mount partitions given in grains.filesystems (ext4 filesystem)
3 | #
4 |
5 | {% for volume, mount_point in grains.get('filesystems', {}).iteritems() %}
6 | create-fs-{{ volume }}:
7 | cmd.run:
8 | - name: mkfs -t ext4 {{ volume }} && tune2fs -o journal_data_writeback {{ volume }} && tune2fs -O ^has_journal {{ volume }} && e2fsck -f -y {{ volume }}
9 | - onlyif: test -b {{ volume }} && file -sL {{ volume }} | grep -v 'ext[234]'
10 |
11 | {{ mount_point }}:
12 | file.directory:
13 | - makedirs: True
14 |
15 | fstab-for-{{ volume }}:
16 | file.append:
17 | - name: /etc/fstab
18 | - text: {{ volume }} {{ mount_point }} ext4 rw,noatime,nodiratime,nobarrier 0 1
19 | - require:
20 | - file: {{ mount_point }}
21 | - cmd: create-fs-{{ volume }}
22 |
23 | mount-fs-{{ volume }}:
24 | cmd.wait:
25 | - name: mount {{ mount_point }}
26 | - watch:
27 | - file: fstab-for-{{ volume }}
28 | - requires:
29 | - file: {{ mount_point }}
30 |
31 | {% endfor %}
32 |
33 | #
34 | # Init and activate swap on devices given in grains
35 | #
36 |
37 | {% for path, size in grains.get('swap', {}).items() %}
38 | init-swap-{{ path }}:
39 | cmd.run:
40 | - name: dd if=/dev/zero of={{ path }} bs=1048576 count={{ size }} && mkswap {{ path }}
41 | - unless: test -f {{ path }}
42 |
43 | fstab-for-swap-{{ path }}:
44 | file.append:
45 | - name: /etc/fstab
46 | - text: {{ path }} none swap sw 0 0
47 | - require:
48 | - cmd: init-swap-{{ path }}
49 |
50 | mount-swap-{{ path }}:
51 | cmd.wait:
52 | - name: swapon {{ path }}
53 | - watch:
54 | - file: fstab-for-swap-{{ path }}
55 |
56 | {% endfor %}
57 |
--------------------------------------------------------------------------------
/base/top.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Topfile - used by salt ... state.highstate
3 | #
4 |
5 | # Production-like setup - we apply specific states to machines
6 | qa:
7 | # apply to all roles
8 | '*':
9 | - system
10 | - hosting
11 | - user
12 | - postfix
13 | - newrelic
14 | - ruby
15 |
16 | # php and application code
17 | 'roles:app':
18 | - match: grain
19 | - php
20 | - spryker
21 | - nodejs
22 |
23 | # nginx and web components
24 | 'roles:web':
25 | - match: grain
26 | - nginx
27 | - newrelic.php
28 | - nodejs
29 |
30 | # jenkins to run cronjob and indexers
31 | 'roles:cronjobs':
32 | - match: grain
33 | - spryker
34 | - java
35 | - jenkins
36 | - newrelic.php
37 |
38 | # elasticsearch (for spryker data)
39 | 'roles:elasticsearch':
40 | - match: grain
41 | - java
42 | - elasticsearch
43 |
44 | # Rabbit MQ
45 | 'roles:queue':
46 | - match: grain
47 | - rabbitmq
48 |
49 | # Redis
50 | 'roles:redis':
51 | - match: grain
52 | - redis
53 |
54 | # Database
55 | 'roles:postgresq':
56 | - match: grain
57 | - postgresql
58 | 'roles:mysql':
59 | - match: grain
60 | - mysql-server
61 |
62 | base:
63 | '*':
64 | - system
65 | - user
66 |
67 | dev:
68 | # apply all states on a single machine, don't divide by roles
69 | '*':
70 | - system
71 | - hosting
72 | - user
73 | - postfix
74 | - docker
75 | - mysql-server
76 | - postgresql
77 | - rabbitmq
78 | - ruby
79 | - nodejs
80 | - php
81 | - java
82 | - development
83 | - mailcatcher
84 | - elk
85 | - nginx
86 | - pound
87 | - jenkins
88 | - redis
89 | - samba
90 | - elasticsearch
91 | - serverspec
92 | - spryker
93 |
--------------------------------------------------------------------------------
/base/elasticsearch/files/elasticsearch_instance/etc/elasticsearch/logging.yml:
--------------------------------------------------------------------------------
1 | # you can override this using by setting a system property, for example -Des.logger.level=DEBUG
2 | es.logger.level: INFO
3 | rootLogger: ${es.logger.level}, console, file
4 | logger:
5 | # log action execution errors for easier debugging
6 | action: DEBUG
7 | # reduce the logging for aws, too much is logged under the default INFO
8 | com.amazonaws: WARN
9 |
10 | # gateway
11 | #gateway: DEBUG
12 | #index.gateway: DEBUG
13 |
14 | # peer shard recovery
15 | #indices.recovery: DEBUG
16 |
17 | # discovery
18 | #discovery: TRACE
19 |
20 | index.search.slowlog: TRACE, index_search_slow_log_file
21 | index.indexing.slowlog: TRACE, index_indexing_slow_log_file
22 |
23 | additivity:
24 | index.search.slowlog: false
25 | index.indexing.slowlog: false
26 |
27 | appender:
28 | console:
29 | type: console
30 | layout:
31 | type: consolePattern
32 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
33 |
34 | file:
35 | type: dailyRollingFile
36 | file: ${path.logs}/${cluster.name}.log
37 | datePattern: "'.'yyyy-MM-dd"
38 | layout:
39 | type: pattern
40 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
41 |
42 | index_search_slow_log_file:
43 | type: dailyRollingFile
44 | file: ${path.logs}/${cluster.name}_index_search_slowlog.log
45 | datePattern: "'.'yyyy-MM-dd"
46 | layout:
47 | type: pattern
48 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
49 |
50 | index_indexing_slow_log_file:
51 | type: dailyRollingFile
52 | file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
53 | datePattern: "'.'yyyy-MM-dd"
54 | layout:
55 | type: pattern
56 | conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
57 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/sites-available/XX-yves.conf:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | ## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
3 | ###############################################################################
4 |
5 | server {
6 | # Listener for production/staging - requires external LoadBalancer directing traffic to this port
7 | listen {{ settings.environments[environment].stores[store].yves.port }};
8 |
9 | # Listener for testing/development - one host only, doesn't require external LoadBalancer
10 | listen 80;
11 |
12 | server_name {{ settings.environments[environment].stores[store].yves.hostnames|join(' ') }};
13 | access_log /data/logs/{{ environment }}/yves-access.log extended;
14 |
15 | {%- if settings.environments[environment].stores[store].yves.htpasswd_file is defined %}
16 | auth_basic "Restricted Files";
17 | auth_basic_user_file {{ settings.environments[environment].stores[store].yves.htpasswd_file }};
18 | {%- endif %}
19 |
20 | root /data/shop/{{ environment }}/current/public/Yves;
21 |
22 | set $application_env {{ environment }};
23 | set $application_store {{ store }};
24 |
25 | include "spryker/yves.conf";
26 |
27 | ###
28 | ### Rewrite rules
29 | ###
30 | ### Beware of load balancer heartbeat check (/monitoring/heartbeat), it must stay not rewritten.
31 |
32 | {%- if settings.environments[environment].stores[store].yves.hostnames|count == 1 %}
33 | ## Allow only first (default) hostname
34 | ## This feature is disabled as it should clarify production requirements for each site
35 | #if ($host != $server_name) {
36 | # rewrite ^(.*)$ http://$server_name$1 permanent;
37 | #}
38 | {%- endif %}
39 |
40 | }
41 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/spryker/static.conf:
--------------------------------------------------------------------------------
1 | ###
2 | ### Common Static settings (to be included in vhost definition)
3 | ### This file is maintained by salt
4 | ###
5 |
6 | # Protected files - require password, no caching
7 | location ~ .*/protected/.* {
8 | expires off;
9 | add_header Cache-Control "no-cache";
10 | try_files $uri =404;
11 | add_header X-Server $hostname;
12 | auth_basic "Restricted Files";
13 | auth_basic_user_file /etc/nginx/htpasswd;
14 | satisfy all;
15 | }
16 |
17 | # Web fonts
18 | location ~ \.(eot|ttf|woff|svg|otf)$ {
19 | expires 365d;
20 | add_header Cache-Control "public";
21 | try_files $uri =404;
22 | add_header X-Server $hostname;
23 | more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
24 | add_header Access-Control-Allow-Origin *;
25 | }
26 |
27 |
28 | # Data files - no caching
29 | location ~ \.(xml|txt|csv)$ {
30 | expires off;
31 | add_header Cache-Control "no-cache";
32 | try_files $uri =404;
33 | add_header X-Server $hostname;
34 | more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
35 | }
36 |
37 | # Static files - default
38 | location / {
39 | expires 30d;
40 | add_header Cache-Control "public";
41 | try_files $uri $uri/ =404;
42 | add_header X-Server $hostname;
43 | more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
44 | add_header Access-Control-Allow-Origin *;
45 | }
46 |
47 | # SEO rewrite for product images (multi-store)
48 | rewrite "^/([A-Z]{2})/images/(.*)-([0-9]{3})([0-9]{1,})-(.*)-([a-zA-Z0-9]{2})(.*)$" /$1/images/products/processed/$3/$4/$5$7;
49 |
--------------------------------------------------------------------------------
/base/hosting/filesystem.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup filesystems
3 | #
4 | # This salt state can be useful in cloud setup, where we have several block
5 | # devices attached to the machines and need to format them.
6 | #
7 | #
8 |
9 | {% for fs, fs_details in pillar.get('filesystems', {}).items() %}
10 | create-fs-{{ fs }}:
11 | cmd.run:
12 | - name: mkfs -t {{ fs_details.filesystem }} {{ fs_details.disk }}{{ fs_details.partition }}
13 | - onlyif: test -b {{ fs_details.disk }} && parted {{ fs_details.disk }} print | grep '^ *{{ fs_details.partition }}.*GB' | grep -v '{{ fs_details.filesystem }}'
14 | - requires:
15 | - pkg: filesystem-tools
16 |
17 | {{ fs_details.mount_point }}:
18 | file.directory
19 |
20 | fstab-for-{{ fs }}:
21 | file.append:
22 | - name: /etc/fstab
23 | - text: {{ fs_details.disk }}{{ fs_details.partition }} {{ fs_details.mount_point }} {{ fs_details.filesystem }} {{ fs_details.mount_options }} 0 1
24 | - require:
25 | - file: {{ fs_details.mount_point }}
26 | - cmd: create-fs-{{ fs }}
27 |
28 | mount-fs-{{ fs }}:
29 | cmd.wait:
30 | - name: mount {{ fs_details.mount_point }}
31 | - watch:
32 | - file: fstab-for-{{ fs }}
33 | - requires:
34 | - file: {{ fs_details.mount_point }}
35 | {% endfor %}
36 |
37 | {% for path, details in pillar.get('swap', {}).items() %}
38 | init-swap-{{ path }}:
39 | cmd.run:
40 | - name: dd if=/dev/zero of={{ path }} bs=1048576 count={{ details.size }} && mkswap {{ path }}
41 | - unless: test -f {{ path }}
42 |
43 | fstab-for-swap-{{ path }}:
44 | file.append:
45 | - name: /etc/fstab
46 | - text: {{ path }} none swap sw 0 0
47 | - require:
48 | - cmd: init-swap-{{ path }}
49 |
50 | mount-swap-{{ path }}:
51 | cmd.wait:
52 | - name: swapon {{ path }}
53 | - watch:
54 | - file: fstab-for-swap-{{ path }}
55 | {% endfor %}
56 |
--------------------------------------------------------------------------------
/base/pound/files/etc/pound/certs/1star_local:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIICXQIBAAKBgQCgFhzHOuwsfaSLznqz1xtUoxG/0wLBsaeoGQ21vyTX4GvOJ392
3 | 2gTG3D6PSFKjeHkMLTpTaI4oeti2et5NmxPLuy54gXnW/EiHQkH08onqbsUbHuN4
4 | wb9ObCVX9y36LVQCR6B5VQxe/Gs/62Cxf9skZ8Uf7PDpXw9eKiO/yDZj6QIDAQAB
5 | AoGALeDocniCguWnKKIOLEgzfvgU+CKjIZumbthvQ2z6RBv/CV/secVVpSAtlaam
6 | eE7ocgLsOkc2poc+YF2qJBeTflWdvJd1PXEH0jhTg9g8i6BzJaSwj8JhgIXXRVtb
7 | go3lDUt/m7Raj+OeL9j9Is6ISYW+SAiV8hlRhUi+y0D+CtkCQQDPv1jZ29xTfQfi
8 | icgoAXgiyX9nvmLz1XxRFmzHn+kHI13KuA8MzvnQLOi8X8BLV34Mc+ms9hPT9JYd
9 | k9+zbDNnAkEAxUTXIGG/zQ+aheMidoMNAwYvFgHi3ptMmC9lflMvsEuWKzljfrOz
10 | byqHm94GVhV2raVCCq/AGYRMF/Sn9JrsLwJAcpO0/ErPJFiz7ZRSCQZubLk3BE+z
11 | oYmfy9UTX7HpQp+VzgVff4/zpYXvQl2I38S8Zz/SpCr/2olt9Zgl634EhwJBAIOr
12 | z2vt0vgs0hdL5FT2ZFkAZ61o1OWBAz8RY81N+SbTTQjM5XxMthgdaya+q7wJ141Q
13 | ISTA6pgdliznQLQkNEECQQCTpIV0tUmgT13v9cna9v/DYw0ETaYYZGtHbhtr6b6x
14 | CFYhmrtljL9G+82BMZXtups/sqrkgMyDVITauiknlF7r
15 | -----END RSA PRIVATE KEY-----
16 | -----BEGIN CERTIFICATE-----
17 | MIICYzCCAcwCCQDyfJXR+5kUXDANBgkqhkiG9w0BAQUFADB2MQswCQYDVQQGEwJE
18 | RTEPMA0GA1UECBMGQmVybGluMQ8wDQYDVQQHEwZCZXJsaW4xGDAWBgNVBAoTD1Nw
19 | cnlrZXIgU3lzdGVtczEZMBcGA1UECxMQRGV2ZWxvcG1lbnQgVGVhbTEQMA4GA1UE
20 | AxQHKi5sb2NhbDAeFw0xNjA5MTQxMzQ4NTVaFw0yNjA5MTIxMzQ4NTVaMHYxCzAJ
21 | BgNVBAYTAkRFMQ8wDQYDVQQIEwZCZXJsaW4xDzANBgNVBAcTBkJlcmxpbjEYMBYG
22 | A1UEChMPU3ByeWtlciBTeXN0ZW1zMRkwFwYDVQQLExBEZXZlbG9wbWVudCBUZWFt
23 | MRAwDgYDVQQDFAcqLmxvY2FsMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCg
24 | FhzHOuwsfaSLznqz1xtUoxG/0wLBsaeoGQ21vyTX4GvOJ3922gTG3D6PSFKjeHkM
25 | LTpTaI4oeti2et5NmxPLuy54gXnW/EiHQkH08onqbsUbHuN4wb9ObCVX9y36LVQC
26 | R6B5VQxe/Gs/62Cxf9skZ8Uf7PDpXw9eKiO/yDZj6QIDAQABMA0GCSqGSIb3DQEB
27 | BQUAA4GBAEZf+5XBkYJD1PFv+Ic+RgJpIEd18xYn6W/m2jNHvdoWje3dcckIN3ZG
28 | dJRhit5v/ayltACFGFq2krprgvXgpQOoEP8+qVjkA/gOEjPgTGo2v5coRJEf34GX
29 | +mL/j6vI4zmZ7BWXttOk3d5pSZT6KxFtRsIFW79z/aErznjtosUp
30 | -----END CERTIFICATE-----
31 |
--------------------------------------------------------------------------------
/base/pound/files/etc/pound/certs/2star_local:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIICXQIBAAKBgQCgFhzHOuwsfaSLznqz1xtUoxG/0wLBsaeoGQ21vyTX4GvOJ392
3 | 2gTG3D6PSFKjeHkMLTpTaI4oeti2et5NmxPLuy54gXnW/EiHQkH08onqbsUbHuN4
4 | wb9ObCVX9y36LVQCR6B5VQxe/Gs/62Cxf9skZ8Uf7PDpXw9eKiO/yDZj6QIDAQAB
5 | AoGALeDocniCguWnKKIOLEgzfvgU+CKjIZumbthvQ2z6RBv/CV/secVVpSAtlaam
6 | eE7ocgLsOkc2poc+YF2qJBeTflWdvJd1PXEH0jhTg9g8i6BzJaSwj8JhgIXXRVtb
7 | go3lDUt/m7Raj+OeL9j9Is6ISYW+SAiV8hlRhUi+y0D+CtkCQQDPv1jZ29xTfQfi
8 | icgoAXgiyX9nvmLz1XxRFmzHn+kHI13KuA8MzvnQLOi8X8BLV34Mc+ms9hPT9JYd
9 | k9+zbDNnAkEAxUTXIGG/zQ+aheMidoMNAwYvFgHi3ptMmC9lflMvsEuWKzljfrOz
10 | byqHm94GVhV2raVCCq/AGYRMF/Sn9JrsLwJAcpO0/ErPJFiz7ZRSCQZubLk3BE+z
11 | oYmfy9UTX7HpQp+VzgVff4/zpYXvQl2I38S8Zz/SpCr/2olt9Zgl634EhwJBAIOr
12 | z2vt0vgs0hdL5FT2ZFkAZ61o1OWBAz8RY81N+SbTTQjM5XxMthgdaya+q7wJ141Q
13 | ISTA6pgdliznQLQkNEECQQCTpIV0tUmgT13v9cna9v/DYw0ETaYYZGtHbhtr6b6x
14 | CFYhmrtljL9G+82BMZXtups/sqrkgMyDVITauiknlF7r
15 | -----END RSA PRIVATE KEY-----
16 | -----BEGIN CERTIFICATE-----
17 | MIICZzCCAdACCQCixzCogcXlYTANBgkqhkiG9w0BAQUFADB4MQswCQYDVQQGEwJE
18 | RTEPMA0GA1UECBMGQmVybGluMQ8wDQYDVQQHEwZCZXJsaW4xGDAWBgNVBAoTD1Nw
19 | cnlrZXIgU3lzdGVtczEZMBcGA1UECxMQRGV2ZWxvcG1lbnQgVGVhbTESMBAGA1UE
20 | AxQJKi4qLmxvY2FsMB4XDTE2MDkxNDEzNDg1NVoXDTI2MDkxMjEzNDg1NVoweDEL
21 | MAkGA1UEBhMCREUxDzANBgNVBAgTBkJlcmxpbjEPMA0GA1UEBxMGQmVybGluMRgw
22 | FgYDVQQKEw9TcHJ5a2VyIFN5c3RlbXMxGTAXBgNVBAsTEERldmVsb3BtZW50IFRl
23 | YW0xEjAQBgNVBAMUCSouKi5sb2NhbDCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkC
24 | gYEAoBYcxzrsLH2ki856s9cbVKMRv9MCwbGnqBkNtb8k1+Brzid/dtoExtw+j0hS
25 | o3h5DC06U2iOKHrYtnreTZsTy7sueIF51vxIh0JB9PKJ6m7FGx7jeMG/TmwlV/ct
26 | +i1UAkegeVUMXvxrP+tgsX/bJGfFH+zw6V8PXiojv8g2Y+kCAwEAATANBgkqhkiG
27 | 9w0BAQUFAAOBgQAafRlhofKD9SLY7WeoSpYrWReJ9vV9gIPsduPF0DIHOkkN+9Bd
28 | XghsFkm1O+yUrHR9+/yhZA/kv8HXU0CmLd1i1naDXoXsaxXEo4g25rtwh/6Pp3yT
29 | ZD65Q4xBrSUzrTyR/LE/Tz8AMYopHpX4rWNxeS6SMD9ZCaBHsUqCG1Fryw==
30 | -----END CERTIFICATE-----
31 |
--------------------------------------------------------------------------------
/base/pound/files/etc/pound/certs/3star_local:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIICXQIBAAKBgQCgFhzHOuwsfaSLznqz1xtUoxG/0wLBsaeoGQ21vyTX4GvOJ392
3 | 2gTG3D6PSFKjeHkMLTpTaI4oeti2et5NmxPLuy54gXnW/EiHQkH08onqbsUbHuN4
4 | wb9ObCVX9y36LVQCR6B5VQxe/Gs/62Cxf9skZ8Uf7PDpXw9eKiO/yDZj6QIDAQAB
5 | AoGALeDocniCguWnKKIOLEgzfvgU+CKjIZumbthvQ2z6RBv/CV/secVVpSAtlaam
6 | eE7ocgLsOkc2poc+YF2qJBeTflWdvJd1PXEH0jhTg9g8i6BzJaSwj8JhgIXXRVtb
7 | go3lDUt/m7Raj+OeL9j9Is6ISYW+SAiV8hlRhUi+y0D+CtkCQQDPv1jZ29xTfQfi
8 | icgoAXgiyX9nvmLz1XxRFmzHn+kHI13KuA8MzvnQLOi8X8BLV34Mc+ms9hPT9JYd
9 | k9+zbDNnAkEAxUTXIGG/zQ+aheMidoMNAwYvFgHi3ptMmC9lflMvsEuWKzljfrOz
10 | byqHm94GVhV2raVCCq/AGYRMF/Sn9JrsLwJAcpO0/ErPJFiz7ZRSCQZubLk3BE+z
11 | oYmfy9UTX7HpQp+VzgVff4/zpYXvQl2I38S8Zz/SpCr/2olt9Zgl634EhwJBAIOr
12 | z2vt0vgs0hdL5FT2ZFkAZ61o1OWBAz8RY81N+SbTTQjM5XxMthgdaya+q7wJ141Q
13 | ISTA6pgdliznQLQkNEECQQCTpIV0tUmgT13v9cna9v/DYw0ETaYYZGtHbhtr6b6x
14 | CFYhmrtljL9G+82BMZXtups/sqrkgMyDVITauiknlF7r
15 | -----END RSA PRIVATE KEY-----
16 | -----BEGIN CERTIFICATE-----
17 | MIICazCCAdQCCQDVQQlPKhW1RjANBgkqhkiG9w0BAQUFADB6MQswCQYDVQQGEwJE
18 | RTEPMA0GA1UECBMGQmVybGluMQ8wDQYDVQQHEwZCZXJsaW4xGDAWBgNVBAoTD1Nw
19 | cnlrZXIgU3lzdGVtczEZMBcGA1UECxMQRGV2ZWxvcG1lbnQgVGVhbTEUMBIGA1UE
20 | AxQLKi4qLioubG9jYWwwHhcNMTYwOTE0MTM0ODU1WhcNMjYwOTEyMTM0ODU1WjB6
21 | MQswCQYDVQQGEwJERTEPMA0GA1UECBMGQmVybGluMQ8wDQYDVQQHEwZCZXJsaW4x
22 | GDAWBgNVBAoTD1NwcnlrZXIgU3lzdGVtczEZMBcGA1UECxMQRGV2ZWxvcG1lbnQg
23 | VGVhbTEUMBIGA1UEAxQLKi4qLioubG9jYWwwgZ8wDQYJKoZIhvcNAQEBBQADgY0A
24 | MIGJAoGBAKAWHMc67Cx9pIvOerPXG1SjEb/TAsGxp6gZDbW/JNfga84nf3baBMbc
25 | Po9IUqN4eQwtOlNojih62LZ63k2bE8u7LniBedb8SIdCQfTyiepuxRse43jBv05s
26 | JVf3LfotVAJHoHlVDF78az/rYLF/2yRnxR/s8OlfD14qI7/INmPpAgMBAAEwDQYJ
27 | KoZIhvcNAQEFBQADgYEAPsXU8GQtf48S82BCt3vJibKty1CX3ROJKI3u4CQYCste
28 | +vXEacnxMr6CD5MMC1QASYya/vz4VxwhLIJD8rJiJI35qSwASczNhhhd3hJCpm8S
29 | yavHR4TJI4c5ZpZtJZFmxyy0/+BDq8Z4Q0a6JSU2mlqOagYylWZnJXYqZ10Fgvo=
30 | -----END CERTIFICATE-----
31 |
--------------------------------------------------------------------------------
/base/pound/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install and configure pound, and SSL-Termination proxy
3 | #
4 |
5 | pound:
6 | pkg.installed:
7 | - name: pound
8 | service:
9 | - running
10 | - require:
11 | - pkg: pound
12 | - file: /etc/default/pound
13 | - file: /etc/pound/certs/1star_local
14 | - file: /etc/pound/certs/2star_local
15 | - file: /etc/pound/certs/3star_local
16 | - file: /etc/pound/certs/4star_local
17 | - file: /etc/pound/certs/star_spryker_dev
18 | - watch:
19 | - file: /etc/pound/pound.cfg
20 |
21 | /etc/pound/pound.cfg:
22 | file.managed:
23 | - source: salt://pound/files/etc/pound/pound.cfg
24 | - require:
25 | - pkg: pound
26 |
27 | /etc/default/pound:
28 | file.managed:
29 | - source: salt://pound/files/etc/default/pound
30 | - require:
31 | - pkg: pound
32 |
33 | /etc/pound/certs:
34 | file.directory:
35 | - user: root
36 | - group: root
37 | - mode: 755
38 | - require:
39 | - pkg: pound
40 |
41 | /etc/pound/certs/1star_local:
42 | file.managed:
43 | - source: salt://pound/files/etc/pound/certs/1star_local
44 | - require:
45 | - file: /etc/pound/certs
46 |
47 | /etc/pound/certs/2star_local:
48 | file.managed:
49 | - source: salt://pound/files/etc/pound/certs/2star_local
50 | - require:
51 | - file: /etc/pound/certs
52 |
53 | /etc/pound/certs/3star_local:
54 | file.managed:
55 | - source: salt://pound/files/etc/pound/certs/3star_local
56 | - require:
57 | - file: /etc/pound/certs
58 |
59 | /etc/pound/certs/4star_local:
60 | file.managed:
61 | - source: salt://pound/files/etc/pound/certs/4star_local
62 | - require:
63 | - file: /etc/pound/certs
64 |
65 | /etc/pound/certs/star_spryker_dev:
66 | file.managed:
67 | - source: salt://pound/files/etc/pound/certs/star_spryker_dev
68 | - require:
69 | - file: /etc/pound/certs
70 |
--------------------------------------------------------------------------------
/base/pound/files/etc/pound/certs/4star_local:
--------------------------------------------------------------------------------
1 | -----BEGIN RSA PRIVATE KEY-----
2 | MIICXQIBAAKBgQCgFhzHOuwsfaSLznqz1xtUoxG/0wLBsaeoGQ21vyTX4GvOJ392
3 | 2gTG3D6PSFKjeHkMLTpTaI4oeti2et5NmxPLuy54gXnW/EiHQkH08onqbsUbHuN4
4 | wb9ObCVX9y36LVQCR6B5VQxe/Gs/62Cxf9skZ8Uf7PDpXw9eKiO/yDZj6QIDAQAB
5 | AoGALeDocniCguWnKKIOLEgzfvgU+CKjIZumbthvQ2z6RBv/CV/secVVpSAtlaam
6 | eE7ocgLsOkc2poc+YF2qJBeTflWdvJd1PXEH0jhTg9g8i6BzJaSwj8JhgIXXRVtb
7 | go3lDUt/m7Raj+OeL9j9Is6ISYW+SAiV8hlRhUi+y0D+CtkCQQDPv1jZ29xTfQfi
8 | icgoAXgiyX9nvmLz1XxRFmzHn+kHI13KuA8MzvnQLOi8X8BLV34Mc+ms9hPT9JYd
9 | k9+zbDNnAkEAxUTXIGG/zQ+aheMidoMNAwYvFgHi3ptMmC9lflMvsEuWKzljfrOz
10 | byqHm94GVhV2raVCCq/AGYRMF/Sn9JrsLwJAcpO0/ErPJFiz7ZRSCQZubLk3BE+z
11 | oYmfy9UTX7HpQp+VzgVff4/zpYXvQl2I38S8Zz/SpCr/2olt9Zgl634EhwJBAIOr
12 | z2vt0vgs0hdL5FT2ZFkAZ61o1OWBAz8RY81N+SbTTQjM5XxMthgdaya+q7wJ141Q
13 | ISTA6pgdliznQLQkNEECQQCTpIV0tUmgT13v9cna9v/DYw0ETaYYZGtHbhtr6b6x
14 | CFYhmrtljL9G+82BMZXtups/sqrkgMyDVITauiknlF7r
15 | -----END RSA PRIVATE KEY-----
16 | -----BEGIN CERTIFICATE-----
17 | MIICbzCCAdgCCQD3TdbFnIs3JjANBgkqhkiG9w0BAQUFADB8MQswCQYDVQQGEwJE
18 | RTEPMA0GA1UECBMGQmVybGluMQ8wDQYDVQQHEwZCZXJsaW4xGDAWBgNVBAoTD1Nw
19 | cnlrZXIgU3lzdGVtczEZMBcGA1UECxMQRGV2ZWxvcG1lbnQgVGVhbTEWMBQGA1UE
20 | AxQNKi4qLiouKi5sb2NhbDAeFw0xNjA5MTQxMzQ4NTVaFw0yNjA5MTIxMzQ4NTVa
21 | MHwxCzAJBgNVBAYTAkRFMQ8wDQYDVQQIEwZCZXJsaW4xDzANBgNVBAcTBkJlcmxp
22 | bjEYMBYGA1UEChMPU3ByeWtlciBTeXN0ZW1zMRkwFwYDVQQLExBEZXZlbG9wbWVu
23 | dCBUZWFtMRYwFAYDVQQDFA0qLiouKi4qLmxvY2FsMIGfMA0GCSqGSIb3DQEBAQUA
24 | A4GNADCBiQKBgQCgFhzHOuwsfaSLznqz1xtUoxG/0wLBsaeoGQ21vyTX4GvOJ392
25 | 2gTG3D6PSFKjeHkMLTpTaI4oeti2et5NmxPLuy54gXnW/EiHQkH08onqbsUbHuN4
26 | wb9ObCVX9y36LVQCR6B5VQxe/Gs/62Cxf9skZ8Uf7PDpXw9eKiO/yDZj6QIDAQAB
27 | MA0GCSqGSIb3DQEBBQUAA4GBAInu8HNLC9YkzC9oI/+ojeM5FnNzEYNm7oc/bp2m
28 | OM+JpEDP3wte4s7OdQZKtcYmP0MCLjJSjTl8t0Fm5wyrz5Vn1I+HYlV3gdwWZvcu
29 | ICR4V/g+/xPDwlfLce6pzwkAG5JOuRvQBRZsnnJFf860ZpKLoRXf0WbG5AVAypO3
30 | lqBe
31 | -----END CERTIFICATE-----
32 |
--------------------------------------------------------------------------------
/dev/development/init.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Tools and settings for local development
3 | #
4 |
5 | # Pre-fetch SSH key for git repository
6 | get-github-ssh-hostkey:
7 | cmd.run:
8 | - name: ssh-keyscan -H {{ pillar.deploy.git_hostname }} >> /home/vagrant/.ssh/known_hosts
9 | - unless: test -f /home/vagrant/.ssh/known_hosts
10 | - user: vagrant
11 |
12 | # Install / Configure Oh-My-Zsh for user vagrant
13 | clone-oh-my-zsh:
14 | cmd.run:
15 | - name: git clone git://github.com/robbyrussell/oh-my-zsh.git /home/vagrant/.oh-my-zsh
16 | - unless: test -d /home/vagrant/.oh-my-zsh
17 | - user: vagrant
18 |
19 | # Create inital .zshrc, allow editing it by user (don't replace contents)
20 | /home/vagrant/.zshrc:
21 | file.managed:
22 | - source: salt://development/files/home/vagrant/.zshrc
23 | - user: vagrant
24 | - group: vagrant
25 | - mode: 600
26 | - replace: False
27 |
28 | /home/vagrant/.zsh_prompt:
29 | file.managed:
30 | - source: salt://development/files/home/vagrant/.zsh_prompt
31 | - user: vagrant
32 | - group: vagrant
33 | - mode: 644
34 | - replace: False
35 |
36 | /home/vagrant/.zlogin:
37 | file.managed:
38 | - source: salt://development/files/home/vagrant/.zlogin
39 | - user: vagrant
40 | - group: vagrant
41 | - mode: 600
42 | - replace: False
43 |
44 | /home/vagrant/bin:
45 | file.recurse:
46 | - source: salt://development/files/home/vagrant/bin
47 | - user: vagrant
48 | - group: vagrant
49 | - file_mode: 755
50 | - dir_mode: 755
51 |
52 | /home/vagrant/.oh-my-zsh/custom/plugins/spryker:
53 | file.recurse:
54 | - source: salt://development/files/home/vagrant/oh-my-zsh/custom/plugins/spryker
55 | - user: vagrant
56 | - group: vagrant
57 | - file_mode: 600
58 | - dir_mode: 755
59 |
60 |
61 | # Manually sync host to Vagrant Host
62 | /etc/cron.d/vagrant-ntpdate:
63 | file.managed:
64 | - source: salt://development/files/etc/cron.d/vagrant-ntpdate
65 |
66 |
--------------------------------------------------------------------------------
/base/redis/macros/redis_instance.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Macro: Setup one Elasticsearch instance
3 | #
4 |
5 | {% macro redis_instance(environment, environment_details, settings) -%}
6 |
7 | {% if 'skip_instance_setup' not in environment_details.redis %}
8 | /tmp/b-{{ environment }}:
9 | file.managed:
10 | - contents: {{ environment_details }}
11 |
12 | /data/shop/{{ environment }}/shared/redis:
13 | file.directory:
14 | - user: redis
15 | - group: redis
16 | - mode: 700
17 | - require:
18 | - file: /data/shop/{{ environment }}/shared
19 |
20 | /data/logs/{{ environment }}/redis:
21 | file.directory:
22 | - user: redis
23 | - group: redis
24 | - mode: 755
25 | - require:
26 | - file: /data/logs/{{ environment }}
27 |
28 | {%- if 'systemd' in grains %}
29 | {%- set service_name = 'redis-server-' + environment %}
30 | /etc/systemd/system/redis-server-{{ environment }}.service:
31 | file.managed:
32 | - template: jinja
33 | - source: salt://redis/files/etc/systemd/system/redis-server.service
34 | - context:
35 | environment: {{ environment }}
36 |
37 | redis-server-{{ environment }}:
38 | service.running:
39 | - enable: True
40 | - require:
41 | - file: /etc/systemd/system/redis-server-{{ environment }}.service
42 |
43 | {%- else %}
44 | {%- set service_name = 'redis-services' %}
45 | {%- endif %}
46 |
47 | /etc/redis/redis_{{ environment }}.conf:
48 | file.managed:
49 | - user: root
50 | - group: root
51 | - mode: 644
52 | - template: jinja
53 | - source: salt://redis/files/redis_instance/etc/redis/redis.conf
54 | - context:
55 | environment: {{ environment }}
56 | environment_details: {{ environment_details }}
57 | settings: {{ settings }}
58 | - require:
59 | - file: /data/shop/{{ environment }}/shared/redis
60 | - file: /data/logs/{{ environment }}/redis
61 | - watch_in:
62 | - service: {{ service_name }}
63 |
64 | {%- endif %}
65 | {%- endmacro %}
66 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/spryker/zed.conf:
--------------------------------------------------------------------------------
1 | ###
2 | ### Common Zed settings (to be included in vhost definition)
3 | ### This file is maintained by salt
4 | ###
5 |
6 | # Timeout for ZED requests - 10 minutes
7 | # (longer requests should be converted to jobs and executed via jenkins)
8 | proxy_read_timeout 600s;
9 | proxy_send_timeout 600s;
10 | fastcgi_read_timeout 600s;
11 | client_body_timeout 600s;
12 | client_header_timeout 600s;
13 | send_timeout 600s;
14 |
15 | # Static files can be delivered directly
16 | location ~ (/assets/|/favicon.ico|/robots.txt) {
17 | access_log off;
18 | expires 30d;
19 | add_header Pragma public;
20 | add_header Cache-Control "public, must-revalidate, proxy-revalidate";
21 | try_files $uri =404;
22 | }
23 |
24 | # Payone - payment callback endpoint; served by the PHP application without authorization (auth_basic disabled below)
25 | location /payone/ {
26 | auth_basic off;
27 | add_header X-Server $hostname;
28 | fastcgi_pass backend-$application_env-zed;
29 | fastcgi_index index.php;
30 | include /etc/nginx/fastcgi_params;
31 | fastcgi_param SCRIPT_NAME /index.php;
32 | fastcgi_param APPLICATION_ENV $application_env;
33 | fastcgi_param APPLICATION_STORE $application_store;
34 | fastcgi_param SCRIPT_FILENAME $document_root/index.php;
35 | more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
36 | }
37 |
38 | # PHP application gets all other requests
39 | location / {
40 | add_header X-Server $hostname;
41 | fastcgi_pass backend-$application_env-zed;
42 | fastcgi_index index.php;
43 | include /etc/nginx/fastcgi_params;
44 | fastcgi_param SCRIPT_NAME /index.php;
45 | fastcgi_param APPLICATION_ENV $application_env;
46 | fastcgi_param APPLICATION_STORE $application_store;
47 | fastcgi_param SCRIPT_FILENAME $document_root/index.php;
48 | more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
49 | }
50 |
--------------------------------------------------------------------------------
/base/hosting/rackspace.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Rackspace-specific hoster packages (monitoring and backup)
3 | #
4 |
5 | # Firewall: UFW package
6 | ufw:
7 | pkg.installed
8 |
9 | # Networking configuration: nothing to do
10 |
11 | # Disk drives: if machines have grains with list of filesystems, those will be prepared by the included state
12 | include:
13 | - .filesystem
14 |
15 |
16 | # Monitoring
17 | rackspace-monitoring:
18 | pkgrepo.managed:
19 | - humanname: Rackspace monitoring tools
20 | - name: deb http://stable.packages.cloudmonitoring.rackspace.com/debian-{{ grains.lsb_distrib_codename }}-x86_64 cloudmonitoring main
21 | - file: /etc/apt/sources.list.d/rackspace-monitoring.list
22 | - key_url: https://monitoring.api.rackspacecloud.com/pki/agent/linux.asc
23 | - require_in:
24 | - pkg: rackspace-monitoring-agent
25 |
26 | rackspace-monitoring-agent:
27 | pkg.installed
28 |
29 | setup-rackspace-monitoring-agent:
30 | cmd.run:
31 | - name: rackspace-monitoring-agent --setup --username {{ pillar.rackspace.username }} --apikey {{ pillar.rackspace.apikey }} && service rackspace-monitoring-agent restart
32 | - unless: test -f /etc/rackspace-monitoring-agent.cfg
33 | - requires:
34 | - pkg: rackspace-monitoring-agent
35 |
36 | # Backup
37 | rackspace-backup:
38 | pkgrepo.managed:
39 | - humanname: Rackspace backup agent
40 | - name: deb [arch=amd64] http://agentrepo.drivesrvr.com/debian/ serveragent main
41 | - file: /etc/apt/sources.list.d/rackspace-backup.list
42 | - key_url: http://agentrepo.drivesrvr.com/debian/agentrepo.key
43 | - require_in:
44 | - pkg: driveclient
45 |
46 | driveclient:
47 | pkg.installed
48 |
49 | setup-rackspace-backup-agent:
50 | cmd.run:
51 | - name: /usr/local/bin/driveclient --configure -u {{ pillar.rackspace.username }} -k {{ pillar.rackspace.apikey }} -t LON && service driveclient restart && update-rc.d driveclient defaults
52 | - unless: test -f /var/run/driveclient.pid
53 | - requires:
54 | - pkg: driveclient
55 |
56 | # Support access
57 | /etc/sudoers.d/rackspace-support:
58 | file.managed:
59 | - source: salt://hosting/files/rackspace/etc/sudoers.d/rackspace-support
60 |
--------------------------------------------------------------------------------
/dev/development/files/home/vagrant/bin/xdebug:
--------------------------------------------------------------------------------
#!/bin/bash
#
# Toggle the Xdebug PHP extension for CLI and FPM.
#
# Usage: xdebug [--on|--off]
#   --on   enable Xdebug for CLI and FPM (restarts FPM when changed)
#   --off  disable Xdebug for CLI and FPM (restarts FPM when changed)
#   (no argument) report current status
#
# Enabling/disabling works by creating/removing the 30-xdebug.ini symlink
# in the conf.d directories, mirroring Debian's phpenmod/phpdismod scheme.

PHP_VERSION=$(php -v)
PHP_ETC_DIR=/etc/php/7.1/
PHP_VERSION_NUMBER=7
PHP_FPM=/etc/init.d/php7.1-fpm

# Fall back to the PHP 5 layout when the CLI reports PHP 5
if [[ $PHP_VERSION =~ "PHP 5" ]] ; then
    PHP_ETC_DIR=/etc/php5/
    PHP_FPM=/etc/init.d/php5-fpm
    PHP_VERSION_NUMBER=5
fi

XDEBUG_INI=${PHP_ETC_DIR}mods-available/xdebug.ini
PHP_CLI_DIR=${PHP_ETC_DIR}cli/conf.d/
PHP_FPM_DIR=${PHP_ETC_DIR}fpm/conf.d/

echo "Detected PHP version: ${PHP_VERSION_NUMBER}.x";

# Restart PHP-FPM so a changed conf.d takes effect
function restartFPM {
    sudo "$PHP_FPM" restart
}

# Enable Xdebug for CLI and FPM (idempotent; restarts FPM only on change)
function xdebugOn {
    if [ ! -f "${PHP_CLI_DIR}30-xdebug.ini" ]; then
        echo "Enabling CLI Xdebug"
        sudo ln -s "$XDEBUG_INI" "${PHP_CLI_DIR}30-xdebug.ini"
    else
        echo " - CLI Xdebug already enabled"
    fi

    if [ ! -f "${PHP_FPM_DIR}30-xdebug.ini" ]; then
        echo "Enabling FPM Xdebug"
        sudo ln -s "$XDEBUG_INI" "${PHP_FPM_DIR}30-xdebug.ini"
        restartFPM
    else
        echo " - FPM Xdebug already enabled"
    fi
}

# Disable Xdebug for CLI and FPM (idempotent; restarts FPM only on change)
function xdebugOff {
    if [ -f "${PHP_CLI_DIR}30-xdebug.ini" ]; then
        echo "Disabling CLI Xdebug"
        sudo rm "${PHP_CLI_DIR}30-xdebug.ini"
    else
        echo " - CLI Xdebug is not enabled"
    fi

    if [ -f "${PHP_FPM_DIR}30-xdebug.ini" ]; then
        echo "Disabling FPM Xdebug"
        sudo rm "${PHP_FPM_DIR}30-xdebug.ini"
        restartFPM
    else
        echo " - FPM Xdebug is not enabled"
    fi
}

# Print whether the CLI/FPM symlinks currently exist
function reportStatus {
    if [ -f "${PHP_CLI_DIR}30-xdebug.ini" ]; then
        echo "CLI Xdebug is enabled"
    else
        echo "CLI Xdebug is disabled"
    fi

    if [ -f "${PHP_FPM_DIR}30-xdebug.ini" ]; then
        echo "FPM Xdebug is enabled"
    else
        echo "FPM Xdebug is disabled"
    fi
}

case $1 in
    --on)
        xdebugOn
        ;;
    --off)
        xdebugOff
        ;;

    *)
        reportStatus
        echo "Use --on or --off"
        ;;
esac;
--------------------------------------------------------------------------------
/base/nginx/files/etc/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | ###############################################################################
2 | ## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
3 | ###############################################################################
4 |
5 | user www-data;
6 | worker_processes {{ grains.num_cpus }};
7 | pid /var/run/nginx.pid;
8 | include /etc/nginx/modules-enabled/*;
9 |
10 | events {
11 | worker_connections 2000;
12 | multi_accept on;
13 | }
14 |
15 | http {
16 | ##
17 | # Basic Settings for Linux
18 | ##
19 |
20 | sendfile on;
21 | tcp_nopush on;
22 | tcp_nodelay on;
23 | keepalive_timeout 65;
24 |
25 | types_hash_max_size 2048;
26 | server_tokens off;
27 |
28 | client_max_body_size 32m;
29 |
30 | ##
31 | # Buffers optimized for proxy/fastcgi appserver
32 | ##
33 |
34 | client_body_buffer_size 8k;
35 | client_header_buffer_size 1k;
36 | large_client_header_buffers 4 8k;
37 | fastcgi_buffer_size 128k;
38 | fastcgi_buffers 16 128k;
39 | fastcgi_busy_buffers_size 512k;
40 | proxy_buffering on;
41 | proxy_buffers 16 128k;
42 | output_buffers 2 512k;
43 |
44 | ##
45 | # File attributes cache
46 | ##
47 |
48 | {%- if 'dev' in grains.roles %}
49 | open_file_cache off;
50 | {%- else %}
51 | open_file_cache max=10000;
52 | open_file_cache_valid 3s;
53 | open_file_cache_min_uses 2;
54 | open_file_cache_errors on;
55 | {%- endif %}
56 |
57 | server_names_hash_bucket_size 64;
58 | # server_name_in_redirect off;
59 |
60 | include /etc/nginx/mime.types;
61 | default_type application/octet-stream;
62 |
63 | ##
64 | # Logging Settings
65 | ##
66 |
67 | access_log /var/log/nginx/access.log;
68 | error_log /var/log/nginx/error.log;
69 |
70 | ##
71 | # Gzip Settings
72 | ##
73 |
74 | gzip on;
75 | gzip_disable "MSIE [1-6]\.";
76 |
77 | gzip_vary on;
78 | gzip_proxied any;
79 | gzip_comp_level 6;
80 | gzip_min_length 1000;
81 | gzip_buffers 16 128k;
82 | gzip_http_version 1.0;
83 | gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript text/x-component font/truetype font/opentype;
84 |
85 | ##
86 | # Virtual Host Configs
87 | ##
88 |
89 | include /etc/nginx/conf.d/*.conf;
90 | include /etc/nginx/sites-enabled/*;
91 | }
92 |
93 |
94 |
--------------------------------------------------------------------------------
/base/postgresql/setup.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Install and configure PostgreSQL database
3 | #
4 | # This state manages the configuration of PostgreSQL database, creates
5 | # data directory in /data and sets up default cluster (main).
6 | # Note that this configuration does not include any failover and/or replication.
7 | # It is suitable to run on development and QA environments.
8 | #
9 | # To deploy Spryker in production, a stable and secure PostgreSQL setup is
10 | # recommended, which includes:
11 | # - backup
12 | # - replication
13 | # - hot-standby slave
14 | # - failover mechanism
15 | # - appropiate hardware
16 |
17 | postgresql:
18 | pkg.installed:
19 | - name: postgresql-9.6
20 | service.running:
21 | - enable: true
22 | - reload: true
23 | - watch:
24 | - file: /etc/postgresql/9.6/main/pg_hba.conf
25 | - file: /etc/postgresql/9.6/main/postgresql.conf
26 | - require:
27 | - file: /etc/postgresql/9.6/main/pg_hba.conf
28 | - file: /etc/postgresql/9.6/main/postgresql.conf
29 | - cmd: data-dir
30 |
31 | data-dir:
32 | file.directory:
33 | - name: /data/pgsql
34 | - makedirs: true
35 | - user: postgres
36 | - group: postgres
37 | - require:
38 | - pkg: postgresql
39 | cmd.run:
40 | - name: /etc/init.d/postgresql stop && rm -rf /etc/postgresql/9.6/main/* && pg_createcluster --datadir /data/pgsql 9.6 main
41 | - unless: test -d /data/pgsql/base
42 | - cwd: /data/pgsql
43 | - require:
44 | - file: data-dir
45 |
46 | /etc/postgresql/9.6/main/pg_hba.conf:
47 | file.managed:
48 | - source: salt://postgresql/files/etc/postgresql/pg_hba.conf
49 | - template: jinja
50 | - require:
51 | - pkg: postgresql
52 | - cmd: data-dir
53 | - watch_in:
54 | - service: postgresql
55 |
56 | /etc/postgresql/9.6/main/postgresql.conf:
57 | file.managed:
58 | - source: salt://postgresql/files/etc/postgresql/postgresql.conf
59 | - template: jinja
60 | - require:
61 | - pkg: postgresql
62 | - cmd: data-dir
63 | - watch_in:
64 | - service: postgresql
65 |
66 | root:
67 | postgres_user.present:
68 | - login: true
69 | - superuser: true
70 | - require:
71 | - service: postgresql
72 |
73 | # Include autoupdate if configured to do so
74 | {% if salt['pillar.get']('autoupdate:postgresql', False) %}
75 | include:
76 | - .update
77 | {% endif %}
78 |
--------------------------------------------------------------------------------
/base/mysql-server/credentials.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Create MySQL databases, users and privileges
3 | #
4 |
5 | {%- from 'settings/init.sls' import settings with context %}
6 | {%- for environment, environment_details in settings.environments.items() %}
7 | {%- for store in pillar.stores %}
8 |
9 | # create database - zed
10 | mysql_database_{{ store }}_{{ environment }}_zed:
11 | mysql_database.present:
12 | - name: {{ settings.environments[environment].stores[store].zed.database.database }}
13 | - require:
14 | - pkg: python-mysqldb
15 | {% if salt['pillar.get']('hosting:external_mysql', '') == '' %}
16 | - service: mysql
17 | {% endif %}
18 |
19 | # create database - dump
20 | mysql_database_{{ store }}_{{ environment }}_zed_dump:
21 | mysql_database.present:
22 | - name: {{ settings.environments[environment].stores[store].dump.database.database }}
23 | - require:
24 | - pkg: python-mysqldb
25 | {% if salt['pillar.get']('hosting:external_mysql', '') == '' %}
26 | - service: mysql
27 | {% endif %}
28 |
29 | # create database user
30 | mysql_users_{{ store }}_{{ environment }}:
31 | mysql_user.present:
32 | - name: {{ settings.environments[environment].stores[store].zed.database.username }}
33 | - host: "{{ salt['pillar.get']('hosting:mysql_network', '%') }}"
34 | - password: {{ settings.environments[environment].stores[store].zed.database.password }}
35 | - require:
36 | - pkg: python-mysqldb
37 | {% if salt['pillar.get']('hosting:external_mysql', '') == '' %}
38 | - service: mysql
39 | {% endif %}
40 |
41 | # create database permissions (zed database)
42 | mysql_grants_{{ store }}_{{ environment }}_zed:
43 | mysql_grants.present:
44 | - grant: all
45 | - database: {{ settings.environments[environment].stores[store].zed.database.database }}.*
46 | - user: {{ settings.environments[environment].stores[store].zed.database.username }}
47 | - host: "{{ salt['pillar.get']('hosting:mysql_network', '%') }}"
48 |
49 | # create database permissions (dump database)
50 | mysql_grants_{{ store }}_{{ environment }}_zed_dump:
51 | mysql_grants.present:
52 | - grant: all
53 | - database: {{ settings.environments[environment].stores[store].dump.database.database }}.*
54 | - user: {{ settings.environments[environment].stores[store].zed.database.username }}
55 | - host: "{{ salt['pillar.get']('hosting:mysql_network', '%') }}"
56 | {% endfor %}
57 | {% endfor %}
58 |
--------------------------------------------------------------------------------
/base/mysql-server/files/etc/mysql/my.cnf:
--------------------------------------------------------------------------------
###############################################################################
## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
###############################################################################

[client]
port = 3306
socket = /var/run/mysqld/mysqld.sock

[mysqld]

# GENERAL #
user = mysql
default_storage_engine = InnoDB
socket = /var/run/mysqld/mysqld.sock
pid_file = /var/run/mysqld/mysqld.pid
# Listens on all interfaces; access is expected to be limited elsewhere
# (firewall / the hosting:mysql_network host restriction used by the grants).
bind-address = 0.0.0.0
port = 3306

# MyISAM #
key_buffer_size = 32M

# SAFETY #
max_allowed_packet = 64M
max_connect_errors = 1000000

# DATA STORAGE #
datadir = /var/lib/mysql
tmpdir = /tmp

# CACHES AND LIMITS #
tmp_table_size = 128M
max_heap_table_size = 128M
# NOTE(review): the query cache is deprecated in MySQL 5.7 and removed in
# 8.0 — drop these two settings when upgrading the server.
query_cache_size = 128M
query_cache_limit = 1M
max_connections = 500
thread_cache_size = 50
open_files_limit = 65535
table_definition_cache = 4096
table_open_cache = 4096
group_concat_max_len = 1048576

# INNODB #
innodb_flush_method = O_DIRECT
innodb_log_files_in_group = 2
innodb_log_file_size = 256M
# 2 = write the log at commit but fsync only ~once per second: up to one
# second of transactions may be lost on OS crash, in exchange for speed.
innodb_flush_log_at_trx_commit = 2
innodb_file_per_table = 1
# Tunable per host via the mysql:innodb_buffer_pool_size grain.
innodb_buffer_pool_size = {{ salt['grains.get']('mysql:innodb_buffer_pool_size', "128M") }}

# LOGGING #
log_queries_not_using_indexes = 0
slow_query_log = 1
slow_query_log_file = /var/log/mysql/slow.log

# I18n
character-set-server = utf8
collation-server = utf8_general_ci

# server mode
sql-mode = STRICT_ALL_TABLES

[mysqldump]
quick
quote-names
max_allowed_packet = 64M

[isamchk]
key_buffer = 16M

#
# * IMPORTANT: Additional settings that can override those from this file!
# The files must end with '.cnf', otherwise they'll be ignored.
#
!includedir /etc/mysql/conf.d/
75 |
--------------------------------------------------------------------------------
/base/spryker/macros/jenkins_instance.sls:
--------------------------------------------------------------------------------
#
# Macro: Setup one jenkins instance per each environment
#

{% macro jenkins_instance(environment, environment_details, settings) -%}
# Jenkins data directory (per-environment home under the shop's shared data)
/data/shop/{{ environment }}/shared/data/common/jenkins:
  file.directory:
    - mode: 755
    - user: www-data
    - group: www-data
    - makedirs: True

# Jenkins default configuration
# replace: False -> seeded once; later changes made via the Jenkins UI stick.
/data/shop/{{ environment }}/shared/data/common/jenkins/config.xml:
  file.managed:
    - mode: 644
    - user: www-data
    - group: www-data
    - source: salt://spryker/files/jenkins_instance/config.xml
    - replace: False
    - template: jinja
    - context:
        environment: {{ environment }}
    - require:
      - file: /data/shop/{{ environment }}/shared/data/common/jenkins
    - watch_in:
      - service: jenkins-{{ environment }}

# Service init script
/etc/init.d/jenkins-{{ environment }}:
  file.managed:
    - mode: 755
    - user: root
    - group: root
    - source: salt://spryker/files/jenkins_instance/etc/init.d/jenkins
    - template: jinja
    - context:
        environment: {{ environment }}

# Reload systemd on service creation
# (systemd only sees the new SysV script after daemon-reload)
jenkins-{{ environment }}-systemctl-reload:
  cmd.wait:
    - name: systemctl daemon-reload
    - watch:
      - file: /etc/init.d/jenkins-{{ environment }}

# Service configuration
/etc/default/jenkins-{{ environment }}:
  file.managed:
    - mode: 644
    - user: root
    - group: root
    - source: salt://spryker/files/jenkins_instance/etc/default/jenkins
    - template: jinja
    - context:
        environment: {{ environment }}
        environment_details: {{ environment_details }}
        settings: {{ settings }}

# Dir permissions for unpacking .war file
# NOTE(review): the state ID is per-environment but `name` points every
# environment at the shared /var/cache/jenkins directory. Confirm whether
# /var/cache/jenkins-{{ environment }} was intended (check the init/default
# templates for which cache dir the service actually uses).
/var/cache/jenkins-{{ environment }}:
  file.directory:
    - name: /var/cache/jenkins
    - user: www-data
    - group: www-data
    - mode: 775
    - recurse:
      - user
      - group

# Service
jenkins-{{ environment }}:
  service.running:
    - enable: True
    - require:
      - pkg: jenkins
      - file: /etc/default/jenkins-{{ environment }}
      - file: /etc/init.d/jenkins-{{ environment }}
      - file: /var/cache/jenkins-{{ environment }}
      - cmd: jenkins-{{ environment }}-systemctl-reload

{%- endmacro %}
84 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/deploy/config.rb:
--------------------------------------------------------------------------------
{% from 'settings/init.sls' import settings with context %}
# This file is maintained by salt
# Ruby configuration for the deploy scripts in /etc/deploy, rendered by salt
# from pillar/settings data.


###################
### Locations and permissions
###################

# For info only
$project_name = "salt"

# Deployment directory (temporary files)
$deploy_dir = "/data/deploy"

# Destination directory for application
$destination_dir = "/data/shop"

# Username to use to connect to all hosts
$ssh_user = $rsync_user = "root"

# Owner/group of shop files
$www_user = "www-data"
$www_group = "www-data"

# Where to put rev.txt (release info file)
$rev_txt_locations = ['.']

###################
### Environments, stores
###################

# List of application environments
$environments = [
{%- for environment, environment_details in pillar.environments.items() %}
  "{{ environment }}",
{%- endfor %}
]

# List of stores
# NOTE(review): store attributes (locale/appdomain) are taken from the FIRST
# environment only — this assumes they are identical across environments;
# confirm before relying on per-environment differences here.
$stores = [
{%- for environment, environment_details in pillar.environments.items() %}{%- if loop.first %}
{%- for store in pillar.stores %}
  { 'store' => '{{ store }}', 'locale' => '{{ settings.environments[environment].stores[store].locale }}', 'appdomain' => '{{ settings.environments[environment].stores[store].appdomain }}' },
{%- endfor %}
{%- endif %}{%- endfor %}
]

###################
### Hosts and roles
###################

# Enable data warehouse?
$use_dwh = false

# Hosts that have the application code
$app_hosts = [
{% for host in settings.hosts.app %} "{{ host }}",
{% endfor -%}
]

# Hosts that run web server
$web_hosts = [
{% for host in settings.hosts.web %} "{{ host }}",
{% endfor -%}
]

# Host(s) that run jobs
$jobs_hosts = [
{% for host in settings.hosts.job %} "{{ host }}",
{% endfor -%}
]

# Deploy notifications (API key - it's NOT same as Newrelic License Key!)
$newrelic_api_key = "{{ pillar.newrelic.api_key|default('', true) }}"

###################
### Git code repository
###################

$scm_type = "git"
$ssh_wrapper_path = "/etc/deploy/ssh_wrapper.sh"
$git_path = $deploy_dir + "/git/"
$original_git_url = "{{ pillar.deploy.git_url }}"

###################
### Project custom parameters
###################

$project_options = [
  { :question => "Use debug mode", :ask_question => false, :options => %w(true), :variable => "debug", :cmdline => "--debug" },
]
92 |
--------------------------------------------------------------------------------
/base/spryker/stores.sls:
--------------------------------------------------------------------------------
#
# Per-store, per-environment application config and nginx vhosts.
#
{% from 'settings/init.sls' import settings with context %}

{%- for environment, environment_details in settings.environments.items() %}
{%- for store in pillar.stores %}

# Generate application store config - config_local_XX.php
/data/shop/{{ environment }}/shared/config_local_{{ store }}.php:
  file.managed:
    - source: salt://spryker/files/config/config_local_XX.php
    - template: jinja
    - user: www-data
    - group: www-data
    - mode: 644
    - require:
      - file: /data/shop/{{ environment }}/shared/data/common
    - context:
        environment: {{ environment }}
        settings: {{ settings }}
        store: {{ store }}

# Create logs directory for environment
# (symlink into the currently deployed release; force replaces leftovers)
/data/logs/{{ environment }}/{{ store }}:
  file.symlink:
    - target: /data/shop/{{ environment }}/current/data/{{ store }}/logs
    - force: True

{%- if 'web' in grains.roles %}
# Only on webservers: create nginx vhosts
/etc/nginx/sites-available/{{ store }}_{{ environment }}_zed:
  file.managed:
    - source: salt://spryker/files/etc/nginx/sites-available/XX-zed.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - context:
        environment: {{ environment }}
        settings: {{ settings }}
        store: {{ store }}
    - require:
      - file: /data/logs/{{ environment }}
    - watch_in:
      - cmd: reload-nginx

/etc/nginx/sites-available/{{ store }}_{{ environment }}_yves:
  file.managed:
    - source: salt://spryker/files/etc/nginx/sites-available/XX-yves.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - context:
        environment: {{ environment }}
        settings: {{ settings }}
        store: {{ store }}
    - require:
      - file: /data/logs/{{ environment }}
    - watch_in:
      - cmd: reload-nginx

# Enable the zed vhost; also requires the htpasswd files
# (presumably used for basic auth by the zed vhost template — see XX-zed.conf)
/etc/nginx/sites-enabled/{{ store }}_{{ environment }}_zed:
  file.symlink:
    - target: /etc/nginx/sites-available/{{ store }}_{{ environment }}_zed
    - force: true
    - require:
      - file: /etc/nginx/sites-available/{{ store }}_{{ environment }}_zed
      - file: /etc/nginx/htpasswd-zed
      - file: /etc/nginx/htpasswd-staging
    - watch_in:
      - cmd: reload-nginx

/etc/nginx/sites-enabled/{{ store }}_{{ environment }}_yves:
  file.symlink:
    - target: /etc/nginx/sites-available/{{ store }}_{{ environment }}_yves
    - force: true
    - require:
      - file: /etc/nginx/sites-available/{{ store }}_{{ environment }}_yves
    - watch_in:
      - cmd: reload-nginx

{%- endif %}

{%- endfor %}
{%- endfor %}
86 |
--------------------------------------------------------------------------------
/base/postgresql/credentials.sls:
--------------------------------------------------------------------------------
#
# Create postgresql databases, users and privileges
#
#
# This implementation at the moment is 'hacky', as on the day when it was
# written, salt did not support creating schemes and/or managing privileges.
# For each environment we create user and two databases (zed + dump), which
# have the owner set to this user.

# Create admin account, if configured in pillar
{%- if salt['pillar.get']('postgresql:superuser', False) %}
postgres_users_admin:
  postgres_user.present:
    - name: {{ pillar.postgresql.superuser.username }}
    - password: {{ pillar.postgresql.superuser.password }}
    - superuser: True
    - require:
      - service: postgresql
{%- endif %}

{%- from 'settings/init.sls' import settings with context %}
{%- for environment, environment_details in settings.environments.items() %}
{%- for store in pillar.stores %}

# create database user
postgres_users_{{ store }}_{{ environment }}:
  postgres_user.present:
    - name: {{ settings.environments[environment].stores[store].zed.database.username }}
    - password: {{ settings.environments[environment].stores[store].zed.database.password }}
    - require:
      - service: postgresql

# create database - zed, owned by the store user
# (the postgres_user requisite below matches the user state's `name`)
postgres_database_{{ store }}_{{ environment }}_zed:
  postgres_database.present:
    - name: {{ settings.environments[environment].stores[store].zed.database.database }}
    - owner: {{ settings.environments[environment].stores[store].zed.database.username }}
    - require:
      - service: postgresql
      - postgres_user: {{ settings.environments[environment].stores[store].zed.database.username }}

# Add citext extension to the database
postgres_database_citext_{{ store }}_{{ environment }}_zed:
  postgres_extension.present:
    - name: citext
    - maintenance_db: {{ settings.environments[environment].stores[store].zed.database.database }}
    - require:
      - postgres_database: postgres_database_{{ store }}_{{ environment }}_zed

# create database - dump
postgres_database_{{ store }}_{{ environment }}_zed_dump:
  postgres_database.present:
    - name: {{ settings.environments[environment].stores[store].dump.database.database }}
    - owner: {{ settings.environments[environment].stores[store].zed.database.username }}
    - require:
      - service: postgresql
      - postgres_user: {{ settings.environments[environment].stores[store].zed.database.username }}

# citext extension for the dump database as well
postgres_database_citext_{{ store }}_{{ environment }}_zed_dump:
  postgres_extension.present:
    - name: citext
    - maintenance_db: {{ settings.environments[environment].stores[store].dump.database.database }}
    - require:
      - postgres_database: postgres_database_{{ store }}_{{ environment }}_zed_dump


{% endfor %}
{% endfor %}
69 |
--------------------------------------------------------------------------------
/base/spryker/deployment.sls:
--------------------------------------------------------------------------------
#
# Install spryker deployment system - code and configuration
# Everything is saved in /etc/deploy
#

# Enable shell for user www-data
www-data:
  user.present:
    - shell: /bin/sh

{% if 'deploy' in grains.roles %}

/etc/deploy:
  file.directory:
    - user: root
    - group: root
    - dir_mode: 755

# Deploy script implementation
/etc/deploy/deploy.rb:
  file.managed:
    - source: salt://spryker/files/etc/deploy/deploy.rb
    - user: root
    - group: root
    - mode: 755
/etc/deploy/functions.rb:
  file.managed:
    - source: salt://spryker/files/etc/deploy/functions.rb
    - user: root
    - group: root
    - mode: 600

# Deploy script configuration
/etc/deploy/config.rb:
  file.managed:
    - source: salt://spryker/files/etc/deploy/config.rb
    - template: jinja
    - user: root
    - group: root
    - mode: 644

# SSH Wrapper and shared private key for deployment.
# It should not be used, ssh AgentForwarding is recommended method.
# Remove ssh_wrapper and deploy.key to use Agent Forwarding
/etc/deploy/ssh_wrapper.sh:
  file.managed:
    - source: salt://spryker/files/etc/deploy/ssh_wrapper.sh
    - user: root
    - group: root
    - mode: 700

/etc/deploy/deploy.key:
  file.managed:
    - source: salt://spryker/files/etc/deploy/deploy.key
    - user: root
    - group: root
    - mode: 400

# If the shipped deploy.key is empty (placeholder), remove it so ssh does
# not attempt to use an empty identity file.
remove-empty-deploy-key:
  cmd.run:
    - name: rm -f /etc/deploy/deploy.key
    - unless: test -s /etc/deploy/deploy.key
    - require:
      - file: /etc/deploy/deploy.key

# SSH key used for deployment. We must be able to ssh as root from deploy host
# to all machines, where we deploy to.
{% if pillar.server_env.ssh.id_rsa is defined %}
/root/.ssh:
  file.directory:
    - mode: 700

/root/.ssh/id_rsa:
  file.managed:
    - user: root
    - group: root
    - mode: 400
    - contents_pillar: server_env:ssh:id_rsa
    - require:
      - file: /root/.ssh

# If authorized_keys is not present or empty (it can be automatically created by salt-cloud)
# then extract public ssh key file from private key file, so that 'ssh root@localhost' will work
# (grep on a missing authorized_keys exits non-zero, so the command runs and
# the >> redirection creates the file)
extract-root-private-ssh-key:
  cmd.run:
    - name: ssh-keygen -N '' -y -f /root/.ssh/id_rsa | sed 's/$/ spryker-{{ grains.environment }}/' >> /root/.ssh/authorized_keys
    - unless: grep spryker-{{ grains.environment }} /root/.ssh/authorized_keys
{% endif %}

{% else %}
{% if pillar.server_env.ssh.id_rsa_pub is defined %}

# Non-deploy hosts: authorize the deploy host's public key for root logins
add-root-public-ssh-key:
  file.append:
    - name: /root/.ssh/authorized_keys
    - makedirs: True
    - text: {{ pillar.server_env.ssh.id_rsa_pub }}

{% endif %}
{% endif %}
101 |
--------------------------------------------------------------------------------
/base/pound/files/etc/pound/certs/star_spryker_dev:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDVXsIhiVsD8TUc
3 | oaL+Gc6oSH+Py2OdShD82NJ1mPeoMVs59it5z+afE1xbCfbfochDwj8aUXv/w3+h
4 | Msl1HEjBKVTsBvxase4h1QVJpv3DsFbb5m/tGa+YZd/WNkYOkVwu5g1Oe5Hs3MpS
5 | BTQD5ObCnrAO6g0EtqUJRWOaUJHKtf6mHUf+GfgP5T9XYTWBjQEjSiAs62zmc4MX
6 | 0ytUr7Emcd4ItUwv93NiEtn0ycz5WhRrgzJ/sCqSVr3Tyd8dqtpxhNI+VU9Zd3OC
7 | CRL7m5HUvf5SW4dk5YAydyuXWOLoWquVretjy6y3Pm8en2Ufx+AYv5bPQztFh8+1
8 | qfMlBT0dAgMBAAECggEBAKlnp5g61tFlc/a8eo1mbgZ8MRM4t2lNQfmOKGKHxbF7
9 | JmP5iAEZyI/qLjx1x9U8PKVXlwRNR7cEP9P7X6mkgDsklrEixr83X8rqt10HwZjZ
10 | 68yBw8Gq0BieYb5QrwSn8lml+1ChmXVGwzelu+uPlItWtI/S0e05GgUnW3JMn8Qh
11 | kZDMFmdmGNRPVQY9+w0eTATSOj2dMOMx7JbdWdpRw7HdxWAb11cqh0Rm6y8amkZE
12 | UlpmaKuFMKqFB+qtX5rcjhDnV2SXk8+C7nOTPveY1uMywKD47yHThb1KPK/V3LI9
13 | vLpYK7oJKimpCyGZBFgejlKN5QTCOjoUnKFHqFL46WECgYEA86Dw37EcmNZKGXMy
14 | l57FX5RgYrh1u71ulPSqFqQcy1lI07YOAEwmSP9QnVBpqWvRXm4Cbslq9IuUw4kO
15 | heS7qVef0j1Mf2huGmNZ+u/t8dLLkhr7fmncRnLouRebcvpy8MCsz4WwJ1XGs3D8
16 | ydg1yN05ClLROItKJXHwsqXRx5kCgYEA4DR4f5qrHnn2kuoqLC4oHt0u8uGpMlJo
17 | JuonO3hvIShVqcJvDeI5P//d4ldUgZkzg4/kehSOmyNHurzFIMUKAsXJMOOX85JI
18 | zQvXjtGrAVbBBbXdSWncFk5LVZL/uJ3AiF4fvDe+pCGdktHesvz4SnGcDRNkrXpZ
19 | 5G3EpgRdBCUCgYEA1hrbdQrWoe65AjK+BIeQ15WzOy72sL4iv59hFzK053SYzw4Q
20 | sew6PS67rvTrVuwc/MrbMneqmi1M2KbLb5n73i/WkdIzN0kUcfZcLm7LAxmIs+co
21 | nAj2f6cepQN9Joe5maEKffn0PC1iyXIDhnOPmO3p4gu0OOgYil3e9XFKsgkCgYA1
22 | hNDuxODsGXj+gN8oRKxXEb1PRQa6eGlcQLC0xX7hj1tDdqJD0iiQWmeAzA/kD8n4
23 | MbT8X0PbTISm0sDLnb1SBByOiwWOovC14YnHEc2cODVDQ7Ja/+YFfRMGjlx0mVOI
24 | Oc+hgj0DuPKARZC/cRtjydLSXDrAHmCu4nT6nDgH8QKBgBS+NZyHIlmhRiMfYuLU
25 | 8qr2wYfpyZEeZcw9nY19SV3aKRbDwFBlCx3wrdQe3pCeJq4njEDba+M41c2saae1
26 | 9qQGNL9W6jO33OSV+FmgyEpyAlneyYeLPK1ZQfYE8dSXxywIosWehhmdtaIEgy5w
27 | YmHfJO013RoLQTgr1UaURmZf
28 | -----END PRIVATE KEY-----
29 | -----BEGIN CERTIFICATE-----
30 | MIIDZTCCAk2gAwIBAgIJAIFMy60YhRYCMA0GCSqGSIb3DQEBCwUAMEkxCzAJBgNV
31 | BAYTAkRFMRAwDgYDVQQIDAdIYW1idXJnMRAwDgYDVQQKDAdTcHJ5a2VyMRYwFAYD
32 | VQQDDA0qLnNwcnlrZXIuZGV2MB4XDTE1MDIyNjEzNDQxNVoXDTM1MDIyMTEzNDQx
33 | NVowSTELMAkGA1UEBhMCREUxEDAOBgNVBAgMB0hhbWJ1cmcxEDAOBgNVBAoMB1Nw
34 | cnlrZXIxFjAUBgNVBAMMDSouc3ByeWtlci5kZXYwggEiMA0GCSqGSIb3DQEBAQUA
35 | A4IBDwAwggEKAoIBAQDVXsIhiVsD8TUcoaL+Gc6oSH+Py2OdShD82NJ1mPeoMVs5
36 | 9it5z+afE1xbCfbfochDwj8aUXv/w3+hMsl1HEjBKVTsBvxase4h1QVJpv3DsFbb
37 | 5m/tGa+YZd/WNkYOkVwu5g1Oe5Hs3MpSBTQD5ObCnrAO6g0EtqUJRWOaUJHKtf6m
38 | HUf+GfgP5T9XYTWBjQEjSiAs62zmc4MX0ytUr7Emcd4ItUwv93NiEtn0ycz5WhRr
39 | gzJ/sCqSVr3Tyd8dqtpxhNI+VU9Zd3OCCRL7m5HUvf5SW4dk5YAydyuXWOLoWquV
40 | retjy6y3Pm8en2Ufx+AYv5bPQztFh8+1qfMlBT0dAgMBAAGjUDBOMB0GA1UdDgQW
41 | BBQCgh3oEUA8kklWx6JTBRW1tJtj4DAfBgNVHSMEGDAWgBQCgh3oEUA8kklWx6JT
42 | BRW1tJtj4DAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAC8YIcPX0x
43 | ennfNJ5Fq9Trt4ynxycSVBxd92LUXNd/QFw9ScJzcCaZaTk9fK65DaeYIMzm+326
44 | vhK2kDsUwLcXOvZyO1r6Fu/f+T4xR92sTP5r+/wk6q1vADNQGtbkt7OYUYq/ivF7
45 | OQ0oNTU6sdYwlvW0hBWPjnAYFn5zT73g332JLLOlyTkUAPNYz9gByDiYhWktku17
46 | Gvf6+LuOXMPY+igrFPQiDxebTEIj03vqbToQsLrxP7/XH5tQfF8DtXhd4YOMmnaQ
47 | ou1mY9WBl5dBtN0S8+ZzTk6DOXPfNG5F7gnVwbc8nF1ajzL8TfUrLLLmROwKRurA
48 | o+kjUrUYSB+p
49 | -----END CERTIFICATE-----
50 |
51 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/nginx/spryker/yves.conf:
--------------------------------------------------------------------------------
###
### Common Yves settings (to be included in vhost definition)
### This file is maintained by salt
###

# Static files - allow only specified here paths
# all other resources should be served via static host (and cached, if possible, via reverse proxy or cdn)
location ~ (/assets/|/maintenance.html|/favicon.ico|/crossdomain.xml) {
    access_log off;
    expires 30d;
    add_header Pragma public;
    add_header Cache-Control "public";
    try_files $uri $uri/ =404;
    more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
}

# Other static-looking URIs: serve from disk if present, otherwise fall back
# to the PHP front controller.
location ~ \.(jpg|gif|png|css|js|html|xml|ico|txt|csv|map)$ {
    access_log off;
    expires 30d;
    add_header Pragma public;
    add_header Cache-Control "public";
    try_files $uri /index.php?$args;
    more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
}

# Allow payment messages
# auth_basic off: payment provider callbacks must reach this without credentials
location /transactionstatus/ {
    auth_basic off;

    if (-f $document_root/maintenance.html) {
        return 503;
    }
    add_header X-Server $hostname;
    add_header X-Info disabled_htaccess;
    fastcgi_pass backend-$application_env-yves;
    fastcgi_index index.php;
    include /etc/nginx/fastcgi_params;
    fastcgi_param SCRIPT_NAME /index.php;
    fastcgi_param APPLICATION_ENV $application_env;
    fastcgi_param APPLICATION_STORE $application_store;
    fastcgi_param SCRIPT_FILENAME $document_root/index.php;
    more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
}

# PHP application
location / {
    if (-f $document_root/maintenance.html) {
        return 503;
    }

    # CORS - Allow ajax requests from http to https webservices on the same domain
    more_set_headers "Access-Control-Allow-Origin: http://$server_name";
    more_set_headers "Access-Control-Allow-Credentials: true";
    more_set_headers "Access-Control-Allow-Headers: Authorization";

    # CORS - Allow ajax calls from cdn/static scripts
    # NOTE(review): add_header inside an if-block makes nginx ignore
    # add_header directives inherited from outer levels for matching
    # requests — confirm this interaction with the more_set_headers
    # calls above is intended.
    if ($http_origin ~* "^(http|https)://(img[1234]|cdn|static|cms)\.") {
        add_header "Access-Control-Allow-Origin" $http_origin;
    }

    # Frontend - force browser to use new rendering engine
    more_set_headers "X-UA-Compatible: IE=Edge,chrome=1";

    # Terminate OPTIONS requests immediately. No need for calling php
    # OPTIONS is used by Ajax from http to https as a pre-flight-request
    # see http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
    if ($request_method = OPTIONS) {
        return 200;
    }

    add_header X-Server $hostname;
    fastcgi_pass backend-$application_env-yves;
    fastcgi_index index.php;
    include /etc/nginx/fastcgi_params;
    fastcgi_param SCRIPT_NAME /index.php;
    fastcgi_param APPLICATION_ENV $application_env;
    fastcgi_param APPLICATION_STORE $application_store;
    fastcgi_param SCRIPT_FILENAME $document_root/index.php;
    more_clear_headers 'X-Powered-By' 'X-Store' 'X-Locale' 'X-Env' 'Server';
}

# Maintenance page
error_page 503 @maintenance;
location @maintenance {
    rewrite ^(.*)$ /maintenance.html break;
}
87 |
--------------------------------------------------------------------------------
/base/spryker/files/config/config_local_XX.php:
--------------------------------------------------------------------------------
1 | &2
80 | exit 1
81 | fi
82 | else
83 | CONFIGS=(redis)
84 | fi
85 |
# Apply the requested action ($1) to each selected redis instance in CONFIGS.
CONFIG_NUM=${#CONFIGS[@]}
for ((i=0; i < $CONFIG_NUM; i++))
do
    NAME=${CONFIGS[${i}]}
    PIDFILE="/var/run/redis/${NAME}.pid"

    case "$1" in
        start)
            echo -n "Starting $DESC: "
            # Ensure run dir and pidfile exist and are writable by redis.
            mkdir -p $RUNDIR
            touch $PIDFILE
            chown redis:redis $RUNDIR $PIDFILE
            chmod 755 $RUNDIR

            # Raise the open-files limit before starting, if configured.
            if [ -n "$ULIMIT" ]
            then
                ulimit -n $ULIMIT
            fi

            # One redis-server process per instance, each with its own config.
            if start-stop-daemon --start --quiet --umask 007 --pidfile $PIDFILE --chuid redis:redis --exec $DAEMON -- /etc/redis/${NAME}.conf
            then
                echo "$NAME."
            else
                echo "failed"
            fi
            ;;

        stop)
            echo -n "Stopping $DESC: "
            # Retry TERM once per second until the process exits.
            if start-stop-daemon --stop --retry forever/TERM/1 --quiet --oknodo --pidfile $PIDFILE --exec $DAEMON
            then
                echo "$NAME."
            else
                echo "failed"
            fi
            rm -f $PIDFILE
            sleep 1
            ;;

        restart|force-reload)
            # Re-invoke this script for stop then start, forwarding the
            # instance names (all arguments after the action).
            ARGS=($@)
            CONFIG=${ARGS[@]:1}
            ${0} stop $CONFIG
            ${0} start $CONFIG
            exit 0
            ;;

        status)
            # --signal 0 only probes whether the pidfile's process is alive.
            echo -n "$DESC ($NAME) is "
            if start-stop-daemon --stop --quiet --signal 0 --pidfile $PIDFILE --exec $DAEMON
            then
                echo "running"
            else
                echo "not running"
            fi
            ;;

        *)
            N=/etc/init.d/$NAME
            echo "Usage: $N {start|stop|restart|force-reload|status}" >&2
            exit 1
            ;;
    esac
done

exit 0
152 |
--------------------------------------------------------------------------------
/base/elasticsearch/files/elasticsearch_instance/etc/elasticsearch/elasticsearch.yml:
--------------------------------------------------------------------------------
##############################################################################
## This file is managed by saltstack - DO NOT MANUALLY CHANGE IT ##
##############################################################################


################################### Cluster ##################################

cluster.name: spryker-{{ environment }}


#################################### Node ####################################

node.name: {{ grains.fqdn }}
node.master: true
node.data: true
node.max_local_storage_nodes: 1


#################################### Index ###################################

index.number_of_shards: {{ settings.elasticsearch.shards }}
index.number_of_replicas: {{ settings.elasticsearch.replicas }}


#################################### Paths ###################################

# path.conf: /path/to/conf
# path.data: /path/to/data1,/path/to/data2
# path.work: /path/to/work
# path.logs: /path/to/logs
# path.plugins: /path/to/plugins
path.repo: ["/data/shop/{{ environment }}/shared/elasticsearch/snapshots"]

################################### Memory ###################################

# NOTE(review): pillar.get splits its key on ':' — the dotted
# 'elasticsearch.environments' segment looks unusual and may always fall
# back to the default; verify against the actual pillar layout.
bootstrap.mlockall: {{ salt['pillar.get']('elasticsearch.environments:mlockall', 'true') }}


############################## Network And HTTP ##############################

network.bind_host: 0.0.0.0
network.publish_host: {{ settings.publish_ip }}
http.port: {{ settings.environments[environment].elasticsearch.http_port }}
transport.tcp.port: {{ settings.environments[environment].elasticsearch.transport_port }}
transport.tcp.compress: false


################################### Gateway ##################################

gateway.recover_after_nodes: {{ settings.elasticsearch.minimum_nodes }}
gateway.recover_after_time: 3m
gateway.expected_nodes: {{ settings.elasticsearch.total_nodes }}


############################# Cluster management #############################

cluster.routing.allocation.node_initial_primaries_recoveries: 4
cluster.routing.allocation.node_concurrent_recoveries: 2
cluster.routing.allocation.disk.watermark.low: 90%
cluster.routing.allocation.disk.watermark.high: 95%


################################### Indices ##################################

indices.recovery.max_bytes_per_sec: 50mb
indices.recovery.concurrent_streams: 5
indices.store.throttle.max_bytes_per_sec: 100mb
indices.cache.filter.size: 100M
indices.fielddata.cache.size: 100M
indices.fielddata.cache.expire: 30m


################################## Discovery #################################
discovery.zen.minimum_master_nodes: {{ settings.elasticsearch.minimum_nodes }}
discovery.zen.ping.timeout: 5s

# Multicast discovery disabled on hosts with the 'dev' role, enabled elsewhere.
{%- if ('dev' in grains.roles) %}
discovery.zen.ping.multicast.enabled: false
{%- else %}
discovery.zen.ping.multicast.enabled: true
{%- endif %}


################################## Slow Log ##################################

#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms
#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms
#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms


################################## GC Logging ################################

monitor.jvm.gc.young.warn: 1000ms
monitor.jvm.gc.young.info: 700ms
monitor.jvm.gc.young.debug: 400ms

monitor.jvm.gc.old.warn: 10s
monitor.jvm.gc.old.info: 5s
monitor.jvm.gc.old.debug: 2s

################################### Safety####################################
action.disable_delete_all_indices: true
112 |
--------------------------------------------------------------------------------
/base/elasticsearch/macros/elasticsearch_instance.sls:
--------------------------------------------------------------------------------
#
# Macro: Setup one Elasticsearch instance
#

{% macro elasticsearch_instance(environment, environment_details, settings) -%}

{% if 'skip_instance_setup' not in environment_details.elasticsearch %}

# Data directory
/data/shop/{{ environment }}/shared/elasticsearch:
  file.directory:
    - user: elasticsearch
    - group: elasticsearch
    - mode: 700
    - makedirs: True

# Log directory
/data/logs/{{ environment }}/elasticsearch:
  file.directory:
    - user: elasticsearch
    - group: elasticsearch
    - mode: 755
    - makedirs: True

# Symlink for original log location, for gc.log
/var/log/elasticsearch-{{ environment }}:
  file.symlink:
    - target: /data/logs/{{ environment }}/elasticsearch
    - require:
      - file: /data/logs/{{ environment }}/elasticsearch

# Service configuration; changes restart the per-environment service.
/etc/default/elasticsearch-{{ environment }}:
  file.managed:
    - source: salt://elasticsearch/files/elasticsearch_instance/etc/default/elasticsearch
    - mode: 644
    - user: root
    - group: root
    - template: jinja
    - context:
        environment: {{ environment }}
        settings: {{ settings }}
    - watch_in:
      - service: elasticsearch-{{ environment }}

# Service init script
/etc/init.d/elasticsearch-{{ environment }}:
  file.managed:
    - source: salt://elasticsearch/files/elasticsearch_instance/etc/init.d/elasticsearch
    - mode: 755
    - user: root
    - group: root
    - template: jinja
    - context:
        environment: {{ environment }}

# Reload systemd on service creation
# (systemd only sees the new SysV script after daemon-reload)
elasticsearch-{{ environment }}-systemctl-reload:
  cmd.wait:
    - name: systemctl daemon-reload
    - watch:
      - file: /etc/init.d/elasticsearch-{{ environment }}

# Configuration directory
/etc/elasticsearch-{{ environment }}:
  file.directory:
    - user: root
    - group: root
    - mode: 755
70 |
71 | # Configuration - main yaml file
72 | /etc/elasticsearch-{{ environment }}/elasticsearch.yml:
73 | file.managed:
74 | - source: salt://elasticsearch/files/elasticsearch_instance/etc/elasticsearch/elasticsearch.yml
75 | - mode: 644
76 | - user: root
77 | - group: root
78 | - template: jinja
79 | - context:
80 | environment: {{ environment }}
81 | environment_details: {{ environment_details }}
82 | settings: {{ settings }}
83 | - require:
84 | - file: /etc/elasticsearch-{{ environment }}
85 | - watch_in:
86 | - service: elasticsearch-{{ environment }}
87 |
88 |
89 | # Configuration - logging yaml file
90 | /etc/elasticsearch-{{ environment }}/logging.yml:
91 | file.managed:
92 | - source: salt://elasticsearch/files/elasticsearch_instance/etc/elasticsearch/logging.yml
93 | - mode: 644
94 | - user: root
95 | - group: root
96 | - template: jinja
97 | - context:
98 | environment: {{ environment }}
99 | - require:
100 | - file: /etc/elasticsearch-{{ environment }}
101 | - watch_in:
102 | - service: elasticsearch-{{ environment }}
103 |
104 | # Configuration - (empty) scripts directory
105 | /etc/elasticsearch-{{ environment }}/scripts:
106 | file.directory:
107 | - require:
108 | - file: /etc/elasticsearch-{{ environment }}
109 |
110 | # Symlink for easier location of ES configs
111 | /etc/elasticsearch/{{ environment }}:
112 | file.symlink:
113 | - target: /etc/elasticsearch-{{ environment }}
114 | - require:
115 | - file: /etc/elasticsearch-{{ environment }}
116 |
117 | # Service
118 | elasticsearch-{{ environment }}:
119 | service:
120 | - running
121 | - enable: True
122 | - require:
123 | - pkg: elasticsearch
124 | - file: /etc/init.d/elasticsearch-{{ environment }}
125 | - file: /data/shop/{{ environment }}/shared/elasticsearch
126 | - file: /data/logs/{{ environment }}/elasticsearch
127 | - file: /etc/default/elasticsearch-{{ environment }}
128 | - file: /etc/elasticsearch/{{ environment }}
129 | - file: /etc/elasticsearch-{{ environment }}/elasticsearch.yml
130 | - file: /etc/elasticsearch-{{ environment }}/logging.yml
131 | - file: /etc/elasticsearch-{{ environment }}/scripts
132 | - cmd: elasticsearch-{{ environment }}-systemctl-reload
133 |
134 | {%- endif %}
135 | {%- endmacro %}
136 |
--------------------------------------------------------------------------------
/dev/mailcatcher/files/etc/init.d/mailcatcher:
--------------------------------------------------------------------------------
1 | #! /bin/sh
2 | ### BEGIN INIT INFO
3 | # Provides: mailcatcher
4 | # Required-Start: $remote_fs $syslog
5 | # Required-Stop: $remote_fs $syslog
6 | # Default-Start: 2 3 4 5
7 | # Default-Stop: 0 1 6
8 | # Short-Description: Mailcatcher
9 | # Description: Catches mails sent from localhost and displays them via a web frontend
10 | ### END INIT INFO
11 |
12 | # Author: Daniel Seif
13 |
14 | # Do NOT "set -e"
15 |
16 | # PATH should only include /usr/* if it runs after the mountnfs.sh script
17 | PATH=/sbin:/usr/sbin:/bin:/usr/bin
18 | DESC="Mailcatcher"
19 | NAME=mailcatcher
20 | DAEMON=/usr/local/bin/mailcatcher
21 | DAEMON_ARGS="--http-ip 0.0.0.0 --http-port 1080 --smtp-ip 0.0.0.0 --smtp-port 1025 -f"
22 | PIDFILE=/var/run/mailcatcher.pid
23 | SCRIPTNAME=/etc/init.d/mailcatcher
24 |
25 | # Exit if the package is not installed
26 | [ -x "$DAEMON" ] || exit 0
27 |
28 | # Read configuration variable file if it is present
29 | [ -r /etc/default/$NAME ] && . /etc/default/$NAME
30 |
31 | # Load the VERBOSE setting and other rcS variables
32 | . /lib/init/vars.sh
33 |
34 | # Define LSB log_* functions.
35 | # Depend on lsb-base (>= 3.2-14) to ensure that this file is present
36 | # and status_of_proc is working.
37 | . /lib/lsb/init-functions
38 |
#
# Function that starts the daemon/service
#
do_start()
{
	# Return
	#   0 if daemon has been started
	#   1 if daemon was already running
	#   2 if daemon could not be started
	# --test probes only: succeed (and fall through to the real start) only
	# when no matching process is already running.
	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null \
		|| return 1
	# Real start: mailcatcher runs in the foreground (-f in DAEMON_ARGS), so
	# start-stop-daemon backgrounds it and writes the pidfile itself.
	start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --make-pidfile --background -- \
		$DAEMON_ARGS \
		|| return 2
	# Add code here, if necessary, that waits for the process to be ready
	# to handle requests from services started subsequently which depend
	# on this one. As a last resort, sleep for some time.
}
57 |
#
# Function that stops the daemon/service
#
do_stop()
{
	# Return
	#   0 if daemon has been stopped
	#   1 if daemon was already stopped
	#   2 if daemon could not be stopped
	#   other if a failure occurred
	# First a clean stop via the pidfile: TERM, wait up to 30s, then KILL.
	start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE --name $NAME
	RETVAL="$?"
	[ "$RETVAL" = 2 ] && return 2
	# Wait for children to finish too if this is a daemon that forks
	# and if the daemon is only ever run from this initscript.
	# If the above conditions are not satisfied then add some other code
	# that waits for the process to drop all resources that could be
	# needed by services started subsequently. A last resort is to
	# sleep for some time.
	start-stop-daemon --stop --quiet --oknodo --retry=0/30/KILL/5 --exec $DAEMON
	[ "$?" = 2 ] && return 2
	# Many daemons don't delete their pidfiles when they exit.
	rm -f $PIDFILE
	return "$RETVAL"
}
83 |
#
# Function that sends a SIGHUP to the daemon/service
#
# NOTE(review): currently unused - the reload) case in the dispatcher below
# is commented out, so nothing calls this helper.
do_reload() {
	#
	# If the daemon can reload its configuration without
	# restarting (for example, when it is sent a SIGHUP),
	# then implement that here.
	#
	start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME
	return 0
}
96 |
# Dispatch on the init action requested by the service manager.
case "$1" in
  start)
	[ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME"
	do_start
	case "$?" in
		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
	esac
	;;
  stop)
	[ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME"
	do_stop
	case "$?" in
		0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;;
		2) [ "$VERBOSE" != no ] && log_end_msg 1 ;;
	esac
	;;
  status)
	status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $?
	;;
  #reload|force-reload)
	#
	# If do_reload() is not implemented then leave this commented out
	# and leave 'force-reload' as an alias for 'restart'.
	#
	#log_daemon_msg "Reloading $DESC" "$NAME"
	#do_reload
	#log_end_msg $?
	#;;
  restart|force-reload)
	#
	# If the "reload" option is implemented then remove the
	# 'force-reload' alias
	#
	log_daemon_msg "Restarting $DESC" "$NAME"
	do_stop
	case "$?" in
	  0|1)
		do_start
		case "$?" in
			0) log_end_msg 0 ;;
			1) log_end_msg 1 ;; # Old process is still running
			*) log_end_msg 1 ;; # Failed to start
		esac
		;;
	  *)
		# Failed to stop
		log_end_msg 1
		;;
	esac
	;;
  *)
	echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
	exit 3
	;;
esac

:
155 |
--------------------------------------------------------------------------------
/base/settings/hosts.sls:
--------------------------------------------------------------------------------
# Check if we run development VM? If so, no salt master is present, so the
# mine mechanism is not available. We assume that all services run on localhost.
{%- if 'dev' in grains.roles %}
{%- set app_hosts = ['localhost'] %}
{%- set web_hosts = ['localhost'] %}
{%- set job_hosts = ['localhost'] %}
{%- set es_data_hosts = ['localhost'] %}
{%- set es_log_hosts = ['localhost'] %}
{%- set cron_master_host = 'localhost' %}
{%- set queue_host = 'localhost' %}
{%- set redis_host = 'localhost' %}
{%- set publish_ip = 'localhost' %}

{%- else %}
# Use mine to fetch IP addresses from minions. Get the IP address of project_interface.
{%- set netif = salt['pillar.get']('hosting:project_network_interface', 'lo') %}

# Limit mine search: only same host (qa), only same environment (non-qa)
{%- if grains.environment == 'qa' %}
{%- set envmatch = ' and G@id:' + grains.id %}
{%- else %}
{%- set envmatch = ' and G@environment:' + grains.environment %}
{%- endif %}


# Get IP's of specific roles from mine.get of running instances.
# NOTE(review): expr_form= is the pre-2017 Salt keyword for the target type;
# newer releases renamed it tgt_type - confirm the Salt version before upgrading.
{%- set app_hosts = [] %}
{%- for hostname, network_settings in salt['mine.get']('G@roles:app' + envmatch, 'network.interfaces', expr_form = 'compound').items() %}
{%- do app_hosts.append(network_settings[netif]['inet'][0]['address']) %}
{%- endfor %}

{%- set web_hosts = [] %}
{%- for hostname, network_settings in salt['mine.get']('G@roles:web' + envmatch, 'network.interfaces', expr_form = 'compound').items() %}
{%- do web_hosts.append(network_settings[netif]['inet'][0]['address']) %}
{%- endfor %}

{%- set job_hosts = [] %}
{%- for hostname, network_settings in salt['mine.get']('G@roles:cronjobs' + envmatch, 'network.interfaces', expr_form = 'compound').items() %}
{%- do job_hosts.append(network_settings[netif]['inet'][0]['address']) %}
{%- endfor %}


# Elasticsearch is either discovered on our own minions via mine, or taken
# verbatim from the hosting:external_elasticsearch pillar.
{%- if salt['pillar.get']('hosting:external_elasticsearch', '') == '' %}
{%- set es_data_hosts = [] %}
{%- for hostname, network_settings in salt['mine.get']('G@roles:elasticsearch' + envmatch, 'network.interfaces', expr_form = 'compound').items() %}
{%- do es_data_hosts.append(network_settings[netif]['inet'][0]['address']) %}
{%- endfor %}
{%- else %}
{%- set es_data_hosts = salt['pillar.get']('hosting:external_elasticsearch') %}
{%- endif %}

{%- if salt['pillar.get']('hosting:external_rabbitmq', '') == '' %}
# NOTE(review): .items()[0] relies on dict.items() returning a list (Python 2);
# under Python 3 it is a non-subscriptable view - verify against the Salt runtime.
{%- set queue_host = salt['mine.get']('G@roles:queue' + envmatch, 'network.interfaces', expr_form = 'compound').items()[0][1][netif]['inet'][0].address %}
{%- else %}
{%- set queue_host = salt['pillar.get']('hosting:external_rabbitmq') %}
{%- endif %}


{%- set es_log_hosts = [] %}
{%- for hostname, network_settings in salt['mine.get']('G@roles:elk_elasticsearch' + envmatch, 'network.interfaces', expr_form = 'compound').items() %}
{%- do es_log_hosts.append(network_settings[netif]['inet'][0]['address']) %}
{%- endfor %}

# First cronjobs minion becomes the cron master; publish_ip is this minion's
# own address on the project interface.
{%- set cron_master_host = salt['mine.get']('G@roles:cronjobs' + envmatch, 'network.interfaces', expr_form = 'compound').items()[0][1][netif]['inet'][0].address %}
{%- set publish_ip = grains.ip_interfaces[netif]|first %}
{%- if salt['pillar.get']('hosting:external_redis', '') == '' %}
{%- set redis_host = salt['mine.get']('G@roles:redis' + envmatch, 'network.interfaces', expr_form = 'compound').items()[0][1][netif]['inet'][0].address %}
{%- else %}
{%- set redis_host = salt['pillar.get']('hosting:external_redis') %}
{%- endif %}

{%- endif %}
73 |
# Based on host settings, prepare cluster parameters for elasticsearch.
# NOTE(review): es_data_hosts is assumed to be a list even when it comes from
# the hosting:external_elasticsearch pillar - a plain string would make
# |count return its character length; confirm the pillar shape.
{%- set es_total_nodes = (es_data_hosts)|count %}
# Minimum master-eligible nodes must be a quorum, floor(total / 2) + 1, to
# prevent split-brain. The previous formula ( total / 2 )|round|int gave 1
# for a 2-node and 2 for a 4-node cluster, which is below quorum.
{%- set es_minimum_nodes = (es_total_nodes // 2) + 1 %}

{%- if es_total_nodes > 1 %}
# Multi-node cluster: one replica, data spread over 6 shards
{%- set es_replicas = 1 %}
{%- set es_shards = 6 %}
{%- else %}
# Single node: a replica could never be allocated, so keep none
{%- set es_replicas = 0 %}
{%- set es_shards = 1 %}
{%- endif %}
85 |
# Combine the settings computed above into three dictionaries
# (elasticsearch, host, hosts) that other states import from this state.
{%- set elasticsearch = {
        'minimum_nodes': es_minimum_nodes,
        'total_nodes': es_total_nodes,
        'shards': es_shards,
        'replicas': es_replicas,
    } %}

{%- set host = {
        'cron_master': cron_master_host,
        'queue': queue_host,
        'redis': redis_host,
    } %}

{%- set hosts = {
        'app': app_hosts,
        'web': web_hosts,
        'job': job_hosts,
        'elasticsearch_data': es_data_hosts,
        'elasticsearch_logs': es_log_hosts,
    } %}
--------------------------------------------------------------------------------
/base/settings/environments.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Parse per-environment settings
3 | #
4 | {% import_yaml 'settings/port_numbering.sls' as port %}
5 |
#
# Parse environment-specific settings
#
{%- set environments = pillar.environments %}
{%- for environment, environment_details in environments.items() %}

# If hostnames are defined in grains - overwrite settings from pillar
{%- set grains_hostname_static = salt['grains.get']('environments:' + environment + ':static:hostname', None) %}
{%- if grains_hostname_static != None %}
{%- do environments[environment].static.update ({ 'hostname': grains_hostname_static}) %}
{%- endif %}

# Generate Jenkins ports.
# Ports are built by string concatenation: '1' + <environment port from
# settings/port_numbering.sls> + '00' + <service digit>; results are strings.
{%- do environments[environment].update ({ 'jenkins': { 'port': '1' + port['environment'][environment]['port'] + '00' + '7' }}) %}

# Generate http static assets ports
{%- do environments[environment].static.update ({ 'port': '1' + port['environment'][environment]['port'] + '00' + '2' }) %}

# Generate Elasticsearch ports
{%- do environments[environment]['elasticsearch'].update ({
    'http_port': '1' + port['environment'][environment]['port'] + '00' + '5',
    'transport_port': '2' + port['environment'][environment]['port'] + '00' + '5',
}) %}

# Not using Redis-as-a-Service?
{%- if salt['pillar.get']('hosting:external_redis', '') == '' %}
# Generate Redis ports
{%- do environments[environment].redis.update ({
    'port': '1' + port['environment'][environment]['port'] + '00' + '9'
}) %}
{%- else %}
# External Redis listens on the stock port.
# NOTE(review): this port is an integer while all generated ports above are
# strings - confirm consumers accept both types.
{%- do environments[environment].update ({
    'redis': { 'port': 6379 }
}) %}
{%- endif %}

#
# Parse store settings
#
{%- for store, store_details in environment_details.stores.items() %}

# If hostnames are defined in grains - overwrite settings from pillar
{%- set grains_hostnames_yves = salt['grains.get']('environments:' + environment + ':stores:' + store + ':yves:hostnames', None) %}
{%- if grains_hostnames_yves != None %}
{%- do environments[environment]['stores'][store].yves.update ({ 'hostnames': grains_hostnames_yves}) %}
{%- endif %}
{%- set grains_hostname_zed = salt['grains.get']('environments:' + environment + ':stores:' + store + ':zed:hostname', None) %}
{%- if grains_hostname_zed != None %}
{%- do environments[environment]['stores'][store].zed.update ({ 'hostname': grains_hostname_zed}) %}
{%- endif %}


# Generate Yves/Zed ports (per-store: appdomain digit(s) + 0 for Yves, 1 for Zed)
{%- do environments[environment]['stores'][store].yves.update ({ 'port': '1' + port['environment'][environment]['port'] + port['store'][store]['appdomain'] + '0' }) %}
{%- do environments[environment]['stores'][store].zed.update ({ 'port': '1' + port['environment'][environment]['port'] + port['store'][store]['appdomain'] + '1' }) %}

# Generate store locale settings
{%- do environments[environment]['stores'][store].update ({ 'locale': port['store'][store]['locale'], 'appdomain': port['store'][store]['appdomain'] }) %}

# Generate RabbitMQ vhost names / credentials (one vhost per store+environment)
{%- do environments[environment]['stores'][store].update({
    'rabbitmq': {
        'username': store + '_' + environment,
        'password': environment_details.rabbitmq.password,
        'vhost': '/' + store + '_' + environment + '_zed'
    }
}) %}


# Not using MySQL-as-a-service?
{%- if salt['pillar.get']('hosting:external_mysql', '') == '' %}

# Generate SQL database names
{%- do environments[environment]['stores'][store].zed.update({
    'database': {
        'database': store + '_' + environment + '_zed',
        'hostname': environment_details.database.zed.hostname,
        'username': environment_details.database.zed.username,
        'password': environment_details.database.zed.password
    }
}) %}
{%- do environments[environment]['stores'][store].update({
    'dump': {
        'database': {
            'database': store + '_' + environment + '_dump',
            'hostname': environment_details.database.zed.hostname,
            'username': environment_details.database.zed.username,
            'password': environment_details.database.zed.password
        }
    }
}) %}

{%- else %}
# Using MySQL-as-a-service: same credentials, hostname taken from pillar
{%- set mysql_hostname = salt['pillar.get']('hosting:external_mysql') %}

# Generate SQL database names
{%- do environments[environment]['stores'][store].zed.update({
    'database': {
        'database': store + '_' + environment + '_zed',
        'hostname': mysql_hostname,
        'username': environment_details.database.zed.username,
        'password': environment_details.database.zed.password
    }
}) %}
{%- do environments[environment]['stores'][store].update({
    'dump': {
        'database': {
            'database': store + '_' + environment + '_dump',
            'hostname': mysql_hostname,
            'username': environment_details.database.zed.username,
            'password': environment_details.database.zed.password
        }
    }
}) %}
{%- endif %}

{%- endfor %}
{%- endfor %}
125 |
--------------------------------------------------------------------------------
/base/spryker/environments.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup Spryker environments
3 | #
4 |
5 | {% from 'settings/init.sls' import settings with context %}
6 | {% from 'spryker/macros/jenkins_instance.sls' import jenkins_instance with context %}
7 |
{%- for environment, environment_details in pillar.environments.items() %}
# Per-environment base directory for the shop
/data/shop/{{ environment }}:
  file.directory:
    - user: www-data
    - group: www-data
    - dir_mode: 755
    - require:
      - file: /data/shop

# Data shared between releases (configs, generated code, static data)
/data/shop/{{ environment }}/shared:
  file.directory:
    - user: www-data
    - group: www-data
    - dir_mode: 755
    - require:
      - file: /data/shop/{{ environment }}

# Create environment directory structure
/data/shop/{{ environment }}/shared/Generated:
  file.directory:
    - user: www-data
    - group: www-data
    - dir_mode: 755
    - file_mode: 755
    - require:
      - file: /data/shop/{{ environment }}/shared

/data/shop/{{ environment }}/shared/data/common:
  file.directory:
    - user: www-data
    - group: www-data
    - dir_mode: 755
    - file_mode: 755
    - makedirs: True
    - require:
      - file: /data/shop/{{ environment }}/shared

/data/logs/{{ environment }}:
  file.directory:
    - user: www-data
    - group: www-data
    - dir_mode: 755
    - file_mode: 755
    - require:
      - file: /data/logs

# If we do not use cloud object storage, then this directory should be shared
# between servers (using technology like NFS or GlusterFS, not included here).
/data/storage/{{ environment }}/static:
  file.directory:
    - user: www-data
    - group: www-data
    - dir_mode: 755
    - file_mode: 755
    - makedirs: true
    - require:
      - file: /data/storage

# Expose the shared static storage inside the shop's data directory
/data/shop/{{ environment }}/shared/data/static:
  file.symlink:
    - target: /data/storage/{{ environment }}/static
    - force: true
    - require:
      - file: /data/shop/{{ environment }}/shared/data/common
      - file: /data/storage/{{ environment }}/static

# Application environment config
/data/shop/{{ environment }}/shared/config_local.php:
  file.managed:
    - source: salt://spryker/files/config/config_local.php
    - template: jinja
    - user: www-data
    - group: www-data
    - mode: 644
    - require:
      - file: /data/shop/{{ environment }}/shared/data/common
    - context:
        environment: {{ environment }}
        settings: {{ settings }}

# Environment settings for console (CLI) processes
/data/shop/{{ environment }}/shared/console_env_local.php:
  file.managed:
    - source: salt://spryker/files/config/console_env_local.php
    - template: jinja
    - user: www-data
    - group: www-data
    - mode: 644
    - require:
      - file: /data/shop/{{ environment }}/shared/data/common
    - context:
        environment: {{ environment }}
        settings: {{ settings }}

# Optional fixed "current" symlink (set via the environment's code_symlink
# pillar key) instead of one switched by the deployment process
{%- if 'code_symlink' in environment_details %}
/data/shop/{{ environment }}/current:
  file.symlink:
    - target: {{ environment_details.code_symlink }}
{%- endif %}

{%- if 'web' in grains.roles %}
# Configure PHP-FPM pools
/etc/php/7.1/fpm/pool.d/{{ environment }}-zed.conf:
  file.managed:
    - source: salt://spryker/files/etc/php/7.1/fpm/pool.d/zed.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - watch_in:
      - cmd: reload-php-fpm
    - context:
        environment: {{ environment }}

/etc/php/7.1/fpm/pool.d/{{ environment }}-yves.conf:
  file.managed:
    - source: salt://spryker/files/etc/php/7.1/fpm/pool.d/yves.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - watch_in:
      - cmd: reload-php-fpm
    - context:
        environment: {{ environment }}


# NginX configs
/etc/nginx/conf.d/{{ environment }}-backend.conf:
  file.managed:
    - source: salt://spryker/files/etc/nginx/conf.d/backend.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - watch_in:
      - cmd: reload-nginx
    - context:
        environment: {{ environment }}

# Local NginX static vhost for images/assets?
{% if 'enable_local_vhost' in environment_details.static %}
{% if environment_details.static.enable_local_vhost %}
/etc/nginx/sites-available/{{ environment }}_static:
  file.managed:
    - source: salt://spryker/files/etc/nginx/sites-available/static.conf
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - context:
        environment: {{ environment }}
        settings: {{ settings }}
    - watch_in:
      - cmd: reload-nginx

/etc/nginx/sites-enabled/{{ environment }}_static:
  file.symlink:
    - target: /etc/nginx/sites-available/{{ environment }}_static
    - force: true
    - require:
      - file: /etc/nginx/sites-available/{{ environment }}_static
    - watch_in:
      - cmd: reload-nginx
{%- endif %}
{%- endif %}

{%- endif %}

# One Jenkins instance per environment on the cronjobs host
{%- if 'cronjobs' in grains.roles %}
{{ jenkins_instance(environment, environment_details, settings) }}
{%- endif %}

{%- endfor %}
181 |
--------------------------------------------------------------------------------
/base/system/repositories.sls:
--------------------------------------------------------------------------------
1 | #
2 | # Setup additional debian package repositories
3 | #
4 |
# Required for https-based repositories
apt-transport-https:
  pkg.installed

# Triggered (via watch_in) whenever any repo definition below changes
apt-get-update:
  cmd.wait:
    - name: apt-get update

# Base debian repositories
/etc/apt/sources.list:
  file.managed:
    - source: salt://system/files/etc/apt/sources.list
    - template: jinja
    - watch_in:
      - cmd: apt-get-update

# Additional software repositories.
# NOTE(review): several key_url entries below fetch signing keys over plain
# http; an on-path attacker could substitute the key - prefer https mirrors
# where the vendor provides them.
dotdeb:
  pkgrepo.managed:
    - humanname: DotDeb repo ({{ grains.lsb_distrib_codename }})
    - name: deb http://packages.dotdeb.org {{ grains.lsb_distrib_codename }} all
    - file: /etc/apt/sources.list.d/dotdeb.list
    - key_url: http://www.dotdeb.org/dotdeb.gpg
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

docker-repo:
  pkgrepo.managed:
    - humanname: Official Docker Repository
    - name: deb https://apt.dockerproject.org/repo debian-{{ grains.lsb_distrib_codename }} main
    - file: /etc/apt/sources.list.d/docker.list
    - keyid: 58118E89F3A912897C070ADBF76221572C52609D
    - keyserver: p80.pool.sks-keyservers.net
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

elasticsearch-repo:
  pkgrepo.managed:
    - humanname: Official Elasticsearch Repository
    - name: deb http://packages.elastic.co/elasticsearch/2.x/debian stable main
    - file: /etc/apt/sources.list.d/elasticsearch.list
    - key_url: http://packages.elasticsearch.org/GPG-KEY-elasticsearch
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

beats-repo:
  pkgrepo.managed:
    - humanname: Official Beats Repository
    - name: deb https://packages.elastic.co/beats/apt stable main
    - file: /etc/apt/sources.list.d/beats.list
    - key_url: http://packages.elasticsearch.org/GPG-KEY-elasticsearch
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

{{ grains.lsb_distrib_codename }}-backports-repo:
  pkgrepo.managed:
    - humanname: Debian {{ grains.lsb_distrib_codename }} Backports repository
    - name: deb http://ftp.uk.debian.org/debian {{ grains.lsb_distrib_codename }}-backports main
    - file: /etc/apt/sources.list.d/backports.list
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

nodesource-node-repo:
  pkgrepo.managed:
    - humanname: NodeSource NodeJS repository
    - name: deb https://deb.nodesource.com/node_6.x {{ grains.lsb_distrib_codename }} main
    - file: /etc/apt/sources.list.d/nodesource.list
    - key_url: https://deb.nodesource.com/gpgkey/nodesource.gpg.key
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

yarn-repo:
  pkgrepo.managed:
    - humanname: Yarn repository
    - name: deb https://dl.yarnpkg.com/debian/ stable main
    - file: /etc/apt/sources.list.d/yarn.list
    - key_url: https://dl.yarnpkg.com/debian/pubkey.gpg
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

jenkins-repo:
  pkgrepo.managed:
    - humanname: Jenkins repository
    - name: deb http://pkg.jenkins-ci.org/debian binary/
    - file: /etc/apt/sources.list.d/jenkins.list
    - key_url: http://pkg.jenkins-ci.org/debian/jenkins-ci.org.key
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

postgresql-repo:
  pkgrepo.managed:
    - humanname: Postgresql repository ({{ grains.lsb_distrib_codename }})
    - name: deb http://apt.postgresql.org/pub/repos/apt/ {{ grains.lsb_distrib_codename }}-pgdg main
    - file: /etc/apt/sources.list.d/postgresql.list
    - key_url: http://apt.postgresql.org/pub/repos/apt/ACCC4CF8.asc
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

php-repo:
  pkgrepo.managed:
    - humanname: PHP7.1 repository
    - name: deb https://packages.sury.org/php/ {{ grains.lsb_distrib_codename }} main
    - file: /etc/apt/sources.list.d/php.list
    - key_url: https://packages.sury.org/php/apt.gpg
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

# NOTE(review): the 'testing' distribution pin pulls whatever RabbitMQ
# currently publishes there - consider pinning a concrete series.
rabbitmq-repo:
  pkgrepo.managed:
    - humanname: RabbitMQ repository
    - name: deb http://www.rabbitmq.com/debian/ testing main
    - file: /etc/apt/sources.list.d/rabbitmq.list
    - key_url: https://www.rabbitmq.com/rabbitmq-release-signing-key.asc
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

# NOTE(review): pinned to the ancient 'lucid' Ubuntu series on a Debian host -
# verify this PPA series is still intended/available.
git-repo:
  pkgrepo.managed:
    - humanname: Official Git Ubuntu Repository
    - name: deb http://ppa.launchpad.net/git-core/ppa/ubuntu lucid main
    - file: /etc/apt/sources.list.d/git.list
    - keyid: E1DF1F24
    - keyserver: keyserver.ubuntu.com
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

mysql-server-repo:
  pkgrepo.managed:
    - humanname: Official MySQL server repository
    - name: deb http://repo.mysql.com/apt/debian/ {{ grains.lsb_distrib_codename }} mysql-5.7
    - file: /etc/apt/sources.list.d/mysql-server.list
    - keyid: 5072E1F5
    - keyserver: pool.sks-keyservers.net
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update

mysql-tools-repo:
  pkgrepo.managed:
    - humanname: Official MySQL tools repository
    - name: deb http://repo.mysql.com/apt/debian/ {{ grains.lsb_distrib_codename }} mysql-tools
    - file: /etc/apt/sources.list.d/mysql-tools.list
    - keyid: 5072E1F5
    - keyserver: pool.sks-keyservers.net
    - refresh_db: False
    - watch_in:
      - cmd: apt-get-update
164 |
--------------------------------------------------------------------------------
/base/spryker/files/etc/deploy/functions.rb:
--------------------------------------------------------------------------------
1 | require 'rubygems'
2 | require 'fileutils'
3 | require 'highline/import'
4 | require 'net/ssh/multi'
5 | require 'optparse'
6 |
## Execute command (parallel) on ssh hosts.
# servers - a hostname String or an Array of hostnames (connected as root)
# command - the shell command to run on every host
# options - :dont_display_failed suppresses the per-host failure output
# Returns true when the command exited 0 on every host, false otherwise.
def multi_ssh_exec(servers, command, options={})
  if servers.is_a? Array then server_array=servers; else server_array=[servers]; end
  Net::SSH::Multi.start(:default_user => "root") do |session|
    server_array.each { |server| session.use server }
    channels = session.exec command
    # session.loop drains all connections; only afterwards are the channels'
    # :exit_status properties populated.
    session.loop
    # NOTE(review): :exit_status may be nil if a channel closed without
    # reporting a status; nil != 0, so that case is treated as a failure here.
    failed = channels.select { |c| c[:exit_status] != 0 }
    if failed.any?
      failed.each { |f| puts "[#{f[:host]}] FAILED!" } unless options[:dont_display_failed]
      return false
    end
    return true
  end
end
22 |
## Execute command (parallel) on ssh hosts, aborting the whole program
## (exit status 1) when the command failed on any host. Returns true.
def multi_ssh_exec!(servers, command)
  unless multi_ssh_exec(servers, command)
    puts red "Command failed on one or more servers. Aborting."
    exit 1
  end
  true
end
31 |
## Execute commands locally, in parallel.
# commands - Array of shell command Strings; each runs in its own forked child.
# Returns true when every command exited with status 0, false otherwise.
def multi_exec(commands)
  commands.each { |command| exec(command) if fork.nil? }
  results = Process.waitall
  # Check Process::Status via #success?: the previous `status != 0`
  # comparison relied on Process::Status#== coercing to Integer, which is
  # deprecated. #success? is nil/false for signalled or failed children,
  # so all? still reports those as failure.
  results.all? { |_pid, status| status.success? }
end
41 |
# Execute commands, locally, in parallel; abort the program if any of them
# failed. Returns true on success, exits with status 1 otherwise.
def multi_exec!(commands)
    return true if multi_exec(commands)
    puts red "Command failed. Aborting."
    exit 1
end
50 |
# Parser for commandline parameters.
# Fills the global $parameters hash from ARGV (destructively, via parse!)
# and registers one extra option per $project_options entry that declares
# a :cmdline key. The built parser is kept in $opt_parser so -h can print it.
def parse_commandline_parameters
    $parameters={}
    $opt_parser = OptionParser.new do |opt|
        opt.banner = "Usage: deploy.rb [OPTIONS]"
        opt.separator ""
        opt.separator "Commands"
        opt.separator "  deploy - perform deployment"
        opt.separator ""
        opt.separator "Options"

        opt.on("-e","--environment ENVIRONMENT","Environment to deploy") do |environment|
            $parameters[:environment] = environment
        end

        opt.on("-s","--scmpath PATH","Path in SCM (e.g. trunk, branches/my_branch, tags/go_live_1)") do |scmpath|
            $parameters[:scmpath] = scmpath
        end

        opt.on("-r","--reindex","Force reindexing") do
            $parameters[:reindex] = true
        end

        opt.on("-n","--no-reindex","Do not reindex") do
            $parameters[:reindex] = false
        end

        opt.on("-v","--verbose","Switch on verbose mode") do
            $parameters[:verbose] = true
        end

        opt.on("-h","--help","Show help") do
            puts $opt_parser
            exit
        end

        # Parser for custom options declared by the project.
        # Each such option stores its result in option[:value]: the default is
        # the SECOND entry of :cli_options, and passing the flag selects the
        # FIRST entry (i.e. the flag flips the choice to the first value).
        $project_options.select {|o| o.has_key? :cmdline }.each do |option|
            option[:cli_options] = option[:options] unless option.has_key? :cli_options
            option[:value] = option[:cli_options][1] || ''
            opt.on(option[:cmdline], option[:question]) do
                option[:value] = option[:cli_options].first
            end
        end
    end
    $opt_parser.parse!
end
98 |
# Execute SVN with given args, passing credentials from configfile
# ($svn_user/$svn_password). Returns svn's stdout as a String.
# NOTE(review): the password is interpolated into the command line and is
# therefore visible in the process list while svn runs; args is not
# shell-escaped either, so callers must pass trusted input only.
def svn(args)
    return `svn --username=#{$svn_user} --password=#{$svn_password} --no-auth-cache --non-interactive --trust-server-cert #{args}`
end
103 |
# Execute SVNSYNC with given args, passing credentials from configfile
# ($svn_user/$svn_password). Returns svnsync's stdout as a String.
# NOTE(review): same caveat as svn() — password visible in `ps`, args not
# shell-escaped.
def svnsync(args)
    return `svnsync --username=#{$svn_user} --password=#{$svn_password} --no-auth-cache --non-interactive --trust-server-cert #{args}`
end
108 |
# SVN helpers
# Return the revision reported by `svn info <url>` (as a String), or nil
# when no "Revision:" line is present in the output.
def svn_get_revision(url)
    # BUGFIX: backticks return a String, and String#map was removed in
    # Ruby 1.9 — this raised NoMethodError. Split into lines explicitly.
    return (svn "info #{url}").lines.map(&:split).select{ |i| i[0]=="Revision:"}.flatten[1]
end
113 |
# GIT helpers
# All of these shell out to git against the local mirror at $git_path.

# List all tags, one per line, with '*' markers and leading spaces stripped.
def git_list_tags
    return `git --git-dir #{$git_path}/.git tag -l | sed -e 's/\*//g' -e 's/^ *//g'`
end
# List remote branches, dropping the HEAD pointer and the 'origin/' prefix.
def git_list_branches
    return `git --git-dir #{$git_path}/.git branch -r | grep -v HEAD | sed -e 's/^[ ]*\//g' -e 's/origin[/]//g'`
end
# Check out master and fetch all branches and tags from every remote.
def git_pull
    return `cd #{$git_path}/ && git checkout -q master && git pull --all --tags -q --force`
end
# Drop local remote-tracking refs for branches deleted on origin.
def git_prune
    return `git --git-dir=#{$git_path}/.git remote prune origin`
end
# Return the full SHA of the current HEAD (trailing newline removed).
def git_get_revision
    return `git --git-dir=#{$git_path}/.git rev-parse HEAD`.chomp
end
130 |
# Show menu with all items from argument array, return chosen array element.
# Uses HighLine's +choose+ DSL; blocks on stdin until the user picks an entry,
# then returns immediately from within the choice callback.
def choose_item_from_array(prompt, items)
    puts ""
    choose do |menu|
        menu.prompt = prompt
        items.each { |item| menu.choice(item) { |choice| return choice }}
    end
end
139 |
## Ask for project-specific configuration options (if any).
# Only options flagged with :ask_question that have no (non-empty) :value yet
# are prompted for; the chosen answer is stored back into option[:value].
# Example entry:
#   { :question => "Do you want to reload solr cores?", :options => %w(yes no), :variable => "reload_solr_cores" },
def ask_project_options
    pending = $project_options.select do |o|
        o.has_key?(:ask_question) && o[:ask_question] && (!o.has_key?(:value) || o[:value].empty?)
    end
    pending.each do |option|
        option[:value] = choose_item_from_array(option[:question].strip + ": ", option[:options])
    end
end
147 |
# Return string with current time (YYYYMMDD-HHMMSS), for directory namings.
def current_time_dirname
    Time.now.strftime('%Y%m%d-%H%M%S')
end
152 |
# Create directory and change ownership to the web user/group
# ($www_user/$www_group from the deploy config), using install -d so
# intermediate directories are created too. Returns Kernel#system's boolean.
# NOTE(review): dir is interpolated unescaped into a shell command — only
# pass trusted paths.
def mkdir(dir)
    system "install -d -o #{$www_user} -g #{$www_group} #{dir}"
end
# Create +dir+ (owned by the web user, via mkdir) unless it already exists,
# announcing the creation on stdout.
def mkdir_if_missing(dir)
    return if File.directory? dir
    puts "### Creating directory: #{dir}"
    mkdir dir
end
163 |
# Get the login name of the current user: the invoking user when running
# under sudo (SUDO_USER), otherwise the output of `whoami`.
def get_current_user
    sudo_user = ENV['SUDO_USER']
    return sudo_user unless sudo_user.nil?
    `whoami`.strip
end
168 |
# Color support for console:
# wrap +text+ in the given ANSI escape and append the reset sequence.
def colorize(text, color_code)
    [color_code, text, "\033[0m"].join
end
173 |
# Helpers for colorized output.
def red(text)
    colorize(text, "\033[31m")
end

def yellow(text)
    colorize(text, "\033[33m")
end

def green(text)
    colorize(text, "\033[32m")
end

# Status ("### ...") and error ("!!! ...") lines, colorized and printed.
def put_status(text)
    puts(yellow("### " + text))
end

def put_error(text)
    puts(red("!!! " + text))
end
180 |
--------------------------------------------------------------------------------
/base/elasticsearch/files/elasticsearch_instance/etc/init.d/elasticsearch:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | #
3 | # /etc/init.d/elasticsearch -- startup script for Elasticsearch
4 | #
5 | # Written by Miquel van Smoorenburg .
6 | # Modified for Debian GNU/Linux by Ian Murdock .
7 | # Modified for Tomcat by Stefan Gybas .
8 | # Modified for Tomcat6 by Thierry Carrez .
9 | # Additional improvements by Jason Brittain .
10 | # Modified by Nicolas Huray for Elasticsearch .
11 | #
12 | ### BEGIN INIT INFO
13 | # Provides: elasticsearch-{{ environment }}
14 | # Required-Start: $network $remote_fs $named
15 | # Required-Stop: $network $remote_fs $named
16 | # Default-Start: 2 3 4 5
17 | # Default-Stop: 0 1 6
18 | # Short-Description: Starts elasticsearch
19 | # Description: Starts elasticsearch using start-stop-daemon
20 | ### END INIT INFO
21 |
# Pin PATH so behavior matches whether invoked from rc, cron or a shell.
PATH=/bin:/usr/bin:/sbin:/usr/sbin
# NOTE: this file is a Salt/Jinja template; {{ environment }} is filled in
# per instance so several elasticsearch services can coexist on one host.
NAME=elasticsearch-{{ environment }}
DESC="Elasticsearch Server ({{ environment}})"
DEFAULT=/etc/default/$NAME

# Everything below (ulimit, sysctl, chown) requires root.
if [ `id -u` -ne 0 ]; then
	echo "You need root privileges to run this script"
	exit 1
fi


. /lib/lsb/init-functions

if [ -r /etc/default/rcS ]; then
	. /etc/default/rcS
fi


# The following variables can be overwritten in $DEFAULT

# Run Elasticsearch as this user ID and group ID
ES_USER=elasticsearch
ES_GROUP=elasticsearch

# The first existing directory is used for JAVA_HOME (if JAVA_HOME is not defined in $DEFAULT)
JDK_DIRS="/usr/lib/jvm/java-8-oracle/ /usr/lib/jvm/j2sdk1.8-oracle/ /usr/lib/jvm/jdk-7-oracle-x64 /usr/lib/jvm/java-7-oracle /usr/lib/jvm/j2sdk1.7-oracle/ /usr/lib/jvm/java-8-openjdk-amd64/ /usr/lib/jvm/java-8-openjdk/ /usr/lib/jvm/java-7-openjdk-amd64/ /usr/lib/jvm/java-7-openjdk/ /usr/lib/jvm/java-7-openjdk-armhf /usr/lib/jvm/java-7-openjdk-i386/ /usr/lib/jvm/default-java"

# Look for the right JVM to use
# (the -z "${JAVA_HOME}" guard means the FIRST readable candidate wins)
for jdir in $JDK_DIRS; do
	if [ -r "$jdir/bin/java" -a -z "${JAVA_HOME}" ]; then
		JAVA_HOME="$jdir"
	fi
done
export JAVA_HOME

# Directory where the Elasticsearch binary distribution resides
ES_HOME=/usr/share/elasticsearch

# Heap Size (defaults to 256m min, 1g max)
#ES_HEAP_SIZE=2g

# Heap new generation
#ES_HEAP_NEWSIZE=

# max direct memory
#ES_DIRECT_SIZE=

# Additional Java OPTS
#ES_JAVA_OPTS=

# Maximum number of open files
MAX_OPEN_FILES=65535

# Maximum amount of locked memory
#MAX_LOCKED_MEMORY=

# Elasticsearch log directory
LOG_DIR=/var/log/$NAME

# Elasticsearch data directory
DATA_DIR=/var/lib/$NAME

# Elasticsearch configuration directory
CONF_DIR=/etc/$NAME

# Maximum number of VMA (Virtual Memory Areas) a process can own
MAX_MAP_COUNT=262144

# Path to the GC log file
ES_GC_LOG_FILE=${LOG_DIR}/gc.log

# Elasticsearch PID file directory
PID_DIR="/var/run"

# End of variables that can be overwritten in $DEFAULT

# overwrite settings from default file
if [ -f "$DEFAULT" ]; then
	. "$DEFAULT"
fi

# Check for newrelic java agent and attach it if the instance ships one.
if [ -f "${DATA_DIR}/newrelic/newrelic.jar" ]; then
	ES_JAVA_OPTS="${ES_JAVA_OPTS} -javaagent:${DATA_DIR}/newrelic/newrelic.jar"
fi

# Define other required variables
PID_FILE="$PID_DIR/$NAME.pid"
DAEMON=$ES_HOME/bin/elasticsearch
DAEMON_OPTS="-d -p $PID_FILE --default.path.home=$ES_HOME --default.path.logs=$LOG_DIR --default.path.data=$DATA_DIR --default.path.conf=$CONF_DIR"

export ES_HEAP_SIZE
export ES_HEAP_NEWSIZE
export ES_DIRECT_SIZE
export ES_JAVA_OPTS
export ES_GC_LOG_FILE

# Check DAEMON exists
# (exit 0, not 1, so boot does not fail when the package is absent)
test -x $DAEMON || exit 0
121 |
# Resolve the java binary into $JAVA: prefer $JAVA_HOME/bin/java, fall back
# to the first `java` on $PATH. Exits 1 if no executable java is found.
checkJava() {
	if [ -x "$JAVA_HOME/bin/java" ]; then
		JAVA="$JAVA_HOME/bin/java"
	else
		JAVA=`which java`
	fi

	if [ ! -x "$JAVA" ]; then
		echo "Could not find any executable java binary. Please install java in your PATH or set JAVA_HOME"
		exit 1
	fi
}
134 |
# Action dispatch: start|stop|status|restart|force-reload.
case "$1" in
  start)
	checkJava

	# mlockall needs a fixed heap size; refuse ambiguous configurations.
	if [ -n "$MAX_LOCKED_MEMORY" -a -z "$ES_HEAP_SIZE" ]; then
		log_failure_msg "MAX_LOCKED_MEMORY is set - ES_HEAP_SIZE must also be set"
		exit 1
	fi

	log_daemon_msg "Starting $DESC"

	pid=`pidofproc -p $PID_FILE elasticsearch`
	if [ -n "$pid" ] ; then
		log_begin_msg "Already running."
		log_end_msg 0
		exit 0
	fi

	# Prepare environment
	mkdir -p "$LOG_DIR" "$DATA_DIR" && chown "$ES_USER":"$ES_GROUP" "$LOG_DIR" "$DATA_DIR"
	if [ -n "$PID_FILE" ] && [ ! -e "$PID_FILE" ]; then
		touch "$PID_FILE" && chown "$ES_USER":"$ES_GROUP" "$PID_FILE"
	fi

	if [ -n "$MAX_OPEN_FILES" ]; then
		ulimit -n $MAX_OPEN_FILES
	fi

	if [ -n "$MAX_LOCKED_MEMORY" ]; then
		ulimit -l $MAX_LOCKED_MEMORY
	fi

	if [ -n "$MAX_MAP_COUNT" -a -f /proc/sys/vm/max_map_count ]; then
		sysctl -q -w vm.max_map_count=$MAX_MAP_COUNT
	fi

	# Start Daemon
	start-stop-daemon -d $ES_HOME --start -b --user "$ES_USER" -c "$ES_USER" --pidfile "$PID_FILE" --exec $DAEMON -- $DAEMON_OPTS
	return=$?
	if [ $return -eq 0 ]; then
		i=0
		timeout=10
		# Wait for the process to be properly started before exiting
		until { cat "$PID_FILE" | xargs kill -0; } >/dev/null 2>&1
		do
			sleep 1
			i=$(($i + 1))
			if [ $i -gt $timeout ]; then
				log_end_msg 1
				exit 1
			fi
		done
	fi
	log_end_msg $return
	exit $return
	;;
  stop)
	log_daemon_msg "Stopping $DESC"

	if [ -f "$PID_FILE" ]; then
		start-stop-daemon --stop --pidfile "$PID_FILE" \
			--user "$ES_USER" \
			--quiet \
			--retry forever/TERM/20 >/dev/null
		# BUGFIX: capture the exit code exactly once. The previous version
		# re-read $? in the elif, where it held the status of the first
		# [ ] test rather than start-stop-daemon's, so the "Failed to stop"
		# branch (exit code 3) was unreachable.
		rc=$?
		if [ $rc -eq 1 ]; then
			log_progress_msg "$DESC is not running but pid file exists, cleaning up"
		elif [ $rc -eq 3 ]; then
			PID="`cat $PID_FILE`"
			log_failure_msg "Failed to stop $DESC (pid $PID)"
			exit 1
		fi
		rm -f "$PID_FILE"
	else
		log_progress_msg "(not running)"
	fi
	log_end_msg 0
	;;
  status)
	status_of_proc -p $PID_FILE elasticsearch elasticsearch && exit 0 || exit $?
	;;
  restart|force-reload)
	if [ -f "$PID_FILE" ]; then
		$0 stop
		sleep 1
	fi
	$0 start
	;;
  *)
	log_success_msg "Usage: $0 {start|stop|restart|force-reload|status}"
	exit 1
	;;
esac

exit 0
--------------------------------------------------------------------------------
/base/spryker/files/jenkins_instance/etc/init.d/jenkins:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # /etc/init.d/jenkins
3 | # debian-compatible jenkins startup script.
4 | # Amelia A Lewis
5 | #
6 | ### BEGIN INIT INFO
7 | # Provides: jenkins-{{ environment }}
8 | # Required-Start: $remote_fs $syslog $network
9 | # Required-Stop: $remote_fs $syslog $network
10 | # Default-Start: 2 3 4 5
11 | # Default-Stop: 0 1 6
12 | # Short-Description: Start jenkins-{{ environment }} at boot time
13 | # Description: Controls the jenkins-{{ environment }} engine.
14 | ### END INIT INFO
15 |
PATH=/bin:/usr/bin:/sbin:/usr/sbin

# This file is a Salt/Jinja template: {{ environment }} is substituted per
# instance, and $NAME is derived from the script's own filename so one script
# body can serve several jenkins-<env> instances.
DESC="Jenkins-{{ environment }} Server"
NAME=`basename $0`
SCRIPTNAME=/etc/init.d/$NAME

# Instance settings (JENKINS_HOME, JENKINS_LOG, PIDFILE, JENKINS_WAR, JAVA,
# JAVA_ARGS, HTTP_PORT, UMASK, ...) come from /etc/default/<instance>.
[ -r /etc/default/$NAME ] && . /etc/default/$NAME

#DAEMON=$JENKINS_SH
DAEMON=/usr/bin/daemon
DAEMON_ARGS="--name=$NAME --inherit --env=JENKINS_HOME=$JENKINS_HOME --output=$JENKINS_LOG --pidfile=$PIDFILE"

if [ -n "$UMASK" ]; then
    DAEMON_ARGS="$DAEMON_ARGS --umask=$UMASK"
fi

SU=/bin/su

# Exit if the package is not installed
if [ ! -x "$DAEMON" ]; then
    echo "daemon package not installed" >&2
    exit 1
fi

# Exit if not supposed to run standalone
if [ "$RUN_STANDALONE" = "false" ]; then
    echo "Not configured to run standalone" >&2
    exit 1
fi

# load environments (locale first, /etc/environment as fallback)
if [ -r /etc/default/locale ]; then
    . /etc/default/locale
    export LANG LANGUAGE
elif [ -r /etc/environment ]; then
    . /etc/environment
    export LANG LANGUAGE
fi

# Load the VERBOSE setting and other rcS variables
. /lib/init/vars.sh

# Define LSB log_* functions.
# Depend on lsb-base (>= 3.0-6) to ensure that this file is present.
. /lib/lsb/init-functions

# Make sure we run as root, since setting the max open files through
# ulimit requires root access
if [ `id -u` -ne 0 ]; then
    echo "The $NAME init script can only be run as root"
    exit 1
fi
68 |
69 |
# Verify that a TCP port is free before starting the service.
# $1 = service label (for messages), $2 = configured port (may be empty),
# $3 = default port. Returns 0 when the port is free, 1 when in use.
check_tcp_port() {
    local service=$1
    local assigned=$2
    local default=$3

    # Fall back to the default when no port was assigned.
    port=${assigned:-$default}

    count=`netstat --listen --numeric-ports | grep \:$port[[:space:]] | grep -c . `

    if [ $count -eq 0 ]; then
        return 0
    fi

    echo "The selected $service port ($port) seems to be in use by another program "
    echo "Please select another port to use for $NAME"
    return 1
}
89 |
90 | #
91 | # Function that starts the daemon/service
92 | #
do_start()
{
    # the default location is /var/run/jenkins/jenkins.pid but the parent directory needs to be created
    # NOTE(review): plain mkdir (no -p) fails when nested parents are missing
    # and the || true hides that — confirm PIDFILE is only one level deep.
    mkdir `dirname $PIDFILE` > /dev/null 2>&1 || true
    chown $JENKINS_USER `dirname $PIDFILE`
    # Return
    #   0 if daemon has been started
    #   1 if daemon was already running
    #   2 if daemon could not be started
    $DAEMON $DAEMON_ARGS --running && return 1

    # Verify that the jenkins port is not already in use, winstone does not exit
    # even for BindException
    check_tcp_port "http" "$HTTP_PORT" "8080" || return 2

    # If the var MAXOPENFILES is enabled in /etc/default/jenkins then set the max open files to the
    # proper value
    if [ -n "$MAXOPENFILES" ]; then
        [ "$VERBOSE" != no ] && echo Setting up max open files limit to $MAXOPENFILES
        ulimit -n $MAXOPENFILES
    fi

    # notify of explicit umask
    if [ -n "$UMASK" ]; then
        [ "$VERBOSE" != no ] && echo Setting umask to $UMASK
    fi

    # --user in daemon doesn't prepare environment variables like HOME, USER, LOGNAME or USERNAME,
    # so we let su do so for us now
    $SU -l $JENKINS_USER --shell=/bin/bash -c "$DAEMON $DAEMON_ARGS -- $JAVA $JAVA_ARGS -jar $JENKINS_WAR $JENKINS_ARGS" || return 2
}
124 |
125 |
126 | #
127 | # Verify that all jenkins processes have been shutdown
128 | # and if not, then do killall for them
129 | #
get_running()
{
    # Return value = number of java/daemon processes owned by $JENKINS_USER,
    # so status 0 ("success") means nothing is running.
    # NOTE(review): shell return values wrap at 256 — fine as a zero/non-zero
    # check, but don't rely on this as an exact count.
    return `ps -U $JENKINS_USER --no-headers -f | egrep -e '(java|daemon)' | grep -c . `
}
134 |
force_stop()
{
    # get_running returns the number of leftover java/daemon processes as its
    # status, so a non-zero status means something is still alive.
    if ! get_running; then
        killall -u $JENKINS_USER java daemon || return 3
    fi
}
142 |
# Get the status of the daemon process:
# 0 when the daemon wrapper reports the instance as running, 1 otherwise.
get_daemon_status()
{
    if $DAEMON $DAEMON_ARGS --running; then
        return 0
    fi
    return 1
}
148 |
149 |
150 | #
151 | # Function that stops the daemon/service
152 | #
do_stop()
{
    # Return
    #   0 if daemon has been stopped
    #   1 if daemon was already stopped
    #   2 if daemon could not be stopped
    #   other if a failure occurred
    get_daemon_status
    case "$?" in
        0)
            # Ask the daemon wrapper to stop, then poll for up to 20s.
            $DAEMON $DAEMON_ARGS --stop || return 2
            # wait for the process to really terminate
            for n in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20; do
                sleep 1
                $DAEMON $DAEMON_ARGS --running || break
            done
            # Still alive after the grace period: kill leftovers hard.
            if get_daemon_status; then
                force_stop || return 3
            fi
            ;;
        *)
            # Wrapper doesn't know about the instance; sweep strays anyway.
            force_stop || return 3
            ;;
    esac

    # Many daemons don't delete their pidfiles when they exit.
    rm -f $PIDFILE
    return 0
}
182 |
# Action dispatch: start|stop|status|restart|force-reload.
case "$1" in
  start)
    log_daemon_msg "Starting $DESC" "$NAME"
    do_start
    case "$?" in
        # 1 = already running, which still counts as success for an init script
        0|1) log_end_msg 0 ;;
        2) log_end_msg 1 ;;
    esac
    ;;
  stop)
    log_daemon_msg "Stopping $DESC" "$NAME"
    do_stop
    case "$?" in
        0|1) log_end_msg 0 ;;
        2) log_end_msg 1 ;;
    esac
    ;;
  restart|force-reload)
    #
    # If the "reload" option is implemented then remove the
    # 'force-reload' alias
    #
    log_daemon_msg "Restarting $DESC" "$NAME"
    do_stop
    case "$?" in
        0|1)
            do_start
            case "$?" in
                0) log_end_msg 0 ;;
                1) log_end_msg 1 ;; # Old process is still running
                *) log_end_msg 1 ;; # Failed to start
            esac
            ;;
        *)
            # Failed to stop
            log_end_msg 1
            ;;
    esac
    ;;
  status)
    get_daemon_status
    case "$?" in
        0)
            echo "$DESC is running with the pid `cat $PIDFILE`"
            rc=0
            ;;
        *)
            # Wrapper says not running; count stray processes ourselves
            # (get_running returns the process count as its status).
            get_running
            procs=$?
            if [ $procs -eq 0 ]; then
                echo -n "$DESC is not running"
                if [ -f $PIDFILE ]; then
                    echo ", but the pidfile ($PIDFILE) still exists"
                    rc=1
                else
                    echo
                    rc=3
                fi

            else
                echo "$procs instances of jenkins are running at the moment"
                echo "but the pidfile $PIDFILE is missing"
                rc=0
            fi

            exit $rc
            ;;
    esac
    ;;
  *)
    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
    exit 3
    ;;
esac

exit 0
259 |
--------------------------------------------------------------------------------