├── README.md
├── pillar
├── nodes
│ ├── db1.sls
│ ├── db2.sls
│ ├── ha1.sls
│ ├── ha2.sls
│ ├── mc1.sls
│ └── mc2.sls
├── roles
│ ├── admin.sls
│ ├── cache.sls
│ ├── common.sls
│ ├── db.sls
│ ├── ha.sls
│ ├── mc.sls
│ ├── search.sls
│ ├── storage.sls
│ └── web.sls
├── salt
│ └── minion.sls
├── schedule
│ └── highstate.sls
├── svn
│ └── init.sls
├── top.sls
├── users
│ └── init.sls
└── zabbix
│ ├── agent.sls
│ └── api.sls
└── salt
├── _grains
└── roles.py
├── _runners
├── publish.py
└── publish.pyc
├── apache
├── files
│ └── etc
│ │ └── httpd
│ │ └── conf
│ │ └── httpd.conf
└── init.sls
├── base
├── crons.sls
├── files
│ ├── mall.repo
│ └── resolv.conf
├── hosts.sls
├── init.sls
├── repo.sls
└── resolv.sls
├── coreseek
├── files
│ └── sphinx.conf
└── init.sls
├── haproxy
├── files
│ └── etc
│ │ └── haproxy
│ │ └── haproxy.cfg
└── init.sls
├── iptables
├── init.sls
└── iptables
├── keepalived
├── files
│ └── etc
│ │ └── keepalived
│ │ └── keepalived.conf
└── init.sls
├── limits
├── files
│ └── etc
│ │ └── security
│ │ └── limits.d
│ │ └── limits.conf
└── init.sls
├── memcached
├── files
│ └── etc
│ │ ├── sysconfig
│ │ └── memcached
│ │ └── zabbix
│ │ ├── ExternalScripts
│ │ └── zabbix_memcached_check.sh
│ │ └── zabbix_agentd.conf.d
│ │ └── memcached.conf
├── init.sls
├── install.sls
└── monitor.sls
├── mirrors
├── files
│ ├── data1
│ │ └── vhosts
│ │ │ └── mirrors.mall.com
│ │ │ ├── mall
│ │ │ └── 6
│ │ │ │ └── x86_64
│ │ │ │ ├── coreseek-4.1b-1.el6.x86_64.rpm
│ │ │ │ ├── epel-release-6-8.noarch.rpm
│ │ │ │ ├── mmseg-3.2.14-1.el6.x86_64.rpm
│ │ │ │ ├── python-libcloud-0.14.1-1.x86_64.rpm
│ │ │ │ ├── redis-2.8.9-1.el6.remi.x86_64.rpm
│ │ │ │ ├── salt-2014.1.4-1.el6.noarch.rpm
│ │ │ │ ├── salt-master-2014.1.4-1.el6.noarch.rpm
│ │ │ │ ├── salt-minion-2014.1.4-1.el6.noarch.rpm
│ │ │ │ └── sphinx-2.1.8-1.rhel6.x86_64.rpm
│ │ │ └── mysql
│ │ │ └── 6
│ │ │ └── x86_64
│ │ │ ├── MySQL-client-5.5.37-1.el6.x86_64.rpm
│ │ │ ├── MySQL-devel-5.5.37-1.el6.x86_64.rpm
│ │ │ ├── MySQL-embedded-5.5.37-1.el6.x86_64.rpm
│ │ │ ├── MySQL-server-5.5.37-1.el6.x86_64.rpm
│ │ │ ├── MySQL-shared-5.5.37-1.el6.x86_64.rpm
│ │ │ ├── MySQL-shared-compat-5.5.37-1.el6.x86_64.rpm
│ │ │ └── MySQL-test-5.5.37-1.el6.x86_64.rpm
│ └── etc
│ │ └── httpd
│ │ └── conf.d
│ │ └── mirrors.mall.com.conf
└── init.sls
├── mysql
├── files
│ ├── etc
│ │ ├── my.cnf
│ │ ├── my_slave.cnf
│ │ ├── salt
│ │ │ └── minion.d
│ │ │ │ └── mysql.conf
│ │ ├── yum.repo.d
│ │ │ └── mysql.repo
│ │ └── zabbix
│ │ │ └── zabbix_agentd.conf.d
│ │ │ └── userparameter_mysql.conf
│ └── var
│ │ └── lib
│ │ └── zabbix
│ │ └── .my.cnf
├── init.sls
├── monitor.sls
├── repl.txt
└── server.sls
├── nfs
├── client.sls
├── files
│ └── etc
│ │ └── exports
└── init.sls
├── nginx
├── files
│ └── etc
│ │ ├── nginx
│ │ ├── conf.d
│ │ │ ├── fastcgi_params_mall
│ │ │ ├── static.mall.com.conf
│ │ │ ├── status.conf
│ │ │ └── www.mall.com.conf
│ │ └── nginx.conf
│ │ ├── php-fpm.conf
│ │ ├── php-fpm.d
│ │ └── www.conf
│ │ ├── php.d
│ │ ├── bcmath.ini
│ │ ├── curl.ini
│ │ ├── dom.ini
│ │ ├── fileinfo.ini
│ │ ├── gd.ini
│ │ ├── json.ini
│ │ ├── mbstring.ini
│ │ ├── memcache.ini
│ │ ├── memcached.ini
│ │ ├── mysql.ini
│ │ ├── mysqli.ini
│ │ ├── pdo.ini
│ │ ├── pdo_mysql.ini
│ │ ├── pdo_sqlite.ini
│ │ ├── phar.ini
│ │ ├── sqlite3.ini
│ │ ├── wddx.ini
│ │ ├── xmlreader.ini
│ │ ├── xmlwriter.ini
│ │ ├── xsl.ini
│ │ └── zip.ini
│ │ ├── php.ini
│ │ └── zabbix
│ │ ├── ExternalScripts
│ │ └── php-fpm_status.pl
│ │ └── zabbix_agentd.conf.d
│ │ ├── nginx.conf
│ │ └── php_fpm.conf
├── init.sls
├── monitor.sls
├── php.sls
└── server.sls
├── ntp
└── init.sls
├── php
├── files
│ └── etc
│ │ └── php.ini
└── init.sls
├── redis
├── files
│ ├── etc
│ │ ├── redis.conf
│ │ ├── redis.conf.default
│ │ ├── sysctl.d
│ │ │ └── redis.conf
│ │ └── zabbix
│ │ │ └── zabbix_agentd.conf.d
│ │ │ └── redis.conf
│ └── redis-2.8.9-1.el6.remi.x86_64.rpm
├── init.sls
└── monitor.sls
├── roles
├── admin.sls
├── cache.sls
├── common.sls
├── db.sls
├── ha.sls
├── mc.sls
├── search.sls
├── storage.sls
└── web.sls
├── rpcbind
└── init.sls
├── rsync
├── files
│ └── etc
│ │ ├── rsyncd.conf
│ │ └── xinetd.d
│ │ └── rsync
└── init.sls
├── salt
├── files
│ └── etc
│ │ └── salt
│ │ ├── grains
│ │ ├── master
│ │ ├── master.d
│ │ ├── nodegroups.conf
│ │ └── publish.conf
│ │ └── minion
├── master.sls
└── minion.sls
├── sphinx
├── files
│ └── etc
│ │ └── sphinx
│ │ └── sphinx.conf
└── init.sls
├── ssh
├── files
│ └── etc
│ │ └── ssh
│ │ └── sshd_config
└── init.sls
├── svn
├── files
│ ├── conf
│ │ ├── authz
│ │ ├── passwd
│ │ └── svnserve.conf
│ └── etc
│ │ └── sysconfig
│ │ └── svnserve
└── init.sls
├── top.sls
├── users
├── init.sls
├── root.sls
├── sudo.sls
├── user.sls
└── www.sls
├── varnish
├── files
│ └── etc
│ │ ├── sysconfig
│ │ └── varnish
│ │ ├── varnish
│ │ └── default.vcl
│ │ └── zabbix
│ │ └── zabbix_agentd.conf.d
│ │ └── varnish.conf
├── init.sls
└── monitor.sls
└── zabbix
├── agent.sls
├── api.sls
├── files
├── etc
│ ├── zabbix
│ │ ├── api
│ │ │ ├── add_monitors.py
│ │ │ ├── config.yaml
│ │ │ ├── monitors
│ │ │ │ └── minion
│ │ │ └── templates
│ │ │ │ └── zbx_export_templates.xml
│ │ └── web
│ │ │ └── zabbix.conf.php
│ ├── zabbix_agentd.conf
│ └── zabbix_server.conf
└── usr
│ └── lib
│ └── python2.6
│ └── site-packages
│ └── zabbix
│ ├── __init__.py
│ └── zapi.py
├── server.sls
└── web.sls
/pillar/nodes/db1.sls:
--------------------------------------------------------------------------------
1 | mysql:
2 | conf_template: my.cnf
3 | datadir: /data1/mysql
4 | role: mysql-master
5 |
--------------------------------------------------------------------------------
/pillar/nodes/db2.sls:
--------------------------------------------------------------------------------
1 | mysql:
2 | conf_template: my_slave.cnf
3 | datadir: /data1/mysql
4 | role: mysql-slave
5 |
--------------------------------------------------------------------------------
/pillar/nodes/ha1.sls:
--------------------------------------------------------------------------------
1 | keepalived:
2 | notification_email: 'dongliang@mall.com'
3 | notification_email_from: 'haproxy@mall.com'
4 | smtp_server: 127.0.0.1
5 | state: MASTER
6 | priority: 100
7 | auth_type: PASS
8 | auth_pass: mall
9 | virtual_ipaddress_internal: 172.16.100.100
10 | virtual_ipaddress_external: 60.60.60.100
11 |
--------------------------------------------------------------------------------
/pillar/nodes/ha2.sls:
--------------------------------------------------------------------------------
1 | keepalived:
2 | notification_email: 'dongliang@mall.com'
3 | notification_email_from: 'haproxy@mall.com'
4 | smtp_server: 127.0.0.1
5 | state: BACKUP
6 | priority: 99
7 | auth_type: PASS
8 | auth_pass: mall
9 | virtual_ipaddress_internal: 172.16.100.100
10 | virtual_ipaddress_external: 60.60.60.100
11 |
--------------------------------------------------------------------------------
/pillar/nodes/mc1.sls:
--------------------------------------------------------------------------------
1 | redis:
2 | port: 6379
3 | bind: 172.16.100.41
4 | timeout: 300
5 | loglevel: warning
6 | dir: /data1/redis
7 | maxclients: 3000
8 | maxmemory: 128MB
9 |
--------------------------------------------------------------------------------
/pillar/nodes/mc2.sls:
--------------------------------------------------------------------------------
1 | redis:
2 | port: 6379
3 | bind: 172.16.100.42
4 | timeout: 300
5 | loglevel: warning
6 | dir: /data1/redis
7 | master: 172.16.100.41
8 | master_port: 6379
9 | maxclients: 3000
10 | maxmemory: 128MB
11 |
--------------------------------------------------------------------------------
/pillar/roles/admin.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - svn
3 | - zabbix.api
4 |
5 | hostgroup: admin
6 | limit_users:
7 | root:
8 | limit_hard: 65535
9 | limit_soft: 65535
10 | limit_type: nofile
11 | apache:
12 | Listen: 80
13 |
--------------------------------------------------------------------------------
/pillar/roles/cache.sls:
--------------------------------------------------------------------------------
1 | hostgroup: cache
2 | varnish_static_01: 172.16.100.21
3 | varnish_static_02: 172.16.100.22
4 | varnish_static_03: 172.16.100.23
5 | limit_users:
6 | varnish:
7 | limit_hard: 65535
8 | limit_soft: 65535
9 | limit_type: nofile
10 |
--------------------------------------------------------------------------------
/pillar/roles/common.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - users
3 | - salt.minion
4 | - zabbix.agent
5 | - schedule.highstate
6 |
--------------------------------------------------------------------------------
/pillar/roles/db.sls:
--------------------------------------------------------------------------------
1 | hostgroup: db
2 | limit_users:
3 | mysql:
4 | limit_hard: 65535
5 | limit_soft: 65535
6 | limit_type: nofile
7 |
--------------------------------------------------------------------------------
/pillar/roles/ha.sls:
--------------------------------------------------------------------------------
1 | hostgroup: ha
2 | limit_users:
3 | haproxy:
4 | limit_hard: 65535
5 | limit_soft: 65535
6 | limit_type: nofile
7 |
--------------------------------------------------------------------------------
/pillar/roles/mc.sls:
--------------------------------------------------------------------------------
1 | hostgroup: mc
2 | limit_users:
3 | memcached:
4 | limit_hard: 65535
5 | limit_soft: 65535
6 | limit_type: nofile
7 |
--------------------------------------------------------------------------------
/pillar/roles/search.sls:
--------------------------------------------------------------------------------
1 | hostgroup: search
2 | limit_users:
3 | sphinx:
4 | limit_hard: 65535
5 | limit_soft: 65535
6 | limit_type: nofile
7 |
--------------------------------------------------------------------------------
/pillar/roles/storage.sls:
--------------------------------------------------------------------------------
1 | hostgroup: storage
2 | exports:
3 | /data1/share: '172.16.100.*(rw,async,all_squash,anonuid=65534,anongid=65534) *.grid.mall.com(rw,async,all_squash,anonuid=65534,anongid=65534)'
4 |
--------------------------------------------------------------------------------
/pillar/roles/web.sls:
--------------------------------------------------------------------------------
1 | hostgroup: web
2 | vhostsdir: /data1/vhosts
3 | vhostscachedir: /data1/cache
4 | logdir: /data1/logs
5 | vhosts:
6 | - www.mall.com
7 | - static.mall.com
8 | limit_users:
9 | nginx:
10 | limit_hard: 65535
11 | limit_soft: 65535
12 | limit_type: nofile
13 | mounts:
14 | /data1/vhosts/static.mall.com/htdocs:
15 | device: 172.16.100.71:/data1/share
16 | fstype: nfs
17 | mkmnt: True
18 | opts: async,noatime,noexec,nosuid,soft,timeo=3,retrans=3,intr,retry=3,rsize=16384,wsize=16384
19 |
--------------------------------------------------------------------------------
/pillar/salt/minion.sls:
--------------------------------------------------------------------------------
1 | mine_functions:
2 | test.ping: []
3 | grains.item: [id, hostgroup, roles, ipv4]
4 |
--------------------------------------------------------------------------------
/pillar/schedule/highstate.sls:
--------------------------------------------------------------------------------
1 | schedule:
2 | highstate:
3 | function: state.highstate
4 | minutes: 30
5 |
--------------------------------------------------------------------------------
/pillar/svn/init.sls:
--------------------------------------------------------------------------------
1 | repodir: /data1/svn
2 |
--------------------------------------------------------------------------------
/pillar/top.sls:
--------------------------------------------------------------------------------
base:
  '*':
    - roles.common
  'admin.grid.mall.com':
    - roles.admin
  # Was 'ha.grid.mall.com', which matches no minion -- the HA hosts are
  # ha1/ha2.grid.mall.com (see salt/base/hosts.sls and pillar/nodes/),
  # so the roles.ha pillar was never applied.  Glob like the other roles.
  'ha*.grid.mall.com':
    - roles.ha
  'web*.grid.mall.com':
    - roles.web
  'cache*.grid.mall.com':
    - roles.cache
  'mc*.grid.mall.com':
    - roles.mc
  'db*.grid.mall.com':
    - roles.db
  'search*.grid.mall.com':
    - roles.search
  'storage*.grid.mall.com':
    - roles.storage

  # per-node pillar overrides
  'ha1.grid.mall.com':
    - nodes.ha1
  'ha2.grid.mall.com':
    - nodes.ha2
  'mc1.grid.mall.com':
    - nodes.mc1
  'mc2.grid.mall.com':
    - nodes.mc2
  'db1.grid.mall.com':
    - nodes.db1
  'db2.grid.mall.com':
    - nodes.db2

--------------------------------------------------------------------------------
/pillar/users/init.sls:
--------------------------------------------------------------------------------
1 | users:
2 | dongliang:
3 | group: dongliang
4 | uid: 1000
5 | gid: 1000
6 | fullname: Shi Dongliang
7 | password: $6$BZpX5dWZ$./TKqv8ZL3eLNAAmuiGWeT0SvwvpPtk5Nhgf8.xeyFd5XVMJ0QRh8HmiJOpJi7qPCo.mfXIIrbQSGdAJVmZxW.
8 | shell: /bin/bash
9 | sudo: true
10 | ssh_auth:
11 | key: AAAAB3NzaC1yc2EAAAABIwAAAQEAmCqNHfK6VACeXsAnRfzq3AiSN+U561pSF8qoLOh5Ez38UqtsFLBaFdC/pTTxGQBYhwO2KkgWL9TtWOEp+LxYLskXUeG24pIe8y8r+edHC8fhmHGXWXQVmZwRERl+ygTdFt3ojhDu1FYA0WmKU07KgAqUrvJW1zwJsa/DaXExfwSzALAgm2jwx68hP9CO1msTAhtElUJWeLTlQTZr0ZGWvmlKzcwqxDX58HpA69qgccaOzO5n5qsQYXx8JmnCV18XW9bkxMvn5q8Y9o/to+BQ1440hKcsm9rNpJlIrnQaIbMZs/Sy2QnT+bVx9JyucDvaVJmsfJ+qZlfnhdRkm6eosw==
12 | comment: dongliang@mall.com
13 | jiazhu:
14 | group: jiazhu
15 | uid: 1001
16 | gid: 1001
17 | fullname: Niu Jiazhu
18 | password: $6$YMt9NU8n$hMrWKJ9RGhQiswwnLZB3chY8/SUe7WBkuMA.vLOx.NpuK1wunPH6sxrJwZcnKcRcv5a314Wnr/mjODFLJdYUb.
19 | shell: /bin/bash
20 | sudo: true
21 |
--------------------------------------------------------------------------------
/pillar/zabbix/agent.sls:
--------------------------------------------------------------------------------
1 | zabbix-agent:
2 | Zabbix_Server: 172.16.100.81
3 |
--------------------------------------------------------------------------------
/pillar/zabbix/api.sls:
--------------------------------------------------------------------------------
1 | zabbix-api:
2 | Zabbix_URL: http://172.16.100.81/zabbix
3 | Zabbix_User: admin
4 | Zabbix_Pass: zabbix
5 | Monitors_DIR: /etc/zabbix/api/monitors/
6 | Templates_DIR: /etc/zabbix/api/templates/
7 |
8 | zabbix-base-templates:
9 | {% if grains['os_family'] == 'RedHat' or grains['os_family'] == 'Debian' %}
10 | - 'Template OS Linux'
11 | {% endif %}
12 |
13 | zabbix-templates:
14 | memcached: 'Template App Memcached'
15 | zabbix-server: 'Template App Zabbix Server'
16 | web-server: 'Template App HTTP Service'
17 | mysql: 'Template App MySQL'
18 | mysql-master: 'Template App MySQL'
19 | mysql-slave: 'Template App MySQL Slave'
20 | php-fpm: 'Template App PHP FPM'
21 | nginx: 'Template App Nginx'
22 | varnish: 'Template App Varnish'
23 | redis: 'Template App Redis'
24 |
--------------------------------------------------------------------------------
/salt/_grains/roles.py:
--------------------------------------------------------------------------------
import os.path


def roles(roles_file="/etc/salt/roles"):
    '''
    Define the host "roles" grain.

    Reads one role name per line from ``roles_file`` (default
    /etc/salt/roles, the file the states append role names to).

    Always returns ``{'roles': [...]}``; the list is empty when the file
    does not exist.  (The original returned ``None`` in that case, which
    Salt treats as a broken grains module.)
    '''
    roles_list = []

    if os.path.isfile(roles_file):
        # 'with' guarantees the descriptor is closed; the original opened
        # the file and never closed it.
        with open(roles_file, "r") as roles_fd:
            for line in roles_fd:
                # rstrip('\n') instead of the original [:-1] slice, which
                # chopped the last character of the final role whenever the
                # file had no trailing newline.
                role = line.rstrip('\n')
                if role:  # skip blank lines instead of adding '' roles
                    roles_list.append(role)
    return {'roles': roles_list}


if __name__ == "__main__":
    # print(...) form works on both Python 2 and 3 (original used the
    # Py2-only print statement).
    print(roles())
--------------------------------------------------------------------------------
/salt/_runners/publish.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
'''
Functions to publish code on the master
'''

# Import salt libs
import salt.client
import salt.output


def push(project, output=True):
    '''
    publish code to web server.

    CLI Example:

    .. code-block:: bash

        salt-run publish.push project

    Runner flow (all configuration comes from the master config ``__opts__``):
      1. Run ``svn.checkout`` of __opts__['projects'][project]['remote']
         into __opts__['publish']['cwd'] on the minion matched by
         __opts__['publish']['master'].
      2. rsync the checked-out trunk to every entry in
         __opts__['projects'][project]['target'].

    project -- key into __opts__['projects'] (holds 'remote' URL and
               'target' rsync destinations)
    output  -- when True, pretty-print each step's result via salt.output

    NOTE(review): returns only the result dict of the LAST rsync target
    (ret is rebound inside the loop) -- presumably intentional since each
    step is displayed as it happens; confirm before relying on the return.
    '''

    client = salt.client.LocalClient(__opts__['conf_file'])
    # Step 1: check out the code on the publish-master minion.
    ret = client.cmd(__opts__['publish']['master'],
        'svn.checkout',
        [
            __opts__['publish']['cwd'],
            __opts__['projects'][project]['remote']
        ],
        kwarg={
            'target':project,
            'username':__opts__['svn']['username'],
            'password':__opts__['svn']['password']
        }
    )

    # client.cmd returns an empty dict when no minion replied in time.
    if ret:
        msg = 'URL: %s\n%s' %(__opts__['projects'][project]['remote'], ret[__opts__['publish']['master']])
        ret = {'Check out code': msg}
    else:
        ret = {'Check out code': 'Timeout, try again.'}
    if output:
        salt.output.display_output(ret, '', __opts__)

    # Step 2: rsync the exported trunk to each target.  Targets look like
    # 'host:/path' (hence the split(':') for the display title below).
    for target in __opts__['projects'][project]['target']:
        cmd = '/usr/bin/rsync -avz --exclude=".svn" %s/%s/trunk/* %s/' %(__opts__['publish']['cwd'], project, target)
        ret[target] = client.cmd(__opts__['publish']['master'],
            'cmd.run',
            [
                cmd,
            ],
        )

        title = '\nSending file to %s' %target.split(':')[0]
        # ret is deliberately rebound to this step's result for display.
        ret = {title: ret[target][__opts__['publish']['master']]}
        if output:
            salt.output.display_output(ret, '', __opts__)

    return ret
--------------------------------------------------------------------------------
/salt/_runners/publish.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/_runners/publish.pyc
--------------------------------------------------------------------------------
/salt/apache/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.minion
3 |
4 | apache:
5 | pkg.installed:
6 | - name: httpd
7 | file.managed:
8 | - name: /etc/httpd/conf/httpd.conf
9 | - source: salt://apache/files/etc/httpd/conf/httpd.conf
10 | - template: jinja
11 | - require:
12 | - pkg: apache
13 | service.running:
14 | - name: httpd
15 | - enable: True
16 | - watch:
17 | - pkg: apache
18 | - file: apache
19 |
20 | httpd-conf.d:
21 | file.directory:
22 | - name: /etc/httpd/conf.d/
23 | - watch_in:
24 | - service: apache
25 |
26 | web-server-role:
27 | file.append:
28 | - name: /etc/salt/roles
29 | - text:
30 | - 'web-server'
31 | - require:
32 | - file: roles
33 | - service: apache
34 | - service: salt-minion
35 | - watch_in:
36 | - module: sync_grains
37 |
38 |
--------------------------------------------------------------------------------
/salt/base/crons.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - ntp
3 |
4 | '/usr/sbin/ntpdate 1.cn.pool.ntp.org 1.asia.pool.ntp.org':
5 | cron.present:
6 | - user: root
7 | - minute: 0
8 | - hour: 2
9 |
--------------------------------------------------------------------------------
/salt/base/files/mall.repo:
--------------------------------------------------------------------------------
1 | [mall]
2 | name = mall $releasever - $basearch
3 | baseurl = http://172.16.100.81/mirrors/mall/$releasever/$basearch/
4 | gpgcheck=0
5 | priority=1
6 |
7 | [epel]
8 | name=Extra Packages for Enterprise Linux 6 - $basearch
9 | #baseurl=http://download.fedoraproject.org/pub/epel/6/$basearch
10 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-6&arch=$basearch
11 | failovermethod=priority
12 | enabled=1
13 | gpgcheck=0
14 | priority=10
15 |
16 | [nginx]
17 | name = nginx $releasever - $basearch
18 | baseurl = http://nginx.org/packages/rhel/$releasever/$basearch/
19 | gpgcheck=0
20 | priority=1
21 |
22 | [varnish]
23 | name = varnish $releasever - $basearch
24 | baseurl = http://repo.varnish-cache.org/redhat/varnish-3.0/el$releasever/$basearch/
25 | gpgcheck=0
26 | priority=1
27 |
--------------------------------------------------------------------------------
/salt/base/files/resolv.conf:
--------------------------------------------------------------------------------
1 | nameserver 202.106.196.115
2 |
--------------------------------------------------------------------------------
/salt/base/hosts.sls:
--------------------------------------------------------------------------------
1 | admin.grid.mall.com:
2 | host.present:
3 | - ip: 172.16.100.81
4 | - order: 1
5 | - names:
6 | - admin.grid.mall.com
7 |
8 | ha1.grid.mall.com:
9 | host.present:
10 | - ip: 172.16.100.11
11 | - order: 1
12 | - names:
13 | - ha1.grid.mall.com
14 |
15 | ha2.grid.mall.com:
16 | host.present:
17 | - ip: 172.16.100.12
18 | - order: 1
19 | - names:
20 | - ha2.grid.mall.com
21 |
22 | web1.grid.mall.com:
23 | host.present:
24 | - ip: 172.16.100.21
25 | - order: 1
26 | - names:
27 | - web1.grid.mall.com
28 |
29 | web2.grid.mall.com:
30 | host.present:
31 | - ip: 172.16.100.22
32 | - order: 1
33 | - names:
34 | - web2.grid.mall.com
35 |
web3.grid.mall.com:
  host.present:
    - ip: 172.16.100.23
    - order: 1
    - names:
      # was '- .grid.mall.com' -- truncated hostname; each stanza aliases
      # its own FQDN (see web1/web2 above)
      - web3.grid.mall.com
42 |
43 | cache1.grid.mall.com:
44 | host.present:
45 | - ip: 172.16.100.31
46 | - order: 1
47 | - names:
48 | - cache1.grid.mall.com
49 |
50 | cache2.grid.mall.com:
51 | host.present:
52 | - ip: 172.16.100.32
53 | - order: 1
54 | - names:
55 | - cache2.grid.mall.com
56 |
57 | mc1.grid.mall.com:
58 | host.present:
59 | - ip: 172.16.100.41
60 | - order: 1
61 | - names:
62 | - mc1.grid.mall.com
63 |
64 | mc2.grid.mall.com:
65 | host.present:
66 | - ip: 172.16.100.42
67 | - order: 1
68 | - names:
69 | - mc2.grid.mall.com
70 |
71 | db1.grid.mall.com:
72 | host.present:
73 | - ip: 172.16.100.51
74 | - order: 1
75 | - names:
76 | - db1.grid.mall.com
77 |
78 | db2.grid.mall.com:
79 | host.present:
80 | - ip: 172.16.100.52
81 | - order: 1
82 | - names:
83 | - db2.grid.mall.com
84 |
85 | search1.grid.mall.com:
86 | host.present:
87 | - ip: 172.16.100.61
88 | - order: 1
89 | - names:
90 | - search1.grid.mall.com
91 |
search2.grid.mall.com:
  host.present:
    - ip: 172.16.100.62
    - order: 1
    - names:
      # was 'search.grid.mall.com' -- missing the '2'; each stanza aliases
      # its own FQDN (see search1 above)
      - search2.grid.mall.com
98 |
99 | storage1.grid.mall.com:
100 | host.present:
101 | - ip: 172.16.100.71
102 | - order: 1
103 | - names:
104 | - storage1.grid.mall.com
105 |
106 |
--------------------------------------------------------------------------------
/salt/base/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - base.hosts
3 | - base.repo
4 | - base.crons
5 | # - base.epel
6 |
--------------------------------------------------------------------------------
/salt/base/repo.sls:
--------------------------------------------------------------------------------
1 | /etc/yum.repos.d/mall.repo:
2 | file.managed:
3 | - source: salt://base/files/mall.repo
4 | - user: root
5 | - group: root
6 | - mode: 644
7 | - order: 1
8 | cmd.wait:
9 | - name: yum clean all
10 | - watch:
11 | - file: /etc/yum.repos.d/mall.repo
12 |
--------------------------------------------------------------------------------
/salt/base/resolv.sls:
--------------------------------------------------------------------------------
1 | /etc/resolv.conf:
2 | file.managed:
3 | - source: salt://base/files/resolv.conf
4 | - user: root
5 | - group: root
6 | - mode: 644
7 | - template: jinja
8 |
--------------------------------------------------------------------------------
/salt/coreseek/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 |
5 | coreseek:
6 | pkg.installed:
7 | - name: coreseek
8 | service.running:
9 | - name: searchd
10 | - enable: True
11 | - require:
12 | - pkg: coreseek
13 | - watch:
14 | - pkg: coreseek
15 | - file: /usr/local/coreseek/etc/sphinx.conf
16 |
17 | /usr/local/coreseek/etc/sphinx.conf:
18 | file.managed:
19 | - source: salt://coreseek/files/sphinx.conf
20 | - template: jinja
21 | - user: root
22 | - group: root
23 | - mode: 644
24 |
25 | coreseek-role:
26 | file.append:
27 | - name: /etc/salt/roles
28 | - text:
29 | - 'coreseek'
30 | - require:
31 | - file: roles
32 | - service: coreseek
33 | - service: salt-minion
34 | - watch_in:
35 | - module: sync_grains
36 |
--------------------------------------------------------------------------------
/salt/haproxy/files/etc/haproxy/haproxy.cfg:
--------------------------------------------------------------------------------
1 | # File Managed by Salt
2 | global
3 | maxconn 20480
4 | user haproxy
5 | group haproxy
6 | daemon
7 | nbproc 4
8 | pidfile /var/run/haproxy.pid
9 |
10 | defaults
11 | log global
12 | option dontlognull
13 | option redispatch
14 | log 127.0.0.1 local0 err
15 | timeout connect 10000
16 | timeout client 120000
17 | timeout server 120000
18 | maxconn 20480
19 | retries 3
20 |
21 | frontend main *:80
22 | mode http
23 | option httpclose
24 | option httplog
25 | option forwardfor
26 |
27 | acl host_static hdr_end(host) -i static.mall.com
28 | use_backend static if host_static
29 |
30 | default_backend dynamic
31 |
32 | listen haproxy-status {{grains['ipv4'][1]}}:8080
33 | mode http
34 | option httplog
35 | stats uri /haproxy-status
36 | stats realm Global\ statistics
37 | stats auth mall:1mall.com$
38 |
39 | backend dynamic
40 | mode http
41 | balance roundrobin
42 | server dynamic-21 172.16.100.21:80 weight 1 check inter 10000
43 | server dynamic-22 172.16.100.22:80 weight 1 check inter 10000
44 | server dynamic-23 172.16.100.23:80 weight 1 check inter 10000
45 |
46 | backend static
47 | mode http
48 | balance uri len 128 depth 8
49 | server static-31 172.16.100.31:8080 maxconn 2000 check inter 10000 rise 3 fall 3
50 | server static-32 172.16.100.32:8080 maxconn 2000 check inter 10000 rise 3 fall 3
51 |
--------------------------------------------------------------------------------
/salt/haproxy/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 |
5 | haproxy:
6 | pkg.installed:
7 | - name: haproxy
8 | service.running:
9 | - name: haproxy
10 | - enable: True
11 | - require:
12 | - pkg: haproxy
13 | - watch:
14 | - pkg: haproxy
15 | - file: /etc/haproxy/haproxy.cfg
16 |
17 | /etc/haproxy/haproxy.cfg:
18 | file.managed:
19 | - source: salt://haproxy/files/etc/haproxy/haproxy.cfg
20 | - template: jinja
21 | - user: root
22 | - group: root
23 | - mode: 644
24 |
25 | haproxy-role:
26 | file.append:
27 | - name: /etc/salt/roles
28 | - text:
29 | - 'haproxy'
30 | - require:
31 | - file: roles
32 | - service: haproxy
33 | - service: salt-minion
34 | - watch_in:
35 | - module: sync_grains
36 |
--------------------------------------------------------------------------------
/salt/iptables/init.sls:
--------------------------------------------------------------------------------
1 | iptables:
2 | pkg:
3 | - installed
4 | service:
5 | - running
6 | - watch:
7 | - pkg: iptables
8 | - file: iptables
9 | file:
10 | - managed
11 | - source: salt://iptables/iptables
12 | {% if grains['os'] == 'CentOS' or grains['os'] == 'Fedora' %}
13 | - name: /etc/sysconfig/iptables
14 | {% elif grains['os'] == 'Arch' %}
15 | - name: /etc/conf.d/iptables
16 | {% endif %}
17 |
--------------------------------------------------------------------------------
/salt/iptables/iptables:
--------------------------------------------------------------------------------
1 | # Generated by iptables-save v1.4.7 on Sun Apr 27 04:07:13 2014
2 | *filter
3 | :INPUT ACCEPT [193:15604]
4 | :FORWARD ACCEPT [0:0]
5 | :OUTPUT ACCEPT [159:28231]
6 | COMMIT
7 | # Completed on Sun Apr 27 04:07:13 2014
8 |
--------------------------------------------------------------------------------
/salt/keepalived/files/etc/keepalived/keepalived.conf:
--------------------------------------------------------------------------------
1 | ! File Managed by Salt
2 |
3 | global_defs {
4 | notification_email {
5 | {{pillar['keepalived']['notification_email']}}
6 | }
7 | notification_email_from {{pillar['keepalived']['notification_email_from']}}
8 | smtp_server {{pillar['keepalived']['smtp_server']}}
9 | smtp_connect_timeout 30
10 | router_id LVS_DEVEL
11 | }
12 | vrrp_script chk_haproxy {
13 | script "killall -0 haproxy"
14 | interval 2
15 | weight 2
16 | }
17 | vrrp_instance VI_1 {
18 | state {{pillar['keepalived']['state']}}
19 | interface eth0
20 | virtual_router_id 51
21 | priority {{pillar['keepalived']['priority']}}
22 | advert_int 1
23 | authentication {
24 | auth_type {{pillar['keepalived']['auth_type']}}
25 | auth_pass {{pillar['keepalived']['auth_pass']}}
26 | }
27 | virtual_ipaddress {
28 | {{pillar['keepalived']['virtual_ipaddress_internal']}}
29 | }
30 | track_script {
31 | chk_haproxy
32 | }
33 | }
34 |
35 | vrrp_instance VI_2 {
36 | state {{pillar['keepalived']['state']}}
37 | interface eth1
38 | virtual_router_id 52
39 | priority {{pillar['keepalived']['priority']}}
40 | advert_int 1
41 | authentication {
42 | auth_type {{pillar['keepalived']['auth_type']}}
43 | auth_pass {{pillar['keepalived']['auth_pass']}}
44 | }
45 | virtual_ipaddress {
46 | {{pillar['keepalived']['virtual_ipaddress_external']}}
47 | }
48 | track_script {
49 | chk_haproxy
50 | }
51 | }
52 |
--------------------------------------------------------------------------------
/salt/keepalived/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 | - haproxy
5 |
6 | keepalived:
7 | pkg.installed:
8 | - name: keepalived
9 | service.running:
10 | - name: keepalived
11 | - enable: True
12 | - watch:
13 | - pkg: keepalived
14 | - file: /etc/keepalived/keepalived.conf
15 | - require:
16 | - service: haproxy
17 |
18 | /etc/keepalived/keepalived.conf:
19 | file.managed:
20 | - source: salt://keepalived/files/etc/keepalived/keepalived.conf
21 | - template: jinja
22 | - user: root
23 | - group: root
24 | - mode: 644
25 |
--------------------------------------------------------------------------------
/salt/limits/files/etc/security/limits.d/limits.conf:
--------------------------------------------------------------------------------
1 | {% if hard -%}
2 | {{ user }} hard {{ limit_type }} {{ hard }}
3 | {% endif -%}
4 |
5 | {% if soft -%}
6 | {{ user }} soft {{ limit_type }} {{ soft }}
7 | {% endif -%}
8 |
--------------------------------------------------------------------------------
/salt/limits/init.sls:
--------------------------------------------------------------------------------
1 | {% for user, limit in salt['pillar.get']('limit_users', {}).iteritems() %}
2 | {% if user %}
3 |
4 | limits-{{user}}-{{limit['limit_type']}}:
5 | file.managed:
6 | - source: salt://limits/files/etc/security/limits.d/limits.conf
7 | - template: jinja
8 | - defaults:
9 | user: {{user}}
10 | hard: {{limit['limit_hard']}}
11 | soft: {{limit['limit_soft']}}
12 | limit_type: {{limit['limit_type']}}
13 | {% if grains['os'] == 'CentOS' or grains['os'] == 'Fedora' %}
14 | - name: /etc/security/limits.d/{{user}}_{{limit['limit_type']}}.conf
15 | {% endif %}
16 | {% endif %}
17 | {% endfor %}
18 |
--------------------------------------------------------------------------------
/salt/memcached/files/etc/sysconfig/memcached:
--------------------------------------------------------------------------------
1 | PORT="11211"
2 | USER="memcached"
3 | MAXCONN="1024"
4 | CACHESIZE="64"
5 | OPTIONS=""
6 |
--------------------------------------------------------------------------------
/salt/memcached/files/etc/zabbix/ExternalScripts/zabbix_memcached_check.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | ##################################
3 | # Zabbix monitoring script
4 | #
5 | # memcached:
6 | # - anything available via memcached-tool stats
7 | #
8 | # Allowed parameters combinaisons:
9 | # - param
10 | # - param + listening IP
11 | # - param + listening IP + port
12 | ##################################
13 | # Contact:
14 | # vincent.viallet@gmail.com
15 | ##################################
16 | # ChangeLog:
17 | # 20100922 VV initial creation
18 | ##################################
19 |
20 | # Zabbix requested parameter
21 | ZBX_REQ_DATA="$1"
22 | ZBX_REQ_DATA_HOST="$2"
23 | ZBX_REQ_DATA_PORT="$3"
24 |
25 | # Memcached defaults
26 | MEMCACHED_DEFAULT_HOST="127.0.0.1"
27 | MEMCACHED_DEFAULT_PORT="11211"
28 | MEMCACHED_TOOL_BIN="/usr/bin/memcached-tool"
29 |
30 | #
31 | # Error handling:
32 | # - need to be displayable in Zabbix (avoid NOT_SUPPORTED)
33 | # - items need to be of type "float" (allow negative + float)
34 | #
35 | ERROR_NO_ACCESS_FILE="-0.9900"
36 | ERROR_NO_ACCESS="-0.9901"
37 | ERROR_WRONG_PARAM="-0.9902"
38 | ERROR_DATA="-0.9903" # either can not connect / bad host / bad port
39 |
40 | # Handle host and port if non-default
41 | # Allowed parameters combinations:
42 | # - param
43 | # - param + listening IP
44 | # - param + listening IP + port
45 | if [ ! -z "$ZBX_REQ_DATA_HOST" ]; then
46 | HOST="$ZBX_REQ_DATA_HOST"
47 | if [ ! -z "$ZBX_REQ_DATA_PORT" ]; then
48 | PORT="$ZBX_REQ_DATA_PORT"
49 | else
50 | PORT="$MEMCACHED_DEFAULT_PORT"
51 | fi
52 | else
53 | HOST="$MEMCACHED_DEFAULT_HOST"
54 | PORT="$MEMCACHED_DEFAULT_PORT"
55 | fi
56 |
57 | # save the memcached stats in a variable for future parsing
58 | MEMCACHED_STATS=$($MEMCACHED_TOOL_BIN $HOST:$PORT stats 2> /dev/null )
59 |
60 | # error during retrieve
61 | if [ $? -ne 0 ]; then
62 | echo $ERROR_DATA
63 | exit 1
64 | fi
65 |
66 | #
67 | # Extract data from memcached stats
68 | #
69 | MEMCACHED_VALUE=$(echo "$MEMCACHED_STATS" | grep -E "^ .* $ZBX_REQ_DATA " | awk '{print $2}')
70 |
71 | if [ ! -z "$MEMCACHED_VALUE" ]; then
72 | echo $MEMCACHED_VALUE
73 | else
74 | echo $ERROR_WRONG_PARAM
75 | exit 1
76 | fi
77 |
78 | exit 0
79 |
--------------------------------------------------------------------------------
/salt/memcached/files/etc/zabbix/zabbix_agentd.conf.d/memcached.conf:
--------------------------------------------------------------------------------
1 | ##Memcached status monitor
2 | UserParameter=memcached[*], /etc/zabbix/ExternalScripts/zabbix_memcached_check.sh $1 $2 $3
3 |
--------------------------------------------------------------------------------
/salt/memcached/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - memcached.install
3 | - memcached.monitor
4 |
--------------------------------------------------------------------------------
/salt/memcached/install.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 |
5 | memcached:
6 | pkg.installed:
7 | - name: memcached
8 | service.running:
9 | - name: memcached
10 | - enable: True
11 | - watch:
12 | - pkg: memcached
13 | - file: /etc/sysconfig/memcached
14 |
15 | /etc/sysconfig/memcached:
16 | file.managed:
17 | - source: salt://memcached/files/etc/sysconfig/memcached
18 | - user: root
19 | - group: root
20 | - mode: 644
21 |
22 | memcached-role:
23 | file.append:
24 | - name: /etc/salt/roles
25 | - text:
26 | - 'memcached'
27 | - require:
28 | - file: roles
29 | - service: memcached
30 | - service: salt-minion
31 | - watch_in:
32 | - module: sync_grains
33 |
--------------------------------------------------------------------------------
/salt/memcached/monitor.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - memcached.install
4 |
5 | memcached-monitor-script:
6 | file.managed:
7 | - name: /etc/zabbix/ExternalScripts/zabbix_memcached_check.sh
8 | - source: salt://memcached/files/etc/zabbix/ExternalScripts/zabbix_memcached_check.sh
9 | - user: root
10 | - group: root
11 | - mode: 755
12 | - require:
13 | - service: memcached
14 |       - cmd: memcached-monitor-script
15 | cmd.run:
16 | - name: mkdir -p /etc/zabbix/ExternalScripts
17 | - unless: test -d /etc/zabbix/ExternalScripts
18 |
19 | memcached-monitor-config:
20 | file.managed:
21 | - name: /etc/zabbix/zabbix_agentd.conf.d/memcached.conf
22 | - source: salt://memcached/files/etc/zabbix/zabbix_agentd.conf.d/memcached.conf
23 | - require:
24 | - file: memcached-monitor-script
25 | - watch_in:
26 | - service: zabbix-agent
27 |
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/coreseek-4.1b-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/coreseek-4.1b-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/epel-release-6-8.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/epel-release-6-8.noarch.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/mmseg-3.2.14-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/mmseg-3.2.14-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/python-libcloud-0.14.1-1.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/python-libcloud-0.14.1-1.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/redis-2.8.9-1.el6.remi.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/redis-2.8.9-1.el6.remi.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/salt-2014.1.4-1.el6.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/salt-2014.1.4-1.el6.noarch.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/salt-master-2014.1.4-1.el6.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/salt-master-2014.1.4-1.el6.noarch.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/salt-minion-2014.1.4-1.el6.noarch.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/salt-minion-2014.1.4-1.el6.noarch.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/sphinx-2.1.8-1.rhel6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mall/6/x86_64/sphinx-2.1.8-1.rhel6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-client-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-client-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-devel-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-devel-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-embedded-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-embedded-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-server-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-server-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-shared-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-shared-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-shared-compat-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-shared-compat-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-test-5.5.37-1.el6.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/mirrors/files/data1/vhosts/mirrors.mall.com/mysql/6/x86_64/MySQL-test-5.5.37-1.el6.x86_64.rpm
--------------------------------------------------------------------------------
/salt/mirrors/files/etc/httpd/conf.d/mirrors.mall.com.conf:
--------------------------------------------------------------------------------
1 | Alias /mirrors /data1/vhosts/mirrors.mall.com
2 | 
3 | <Directory /data1/vhosts/mirrors.mall.com>
4 |     Options FollowSymLinks
5 |     AllowOverride None
6 | 
7 |     <IfModule mod_authz_core.c>
8 |         # Apache 2.4
9 |         Require all granted
10 |     </IfModule>
11 | 
12 |     <IfModule !mod_authz_core.c>
13 |         # Apache 2.2
14 |         Order allow,deny
15 |         Allow from all
16 |     </IfModule>
17 | 
18 | </Directory>
19 | 
20 | 
--------------------------------------------------------------------------------
/salt/mirrors/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - apache
3 |
4 | createrepo:
5 | pkg.installed:
6 | - name: createrepo
7 |
8 | /etc/httpd/conf.d/mirrors.mall.com.conf:
9 | file.managed:
10 | - source: salt://mirrors/files/etc/httpd/conf.d/mirrors.mall.com.conf
11 | - user: root
12 | - group: root
13 | - mode: 644
14 | - require:
15 | - pkg: createrepo
16 | - pkg: apache
17 | - watch_in:
18 | - service: apache
19 | cmd.wait:
20 | - name: mkdir -p /data1/vhosts/mirrors.mall.com
21 | - watch:
22 | - file: /etc/httpd/conf.d/mirrors.mall.com.conf
23 |
24 | /data1/vhosts/mirrors.mall.com/:
25 | file.recurse:
26 | - source: salt://mirrors/files/data1/vhosts/mirrors.mall.com/
27 | - user: root
28 | - group: root
29 | - dir_mode: 755
30 | - file_mode: 644
31 | - exclude_pat: '*.svn*'
32 | - require:
33 | - file: /etc/httpd/conf.d/mirrors.mall.com.conf
34 | cmd.wait:
35 | - name: createrepo /data1/vhosts/mirrors.mall.com/mall/6/x86_64 && createrepo /data1/vhosts/mirrors.mall.com/mysql/6/x86_64
36 | - watch:
37 | - file: /data1/vhosts/mirrors.mall.com/
38 |
--------------------------------------------------------------------------------
/salt/mysql/files/etc/my.cnf:
--------------------------------------------------------------------------------
1 | {% set datadir = salt['pillar.get']('mysql:datadir', '/var/lib/mysql') -%}
2 | [mysqld]
3 | bind-address = {{grains['ipv4'][1]}}
4 | port = 3306
5 | pid-file = mysqld.pid
6 | socket = /tmp/mysql.sock
7 | key_buffer = 1024M
8 | max_allowed_packet = 100M
9 | table_cache = 1024
10 | sort_buffer_size = 2M
11 | read_buffer_size = 2M
12 | read_rnd_buffer_size = 8M
13 | myisam_sort_buffer_size = 64M
14 | query_cache_size = 32M
15 | tmp_table_size = 256M
16 | max_heap_table_size = 256M
17 | max_connections = 3000
18 | skip-name-resolve
19 | server-id = 10
20 | log-bin = mysql-bin
21 | log-error = log-error.log
22 | log-queries-not-using-indexes = 0
23 | long_query_time = 2
24 | slow_query_log_file = slow-queries.log
25 | datadir={{datadir}}
26 | default_storage_engine=innodb
27 | innodb_data_home_dir={{datadir}}
28 | innodb_data_file_path=ibdata1:10M:autoextend
29 | innodb_log_group_home_dir={{datadir}}
30 | innodb_buffer_pool_size=512M
31 | innodb_additional_mem_pool_size=50M
32 | innodb_log_file_size=100M
33 | innodb_log_buffer_size=8M
34 | innodb_flush_log_at_trx_commit=1
35 | innodb_lock_wait_timeout=50
36 | innodb_file_per_table
37 | innodb_open_files=20000
38 | skip-slave-start
39 | log-slave-updates=1
40 | expire_logs_days=15
41 | max_connect_errors=999999999
42 | net_write_timeout=120
43 | net_read_timeout=120
44 | wait_timeout = 30
45 | interactive_timeout = 30
46 |
47 | [mysqldump]
48 | quick
49 | max_allowed_packet = 16M
50 |
51 | [mysql]
52 | no-auto-rehash
53 | prompt={{salt['pillar.get']('mysql:role', 'mysql')}}::[{{grains['ipv4'][1]}}]\\n\\u@\\p::[\\d]\\n->\\_
54 |
55 | [isamchk]
56 | key_buffer = 256M
57 | sort_buffer_size = 256M
58 | read_buffer = 2M
59 | write_buffer = 2M
60 |
61 | [myisamchk]
62 | key_buffer = 256M
63 | sort_buffer_size = 256M
64 | read_buffer = 2M
65 | write_buffer = 2M
66 |
67 | [mysqlhotcopy]
68 | interactive-timeout
69 |
--------------------------------------------------------------------------------
/salt/mysql/files/etc/my_slave.cnf:
--------------------------------------------------------------------------------
1 | {% set datadir = salt['pillar.get']('mysql:datadir', '/var/lib/mysql') -%}
2 | [mysqld]
3 | bind-address = {{grains['ipv4'][1]}}
4 | port = 3306
5 | pid-file = mysqld.pid
6 | socket = /tmp/mysql.sock
7 | key_buffer = 1024M
8 | max_allowed_packet = 100M
9 | table_cache = 1024
10 | sort_buffer_size = 2M
11 | read_buffer_size = 2M
12 | read_rnd_buffer_size = 8M
13 | myisam_sort_buffer_size = 64M
14 | query_cache_size = 128M
15 | tmp_table_size = 256M
16 | max_connections = 2000
17 | skip-name-resolve
18 | server-id = 30
19 | relay-log=relay-log
20 | log-queries-not-using-indexes = 0
21 | log-bin = mysql-bin
22 | log-error = log-error.log
23 | max_heap_table_size = 256M
24 | datadir={{datadir}}
25 | default_storage_engine=innodb
26 | innodb_data_home_dir={{datadir}}
27 | innodb_data_file_path=ibdata1:10M:autoextend
28 | innodb_log_group_home_dir={{datadir}}
29 | innodb_buffer_pool_size=2048M
30 | innodb_additional_mem_pool_size=50M
31 | innodb_log_file_size=100M
32 | innodb_log_buffer_size=8M
33 | innodb_flush_log_at_trx_commit=1
34 | innodb_lock_wait_timeout=50
35 | innodb_file_per_table
36 | slave-skip-errors = 1062
37 | skip-slave-start
38 | read_only = 1
39 | thread_cache_size = 32
40 | thread_concurrency= 16
41 | wait_timeout = 30
42 | interactive_timeout = 30
43 | open-files-limit=10000
44 | net_read_timeout=120
45 | net_write_timeout=120
46 | slow_query_log_file = slow-queries.log
47 | slow_launch_time=2
48 | slow_query_log=1
49 |
50 | [mysqldump]
51 | quick
52 | max_allowed_packet = 16M
53 |
54 | [mysql]
55 | no-auto-rehash
56 | prompt={{salt['pillar.get']('mysql:role', 'mysql')}}::[{{grains['ipv4'][1]}}]\\n\\u@\\p::[\\d]\\n->\\_
57 |
58 | [isamchk]
59 | key_buffer = 256M
60 | sort_buffer_size = 256M
61 | read_buffer = 2M
62 | write_buffer = 2M
63 |
64 | [myisamchk]
65 | key_buffer = 256M
66 | sort_buffer_size = 256M
67 | read_buffer = 2M
68 | write_buffer = 2M
69 |
70 | [mysqlhotcopy]
71 | interactive-timeout
72 |
--------------------------------------------------------------------------------
/salt/mysql/files/etc/salt/minion.d/mysql.conf:
--------------------------------------------------------------------------------
1 | mysql.host: 'localhost'
2 | mysql.port: 3306
3 | mysql.user: 'root'
4 | mysql.pass: ''
5 | mysql.unix_socket: '/var/lib/mysql/mysql.sock'
6 |
--------------------------------------------------------------------------------
/salt/mysql/files/etc/yum.repo.d/mysql.repo:
--------------------------------------------------------------------------------
1 | [mysql]
2 | name = MySQL $releasever - $basearch
3 | baseurl = http://172.16.100.12:8000/mirrors/mysql/$releasever/$basearch/
4 | gpgcheck=0
5 | priority=1
6 |
--------------------------------------------------------------------------------
/salt/mysql/files/etc/zabbix/zabbix_agentd.conf.d/userparameter_mysql.conf:
--------------------------------------------------------------------------------
1 | # For all the following commands HOME should be set to the directory that has .my.cnf file with password information.
2 |
3 | # Flexible parameter to grab global variables. On the frontend side, use keys like mysql.status[Com_insert].
4 | # Key syntax is mysql.status[variable].
5 | UserParameter=mysql.status[*],echo "show global status where Variable_name='$1';" | HOME=/var/lib/zabbix mysql -N | awk '{print $$2}'
6 |
7 | # Flexible parameter to determine database or table size. On the frontend side, use keys like mysql.size[zabbix,history,data].
8 | # Key syntax is mysql.size[,,].
9 | # Database may be a database name or "all". Default is "all".
10 | # Table may be a table name or "all". Default is "all".
11 | # Type may be "data", "index", "free" or "both". Both is a sum of data and index. Default is "both".
12 | # Database is mandatory if a table is specified. Type may be specified always.
13 | # Returns value in bytes.
14 | # 'sum' on data_length or index_length alone needed when we are getting this information for whole database instead of a single table
15 | UserParameter=mysql.size[*],echo "select sum($(case "$3" in both|"") echo "data_length+index_length";; data|index) echo "$3_length";; free) echo "data_free";; esac)) from information_schema.tables$([[ "$1" = "all" || ! "$1" ]] || echo " where table_schema='$1'")$([[ "$2" = "all" || ! "$2" ]] || echo "and table_name='$2'");" | HOME=/var/lib/zabbix mysql -N
16 |
17 | UserParameter=mysql.ping,HOME=/var/lib/zabbix mysqladmin ping | grep -c alive
18 | UserParameter=mysql.version,mysql -V
19 |
20 | {% if salt['pillar.get']('mysql:role', 'mysql') == 'mysql-slave' -%}
21 | UserParameter=mysql.io.running,echo "show slave status\G" | HOME=/var/lib/zabbix mysql | grep 'Slave_IO_Running' | awk -F':' '{if($2==" Yes"){print 1}else{print 0}}'
22 | UserParameter=mysql.sql.running,echo "show slave status\G" | HOME=/var/lib/zabbix mysql | grep 'Slave_SQL_Running' | awk -F':' '{if($2==" Yes"){print 1}else{print 0}}'
23 | UserParameter=mysql.behind_master,echo "show slave status\G" | HOME=/var/lib/zabbix mysql | grep 'Seconds_Behind_Master' | awk -F':' '{print $2}' | sed 's/ //g'
24 | {% endif -%}
25 |
--------------------------------------------------------------------------------
/salt/mysql/files/var/lib/zabbix/.my.cnf:
--------------------------------------------------------------------------------
1 | [mysql]
2 | host={{grains['ipv4'][1]}}
3 | user=monitor
4 | password=monitor@mall.com$
5 | socket=/tmp/mysql.sock
6 |
7 | [mysqladmin]
8 | host={{grains['ipv4'][1]}}
9 | user=monitor
10 | password=monitor@mall.com$
11 | socket=/tmp/mysql.sock
12 |
--------------------------------------------------------------------------------
/salt/mysql/init.sls:
--------------------------------------------------------------------------------
1 | {% set datadir = salt['pillar.get']('mysql:datadir', '/var/lib/mysql') %}
2 | include:
3 | - zabbix.agent
4 | - salt.minion
5 | - mysql.monitor
6 |
7 | mysql:
8 | pkg.installed:
9 | - pkgs:
10 | - MySQL-server
11 | - MySQL-client
12 | - MySQL-devel
13 | - require:
14 | - file: mysql
15 | - cmd: remove-mysql-libs
16 | service.running:
17 | - name: mysql
18 | - enable: False
19 | - require:
20 | - pkg: mysql
21 | - watch:
22 | - pkg: mysql
23 | - file: /etc/my.cnf
24 | file.managed:
25 | - name: /etc/yum.repos.d/mysql.repo
26 | - source: salt://mysql/files/etc/yum.repo.d/mysql.repo
27 | cmd.wait:
28 | - name: mkdir -p {{datadir}} && cp -r /var/lib/mysql/* {{datadir}}/ && chown -R mysql.mysql {{datadir}}
29 | - unless: test -d {{datadir}}
30 | - watch:
31 | - pkg: mysql
32 |
33 | # 解决软件冲突
34 | remove-mysql-libs:
35 | cmd.run:
36 | - name: rpm -e --nodeps mysql-libs && chkconfig --level 2345 postfix off
37 | - onlyif: rpm -qa |grep mysql-libs
38 |
39 | /etc/my.cnf:
40 | file.managed:
41 | - source: salt://mysql/files/etc/{{salt['pillar.get']('mysql:conf_template', 'my.cnf')}}
42 | - template: jinja
43 | - user: root
44 | - group: root
45 | - mode: 644
46 |
47 | mysql-role:
48 | file.append:
49 | - name: /etc/salt/roles
50 | - text:
51 | - {{salt['pillar.get']('mysql:role', 'mysql')}}
52 | - require:
53 | - file: roles
54 | - service: mysql
55 | - service: salt-minion
56 | - watch_in:
57 | - module: sync_grains
58 |
--------------------------------------------------------------------------------
/salt/mysql/monitor.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - mysql
4 |
5 | my.cnf:
6 | file.managed:
7 | - name: /var/lib/zabbix/.my.cnf
8 | - source: salt://mysql/files/var/lib/zabbix/.my.cnf
9 | - template: jinja
10 | - watch_in:
11 | - service: zabbix-agent
12 |
13 | mysql-monitor-config:
14 | file.managed:
15 | - name: /etc/zabbix/zabbix_agentd.conf.d/userparameter_mysql.conf
16 | - source: salt://mysql/files/etc/zabbix/zabbix_agentd.conf.d/userparameter_mysql.conf
17 | - template: jinja
18 | - require:
19 | - file: my.cnf
20 | - watch_in:
21 | - service: zabbix-agent
22 |
--------------------------------------------------------------------------------
/salt/mysql/repl.txt:
--------------------------------------------------------------------------------
1 | mysql-master:
2 | GRANT REPLICATION SLAVE ON *.* TO 'repl'@'172.16.100.%' IDENTIFIED BY 'MySQL@mall.com$';
3 | FLUSH PRIVILEGES;
4 | FLUSH TABLES WITH READ LOCK;
5 | SHOW MASTER STATUS;
6 |
7 | mysql-slave:
8 | CHANGE MASTER TO MASTER_HOST='172.16.100.51',MASTER_USER='repl', MASTER_PASSWORD='MySQL@mall.com$',MASTER_LOG_FILE='mysql-bin.000005',MASTER_LOG_POS=345;
9 | START SLAVE;
10 | show slave status;
11 |
12 | monitor:
13 | GRANT USAGE,SUPER,REPLICATION CLIENT ON *.* TO 'monitor'@'172.16.100.%' IDENTIFIED BY 'monitor@mall.com$';
14 | FLUSH PRIVILEGES;
15 |
--------------------------------------------------------------------------------
/salt/mysql/server.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.minion
3 |
4 | mysql-server:
5 | pkg:
6 | - installed
7 | file.managed:
8 | - name: /etc/my.cnf
9 | - require:
10 | - pkg: mysql-server
11 | service.running:
12 | - name: mysqld
13 | - enable: True
14 | - require:
15 | - pkg: mysql-server
16 | - watch:
17 | - file: mysql-server
18 |
19 | mysql-server-config-minion:
20 | file.managed:
21 | - name: /etc/salt/minion.d/mysql.conf
22 | - source: salt://mysql/files/etc/salt/minion.d/mysql.conf
23 | - makedirs: True
24 | - require:
25 | - service: salt-minion
26 |
27 |
--------------------------------------------------------------------------------
/salt/nfs/client.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - rpcbind
3 |
4 | nfs-client:
5 | pkg.installed:
6 | - name: nfs-utils
7 |
8 | {% for dir, args in salt['pillar.get']('mounts', {}).iteritems() %}
9 | {{dir}}:
10 | mount.mounted:
11 | - device: {{args['device']}}
12 | - fstype: {{args['fstype']}}
13 | - mkmnt: {{args['mkmnt']}}
14 | - opts: {{args['opts']}}
15 | - require:
16 | - pkg: nfs-utils
17 | - service: rpcbind
18 | {% endfor %}
19 |
--------------------------------------------------------------------------------
/salt/nfs/files/etc/exports:
--------------------------------------------------------------------------------
1 | {% for dir, right in salt['pillar.get']('exports', {}).iteritems() -%}
2 | {{dir}} {{right}}
3 | {% endfor -%}
4 |
--------------------------------------------------------------------------------
/salt/nfs/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 | - rpcbind
5 |
6 | nfs:
7 | pkg.installed:
8 | - name: nfs-utils
9 | service.running:
10 | - name: nfs
11 | - enable: True
12 | - require:
13 | - pkg: nfs-utils
14 | - watch:
15 | - pkg: nfs-utils
16 | - service: rpcbind
17 |
18 | {% for dir, right in salt['pillar.get']('exports', {}).iteritems() %}
19 | {{dir}}:
20 | cmd.run:
21 | - name: mkdir -p {{dir}} && chown -R nfsnobody.nfsnobody {{dir}}
22 | - unless: test -d {{dir}}
23 | - require:
24 | - pkg: nfs-utils
25 | {% endfor %}
26 |
27 | /etc/exports:
28 | file.managed:
29 | - source: salt://nfs/files/etc/exports
30 | - template: jinja
31 | - user: root
32 | - group: root
33 | - mode: 644
34 | cmd.wait:
35 | - name: /usr/sbin/exportfs -rv
36 | - watch:
37 | - file: /etc/exports
38 |
39 | nfs-role:
40 | file.append:
41 | - name: /etc/salt/roles
42 | - text:
43 | - 'nfs'
44 | - require:
45 | - file: roles
46 | - service: nfs
47 | - service: salt-minion
48 | - watch_in:
49 | - module: sync_grains
50 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/nginx/conf.d/fastcgi_params_mall:
--------------------------------------------------------------------------------
1 | fastcgi_param SRV_CACHE_DIR "{{ salt['pillar.get']('vhostscachedir', '/var/www/cache') }}/www.mall.com";
2 | fastcgi_param SRV_NDATA_DIR "/data1/vhosts/static.mall.com/htdocs";
3 | fastcgi_param SRV_NDATA_CACHE_URL "http://static.mall.com";
4 | fastcgi_param SRV_SEARCHD_HOST "172.16.100.61 172.16.100.62";
5 |
6 | fastcgi_param SRV_DB_HOST "172.16.100.51";
7 | fastcgi_param SRV_DB_PORT "3306";
8 | fastcgi_param SRV_DB_USER "mall";
9 | fastcgi_param SRV_DB_PASS "Aspintour4_bdQ5";
10 | fastcgi_param SRV_DB_NAME "www_mall_com";
11 | fastcgi_param SRV_DB_HOST_R "172.16.100.52";
12 | fastcgi_param SRV_DB_PORT_R "3306";
13 | fastcgi_param SRV_DB_USER_R "mall";
14 | fastcgi_param SRV_DB_PASS_R "Aspintour4_bdQ5";
15 | fastcgi_param SRV_DB_NAME_R "www_mall_com";
16 |
17 | fastcgi_param SRV_MEMCACHED_KEY_PREFIX "www-";
18 | fastcgi_param SRV_MEMCACHED_SERVERS "172.16.100.41:11211 172.16.100.42:11211";
19 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/nginx/conf.d/static.mall.com.conf:
--------------------------------------------------------------------------------
1 | {% set dir = salt['pillar.get']('vhostsdir', '/var/www/html') -%}
2 | {% set logdir = salt['pillar.get']('logdir', '/var/log/nginx') -%}
3 | server {
4 | listen 80;
5 | server_name static.mall.com;
6 | root {{dir}}/static.mall.com/htdocs;
7 | access_log {{logdir}}/static.mall.com.log main;
8 |
9 | location / {
10 | root {{dir}}/static.mall.com/htdocs;
11 | index index.php index.html index.htm;
12 | try_files $uri $uri/ /index.php?$args; # robert
13 | }
14 |
15 | error_page 500 502 503 504 /50x.html;
16 | location = /50x.html {
17 | root html;
18 | }
19 |
20 | location ~* .*\.(gif|jpg|jpeg|png|bmp|swf|flv)$
21 | {
22 | expires 365d;
23 | }
24 |
25 | location ~* .*\.(js|css|html|htm|shtml)$
26 | {
27 | expires 1h;
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/nginx/conf.d/status.conf:
--------------------------------------------------------------------------------
1 | #
2 | # Enable php-fpm and nginx status
3 | #
4 | server {
5 | listen 55888;
6 |
7 | location /phpfpm-status {
8 | fastcgi_pass 127.0.0.1:9000;
9 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
10 | include fastcgi_params;
11 | allow 127.0.0.1;
12 | allow 172.16.100.81;
13 | deny all;
14 | }
15 |
16 | location /nginx-status {
17 | stub_status on;
18 | access_log off;
19 | allow 127.0.0.1;
20 | allow 172.16.100.81;
21 | deny all;
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/nginx/conf.d/www.mall.com.conf:
--------------------------------------------------------------------------------
1 | {% set dir = salt['pillar.get']('vhostsdir', '/var/www/html') -%}
2 | {% set logdir = salt['pillar.get']('logdir', '/var/log/nginx') -%}
3 | server {
4 | listen 80;
5 | server_name www.mall.com;
6 | root {{dir}}/www.mall.com/htdocs;
7 | access_log {{logdir}}/www.mall.com.log main;
8 |
9 | location / {
10 | root {{dir}}/www.mall.com/htdocs;
11 | index index.php index.html index.htm;
12 | try_files $uri $uri/ /index.php?$args; # robert
13 | }
14 |
15 | location ~ \.php$ {
16 | root {{dir}}/www.mall.com/htdocs;
17 | fastcgi_pass 127.0.0.1:9000;
18 | fastcgi_index index.php;
19 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
20 | fastcgi_buffers 8 128k;
21 | send_timeout 60;
22 | include fastcgi_params;
23 | include conf.d/fastcgi_params_mall;
24 | }
25 |
26 | if (!-f $request_filename){
27 | rewrite /!.(js|ico|gif|jpg|jpeg|png|css|swf|xml|flv|html|htm)$ /index.php last;
28 | }
29 |
30 | location ~* .*\.(gif|jpg|jpeg|png|bmp|swf|flv)$
31 | {
32 | expires 365d;
33 | }
34 |
35 | location ~* .*\.(js|css|html|htm|shtml)$
36 | {
37 | expires 1h;
38 | }
39 | }
40 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/nginx/nginx.conf:
--------------------------------------------------------------------------------
1 | {% set logdir = salt['pillar.get']('logdir', '/var/log/nginx') -%}
2 | user nginx;
3 | worker_processes 8;
4 |
5 | error_log {{logdir}}/error.log crit;  # crit only: suppresses warn/error noise
6 |
7 | pid /var/run/nginx.pid;
8 |
9 | worker_rlimit_nofile 65530;  # keep in sync with worker_connections below
10 |
11 | events {
12 | use epoll;
13 | worker_connections 65530;
14 | }
15 |
16 |
17 | http {
18 | include mime.types;
19 | default_type application/octet-stream;
20 |
21 | log_format main '$http_x_forwarded_for - $remote_user [$time_local] "$request" '  # NOTE(review): first field is XFF, not $remote_addr - presumably runs behind a proxy/LB; confirm
22 | '$status $body_bytes_sent "$http_referer" '
23 | '"$http_user_agent" "$http_x_forwarded_for"';
24 |
25 |
26 | server_names_hash_bucket_size 128;
27 | client_header_buffer_size 512k;
28 | large_client_header_buffers 4 1024k;
29 | client_max_body_size 8m;  # max request body (upload) size
30 |
31 | access_log {{logdir}}/access.log main;
32 |
33 | sendfile on;
34 | tcp_nopush on;
35 | tcp_nodelay on;
36 |
37 | keepalive_timeout 60;
38 |
39 | gzip on;
40 | gzip_static on;  # serve pre-compressed .gz files when present
41 | gzip_disable "MSIE [1-5]\.";
42 | gzip_proxied any;
43 | gzip_min_length 1k;
44 | gzip_buffers 4 16k;
45 | gzip_http_version 1.1;
46 | gzip_comp_level 4;
47 | gzip_types text/plain application/x-javascript video/x-flv text/css application/xml;
48 | gzip_vary on;
49 |
50 | fastcgi_intercept_errors on;  # let nginx error_page handle FastCGI error statuses
51 |
52 | geo $dollar {  # trick: provides a literal '$' for use inside config values
53 | default "$";
54 | }
55 |
56 | include /etc/nginx/conf.d/*.conf;  # vhost snippets managed by salt (file.recurse)
57 | }
58 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php-fpm.conf:
--------------------------------------------------------------------------------
1 | ;;;;;;;;;;;;;;;;;;;;;
2 | ; FPM Configuration ;
3 | ;;;;;;;;;;;;;;;;;;;;;
4 |
5 | ; All relative paths in this configuration file are relative to PHP's install
6 | ; prefix.
7 |
8 | ; Include one or more files. If glob(3) exists, it is used to include a bunch of
9 | ; files from a glob(3) pattern. This directive can be used everywhere in the
10 | ; file.
11 | include=/etc/php-fpm.d/*.conf
12 |
13 | ;;;;;;;;;;;;;;;;;;
14 | ; Global Options ;
15 | ;;;;;;;;;;;;;;;;;;
16 |
17 | [global]
18 | ; Pid file
19 | ; Default Value: none
20 | pid = /var/run/php-fpm/php-fpm.pid
21 |
22 | ; Error log file
23 | ; Default Value: /var/log/php-fpm.log
24 | error_log = /var/log/php-fpm/error.log
25 |
26 | ; Log level
27 | ; Possible Values: alert, error, warning, notice, debug
28 | ; Default Value: notice
29 | ;log_level = notice
30 |
31 | ; If this number of child processes exit with SIGSEGV or SIGBUS within the time
32 | ; interval set by emergency_restart_interval then FPM will restart. A value
33 | ; of '0' means 'Off'.
34 | ; Default Value: 0
35 | ;emergency_restart_threshold = 0
36 |
37 | ; Interval of time used by emergency_restart_interval to determine when
38 | ; a graceful restart will be initiated. This can be useful to work around
39 | ; accidental corruptions in an accelerator's shared memory.
40 | ; Available Units: s(econds), m(inutes), h(ours), or d(ays)
41 | ; Default Unit: seconds
42 | ; Default Value: 0
43 | ;emergency_restart_interval = 0
44 |
45 | ; Time limit for child processes to wait for a reaction on signals from master.
46 | ; Available units: s(econds), m(inutes), h(ours), or d(ays)
47 | ; Default Unit: seconds
48 | ; Default Value: 0
49 | ;process_control_timeout = 0
50 |
51 | ; Send FPM to background. Set to 'no' to keep FPM in foreground for debugging.
52 | ; Default Value: yes
53 | daemonize = no
54 |
55 | ;;;;;;;;;;;;;;;;;;;;
56 | ; Pool Definitions ;
57 | ;;;;;;;;;;;;;;;;;;;;
58 |
59 | ; See /etc/php-fpm.d/*.conf
60 |
61 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php-fpm.d/www.conf:
--------------------------------------------------------------------------------
1 | ; Start a new pool named 'www'.
2 | [www]
3 |
4 | ; The address on which to accept FastCGI requests.
5 | ; Valid syntaxes are:
6 | ; 'ip.add.re.ss:port' - to listen on a TCP socket to a specific address on
7 | ; a specific port;
8 | ; 'port' - to listen on a TCP socket to all addresses on a
9 | ; specific port;
10 | ; '/path/to/unix/socket' - to listen on a unix socket.
11 | ; Note: This value is mandatory.
12 | listen = 127.0.0.1:9000
13 |
14 | ; Set listen(2) backlog. A value of '-1' means unlimited.
15 | ; Default Value: -1
16 | ;listen.backlog = -1
17 |
18 | ; List of ipv4 addresses of FastCGI clients which are allowed to connect.
19 | ; Equivalent to the FCGI_WEB_SERVER_ADDRS environment variable in the original
20 | ; PHP FCGI (5.2.2+). Makes sense only with a tcp listening socket. Each address
21 | ; must be separated by a comma. If this value is left blank, connections will be
22 | ; accepted from any ip address.
23 | ; Default Value: any
24 | listen.allowed_clients = 127.0.0.1
25 |
26 | ; Set permissions for unix socket, if one is used. In Linux, read/write
27 | ; permissions must be set in order to allow connections from a web server. Many
28 | ; BSD-derived systems allow connections regardless of permissions.
29 | ; Default Values: user and group are set as the running user
30 | ; mode is set to 0666
31 | ;listen.owner = nobody
32 | ;listen.group = nobody
33 | ;listen.mode = 0666
34 |
35 | ; Unix user/group of processes
36 | ; Note: The user is mandatory. If the group is not set, the default user's group
37 | ; will be used.
38 | ; RPM: apache was chosen to be able to access some dirs as httpd does
39 | user = nginx
40 | ; RPM: Keep a group allowed to write in log dir.
41 | group = nginx
42 |
43 | ; Choose how the process manager will control the number of child processes.
44 | ; Possible Values:
45 | ; static - a fixed number (pm.max_children) of child processes;
46 | ; dynamic - the number of child processes are set dynamically based on the
47 | ; following directives:
48 | ; pm.max_children - the maximum number of children that can
49 | ; be alive at the same time.
50 | ; pm.start_servers - the number of children created on startup.
51 | ; pm.min_spare_servers - the minimum number of children in 'idle'
52 | ; state (waiting to process). If the number
53 | ; of 'idle' processes is less than this
54 | ; number then some children will be created.
55 | ; pm.max_spare_servers - the maximum number of children in 'idle'
56 | ; state (waiting to process). If the number
57 | ; of 'idle' processes is greater than this
58 | ; number then some children will be killed.
59 | ; Note: This value is mandatory.
60 | pm = dynamic
61 |
62 | ; The number of child processes to be created when pm is set to 'static' and the
63 | ; maximum number of child processes to be created when pm is set to 'dynamic'.
64 | ; This value sets the limit on the number of simultaneous requests that will be
65 | ; served. Equivalent to the Apache MaxClients directive with mpm_prefork.
66 | ; Equivalent to the PHP_FCGI_CHILDREN environment variable in the original PHP
67 | ; CGI.
68 | ; Note: Used when pm is set to either 'static' or 'dynamic'
69 | ; Note: This value is mandatory.
70 | pm.max_children = 100
71 |
72 | ; The number of child processes created on startup.
73 | ; Note: Used only when pm is set to 'dynamic'
74 | ; Default Value: min_spare_servers + (max_spare_servers - min_spare_servers) / 2
75 | pm.start_servers = 10
76 |
77 | ; The desired minimum number of idle server processes.
78 | ; Note: Used only when pm is set to 'dynamic'
79 | ; Note: Mandatory when pm is set to 'dynamic'
80 | pm.min_spare_servers = 10
81 |
82 | ; The desired maximum number of idle server processes.
83 | ; Note: Used only when pm is set to 'dynamic'
84 | ; Note: Mandatory when pm is set to 'dynamic'
85 | pm.max_spare_servers = 35
86 |
87 | ; The number of requests each child process should execute before respawning.
88 | ; This can be useful to work around memory leaks in 3rd party libraries. For
89 | ; endless request processing specify '0'. Equivalent to PHP_FCGI_MAX_REQUESTS.
90 | ; Default Value: 0
91 | ;pm.max_requests = 2000
92 |
93 | ; The URI to view the FPM status page. If this value is not set, no URI will be
94 | ; recognized as a status page. By default, the status page shows the following
95 | ; information:
96 | ; accepted conn - the number of request accepted by the pool;
97 | ; pool - the name of the pool;
98 | ; process manager - static or dynamic;
99 | ; idle processes - the number of idle processes;
100 | ; active processes - the number of active processes;
101 | ; total processes - the number of idle + active processes.
102 | ; The values of 'idle processes', 'active processes' and 'total processes' are
103 | ; updated each second. The value of 'accepted conn' is updated in real time.
104 | ; Example output:
105 | ; accepted conn: 12073
106 | ; pool: www
107 | ; process manager: static
108 | ; idle processes: 35
109 | ; active processes: 65
110 | ; total processes: 100
111 | ; By default the status page output is formatted as text/plain. Passing either
112 | ; 'html' or 'json' as a query string will return the corresponding output
113 | ; syntax. Example:
114 | ; http://www.foo.bar/status
115 | ; http://www.foo.bar/status?json
116 | ; http://www.foo.bar/status?html
117 | ; Note: The value must start with a leading slash (/). The value can be
118 | ; anything, but it may not be a good idea to use the .php extension or it
119 | ; may conflict with a real PHP file.
120 | ; Default Value: not set
121 | ;pm.status_path = /status
122 | pm.status_path = /phpfpm-status
123 |
124 | ; The ping URI to call the monitoring page of FPM. If this value is not set, no
125 | ; URI will be recognized as a ping page. This could be used to test from outside
126 | ; that FPM is alive and responding, or to
127 | ; - create a graph of FPM availability (rrd or such);
128 | ; - remove a server from a group if it is not responding (load balancing);
129 | ; - trigger alerts for the operating team (24/7).
130 | ; Note: The value must start with a leading slash (/). The value can be
131 | ; anything, but it may not be a good idea to use the .php extension or it
132 | ; may conflict with a real PHP file.
133 | ; Default Value: not set
134 | ;ping.path = /ping
135 |
136 | ; This directive may be used to customize the response of a ping request. The
137 | ; response is formatted as text/plain with a 200 response code.
138 | ; Default Value: pong
139 | ;ping.response = pong
140 |
141 | ; The timeout for serving a single request after which the worker process will
142 | ; be killed. This option should be used when the 'max_execution_time' ini option
143 | ; does not stop script execution for some reason. A value of '0' means 'off'.
144 | ; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
145 | ; Default Value: 0
146 | ;request_terminate_timeout = 0
147 |
148 | ; The timeout for serving a single request after which a PHP backtrace will be
149 | ; dumped to the 'slowlog' file. A value of '0s' means 'off'.
150 | ; Available units: s(econds)(default), m(inutes), h(ours), or d(ays)
151 | ; Default Value: 0
152 | ;request_slowlog_timeout = 0
153 |
154 | ; The log file for slow requests
155 | ; Default Value: not set
156 | ; Note: slowlog is mandatory if request_slowlog_timeout is set
157 | slowlog = /var/log/php-fpm/www-slow.log
158 |
159 | ; Set open file descriptor rlimit.
160 | ; Default Value: system defined value
161 | ;rlimit_files = 1024
162 |
163 | ; Set max core size rlimit.
164 | ; Possible Values: 'unlimited' or an integer greater or equal to 0
165 | ; Default Value: system defined value
166 | ;rlimit_core = 0
167 |
168 | ; Chroot to this directory at the start. This value must be defined as an
169 | ; absolute path. When this value is not set, chroot is not used.
170 | ; Note: chrooting is a great security feature and should be used whenever
171 | ; possible. However, all PHP paths will be relative to the chroot
172 | ; (error_log, sessions.save_path, ...).
173 | ; Default Value: not set
174 | ;chroot =
175 |
176 | ; Chdir to this directory at the start. This value must be an absolute path.
177 | ; Default Value: current directory or / when chroot
178 | ;chdir = /var/www
179 |
180 | ; Redirect worker stdout and stderr into main error log. If not set, stdout and
181 | ; stderr will be redirected to /dev/null according to FastCGI specs.
182 | ; Default Value: no
183 | catch_workers_output = yes
184 |
185 | ; Limits the extensions of the main script FPM will allow to parse. This can
186 | ; prevent configuration mistakes on the web server side. You should only limit
187 | ; FPM to .php extensions to prevent malicious users to use other extensions to
188 | ; execute php code.
189 | ; Note: set an empty value to allow all extensions.
190 | ; Default Value: .php
191 | ;security.limit_extensions = .php .php3 .php4 .php5
192 |
193 | ; Pass environment variables like LD_LIBRARY_PATH. All $VARIABLEs are taken from
194 | ; the current environment.
195 | ; Default Value: clean env
196 | ;env[HOSTNAME] = $HOSTNAME
197 | ;env[PATH] = /usr/local/bin:/usr/bin:/bin
198 | ;env[TMP] = /tmp
199 | ;env[TMPDIR] = /tmp
200 | ;env[TEMP] = /tmp
201 |
202 | ; Additional php.ini defines, specific to this pool of workers. These settings
203 | ; overwrite the values previously defined in the php.ini. The directives are the
204 | ; same as the PHP SAPI:
205 | ; php_value/php_flag - you can set classic ini defines which can
206 | ; be overwritten from PHP call 'ini_set'.
207 | ; php_admin_value/php_admin_flag - these directives won't be overwritten by
208 | ; PHP call 'ini_set'
209 | ; For php_*flag, valid values are on, off, 1, 0, true, false, yes or no.
210 |
211 | ; Defining 'extension' will load the corresponding shared extension from
212 | ; extension_dir. Defining 'disable_functions' or 'disable_classes' will not
213 | ; overwrite previously defined php.ini values, but will append the new value
214 | ; instead.
215 |
216 | ; Default Value: nothing is defined by default except the values in php.ini and
217 | ; specified at startup with the -d argument
218 | ;php_admin_value[sendmail_path] = /usr/sbin/sendmail -t -i -f www@my.domain.com
219 | ;php_flag[display_errors] = off
220 | php_admin_value[error_log] = /var/log/php-fpm/www-error.log
221 | php_admin_flag[log_errors] = on
222 | ;php_admin_value[memory_limit] = 128M
223 |
224 | ; Set session path to a directory owned by process user
225 | php_value[session.save_handler] = files
226 | php_value[session.save_path] = /var/lib/php/session
227 |
228 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/bcmath.ini:
--------------------------------------------------------------------------------
1 | ; Enable bcmath extension module
2 | extension=bcmath.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/curl.ini:
--------------------------------------------------------------------------------
1 | ; Enable curl extension module
2 | extension=curl.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/dom.ini:
--------------------------------------------------------------------------------
1 | ; Enable dom extension module
2 | extension=dom.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/fileinfo.ini:
--------------------------------------------------------------------------------
1 | ; Enable fileinfo extension module
2 | extension=fileinfo.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/gd.ini:
--------------------------------------------------------------------------------
1 | ; Enable gd extension module
2 | extension=gd.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/json.ini:
--------------------------------------------------------------------------------
1 | ; Enable json extension module
2 | extension=json.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/mbstring.ini:
--------------------------------------------------------------------------------
1 | ; Enable mbstring extension module
2 | extension=mbstring.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/memcache.ini:
--------------------------------------------------------------------------------
1 | ; ----- Enable memcache extension module
2 | extension=memcache.so
3 |
4 | ; ----- Options for the memcache module
5 |
6 | ; Whether to transparently failover to other servers on errors
7 | ;memcache.allow_failover=1
8 | ; Data will be transferred in chunks of this size
9 | ;memcache.chunk_size=32768
10 | ; Autocompress large data
11 | ;memcache.compress_threshold=20000
12 | ; The default TCP port number to use when connecting to the memcached server
13 | ;memcache.default_port=11211
14 | ; Hash function {crc32, fnv}
15 | ;memcache.hash_function=crc32
16 | ; Hash strategy {standard, consistent}
17 | ;memcache.hash_strategy=consistent
18 | ; Defines how many servers to try when setting and getting data.
19 | ;memcache.max_failover_attempts=20
20 | ; The protocol {ascii, binary} : You need a memcached >= 1.3.0 to use the binary protocol
21 | ; The binary protocol results in less traffic and is more efficient
22 | ;memcache.protocol=ascii
23 | ; Redundancy : When enabled the client sends requests to N servers in parallel
24 | ;memcache.redundancy=1
25 | ;memcache.session_redundancy=2
26 | ; Lock Timeout
27 | ;memcache.lock_timeout = 15
28 |
29 | ; ----- Options to use the memcache session handler
30 |
31 | ; Use memcache as a session handler
32 | ;session.save_handler=memcache
33 | ; Defines a comma separated list of server urls to use for session storage
34 | ;session.save_path="tcp://localhost:11211?persistent=1&weight=1&timeout=1&retry_interval=15"
35 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/memcached.ini:
--------------------------------------------------------------------------------
1 | ; Enable memcached extension module
2 | extension=memcached.so
3 |
4 |
5 | ; ----- Options to use the memcached session handler
6 |
7 | ; Use memcache as a session handler
8 | ;session.save_handler=memcached
9 | ; Defines a comma separated list of server urls to use for session storage
10 | ;session.save_path="localhost:11211"
11 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/mysql.ini:
--------------------------------------------------------------------------------
1 | ; Enable mysql extension module
2 | extension=mysql.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/mysqli.ini:
--------------------------------------------------------------------------------
1 | ; Enable mysqli extension module
2 | extension=mysqli.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/pdo.ini:
--------------------------------------------------------------------------------
1 | ; Enable pdo extension module
2 | extension=pdo.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/pdo_mysql.ini:
--------------------------------------------------------------------------------
1 | ; Enable pdo_mysql extension module
2 | extension=pdo_mysql.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/pdo_sqlite.ini:
--------------------------------------------------------------------------------
1 | ; Enable pdo_sqlite extension module
2 | extension=pdo_sqlite.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/phar.ini:
--------------------------------------------------------------------------------
1 | ; Enable phar extension module
2 | extension=phar.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/sqlite3.ini:
--------------------------------------------------------------------------------
1 | ; Enable sqlite3 extension module
2 | extension=sqlite3.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/wddx.ini:
--------------------------------------------------------------------------------
1 | ; Enable wddx extension module
2 | extension=wddx.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/xmlreader.ini:
--------------------------------------------------------------------------------
1 | ; Enable xmlreader extension module
2 | extension=xmlreader.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/xmlwriter.ini:
--------------------------------------------------------------------------------
1 | ; Enable xmlwriter extension module
2 | extension=xmlwriter.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/xsl.ini:
--------------------------------------------------------------------------------
1 | ; Enable xsl extension module
2 | extension=xsl.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/php.d/zip.ini:
--------------------------------------------------------------------------------
1 | ; Enable zip extension module
2 | extension=zip.so
3 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/zabbix/ExternalScripts/php-fpm_status.pl:
--------------------------------------------------------------------------------
1 | #!/usr/bin/perl
2 | #
3 | # Boris HUISGEN
4 |
5 | use LWP::UserAgent;
6 |
7 | my $URL = "http://127.0.0.1:55888/phpfpm-status"; # NOTE(review): assumes a local status vhost on :55888 serving pm.status_path (/phpfpm-status in www.conf) - confirm in conf.d
8 |
9 | #
10 | # DO NOT MODIFY AFTER THIS LINE
11 | #
12 |
13 | my $ua = LWP::UserAgent->new(timeout => 15); # give up if php-fpm does not answer
14 | my $response = $ua->request(HTTP::Request->new('GET', $URL));
15 |
16 | my $conn = 0; # counters default to 0 when the page is unreachable or unparsable
17 | my $idle = 0;
18 | my $active = 0;
19 | my $total = 0;
20 | my $maxchildren = 0;
21 |
22 | foreach (split(/\n/, $response->content)) { # parse the text/plain status page line by line
23 | $conn = $1 if (/^accepted conn:\s+(\d+)/);
24 | $idle = $1 if (/^idle processes:\s+(\d+)/);
25 | $active = $1 if (/^active processes:\s+(\d+)/);
26 | $total = $1 if (/^total processes:\s+(\d+)/);
27 | $maxchildren = $1 if (/^max children reached:\s+(\d+)/);
28 | }
29 |
30 | print "Accepted conn: $conn\tIdle proc: $idle\tActive proc: $active\tTotal proc: $total\tMax children: $maxchildren\n"; # single tab-separated line, consumed by zabbix via cut -fN
31 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/zabbix/zabbix_agentd.conf.d/nginx.conf:
--------------------------------------------------------------------------------
1 | # Zabbix items for the nginx stub_status page on the local status vhost.
2 | # These are flexible UserParameters ([*]): the agent substitutes $1-$9 with
3 | # key arguments, so literal awk field references must be escaped as $$N
4 | # (the agent unescapes $$ to $). nginx.active previously used an unescaped
5 | # $NF, inconsistent with every sibling item; "Active connections: N" has the
6 | # value in field 3, so $$3 yields the same output with consistent escaping.
7 | UserParameter=nginx.active[*],wget -O- -q http://localhost:55888/nginx-status | grep 'Active' | awk '{print $$3}'
8 | UserParameter=nginx.reading[*],wget -O- -q http://localhost:55888/nginx-status | grep 'Reading' | awk '{print $$2}'
9 | UserParameter=nginx.writing[*],wget -O- -q http://localhost:55888/nginx-status | grep 'Writing' | awk '{print $$4}'
10 | UserParameter=nginx.waiting[*],wget -O- -q http://localhost:55888/nginx-status | grep 'Waiting' | awk '{print $$6}'
11 | UserParameter=nginx.accepted[*],wget -O- -q http://localhost:55888/nginx-status | awk NR==3 | awk '{print $$1}'
12 | UserParameter=nginx.handled[*],wget -O- -q http://localhost:55888/nginx-status | awk NR==3 | awk '{print $$2}'
13 | UserParameter=nginx.requests[*],wget -O- -q http://localhost:55888/nginx-status | awk NR==3 | awk '{print $$3}'
14 |
8 |
--------------------------------------------------------------------------------
/salt/nginx/files/etc/zabbix/zabbix_agentd.conf.d/php_fpm.conf:
--------------------------------------------------------------------------------
1 | # Zabbix UserParameters for php-fpm pool statistics.
2 | # Each item runs the helper script (which prints one tab-separated summary
3 | # line) and extracts one value: cut -fN selects the tab column, then the
4 | # third space-separated word is the number (e.g. "Accepted conn: 12073" -> 12073).
5 | UserParameter=php.conn,/etc/zabbix/ExternalScripts/php-fpm_status.pl|cut -f1|cut -d" " -f3
6 | UserParameter=php.idle,/etc/zabbix/ExternalScripts/php-fpm_status.pl|cut -f2|cut -d" " -f3
7 | UserParameter=php.active,/etc/zabbix/ExternalScripts/php-fpm_status.pl|cut -f3|cut -d" " -f3
8 | UserParameter=php.total,/etc/zabbix/ExternalScripts/php-fpm_status.pl|cut -f4|cut -d" " -f3
9 | UserParameter=php.maxchildren,/etc/zabbix/ExternalScripts/php-fpm_status.pl|cut -f5|cut -d" " -f3
10 |
6 |
--------------------------------------------------------------------------------
/salt/nginx/init.sls:
--------------------------------------------------------------------------------
1 | include:  # nginx role entry point: aggregate the three nginx sub-states
2 | - nginx.server  # nginx package, service, nginx.conf and vhosts
3 | - nginx.php  # php-fpm backend and PHP configuration
4 | - nginx.monitor  # zabbix agent items for nginx and php-fpm
5 |
--------------------------------------------------------------------------------
/salt/nginx/monitor.sls:
--------------------------------------------------------------------------------
1 | include:  # needs the zabbix agent (watched below) and the nginx/php-fpm states
2 | - zabbix.agent
3 | - nginx
4 |
5 | nginx-monitor:  # perl LWP is required by the php-fpm status helper script
6 | pkg.installed:
7 | - name: perl-libwww-perl
8 |
9 | php-fpm-monitor-script:  # deploy the php-fpm status poller used by zabbix
10 | file.managed:
11 | - name: /etc/zabbix/ExternalScripts/php-fpm_status.pl
12 | - source: salt://nginx/files/etc/zabbix/ExternalScripts/php-fpm_status.pl
13 | - user: root
14 | - group: root
15 | - mode: 755  # NOTE(review): best written quoted as '0755' so YAML keeps it a string
16 | - require:
17 | - service: php-fpm
18 | - pkg: nginx-monitor
19 | - cmd: php-fpm-monitor-script  # the cmd.run below, which creates the target dir
20 | cmd.run:
21 | - name: mkdir -p /etc/zabbix/ExternalScripts
22 | - unless: test -d /etc/zabbix/ExternalScripts  # only create the dir once
23 |
24 | php-fpm-monitor-config:  # zabbix agent UserParameters for php-fpm
25 | file.managed:
26 | - name: /etc/zabbix/zabbix_agentd.conf.d/php_fpm.conf
27 | - source: salt://nginx/files/etc/zabbix/zabbix_agentd.conf.d/php_fpm.conf
28 | - require:
29 | - file: php-fpm-monitor-script
30 | - service: php-fpm
31 | - watch_in:
32 | - service: zabbix-agent  # restart the agent when items change
33 |
34 | nginx-monitor-config:  # zabbix agent UserParameters for nginx stub_status
35 | file.managed:
36 | - name: /etc/zabbix/zabbix_agentd.conf.d/nginx.conf
37 | - source: salt://nginx/files/etc/zabbix/zabbix_agentd.conf.d/nginx.conf
38 | - template: jinja
39 | - require:
40 | - service: nginx
41 | - watch_in:
42 | - service: zabbix-agent
43 |
--------------------------------------------------------------------------------
/salt/nginx/php.sls:
--------------------------------------------------------------------------------
1 | php-fpm:  # install the PHP stack and keep the php-fpm service running
2 | pkg:
3 | - name: php-fpm  # NOTE(review): ignored when pkgs is given - pkgs takes precedence; confirm
4 | - pkgs:  # full PHP stack for the nginx web role
5 | - php-fpm
6 | - php-common
7 | - php-cli
8 | - php-devel
9 | - php-pecl-memcache
10 | - php-pecl-memcached
11 | - php-gd
12 | - php-pear
13 | - php-mbstring
14 | - php-mysql
15 | - php-xml
16 | - php-bcmath
17 | - php-pdo
18 | - installed
19 | service:
20 | - running
21 | - require:
22 | - pkg: php-fpm
23 | - watch:  # restart php-fpm whenever packages or any config file change
24 | - pkg: php-fpm
25 | - file: /etc/php.ini
26 | - file: /etc/php.d/
27 | - file: /etc/php-fpm.conf
28 | - file: /etc/php-fpm.d/
29 |
30 | /etc/php.ini:  # main PHP configuration
31 | file.managed:
32 | - source: salt://nginx/files/etc/php.ini
33 | - user: root
34 | - group: root
35 | - mode: 644
36 |
37 | /etc/php.d/:  # per-extension ini snippets
38 | file.recurse:
39 | - source: salt://nginx/files/etc/php.d/
40 | - user: root
41 | - group: root
42 | - dir_mode: 755
43 | - file_mode: 644
44 |
45 | /etc/php-fpm.conf:  # FPM master configuration
46 | file.managed:
47 | - source: salt://nginx/files/etc/php-fpm.conf
48 | - user: root
49 | - group: root
50 | - mode: 644
51 |
52 | /etc/php-fpm.d/:  # FPM pool definitions (www pool)
53 | file.recurse:
54 | - source: salt://nginx/files/etc/php-fpm.d/
55 | - user: root
56 | - group: root
57 | - dir_mode: 755
58 | - file_mode: 644
59 |
60 | php-fpm-role:  # advertise the php-fpm role via the custom roles grain
61 | file.append:
62 | - name: /etc/salt/roles
63 | - text:
64 | - 'php-fpm'
65 | - require:  # NOTE(review): 'roles'/'salt-minion' states presumably come from salt.minion - confirm include chain
66 | - file: roles
67 | - service: php-fpm
68 | - service: salt-minion
69 | - watch_in:
70 | - module: sync_grains  # resync grains so the new role becomes visible
71 |
--------------------------------------------------------------------------------
/salt/nginx/server.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion  # NOTE(review): presumably provides the 'roles' file and sync_grains used below - confirm
4 |
5 | nginx:
6 | pkg:
7 | - name: nginx
8 | - installed
9 | service:
10 | - name: nginx
11 | - running
12 | - require:
13 | - pkg: nginx
14 | - watch:  # restart nginx when the package or any configuration changes
15 | - pkg: nginx
16 | - file: /etc/nginx/nginx.conf
17 | - file: /etc/nginx/conf.d/
18 |
19 | /etc/nginx/nginx.conf:  # main config, rendered from pillar (logdir)
20 | file.managed:
21 | - source: salt://nginx/files/etc/nginx/nginx.conf
22 | - template: jinja
23 | - user: root
24 | - group: root
25 | - mode: 644
26 | - backup: minion  # keep a local backup of the replaced config
27 |
28 | /etc/nginx/conf.d/:  # vhost and include snippets
29 | file.recurse:
30 | - source: salt://nginx/files/etc/nginx/conf.d/
31 | - template: jinja
32 | - user: root
33 | - group: root
34 | - dir_mode: 755
35 | - file_mode: 644
36 |
37 | {% set logdir = salt['pillar.get']('logdir', '/var/log/nginx') %}
38 | {{logdir}}:  # make sure the log directory exists before nginx writes to it
39 | cmd.run:
40 | - name: mkdir -p {{logdir}}
41 | - unless: test -d {{logdir}}
42 | - require:
43 | - pkg: nginx
44 |
45 | {% if salt['pillar.get']('vhosts', false) %}
46 | {% set dir = salt['pillar.get']('vhostsdir', '/var/www/html') %}
47 | {% set cachedir = salt['pillar.get']('vhostscachedir', '/var/www/cache') %}
48 | {% for vhost in pillar['vhosts'] %}
49 | {{dir}}/{{vhost}}/htdocs:  # create the vhost docroot, owned by nobody
50 | cmd.run:
51 | - name: mkdir -p {{dir}}/{{vhost}}/htdocs && chown -R nobody.nobody {{dir}}/{{vhost}}/htdocs
52 | - unless: test -d {{dir}}/{{vhost}}/htdocs
53 | - require:
54 | - pkg: nginx
55 | {{cachedir}}/{{vhost}}:  # per-vhost cache directory, owned by nginx
56 | cmd.run:
57 | - name: mkdir -p {{cachedir}}/{{vhost}} && chown -R nginx.nginx {{cachedir}}/{{vhost}}
58 | - unless: test -d {{cachedir}}/{{vhost}}
59 | - require:
60 | - pkg: nginx
61 | {% endfor %}
62 | {% endif %}
63 |
64 | nginx-role:  # advertise the nginx role via the custom roles grain
65 | file.append:
66 | - name: /etc/salt/roles
67 | - text:
68 | - 'nginx'
69 | - require:
70 | - file: roles
71 | - service: nginx
72 | - service: salt-minion
73 | - watch_in:
74 | - module: sync_grains  # resync grains so the new role becomes visible
75 |
--------------------------------------------------------------------------------
/salt/ntp/init.sls:
--------------------------------------------------------------------------------
1 | ntpdate:  # one-shot time sync client only; no ntpd daemon is managed here
2 | pkg.installed:
3 | - name: ntpdate
4 |
--------------------------------------------------------------------------------
/salt/php/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - apache
3 |
4 | php:  # PHP under apache (the nginx stack manages php-fpm separately)
5 | pkg.installed:
6 | - name: php
7 | file.managed:
8 | - name: /etc/php.ini
9 | - source: salt://php/files/etc/php.ini
10 | - require:
11 | - pkg: php
12 | - watch_in:
13 | - service: apache  # restart apache whenever php.ini changes
14 |
--------------------------------------------------------------------------------
/salt/redis/files/etc/redis.conf:
--------------------------------------------------------------------------------
1 | # Redis configuration rendered by Salt (salt://redis/files/etc/redis.conf).
2 | # bind/port fall back to the template context defaults passed from
3 | # redis/init.sls (previously those defaults were dead: the template read
4 | # pillar directly and rendering failed when the pillar keys were absent).
5 | # The remaining pillar keys (timeout, loglevel, dir, maxclients, maxmemory)
6 | # are still required to be set in pillar.
7 | daemonize yes
8 | pidfile /var/run/redis.pid
9 | port {{ salt['pillar.get']('redis:port', port) }}
10 | tcp-backlog 511
11 | bind {{ salt['pillar.get']('redis:bind', bind) }}
12 | timeout {{pillar['redis']['timeout']}}
13 | tcp-keepalive 60
14 | loglevel {{pillar['redis']['loglevel']}}
15 | logfile /var/log/redis/redis.log
16 | databases 16
17 | save 900 1
18 | save 300 10
19 | save 60 10000
20 | stop-writes-on-bgsave-error no
21 | rdbcompression yes
22 | rdbchecksum yes
23 | dbfilename dump.rdb
24 | dir {{pillar['redis']['dir']}}
25 | {% if salt['pillar.get']('redis:master', false) and salt['pillar.get']('redis:master_port', false) -%}
26 | slaveof {{pillar['redis']['master']}} {{pillar['redis']['master_port']}}
27 | {% endif -%}
28 | {% if salt['pillar.get']('redis:master_password', false) -%}
29 | masterauth {{pillar['redis']['master_password']}}
30 | {% endif -%}
31 | slave-serve-stale-data yes
32 | slave-read-only yes
33 | repl-disable-tcp-nodelay no
34 | slave-priority 100
35 | maxclients {{pillar['redis']['maxclients']}}
36 | maxmemory {{pillar['redis']['maxmemory']}}
37 | appendonly no
38 | appendfilename "appendonly.aof"
39 | appendfsync everysec
40 | no-appendfsync-on-rewrite no
41 | auto-aof-rewrite-percentage 100
42 | auto-aof-rewrite-min-size 64mb
43 | lua-time-limit 5000
44 | slowlog-log-slower-than 10000
45 | slowlog-max-len 128
46 | notify-keyspace-events ""
47 | hash-max-ziplist-entries 512
48 | hash-max-ziplist-value 64
49 | list-max-ziplist-entries 512
50 | list-max-ziplist-value 64
51 | set-max-intset-entries 512
52 | zset-max-ziplist-entries 128
53 | zset-max-ziplist-value 64
54 | hll-sparse-max-bytes 3000
55 | activerehashing yes
56 | client-output-buffer-limit normal 0 0 0
57 | client-output-buffer-limit slave 256mb 64mb 60
58 | client-output-buffer-limit pubsub 32mb 8mb 60
59 | hz 10
60 | aof-rewrite-incremental-fsync yes
61 |
--------------------------------------------------------------------------------
/salt/redis/files/etc/sysctl.d/redis.conf:
--------------------------------------------------------------------------------
1 | # More optimistic malloc for Redis forks
2 | vm.overcommit_memory = 1
3 |
--------------------------------------------------------------------------------
/salt/redis/files/etc/zabbix/zabbix_agentd.conf.d/redis.conf:
--------------------------------------------------------------------------------
1 | UserParameter=redis.stat[*],/usr/bin/redis-cli -h {{pillar['redis']['bind']}} -p {{pillar['redis']['port']}} info | grep "$1:" | cut -d":" -f2
2 |
--------------------------------------------------------------------------------
/salt/redis/files/redis-2.8.9-1.el6.remi.x86_64.rpm:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/redis/files/redis-2.8.9-1.el6.remi.x86_64.rpm
--------------------------------------------------------------------------------
/salt/redis/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - redis.monitor
3 |
4 | {% set dbdir = salt['pillar.get']('redis:dir', '/var/lib/redis/') %}
5 | redis:
6 | pkg:
7 | - installed
8 | file.managed:
9 | - name: /etc/redis.conf
10 | - source: salt://redis/files/etc/redis.conf
11 | - template: jinja
12 | - defaults:
13 | bind: 127.0.0.1
14 | port: 6379
15 | - require:
16 | - pkg: redis
17 | service.running:
18 | - enable: True
19 | - watch:
20 | - file: redis
21 | cmd.wait:
22 | - name: mkdir -p {{dbdir}} && chown -R redis.redis {{dbdir}}
23 | - watch:
24 | - pkg: redis
25 |
26 | /etc/sysctl.d/redis.conf:
27 | file.managed:
28 | - source: salt://redis/files/etc/sysctl.d/redis.conf
29 |
30 | redis-sysctl:
31 | cmd.wait:
32 | - name: /sbin/sysctl -q -p /etc/sysctl.d/redis.conf
33 | - watch:
34 | - file: /etc/sysctl.d/redis.conf
35 |
36 | /etc/rc.d/rc.local:
37 | file.managed:
38 | - name: /etc/rc.d/rc.local
39 | - text:
40 | - '/sbin/sysctl -q -p /etc/sysctl.d/redis.conf'
41 | - require:
42 | - file: /etc/sysctl.d/redis.conf
43 | - service: redis
44 |
45 | redis-role:
46 | file.append:
47 | - name: /etc/salt/roles
48 | - text:
49 | - 'redis'
50 | - require:
51 | - file: roles
52 | - service: redis
53 | - service: salt-minion
54 | - watch_in:
55 | - module: sync_grains
56 |
--------------------------------------------------------------------------------
/salt/redis/monitor.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - redis
4 |
5 | redis-monitor-config:
6 | file.managed:
7 | - name: /etc/zabbix/zabbix_agentd.conf.d/redis.conf
8 | - source: salt://redis/files/etc/zabbix/zabbix_agentd.conf.d/redis.conf
9 | - template: jinja
10 | - require:
11 | - service: redis
12 | - watch_in:
13 | - service: zabbix-agent
14 |
--------------------------------------------------------------------------------
/salt/roles/admin.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.master
3 | - mysql.server
4 | - zabbix.server
5 | - zabbix.web
6 | - zabbix.api
7 | - svn
8 | - limits
9 | - mirrors
10 |
--------------------------------------------------------------------------------
/salt/roles/cache.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - limits
3 | - varnish
4 |
--------------------------------------------------------------------------------
/salt/roles/common.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - base
3 | - ssh
4 | - users
5 | - iptables
6 | - salt.minion
7 | - zabbix.agent
8 |
--------------------------------------------------------------------------------
/salt/roles/db.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - limits
3 | - mysql
4 |
--------------------------------------------------------------------------------
/salt/roles/ha.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - limits
3 | - haproxy
4 | - keepalived
5 |
--------------------------------------------------------------------------------
/salt/roles/mc.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - limits
3 | - memcached
4 | - redis
5 |
--------------------------------------------------------------------------------
/salt/roles/search.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - limits
3 | - coreseek
4 |
--------------------------------------------------------------------------------
/salt/roles/storage.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - nfs
3 |
--------------------------------------------------------------------------------
/salt/roles/web.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - limits
3 | - rsync
4 | - nginx
5 | - nfs.client
6 |
--------------------------------------------------------------------------------
/salt/rpcbind/init.sls:
--------------------------------------------------------------------------------
1 | rpcbind:
2 | pkg.installed:
3 | - name: rpcbind
4 | service.running:
5 | - name: rpcbind
6 | - enable: True
7 | - require:
8 | - pkg: rpcbind
9 | - watch:
10 | - pkg: rpcbind
11 |
--------------------------------------------------------------------------------
/salt/rsync/files/etc/rsyncd.conf:
--------------------------------------------------------------------------------
1 | # File Managed by Salt
2 |
3 | uid = nobody
4 | gid = nobody
5 | use chroot = yes
6 | max connections = 150
7 | pid file = /var/run/rsyncd.pid
8 | log file = /var/log/rsyncd.log
9 | transfer logging = yes
10 | log format = %t %a %m %f %b
11 | syslog facility = local3
12 | timeout = 300
13 | incoming chmod = Du=rwx,Dog=rx,Fu=rw,Fgo=r
14 | hosts allow=172.16.100.0/24
15 |
16 | [www_mall_com]
17 | path=/data1/vhosts/www.mall.com/htdocs/
18 | read only=no
19 |
--------------------------------------------------------------------------------
/salt/rsync/files/etc/xinetd.d/rsync:
--------------------------------------------------------------------------------
1 | # default: off
2 | # description: The rsync server is a good addition to an ftp server, as it \
3 | # allows crc checksumming etc.
4 | service rsync
5 | {
6 | disable = no
7 | flags = IPv4
8 | socket_type = stream
9 | wait = no
10 | user = root
11 | server = /usr/bin/rsync
12 | server_args = --daemon
13 | log_on_failure += USERID
14 | }
15 |
--------------------------------------------------------------------------------
/salt/rsync/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 |
5 | rsync:
6 | pkg.installed:
7 | - name: rsync
8 | - pkgs:
9 | - xinetd
10 | - rsync
11 | service.running:
12 | - name: xinetd
13 | - enable: True
14 | - require:
15 | - pkg: rsync
16 | - watch:
17 | - pkg: rsync
18 | - file: /etc/xinetd.d/rsync
19 |
20 | /etc/xinetd.d/rsync:
21 | file.managed:
22 | - source: salt://rsync/files/etc/xinetd.d/rsync
23 | - template: jinja
24 | - user: root
25 | - group: root
26 | - mode: 644
27 |
28 | /etc/rsyncd.conf:
29 | file.managed:
30 | - source: salt://rsync/files/etc/rsyncd.conf
31 | - template: jinja
32 | - user: root
33 | - group: root
34 | - mode: 644
35 |
--------------------------------------------------------------------------------
/salt/salt/files/etc/salt/grains:
--------------------------------------------------------------------------------
1 | hostgroup:
2 | - {{ hostgroup }}
3 |
--------------------------------------------------------------------------------
/salt/salt/files/etc/salt/master:
--------------------------------------------------------------------------------
1 | ##### Primary configuration settings #####
2 | ##########################################
3 | # This configuration file is used to manage the behavior of the Salt Master
4 | # Values that are commented out but have no space after the comment are
5 | # defaults that need not be set in the config. If there is a space after the
6 | # comment then the value is presented as an example and is not the default.
7 |
8 | # Per default, the master will automatically include all config files
9 | # from master.d/*.conf (master.d is a directory in the same directory
10 | # as the main master config file)
11 | #default_include: master.d/*.conf
12 |
13 | # The address of the interface to bind to
14 | interface: 172.16.100.81
15 |
16 | # Whether the master should listen for IPv6 connections. If this is set to True,
17 | # the interface option must be adjusted too (for example: "interface: '::'")
18 | #ipv6: False
19 |
20 | # The tcp port used by the publisher
21 | #publish_port: 4505
22 |
23 | # The user under which the salt master will run. Salt will update all
24 | # permissions to allow the specified user to run the master. The exception is
25 | # the job cache, which must be deleted if this user is changed. If the
26 | # modified files cause conflicts set verify_env to False.
27 | #user: root
28 |
29 | # Max open files
30 | # Each minion connecting to the master uses AT LEAST one file descriptor, the
31 | # master subscription connection. If enough minions connect you might start
32 | # seeing on the console(and then salt-master crashes):
33 | # Too many open files (tcp_listener.cpp:335)
34 | # Aborted (core dumped)
35 | #
36 | # By default this value will be the one of `ulimit -Hn`, ie, the hard limit for
37 | # max open files.
38 | #
39 | # If you wish to set a different value than the default one, uncomment and
40 | # configure this setting. Remember that this value CANNOT be higher than the
41 | # hard limit. Raising the hard limit depends on your OS and/or distribution,
42 | # a good way to find the limit is to search the internet for(for example):
43 | # raise max open files hard limit debian
44 | #
45 | #max_open_files: 100000
46 |
47 | # The number of worker threads to start, these threads are used to manage
48 | # return calls made from minions to the master, if the master seems to be
49 | # running slowly, increase the number of threads
50 | #worker_threads: 5
51 |
52 | # The port used by the communication interface. The ret (return) port is the
53 | # interface used for the file server, authentication, job returns, etc.
54 | #ret_port: 4506
55 |
56 | # Specify the location of the daemon process ID file
57 | #pidfile: /var/run/salt-master.pid
58 |
59 | # The root directory prepended to these options: pki_dir, cachedir,
60 | # sock_dir, log_file, autosign_file, autoreject_file, extension_modules,
61 | # key_logfile, pidfile.
62 | #root_dir: /
63 |
64 | # Directory used to store public key data
65 | #pki_dir: /etc/salt/pki/master
66 |
67 | # Directory to store job and cache data
68 | #cachedir: /var/cache/salt/master
69 |
70 | # Verify and set permissions on configuration directories at startup
71 | #verify_env: True
72 |
73 | # Set the number of hours to keep old job information in the job cache
74 | #keep_jobs: 24
75 |
76 | # Set the default timeout for the salt command and api, the default is 5
77 | # seconds
78 | #timeout: 5
79 |
80 | # The loop_interval option controls the seconds for the master's maintenance
81 | # process check cycle. This process updates file server backends, cleans the
82 | # job cache and executes the scheduler.
83 | #loop_interval: 60
84 |
85 | # Set the default outputter used by the salt command. The default is "nested"
86 | #output: nested
87 |
88 | # By default output is colored, to disable colored output set the color value
89 | # to False
90 | #color: True
91 |
92 | # Set the directory used to hold unix sockets
93 | #sock_dir: /var/run/salt/master
94 |
95 | # The master can take a while to start up when lspci and/or dmidecode is used
96 | # to populate the grains for the master. Enable if you want to see GPU hardware
97 | # data for your master.
98 | #
99 | # enable_gpu_grains: False
100 |
101 | # The master maintains a job cache, while this is a great addition it can be
102 | # a burden on the master for larger deployments (over 5000 minions).
103 | # Disabling the job cache will make previously executed jobs unavailable to
104 | # the jobs system and is not generally recommended.
105 | #
106 | #job_cache: True
107 |
108 | # Cache minion grains and pillar data in the cachedir.
109 | #minion_data_cache: True
110 |
111 | # The master can include configuration from other files. To enable this,
112 | # pass a list of paths to this option. The paths can be either relative or
113 | # absolute; if relative, they are considered to be relative to the directory
114 | # the main master configuration file lives in (this file). Paths can make use
115 | # of shell-style globbing. If no files are matched by a path passed to this
116 | # option then the master will log a warning message.
117 | #
118 | #
119 | # Include a config file from some other path:
120 | #include: /etc/salt/extra_config
121 | #
122 | # Include config from several files and directories:
123 | #include:
124 | # - /etc/salt/extra_config
125 |
126 |
127 | ##### Security settings #####
128 | ##########################################
129 | # Enable "open mode", this mode still maintains encryption, but turns off
130 | # authentication, this is only intended for highly secure environments or for
131 | # the situation where your keys end up in a bad state. If you run in open mode
132 | # you do so at your own risk!
133 | #open_mode: False
134 |
135 | # Enable auto_accept, this setting will automatically accept all incoming
136 | # public keys from the minions. Note that this is insecure.
137 | #auto_accept: False
138 |
139 | # If the autosign_file is specified, incoming keys specified in the
140 | # autosign_file will be automatically accepted. This is insecure. Regular
141 | # expressions as well as globing lines are supported.
142 | #autosign_file: /etc/salt/autosign.conf
143 |
144 | # Works like autosign_file, but instead allows you to specify minion IDs for
145 | # which keys will automatically be rejected. Will override both membership in
146 | # the autosign_file and the auto_accept setting.
147 | #autoreject_file: /etc/salt/autoreject.conf
148 |
149 | # Enable permissive access to the salt keys. This allows you to run the
150 | # master or minion as root, but have a non-root group be given access to
151 | # your pki_dir. To make the access explicit, root must belong to the group
152 | # you've given access to. This is potentially quite insecure.
153 | # If an autosign_file is specified, enabling permissive_pki_access will allow group access
154 | # to that specific file.
155 | #permissive_pki_access: False
156 |
157 | # Allow users on the master access to execute specific commands on minions.
158 | # This setting should be treated with care since it opens up execution
159 | # capabilities to non root users. By default this capability is completely
160 | # disabled.
161 | #
162 | #client_acl:
163 | # larry:
164 | # - test.ping
165 | # - network.*
166 | #
167 |
168 | # Blacklist any of the following users or modules
169 | #
170 | # This example would blacklist all non sudo users, including root from
171 | # running any commands. It would also blacklist any use of the "cmd"
172 | # module.
173 | # This is completely disabled by default.
174 | #
175 | #client_acl_blacklist:
176 | # users:
177 | # - root
178 | # - '^(?!sudo_).*$' # all non sudo users
179 | # modules:
180 | # - cmd
181 |
182 | # The external auth system uses the Salt auth modules to authenticate and
183 | # validate users to access areas of the Salt system.
184 | #
185 | #external_auth:
186 | # pam:
187 | # fred:
188 | # - test.*
189 | #
190 | external_auth:
191 | pam:
192 | admin:
193 | - .*
194 | - '@runner'
195 | - '@wheel'
196 |
197 | #halite:
198 | # level: 'debug'
199 | # server: 'gevent'
200 | # host: '10.10.10.10'
201 | # port: '8000'
202 | # cors: False
203 | # tls: False
204 | # certpath: '/etc/salt/pki/master/master.pub'
205 | # keypath: '/etc/salt/pki/master/master.pem'
206 | # pempath: '/etc/salt/pki/master/salt.pem'
207 |
208 | # Time (in seconds) for a newly generated token to live. Default: 12 hours
209 | #token_expire: 43200
210 |
211 | # Allow minions to push files to the master. This is disabled by default, for
212 | # security purposes.
213 | #file_recv: False
214 |
215 | # Set a hard-limit on the size of the files that can be pushed to the master.
216 | # It will be interpreted as megabytes.
217 | # Default: 100
218 | #file_recv_max_size: 100
219 |
220 | # Signature verification on messages published from the master.
221 | # This causes the master to cryptographically sign all messages published to its event
222 | # bus, and minions then verify that signature before acting on the message.
223 | #
224 | # This is False by default.
225 | #
226 | # Note that to facilitate interoperability with masters and minions that are different
227 | # versions, if sign_pub_messages is True but a message is received by a minion with
228 | # no signature, it will still be accepted, and a warning message will be logged.
229 | # Conversely, if sign_pub_messages is False, but a minion receives a signed
230 | # message it will be accepted, the signature will not be checked, and a warning message
231 | # will be logged. This behavior will go away in Salt 0.17.6 (or Hydrogen RC1, whichever
232 | # comes first) and these two situations will cause minion to throw an exception and
233 | # drop the message.
234 | #
235 | # sign_pub_messages: False
236 |
237 | ##### Master Module Management #####
238 | ##########################################
239 | # Manage how master side modules are loaded
240 |
241 | # Add any additional locations to look for master runners
242 | #runner_dirs: []
243 | runner_dirs: [/srv/salt/_runners]
244 |
245 | # Enable Cython for master side modules
246 | #cython_enable: False
247 |
248 |
249 | ##### State System settings #####
250 | ##########################################
251 | # The state system uses a "top" file to tell the minions what environment to
252 | # use and what modules to use. The state_top file is defined relative to the
253 | # root of the base environment as defined in "File Server settings" below.
254 | #state_top: top.sls
255 |
256 | # The master_tops option replaces the external_nodes option by creating
257 | # a plugable system for the generation of external top data. The external_nodes
258 | # option is deprecated by the master_tops option.
259 | # To gain the capabilities of the classic external_nodes system, use the
260 | # following configuration:
261 | # master_tops:
262 | # ext_nodes:
263 | #
264 | #master_tops: {}
265 |
266 | # The external_nodes option allows Salt to gather data that would normally be
267 | # placed in a top file. The external_nodes option is the executable that will
268 | # return the ENC data. Remember that Salt will look for external nodes AND top
269 | # files and combine the results if both are enabled!
270 | #external_nodes: None
271 |
272 | # The renderer to use on the minions to render the state data
273 | #renderer: yaml_jinja
274 |
275 | # The Jinja renderer can strip extra carriage returns and whitespace
276 | # See http://jinja.pocoo.org/docs/api/#high-level-api
277 | #
278 | # If this is set to True the first newline after a Jinja block is removed
279 | # (block, not variable tag!). Defaults to False, corresponds to the Jinja
280 | # environment init variable "trim_blocks".
281 | # jinja_trim_blocks: False
282 | #
283 | # If this is set to True leading spaces and tabs are stripped from the start
284 | # of a line to a block. Defaults to False, corresponds to the Jinja
285 | # environment init variable "lstrip_blocks".
286 | # jinja_lstrip_blocks: False
287 |
288 | # The failhard option tells the minions to stop immediately after the first
289 | # failure detected in the state execution, defaults to False
290 | #failhard: False
291 |
292 | # The state_verbose and state_output settings can be used to change the way
293 | # state system data is printed to the display. By default all data is printed.
294 | # The state_verbose setting can be set to True or False, when set to False
295 | # all data that has a result of True and no changes will be suppressed.
296 | #state_verbose: True
297 |
298 | # The state_output setting changes if the output is the full multi line
299 | # output for each changed state if set to 'full', but if set to 'terse'
300 | # the output will be shortened to a single line. If set to 'mixed', the output
301 | # will be terse unless a state failed, in which case that output will be full.
302 | #state_output: full
303 |
304 |
305 | ##### File Server settings #####
306 | ##########################################
307 | # Salt runs a lightweight file server written in zeromq to deliver files to
308 | # minions. This file server is built into the master daemon and does not
309 | # require a dedicated port.
310 |
311 | # The file server works on environments passed to the master, each environment
312 | # can have multiple root directories, the subdirectories in the multiple file
313 | # roots cannot match, otherwise the downloaded files will not be able to be
314 | # reliably ensured. A base environment is required to house the top file.
315 | # Example:
316 | # file_roots:
317 | # base:
318 | # - /srv/salt/
319 | # dev:
320 | # - /srv/salt/dev/services
321 | # - /srv/salt/dev/states
322 | # prod:
323 | # - /srv/salt/prod/services
324 | # - /srv/salt/prod/states
325 |
326 | #file_roots:
327 | # base:
328 | # - /srv/salt
329 |
330 | # The hash_type is the hash to use when discovering the hash of a file on
331 | # the master server. The default is md5, but sha1, sha224, sha256, sha384
332 | # and sha512 are also supported.
333 | #hash_type: md5
334 |
335 | # The buffer size in the file server can be adjusted here:
336 | #file_buffer_size: 1048576
337 |
338 | # A regular expression (or a list of expressions) that will be matched
339 | # against the file path before syncing the modules and states to the minions.
340 | # This includes files affected by the file.recurse state.
341 | # For example, if you manage your custom modules and states in subversion
342 | # and don't want all the '.svn' folders and content synced to your minions,
343 | # you could set this to '/\.svn($|/)'. By default nothing is ignored.
344 | #
345 | #file_ignore_regex:
346 | # - '/\.svn($|/)'
347 | # - '/\.git($|/)'
348 |
349 | # A file glob (or list of file globs) that will be matched against the file
350 | # path before syncing the modules and states to the minions. This is similar
351 | # to file_ignore_regex above, but works on globs instead of regex. By default
352 | # nothing is ignored.
353 | #
354 | # file_ignore_glob:
355 | # - '*.pyc'
356 | # - '*/somefolder/*.bak'
357 | # - '*.swp'
358 |
359 | # File Server Backend
360 | # Salt supports a modular fileserver backend system, this system allows
361 | # the salt master to link directly to third party systems to gather and
362 | # manage the files available to minions. Multiple backends can be
363 | # configured and will be searched for the requested file in the order in which
364 | # they are defined here. The default setting only enables the standard backend
365 | # "roots" which uses the "file_roots" option.
366 | #
367 | #fileserver_backend:
368 | # - roots
369 | #
370 | # To use multiple backends list them in the order they are searched:
371 | #
372 | #fileserver_backend:
373 | # - git
374 | # - roots
375 | #
376 | # Uncomment the line below if you do not want the file_server to follow
377 | # symlinks when walking the filesystem tree. This is set to True
378 | # by default. Currently this only applies to the default roots
379 | # fileserver_backend.
380 | #
381 | #fileserver_followsymlinks: False
382 | #
383 | # Uncomment the line below if you do not want symlinks to be
384 | # treated as the files they are pointing to. By default this is set to
385 | # False. By uncommenting the line below, any detected symlink while listing
386 | # files on the Master will not be returned to the Minion.
387 | #
388 | #fileserver_ignoresymlinks: True
389 | #
390 | # By default, the Salt fileserver recurses fully into all defined environments
391 | # to attempt to find files. To limit this behavior so that the fileserver only
392 | # traverses directories with SLS files and special Salt directories like _modules,
393 | # enable the option below. This might be useful for installations where a file root
394 | # has a very large number of files and performance is impacted. Default is False.
395 | #
396 | # fileserver_limit_traversal: False
397 | #
398 | # The fileserver can fire events off every time the fileserver is updated,
399 | # these are disabled by default, but can be easily turned on by setting this
400 | # flag to True
401 | #fileserver_events: False
402 | #
403 | # Git fileserver backend configuration
404 | # When using the git fileserver backend at least one git remote needs to be
405 | # defined. The user running the salt master will need read access to the repo.
406 | #
407 | #gitfs_remotes:
408 | # - git://github.com/saltstack/salt-states.git
409 | # - file:///var/git/saltmaster
410 | #
411 | # The gitfs_ssl_verify option specifies whether to ignore ssl certificate
412 | # errors when contacting the gitfs backend. You might want to set this to
413 | # false if you're using a git backend that uses a self-signed certificate but
414 | # keep in mind that setting this flag to anything other than the default of True
415 | # is a security concern, you may want to try using the ssh transport.
416 | #gitfs_ssl_verify: True
417 | #
418 | # The repos will be searched in order to find the file requested by a client
419 | # and the first repo to have the file will return it.
420 | # When using the git backend branches and tags are translated into salt
421 | # environments.
422 | # Note: file:// repos will be treated as a remote, so refs you want used must
423 | # exist in that repo as *local* refs.
424 | #
425 | # The gitfs_root option gives the ability to serve files from a subdirectory
426 | # within the repository. The path is defined relative to the root of the
427 | # repository and defaults to the repository root.
428 | #gitfs_root: somefolder/otherfolder
429 |
430 |
431 | ##### Pillar settings #####
432 | ##########################################
433 | # Salt Pillars allow for the building of global data that can be made selectively
434 | # available to different minions based on minion grain filtering. The Salt
435 | # Pillar is laid out in the same fashion as the file server, with environments,
436 | # a top file and sls files. However, pillar data does not need to be in the
437 | # highstate format, and is generally just key/value pairs.
438 |
439 | #pillar_roots:
440 | # base:
441 | # - /srv/pillar
442 |
443 | #ext_pillar:
444 | # - hiera: /etc/hiera.yaml
445 | # - cmd_yaml: cat /etc/salt/yaml
446 |
447 | # The pillar_gitfs_ssl_verify option specifies whether to ignore ssl certificate
448 | # errors when contacting the pillar gitfs backend. You might want to set this to
449 | # false if you're using a git backend that uses a self-signed certificate but
450 | # keep in mind that setting this flag to anything other than the default of True
451 | # is a security concern, you may want to try using the ssh transport.
452 | #pillar_gitfs_ssl_verify: True
453 |
454 | # The pillar_opts option adds the master configuration file data to a dict in
455 | # the pillar called "master". This is used to set simple configurations in the
456 | # master config file that can then be used on minions.
457 | #pillar_opts: True
458 |
459 |
460 | ##### Syndic settings #####
461 | ##########################################
462 | # The Salt syndic is used to pass commands through a master from a higher
463 | # master. Using the syndic is simple, if this is a master that will have
464 | # syndic servers(s) below it set the "order_masters" setting to True, if this
465 | # is a master that will be running a syndic daemon for passthrough the
466 | # "syndic_master" setting needs to be set to the location of the master server
467 | # to receive commands from.
468 |
469 | # Set the order_masters setting to True if this master will command lower
470 | # masters' syndic interfaces.
471 | #order_masters: False
472 |
473 | # If this master will be running a salt syndic daemon, syndic_master tells
474 | # this master where to receive commands from.
475 | #syndic_master: masterofmaster
476 |
477 | # This is the 'ret_port' of the MasterOfMaster
478 | #syndic_master_port: 4506
479 |
480 | # PID file of the syndic daemon
481 | #syndic_pidfile: /var/run/salt-syndic.pid
482 |
483 | # LOG file of the syndic daemon
484 | #syndic_log_file: syndic.log
485 |
486 | ##### Peer Publish settings #####
487 | ##########################################
488 | # Salt minions can send commands to other minions, but only if the minion is
489 | # allowed to. By default "Peer Publication" is disabled, and when enabled it
490 | # is enabled for specific minions and specific commands. This allows secure
491 | # compartmentalization of commands based on individual minions.
492 |
493 | # The configuration uses regular expressions to match minions and then a list
494 | # of regular expressions to match functions. The following will allow the
495 | # minion authenticated as foo.example.com to execute functions from the test
496 | # and pkg modules.
497 | #
498 | #peer:
499 | # foo.example.com:
500 | # - test.*
501 | # - pkg.*
502 | #
503 | # This will allow all minions to execute all commands:
504 | #
505 | #peer:
506 | # .*:
507 | # - .*
508 | #
509 | # This is not recommended, since it would allow anyone who gets root on any
510 | # single minion to instantly have root on all of the minions!
511 |
512 | # Minions can also be allowed to execute runners from the salt master.
513 | # Since executing a runner from the minion could be considered a security risk,
514 | # it needs to be enabled. This setting functions just like the peer setting
515 | # except that it opens up runners instead of module functions.
516 | #
517 | # All peer runner support is turned off by default and must be enabled before
518 | # using. This will enable all peer runners for all minions:
519 | #
520 | #peer_run:
521 | # .*:
522 | # - .*
523 | #
524 | # To enable just the manage.up runner for the minion foo.example.com:
525 | #
526 | #peer_run:
527 | # foo.example.com:
528 | # - manage.up
529 |
530 | ##### Mine settings #####
531 | ##########################################
532 | # Restrict mine.get access from minions. By default any minion has a full access
533 | # to get all mine data from master cache. In acl definition below, only pcre matches
534 | # are allowed.
535 | #
536 | # mine_get:
537 | # .*:
538 | # - .*
539 | #
540 | # Example below enables minion foo.example.com to get 'network.interfaces' mine data only
541 | # , minions web* to get all network.* and disk.* mine data and all other minions won't get
542 | # any mine data.
543 | #
544 | # mine_get:
545 | # foo.example.com:
546 | # - network.interfaces
547 | # web.*:
548 | # - network.*
549 | # - disk.*
550 |
551 | ##### Logging settings #####
552 | ##########################################
553 | # The location of the master log file
554 | # The master log can be sent to a regular file, local path name, or network
555 | # location. Remote logging works best when configured to use rsyslogd(8) (e.g.:
556 | # ``file:///dev/log``), with rsyslogd(8) configured for network logging. The URI
557 | # format is: <file|udp|tcp>://<host|socketpath>:<port-if-required>/<log-facility>
558 | #log_file: /var/log/salt/master
559 | #log_file: file:///dev/log
560 | #log_file: udp://loghost:10514
561 |
562 | #log_file: /var/log/salt/master
563 | #key_logfile: /var/log/salt/key
564 |
565 | # The level of messages to send to the console.
566 | # One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
567 | #log_level: warning
568 |
569 | # The level of messages to send to the log file.
570 | # One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
571 | #log_level_logfile: warning
572 |
573 | # The date and time format used in log messages. Allowed date/time formatting
574 | # can be seen here: http://docs.python.org/library/time.html#time.strftime
575 | #log_datefmt: '%H:%M:%S'
576 | #log_datefmt_logfile: '%Y-%m-%d %H:%M:%S'
577 |
578 | # The format of the console logging messages. Allowed formatting options can
579 | # be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
580 | #log_fmt_console: '[%(levelname)-8s] %(message)s'
581 | #log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
582 |
583 | # This can be used to control logging levels more specifically. This
584 | # example sets the main salt library at the 'warning' level, but sets
585 | # 'salt.modules' to log at the 'debug' level:
586 | # log_granular_levels:
587 | # 'salt': 'warning',
588 | # 'salt.modules': 'debug'
589 | #
590 | #log_granular_levels: {}
591 |
592 |
593 | ##### Node Groups #####
594 | ##########################################
595 | # Node groups allow for logical groupings of minion nodes.
596 | # A group consists of a group name and a compound target.
597 | #
598 | #nodegroups:
599 | # group1: 'L@foo.domain.com,bar.domain.com,baz.domain.com and bl*.domain.com'
600 | # group2: 'G@os:Debian and foo.domain.com'
601 |
602 |
603 | ##### Range Cluster settings #####
604 | ##########################################
605 | # The range server (and optional port) that serves your cluster information
606 | # https://github.com/grierj/range/wiki/Introduction-to-Range-with-YAML-files
607 | #
608 | #range_server: range:80
609 |
610 |
611 | ##### Windows Software Repo settings #####
612 | ##############################################
613 | # Location of the repo on the master
614 | #win_repo: '/srv/salt/win/repo'
615 |
616 | # Location of the master's repo cache file
617 | #win_repo_mastercachefile: '/srv/salt/win/repo/winrepo.p'
618 |
619 | # List of git repositories to include with the local repo
620 | #win_gitrepos:
621 | # - 'https://github.com/saltstack/salt-winrepo.git'
622 |
--------------------------------------------------------------------------------
/salt/salt/files/etc/salt/master.d/nodegroups.conf:
--------------------------------------------------------------------------------
1 | nodegroups:
2 | # define ha node group
3 | ha: 'E@ha*'
4 |
5 | # define web node group
6 | web: 'web*'
7 |
8 | # define mc node group
9 | mc: 'E@mc*'
10 |
11 | # define db node group
12 | db: 'E@db*'
13 | db.master: 'db1.grid.mall.com'
14 | db.slave: 'db2.grid.mall.com'
15 |
16 | # define storage node group
17 | storage: 'E@storage*'
18 | nfs: 'N@storage'
19 |
--------------------------------------------------------------------------------
/salt/salt/files/etc/salt/master.d/publish.conf:
--------------------------------------------------------------------------------
1 | svn:
2 | username: 'publish'
3 | password: '1qaz@WSX@mall'
4 |
5 | publish:
6 | master: 'admin.grid.mall.com'
7 | cwd: '/data1/vhosts'
8 |
9 | projects:
10 | www.mall.com:
11 | remote: 'svn://172.16.100.81/www.mall.com'
12 | target:
13 | - '172.16.100.21::www_mall_com'
14 | - '172.16.100.22::www_mall_com'
15 | - '172.16.100.23::www_mall_com'
16 |
17 |
--------------------------------------------------------------------------------
/salt/salt/files/etc/salt/minion:
--------------------------------------------------------------------------------
1 | ##### Primary configuration settings #####
2 | ##########################################
3 |
4 | # Per default the minion will automatically include all config files
5 | # from minion.d/*.conf (minion.d is a directory in the same directory
6 | # as the main minion config file).
7 | #default_include: minion.d/*.conf
8 |
9 | # Set the location of the salt master server, if the master server cannot be
10 | # resolved, then the minion will fail to start.
11 | #master: salt
12 | master: {{master}}
13 |
14 | # Set the number of seconds to wait before attempting to resolve
15 | # the master hostname if name resolution fails. Defaults to 30 seconds.
16 | # Set to zero if the minion should shutdown and not retry.
17 | # retry_dns: 30
18 |
19 | # Set the port used by the master reply and authentication server
20 | #master_port: 4506
21 |
22 | # The user to run salt
23 | #user: root
24 |
25 | # Specify the location of the daemon process ID file
26 | #pidfile: /var/run/salt-minion.pid
27 |
28 | # The root directory prepended to these options: pki_dir, cachedir, log_file,
29 | # sock_dir, pidfile.
30 | #root_dir: /
31 |
32 | # The directory to store the pki information in
33 | #pki_dir: /etc/salt/pki/minion
34 |
35 | # Explicitly declare the id for this minion to use, if left commented the id
36 | # will be the hostname as returned by the python call: socket.getfqdn()
37 | # Since salt uses detached ids it is possible to run multiple minions on the
38 | # same machine but with different ids, this can be useful for salt compute
39 | # clusters.
40 | #id:
41 |
42 | # Append a domain to a hostname in the event that it does not exist. This is
43 | # useful for systems where socket.getfqdn() does not actually result in a
44 | # FQDN (for instance, Solaris).
45 | #append_domain:
46 |
47 | # Custom static grains for this minion can be specified here and used in SLS
48 | # files just like all other grains. This example sets 4 custom grains, with
49 | # the 'roles' grain having two values that can be matched against:
50 | #grains:
51 | # roles:
52 | # - webserver
53 | # - memcache
54 | # deployment: datacenter4
55 | # cabinet: 13
56 | # cab_u: 14-15
57 |
58 | # Where cache data goes
59 | #cachedir: /var/cache/salt/minion
60 |
61 | # Verify and set permissions on configuration directories at startup
62 | #verify_env: True
63 |
64 | # The minion can locally cache the return data from jobs sent to it, this
65 | # can be a good way to keep track of jobs the minion has executed
66 | # (on the minion side). By default this feature is disabled, to enable
67 | # set cache_jobs to True
68 | #cache_jobs: False
69 |
70 | # set the directory used to hold unix sockets
71 | #sock_dir: /var/run/salt/minion
72 |
73 | # Set the default outputter used by the salt-call command. The default is
74 | # "nested"
75 | #output: nested
76 | #
77 | # By default output is colored, to disable colored output set the color value
78 | # to False
79 | #color: True
80 |
81 | # Backup files that are replaced by file.managed and file.recurse under
82 | # 'cachedir'/file_backups relative to their original location and appended
83 | # with a timestamp. The only valid setting is "minion". Disabled by default.
84 | #
85 | # Alternatively this can be specified for each file in state files:
86 | #
87 | # /etc/ssh/sshd_config:
88 | # file.managed:
89 | # - source: salt://ssh/sshd_config
90 | # - backup: minion
91 | #
92 | #backup_mode: minion
93 |
94 | # When waiting for a master to accept the minion's public key, salt will
95 | # continuously attempt to reconnect until successful. This is the time, in
96 | # seconds, between those reconnection attempts.
97 | #acceptance_wait_time: 10
98 |
99 | # The loop_interval sets how long in seconds the minion will wait between
100 | # evaluating the scheduler and running cleanup tasks. This defaults to a
101 | # sane 60 seconds, but if the minion scheduler needs to be evaluated more
102 | # often lower this value
103 | #loop_interval: 60
104 |
105 | # When healing, a dns_check is run. This is to make sure that the originally
106 | # resolved dns has not changed. If this is something that does not happen in
107 | # your environment, set this value to False.
108 | #dns_check: True
109 |
110 | # Windows platforms lack posix IPC and must rely on slower TCP based inter-
111 | # process communications. Set ipc_mode to 'tcp' on such systems
112 | #ipc_mode: ipc
113 | #
114 | # Overwrite the default tcp ports used by the minion when in tcp mode
115 | #tcp_pub_port: 4510
116 | #tcp_pull_port: 4511
117 |
118 | # The minion can include configuration from other files. To enable this,
119 | # pass a list of paths to this option. The paths can be either relative or
120 | # absolute; if relative, they are considered to be relative to the directory
121 | # the main minion configuration file lives in (this file). Paths can make use
122 | # of shell-style globbing. If no files are matched by a path passed to this
123 | # option then the minion will log a warning message.
124 | #
125 | #
126 | # Include a config file from some other path:
127 | # include: /etc/salt/extra_config
128 | #
129 | # Include config from several files and directories:
130 | # include:
131 | # - /etc/salt/extra_config
132 | # - /etc/roles/webserver
133 |
134 | ##### Minion module management #####
135 | ##########################################
136 | # Disable specific modules. This allows the admin to limit the level of
137 | # access the master has to the minion
138 | #disable_modules: [cmd,test]
139 | #disable_returners: []
140 | #
141 | # Modules can be loaded from arbitrary paths. This enables the easy deployment
142 | # of third party modules. Modules for returners and minions can be loaded.
143 | # Specify a list of extra directories to search for minion modules and
144 | # returners. These paths must be fully qualified!
145 | #module_dirs: []
146 | #returner_dirs: []
147 | #states_dirs: []
148 | #render_dirs: []
149 | #
150 | # A module provider can be statically overwritten or extended for the minion
151 | # via the providers option, in this case the default module will be
152 | # overwritten by the specified module. In this example the pkg module will
153 | # be provided by the yumpkg5 module instead of the system default.
154 | #
155 | # providers:
156 | # pkg: yumpkg5
157 | #
158 | # Enable Cython modules searching and loading. (Default: False)
159 | #cython_enable: False
160 | #
161 |
162 | ##### State Management Settings #####
163 | ###########################################
164 | # The state management system executes all of the state templates on the minion
165 | # to enable more granular control of system state management. The type of
166 | # template and serialization used for state management needs to be configured
167 | # on the minion, the default renderer is yaml_jinja. This is a yaml file
168 | # rendered from a jinja template, the available options are:
169 | # yaml_jinja
170 | # yaml_mako
171 | # yaml_wempy
172 | # json_jinja
173 | # json_mako
174 | # json_wempy
175 | #
176 | #renderer: yaml_jinja
177 | #
178 | # The failhard option tells the minions to stop immediately after the first
179 | # failure detected in the state execution, defaults to False
180 | #failhard: False
181 | #
182 | # autoload_dynamic_modules Turns on automatic loading of modules found in the
183 | # environments on the master. This is turned on by default; to turn off
184 | # autoloading modules when states run set this value to False
185 | #autoload_dynamic_modules: True
186 | #
187 | # clean_dynamic_modules keeps the dynamic modules on the minion in sync with
188 | # the dynamic modules on the master, this means that if a dynamic module is
189 | # not on the master it will be deleted from the minion. By default this is
190 | # enabled and can be disabled by changing this value to False
191 | #clean_dynamic_modules: True
192 | #
193 | # Normally the minion is not isolated to any single environment on the master
194 | # when running states, but the environment can be isolated on the minion side
195 | # by statically setting it. Remember that the recommended way to manage
196 | # environments is to isolate via the top file.
197 | #environment: None
198 | #
199 | # If using the local file directory, then the state top file name needs to be
200 | # defined, by default this is top.sls.
201 | #state_top: top.sls
202 | #
203 | # Run states when the minion daemon starts. To enable, set startup_states to:
204 | # 'highstate' -- Execute state.highstate
205 | # 'sls' -- Read in the sls_list option and execute the named sls files
206 | # 'top' -- Read top_file option and execute based on that file on the Master
207 | #startup_states: ''
208 | #
209 | # list of states to run when the minion starts up if startup_states is 'sls'
210 | #sls_list:
211 | # - edit.vim
212 | # - hyper
213 | #
214 | # top file to execute if startup_states is 'top'
215 | #top_file: ''
216 |
217 | ##### File Directory Settings #####
218 | ##########################################
219 | # The Salt Minion can redirect all file server operations to a local directory,
220 | # this allows for the same state tree that is on the master to be used if
221 | # copied completely onto the minion. This is a literal copy of the settings on
222 | # the master but used to reference a local directory on the minion.
223 |
224 | # Set the file client, the client defaults to looking on the master server for
225 | # files, but can be directed to look at the local file directory setting
226 | # defined below by setting it to local.
227 | #file_client: remote
228 |
229 | # The file directory works on environments passed to the minion, each environment
230 | # can have multiple root directories, the subdirectories in the multiple file
231 | # roots cannot match, otherwise the downloaded files will not be able to be
232 | # reliably ensured. A base environment is required to house the top file.
233 | # Example:
234 | # file_roots:
235 | # base:
236 | # - /srv/salt/
237 | # dev:
238 | # - /srv/salt/dev/services
239 | # - /srv/salt/dev/states
240 | # prod:
241 | # - /srv/salt/prod/services
242 | # - /srv/salt/prod/states
243 | #
244 | # Default:
245 | #file_roots:
246 | # base:
247 | # - /srv/salt
248 |
249 | # The hash_type is the hash to use when discovering the hash of a file in
250 | # the minion directory, the default is md5, but sha1, sha224, sha256, sha384
251 | # and sha512 are also supported.
252 | #hash_type: md5
253 |
254 | # The Salt pillar is searched for locally if file_client is set to local. If
255 | # this is the case, and pillar data is defined, then the pillar_roots need to
256 | # also be configured on the minion:
257 | #pillar_roots:
258 | # base:
259 | # - /srv/pillar
260 |
261 | ###### Security settings #####
262 | ###########################################
263 | # Enable "open mode", this mode still maintains encryption, but turns off
264 | # authentication, this is only intended for highly secure environments or for
265 | # the situation where your keys end up in a bad state. If you run in open mode
266 | # you do so at your own risk!
267 | #open_mode: False
268 |
269 | # Enable permissive access to the salt keys. This allows you to run the
270 | # master or minion as root, but have a non-root group be given access to
271 | # your pki_dir. To make the access explicit, root must belong to the group
272 | # you've given access to. This is potentially quite insecure.
273 | #permissive_pki_access: False
274 |
275 | # The state_verbose and state_output settings can be used to change the way
276 | # state system data is printed to the display. By default all data is printed.
277 | # The state_verbose setting can be set to True or False, when set to False
278 | # all data that has a result of True and no changes will be suppressed.
279 | #state_verbose: True
280 | #
281 | # The state_output setting changes if the output is the full multi line
282 | # output for each changed state if set to 'full', but if set to 'terse'
283 | # the output will be shortened to a single line.
284 | #state_output: full
285 | #
286 | # Fingerprint of the master public key to double verify the master is valid,
287 | # the master fingerprint can be found by running "salt-key -F master" on the
288 | # salt master.
289 | #master_finger: ''
290 |
291 | ###### Thread settings #####
292 | ###########################################
293 | # Disable multiprocessing support, by default when a minion receives a
294 | # publication a new process is spawned and the command is executed therein.
295 | #multiprocessing: True
296 |
297 | ##### Logging settings #####
298 | ##########################################
299 | # The location of the minion log file
300 | # The minion log can be sent to a regular file, local path name, or network
301 | # location. Remote logging works best when configured to use rsyslogd(8) (e.g.:
302 | # ``file:///dev/log``), with rsyslogd(8) configured for network logging. The URI
303 | # format is: <file|udp|tcp>://<filepath|host>:<port>
304 | #log_file: /var/log/salt/minion
305 | #log_file: file:///dev/log
306 | #log_file: udp://loghost:10514
307 | #
308 | #log_file: /var/log/salt/minion
309 | #key_logfile: /var/log/salt/key
310 | #
311 | # The level of messages to send to the console.
312 | # One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
313 | # Default: 'warning'
314 | #log_level: warning
315 | #
316 | # The level of messages to send to the log file.
317 | # One of 'garbage', 'trace', 'debug', 'info', 'warning', 'error', 'critical'.
318 | # Default: 'warning'
319 | #log_level_logfile:
320 |
321 | # The date and time format used in log messages. Allowed date/time formatting
322 | # can be seen here: http://docs.python.org/library/time.html#time.strftime
323 | #log_datefmt: '%H:%M:%S'
324 | #log_datefmt_logfile: '%Y-%m-%d %H:%M:%S'
325 | #
326 | # The format of the console logging messages. Allowed formatting options can
327 | # be seen here: http://docs.python.org/library/logging.html#logrecord-attributes
328 | #log_fmt_console: '[%(levelname)-8s] %(message)s'
329 | #log_fmt_logfile: '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s] %(message)s'
330 | #
331 | # This can be used to control logging levels more specifically. This
332 | # example sets the main salt library at the 'warning' level, but sets
333 | # 'salt.modules' to log at the 'debug' level:
334 | # log_granular_levels:
335 | # 'salt': 'warning',
336 | # 'salt.modules': 'debug'
337 | #
338 | #log_granular_levels: {}
339 |
340 | ###### Module configuration #####
341 | ###########################################
342 | # Salt allows for modules to be passed arbitrary configuration data, any data
343 | # passed here in valid yaml format will be passed on to the salt minion modules
344 | # for use. It is STRONGLY recommended that a naming convention be used in which
345 | # the module name is followed by a . and then the value. Also, all top level
346 | # data must be applied via the yaml dict construct, some examples:
347 | #
348 | # You can specify that all modules should run in test mode:
349 | #test: True
350 | #
351 | # A simple value for the test module:
352 | #test.foo: foo
353 | #
354 | # A list for the test module:
355 | #test.bar: [baz,quo]
356 | #
357 | # A dict for the test module:
358 | #test.baz: {spam: sausage, cheese: bread}
359 |
360 |
361 | ###### Update settings ######
362 | ###########################################
363 | # Using the features in Esky, a salt minion can both run as a frozen app and
364 | # be updated on the fly. These options control how the update process
365 | # (saltutil.update()) behaves.
366 | #
367 | # The url for finding and downloading updates. Disabled by default.
368 | #update_url: False
369 | #
370 | # The list of services to restart after a successful update. Empty by default.
371 | #update_restart_services: []
372 |
373 |
374 | ###### Keepalive settings ######
375 | ############################################
376 | # ZeroMQ now includes support for configuring SO_KEEPALIVE if supported by
377 | # the OS. If connections between the minion and the master pass through
378 | # a state tracking device such as a firewall or VPN gateway, there is
379 | # the risk that it could tear down the connection between the master and minion
380 | # without informing either party that their connection has been taken away.
381 | # Enabling TCP Keepalives prevents this from happening.
382 | #
383 | # Overall state of TCP Keepalives, enable (1 or True), disable (0 or False)
384 | # or leave to the OS defaults (-1), on Linux, typically disabled. Default True, enabled.
385 | #tcp_keepalive: True
386 | #
387 | # How long before the first keepalive should be sent in seconds. Default 300
388 | # to send the first keepalive after 5 minutes, OS default (-1) is typically 7200 seconds
389 | # on Linux see /proc/sys/net/ipv4/tcp_keepalive_time.
390 | #tcp_keepalive_idle: 300
391 | #
392 | # How many lost probes are needed to consider the connection lost. Default -1
393 | # to use OS defaults, typically 9 on Linux, see /proc/sys/net/ipv4/tcp_keepalive_probes.
394 | #tcp_keepalive_cnt: -1
395 | #
396 | # How often, in seconds, to send keepalives after the first one. Default -1 to
397 | # use OS defaults, typically 75 seconds on Linux, see
398 | # /proc/sys/net/ipv4/tcp_keepalive_intvl.
399 | #tcp_keepalive_intvl: -1
400 |
401 |
402 | ###### Windows Software settings ######
403 | ############################################
404 | # Location of the repository cache file on the master
405 | # win_repo_cachefile: 'salt://win/repo/winrepo.p'
406 |
--------------------------------------------------------------------------------
/salt/salt/master.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.minion
3 |
4 | salt-master:
5 | pkg.installed:
6 | - name: salt-master
7 | file.managed:
8 | - name: /etc/salt/master
9 | - require:
10 | - pkg: salt-master
11 | service.running:
12 | - enable: True
13 | - watch:
14 | - pkg: salt-master
15 | - file: salt-master
16 | - file: /etc/salt/master.d/
17 |
18 | /etc/salt/master.d/:
19 | file.recurse:
20 | - source: salt://salt/files/etc/salt/master.d/
21 | - user: root
22 | - group: root
23 | - dir_mode: 755
24 | - file_mode: 644
25 |
26 | salt-master-role:
27 | file.append:
28 | - name: /etc/salt/roles
29 | - text:
30 | - 'salt-master'
31 | - require:
32 | - file: roles
33 | - service: salt-master
34 | - service: salt-minion
35 | - watch_in:
36 | - module: sync_grains
37 |
--------------------------------------------------------------------------------
/salt/salt/minion.sls:
--------------------------------------------------------------------------------
1 | salt-minion:
2 | pkg.installed:
3 | - name: salt-minion
4 | file.managed:
5 | - name: /etc/salt/minion
6 | - source: salt://salt/files/etc/salt/minion
7 | - template: jinja
8 | - defaults:
9 | master: 172.16.100.81
10 | - require:
11 | - pkg: salt-minion
12 | service.running:
13 | - enable: True
14 | - watch:
15 | - pkg: salt-minion
16 | - file: salt-minion
17 |
18 | roles:
19 | file.managed:
20 | - name: /etc/salt/roles
21 |
22 | sync_grains:
23 | module.wait:
24 | - name: saltutil.sync_grains
25 |
26 | mine_update:
27 | module.run:
28 | - name: mine.update
29 | - require:
30 | - module: sync_grains
31 |
32 | salt-minion-grains:
33 | file.managed:
34 | - name: /etc/salt/grains
35 | - order: 1
36 | - source: salt://salt/files/etc/salt/grains
37 | - template: jinja
38 | - defaults:
39 | hostgroup: {{salt['pillar.get']('hostgroup', 'Salt-Discovery')}}
40 | - require:
41 | - pkg: salt-minion
42 | - watch_in:
43 | - module: sync_grains
44 |
45 | salt-minion-role:
46 | file.append:
47 | - name: /etc/salt/roles
48 | - text:
49 | - 'salt-minion'
50 | - require:
51 | - file: roles
52 | - service: salt-minion
53 | - watch_in:
54 | - module: sync_grains
55 |
--------------------------------------------------------------------------------
/salt/sphinx/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 |
5 | sphinx:
6 | pkg.installed:
7 | - name: sphinx
8 | service.running:
9 | - name: searchd
10 | - enable: True
11 | - require:
12 | - pkg: sphinx
13 | - watch:
14 | - pkg: sphinx
15 | - file: /etc/sphinx/sphinx.conf
16 |
17 | /etc/sphinx/sphinx.conf:
18 | file.managed:
19 | - source: salt://sphinx/files/etc/sphinx/sphinx.conf
20 | - template: jinja
21 | - user: root
22 | - group: root
23 | - mode: 644
24 |
25 | sphinx-role:
26 | file.append:
27 | - name: /etc/salt/roles
28 | - text:
29 | - 'sphinx'
30 | - require:
31 | - file: roles
32 | - service: sphinx
33 | - service: salt-minion
34 | - watch_in:
35 | - module: sync_grains
36 |
--------------------------------------------------------------------------------
/salt/ssh/files/etc/ssh/sshd_config:
--------------------------------------------------------------------------------
1 | # $OpenBSD: sshd_config,v 1.80 2008/07/02 02:24:18 djm Exp $
2 |
3 | # This is the sshd server system-wide configuration file. See
4 | # sshd_config(5) for more information.
5 |
6 | # This sshd was compiled with PATH=/usr/local/bin:/bin:/usr/bin
7 |
8 | # The strategy used for options in the default sshd_config shipped with
9 | # OpenSSH is to specify options with their default value where
10 | # possible, but leave them commented. Uncommented options change a
11 | # default value.
12 |
13 | #Port 22
14 | #AddressFamily any
15 | #ListenAddress 0.0.0.0
16 | #ListenAddress ::
17 |
18 | # Disable legacy (protocol version 1) support in the server for new
19 | # installations. In future the default will change to require explicit
20 | # activation of protocol 1
21 | Protocol 2
22 |
23 | # HostKey for protocol version 1
24 | #HostKey /etc/ssh/ssh_host_key
25 | # HostKeys for protocol version 2
26 | #HostKey /etc/ssh/ssh_host_rsa_key
27 | #HostKey /etc/ssh/ssh_host_dsa_key
28 |
29 | # Lifetime and size of ephemeral version 1 server key
30 | #KeyRegenerationInterval 1h
31 | #ServerKeyBits 1024
32 |
33 | # Logging
34 | # obsoletes QuietMode and FascistLogging
35 | #SyslogFacility AUTH
36 | SyslogFacility AUTHPRIV
37 | #LogLevel INFO
38 |
39 | # Authentication:
40 |
41 | #LoginGraceTime 2m
42 | #PermitRootLogin yes
43 | #StrictModes yes
44 | #MaxAuthTries 6
45 | #MaxSessions 10
46 |
47 | #RSAAuthentication yes
48 | #PubkeyAuthentication yes
49 | #AuthorizedKeysFile .ssh/authorized_keys
50 | #AuthorizedKeysCommand none
51 | #AuthorizedKeysCommandRunAs nobody
52 |
53 | # For this to work you will also need host keys in /etc/ssh/ssh_known_hosts
54 | #RhostsRSAAuthentication no
55 | # similar for protocol version 2
56 | #HostbasedAuthentication no
57 | # Change to yes if you don't trust ~/.ssh/known_hosts for
58 | # RhostsRSAAuthentication and HostbasedAuthentication
59 | #IgnoreUserKnownHosts no
60 | # Don't read the user's ~/.rhosts and ~/.shosts files
61 | #IgnoreRhosts yes
62 |
63 | # To disable tunneled clear text passwords, change to no here!
64 | #PasswordAuthentication yes
65 | #PermitEmptyPasswords no
66 | PasswordAuthentication yes
67 |
68 | # Change to no to disable s/key passwords
69 | #ChallengeResponseAuthentication yes
70 | ChallengeResponseAuthentication no
71 |
72 | # Kerberos options
73 | #KerberosAuthentication no
74 | #KerberosOrLocalPasswd yes
75 | #KerberosTicketCleanup yes
76 | #KerberosGetAFSToken no
77 | #KerberosUseKuserok yes
78 |
79 | # GSSAPI options
80 | #GSSAPIAuthentication no
81 | GSSAPIAuthentication yes
82 | #GSSAPICleanupCredentials yes
83 | GSSAPICleanupCredentials yes
84 | #GSSAPIStrictAcceptorCheck yes
85 | #GSSAPIKeyExchange no
86 |
87 | # Set this to 'yes' to enable PAM authentication, account processing,
88 | # and session processing. If this is enabled, PAM authentication will
89 | # be allowed through the ChallengeResponseAuthentication and
90 | # PasswordAuthentication. Depending on your PAM configuration,
91 | # PAM authentication via ChallengeResponseAuthentication may bypass
92 | # the setting of "PermitRootLogin without-password".
93 | # If you just want the PAM account and session checks to run without
94 | # PAM authentication, then enable this but set PasswordAuthentication
95 | # and ChallengeResponseAuthentication to 'no'.
96 | #UsePAM no
97 | UsePAM yes
98 |
99 | # Accept locale-related environment variables
100 | AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES
101 | AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT
102 | AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE
103 | AcceptEnv XMODIFIERS
104 |
105 | #AllowAgentForwarding yes
106 | #AllowTcpForwarding yes
107 | #GatewayPorts no
108 | #X11Forwarding no
109 | X11Forwarding yes
110 | #X11DisplayOffset 10
111 | #X11UseLocalhost yes
112 | #PrintMotd yes
113 | #PrintLastLog yes
114 | #TCPKeepAlive yes
115 | #UseLogin no
116 | #UsePrivilegeSeparation yes
117 | #PermitUserEnvironment no
118 | #Compression delayed
119 | #ClientAliveInterval 0
120 | #ClientAliveCountMax 3
121 | #ShowPatchLevel no
122 | #UseDNS yes
123 | #PidFile /var/run/sshd.pid
124 | #MaxStartups 10:30:100
125 | #PermitTunnel no
126 | #ChrootDirectory none
127 |
128 | # no default banner path
129 | #Banner none
130 |
131 | # override default of no subsystems
132 | Subsystem sftp /usr/libexec/openssh/sftp-server
133 |
134 | # Example of overriding settings on a per-user basis
135 | #Match User anoncvs
136 | # X11Forwarding no
137 | # AllowTcpForwarding no
138 | # ForceCommand cvs server
139 |
--------------------------------------------------------------------------------
/salt/ssh/init.sls:
--------------------------------------------------------------------------------
1 | openssh:
2 | pkg:
3 | - installed
4 |
5 | sshd:
6 | service:
7 | - running
8 | - watch:
9 | - file: /etc/ssh/sshd_config
10 | require:
11 | - pkg: openssh
12 |
13 | /etc/ssh/sshd_config:
14 | file.managed:
15 | - source: salt://ssh/files/etc/ssh/sshd_config
16 | - template: jinja
17 |
--------------------------------------------------------------------------------
/salt/svn/files/conf/authz:
--------------------------------------------------------------------------------
1 | ### This file is an example authorization file for svnserve.
2 | ### Its format is identical to that of mod_authz_svn authorization
3 | ### files.
4 | ### As shown below each section defines authorizations for the path and
5 | ### (optional) repository specified by the section name.
6 | ### The authorizations follow. An authorization line can refer to:
7 | ### - a single user,
8 | ### - a group of users defined in a special [groups] section,
9 | ### - an alias defined in a special [aliases] section,
10 | ### - all authenticated users, using the '$authenticated' token,
11 | ### - only anonymous users, using the '$anonymous' token,
12 | ### - anyone, using the '*' wildcard.
13 | ###
14 | ### A match can be inverted by prefixing the rule with '~'. Rules can
15 | ### grant read ('r') access, read-write ('rw') access, or no access
16 | ### ('').
17 |
18 | [aliases]
19 | # joe = /C=XZ/ST=Dessert/L=Snake City/O=Snake Oil, Ltd./OU=Research Institute/CN=Joe Average
20 |
21 | [groups]
22 | # harry_and_sally = harry,sally
23 | # harry_sally_and_joe = harry,sally,&joe
24 |
25 | # [/foo/bar]
26 | # harry = rw
27 | # &joe = r
28 | # * =
29 |
30 | # [repository:/baz/fuz]
31 | # @harry_and_sally = rw
32 | # * = r
33 |
34 | [/]
35 | publish = r
36 | dongliang = rw
37 | jiazhu = rw
38 |
--------------------------------------------------------------------------------
/salt/svn/files/conf/passwd:
--------------------------------------------------------------------------------
1 | ### This file is an example password file for svnserve.
2 | ### Its format is similar to that of svnserve.conf. As shown in the
3 | ### example below it contains one section labelled [users].
4 | ### The name and password for each user follow, one account per line.
5 |
6 | [users]
7 | publish = 1qaz@WSX@mall
8 | dongliang = 1qaz@WSX$
9 |
--------------------------------------------------------------------------------
/salt/svn/files/conf/svnserve.conf:
--------------------------------------------------------------------------------
1 | ### This file controls the configuration of the svnserve daemon, if you
2 | ### use it to allow access to this repository. (If you only allow
3 | ### access through http: and/or file: URLs, then this file is
4 | ### irrelevant.)
5 |
6 | ### Visit http://subversion.tigris.org/ for more information.
7 |
8 | [general]
9 | anon-access = none
10 | auth-access = write
11 | password-db = passwd
12 | authz-db = authz
13 |
14 | ### These options control access to the repository for unauthenticated
15 | ### and authenticated users. Valid values are "write", "read",
16 | ### and "none". The sample settings below are the defaults.
17 | # anon-access = read
18 | # auth-access = write
19 | ### The password-db option controls the location of the password
20 | ### database file. Unless you specify a path starting with a /,
21 | ### the file's location is relative to the directory containing
22 | ### this configuration file.
23 | ### If SASL is enabled (see below), this file will NOT be used.
24 | ### Uncomment the line below to use the default password file.
25 | # password-db = passwd
26 | ### The authz-db option controls the location of the authorization
27 | ### rules for path-based access control. Unless you specify a path
28 | ### starting with a /, the file's location is relative to the
29 | ### directory containing this file. If you don't specify an
30 | ### authz-db, no path-based access control is done.
31 | ### Uncomment the line below to use the default authorization file.
32 | # authz-db = authz
33 | ### This option specifies the authentication realm of the repository.
34 | ### If two repositories have the same authentication realm, they should
35 | ### have the same password database, and vice versa. The default realm
36 | ### is repository's uuid.
37 | # realm = My First Repository
38 |
39 | [sasl]
40 | ### This option specifies whether you want to use the Cyrus SASL
41 | ### library for authentication. Default is false.
42 | ### This section will be ignored if svnserve is not built with Cyrus
43 | ### SASL support; to check, run 'svnserve --version' and look for a line
44 | ### reading 'Cyrus SASL authentication is available.'
45 | # use-sasl = true
46 | ### These options specify the desired strength of the security layer
47 | ### that you want SASL to provide. 0 means no encryption, 1 means
48 | ### integrity-checking only, values larger than 1 are correlated
49 | ### to the effective key length for encryption (e.g. 128 means 128-bit
50 | ### encryption). The values below are the defaults.
51 | # min-encryption = 0
52 | # max-encryption = 256
53 |
--------------------------------------------------------------------------------
/salt/svn/files/etc/sysconfig/svnserve:
--------------------------------------------------------------------------------
1 | OPTIONS='-r {{salt['pillar.get']('repodir', '/var/svn')}}'
2 |
--------------------------------------------------------------------------------
/salt/svn/init.sls:
--------------------------------------------------------------------------------
1 | {% set repodir = salt['pillar.get']('repodir', '/var/svn') %}
2 | # svnserve: install subversion, create the repository once, and keep the
3 | # daemon running; restarts when its sysconfig or repo conf/ files change.
4 | svnserve:
5 |   pkg:
6 |     - name: subversion
7 |     - installed
8 |   service:
9 |     - running
10 |     - require:
11 |       - pkg: subversion
12 |     - watch:
13 |       - pkg: subversion
14 |       - file: /etc/sysconfig/svnserve
15 |       - file: {{repodir}}/conf/
16 |   cmd.wait:
17 |     - name: mkdir -p {{repodir}} && /usr/bin/svnadmin create {{repodir}}
18 |     # 'svnadmin create' fails when pointed at an existing repository, and
19 |     # the watch on the pkg re-fires on upgrades — guard the rerun: a repo
20 |     # root always contains a 'format' file.
21 |     - unless: test -f {{repodir}}/format
22 |     - watch:
23 |       - pkg: subversion
24 | 
25 | # Daemon options (repo root path) rendered from pillar.
26 | /etc/sysconfig/svnserve:
27 |   file.managed:
28 |     - source: salt://svn/files/etc/sysconfig/svnserve
29 |     - template: jinja
30 |     - user: root
31 |     - group: root
32 |     - mode: 644
33 | 
34 | # Access control files (authz/passwd/svnserve.conf) pushed into the repo.
35 | {{repodir}}/conf/:
36 |   file.recurse:
37 |     - source: salt://svn/files/conf/
38 |     - user: root
39 |     - group: root
40 |     - dir_mode: 755
41 |     - file_mode: 644
42 | 
--------------------------------------------------------------------------------
/salt/top.sls:
--------------------------------------------------------------------------------
1 | base:
2 | '*':
3 | - roles.common
4 | 'admin.grid.mall.com':
5 | - roles.admin
6 | 'ha*.grid.mall.com':
7 | - roles.ha
8 | 'web*.grid.mall.com':
9 | - roles.web
10 | 'cache*.grid.mall.com':
11 | - roles.cache
12 | 'mc*.grid.mall.com':
13 | - roles.mc
14 | 'db*.grid.mall.com':
15 | - roles.db
16 | 'search*.grid.mall.com':
17 | - roles.search
18 | 'storage*.grid.mall.com':
19 | - roles.storage
20 |
--------------------------------------------------------------------------------
/salt/users/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - users.root
3 | - users.user
4 |
--------------------------------------------------------------------------------
/salt/users/root.sls:
--------------------------------------------------------------------------------
1 | root:
2 | user:
3 | - present
4 |
--------------------------------------------------------------------------------
/salt/users/sudo.sls:
--------------------------------------------------------------------------------
1 | sudoers:
2 | file.managed:
3 | - name: /etc/sudoers
4 |
--------------------------------------------------------------------------------
/salt/users/user.sls:
--------------------------------------------------------------------------------
1 | include:
2 |   - users.sudo
3 | 
4 | # One group + user pair per entry in pillar['users'].
5 | # NOTE(review): `iteritems()` is Python-2-only; a Py3 renderer needs `items()`.
6 | {% for user, args in pillar['users'].iteritems() %}
7 | {{user}}:
8 |   group.present:
9 |     - gid: {{args['gid']}}
10 |   user.present:
11 |     - home: /home/{{user}}
12 |     - shell: {{args['shell']}}
13 |     - uid: {{args['uid']}}
14 |     - gid: {{args['gid']}}
15 |     - fullname: {{args['fullname']}}
16 | {% if 'password' in args %}
17 |     - password: {{args['password']}}
18 | {% endif %}
19 |     - require:
20 |       - group: {{user}}
21 | 
22 | # Passwordless sudo only when the pillar entry opts in with a truthy 'sudo'.
23 | {% if 'sudo' in args %}
24 | {% if args['sudo'] %}
25 | sudoer-{{user}}:
26 |   file.append:
27 |     - name: /etc/sudoers
28 |     - text:
29 |       - '{{user}} ALL=(ALL) NOPASSWD: ALL'
30 |     - require:
31 |       - file: sudoers
32 |       - user: {{user}}
33 | {% endif %}
34 | {% endif %}
35 | 
36 | # Install the user's SSH public key when one is supplied.
37 | # NOTE(review): these states read args['group'] (a name), which nothing above
38 | # creates — confirm every pillar entry with ssh_auth also defines 'group'.
39 | {% if 'ssh_auth' in args %}
40 | /home/{{user}}/.ssh:
41 |   file.directory:
42 |     - user: {{user}}
43 |     - group: {{args['group']}}
44 |     - mode: 700
45 |     - require:
46 |       - user: {{user}}
47 | 
48 | /home/{{user}}/.ssh/authorized_keys:
49 |   file.managed:
50 |     - user: {{user}}
51 |     - group: {{args['group']}}
52 |     - mode: 600
53 |     - require:
54 |       - file: /home/{{user}}/.ssh
55 | 
56 | {{ args['ssh_auth']['key'] }}:
57 |   ssh_auth.present:
58 |     - user: {{user}}
59 |     - comment: {{args['ssh_auth']['comment']}}
60 |     - require:
61 |       - file: /home/{{user}}/.ssh/authorized_keys
62 | {% endif %}
63 | {% endfor %}
64 | 
--------------------------------------------------------------------------------
/salt/users/www.sls:
--------------------------------------------------------------------------------
1 | # Service account for web content; fixed uid, no login shell.
2 | www:
3 |   user:
4 |     - present
5 |     - uid: 80
6 |     - shell: /sbin/nologin
7 | 
--------------------------------------------------------------------------------
/salt/varnish/files/etc/sysconfig/varnish:
--------------------------------------------------------------------------------
1 | # Configuration file for varnish
2 | #
3 | # /etc/init.d/varnish expects the variable $DAEMON_OPTS to be set from this
4 | # shell script fragment.
5 | #
6 |
7 | # Maximum number of open files (for ulimit -n)
8 | NFILES=65535
9 |
10 | # Locked shared memory (for ulimit -l)
11 | # Default log size is 82MB + header
12 | MEMLOCK=82000
13 |
14 | # Maximum number of threads (for ulimit -u)
15 | NPROCS="unlimited"
16 |
17 | # Maximum size of corefile (for ulimit -c). Default in Fedora is 0
18 | # DAEMON_COREFILE_LIMIT="unlimited"
19 |
20 | # Set this to 1 to make init script reload try to switch vcl without restart.
21 | # To make this work, you need to set the following variables
22 | # explicit: VARNISH_VCL_CONF, VARNISH_ADMIN_LISTEN_ADDRESS,
23 | # VARNISH_ADMIN_LISTEN_PORT, VARNISH_SECRET_FILE, or in short,
24 | # use Alternative 3, Advanced configuration, below
25 | RELOAD_VCL=1
26 |
27 | # This file contains 4 alternatives, please use only one.
28 |
29 | ## Alternative 1, Minimal configuration, no VCL
30 | #
31 | # Listen on port 6081, administration on localhost:6082, and forward to
32 | # content server on localhost:8080. Use a fixed-size cache file.
33 | #
34 | #DAEMON_OPTS="-a :6081 \
35 | # -T localhost:6082 \
36 | # -b localhost:8080 \
37 | # -u varnish -g varnish \
38 | # -s file,/var/lib/varnish/varnish_storage.bin,1G"
39 |
40 |
41 | ## Alternative 2, Configuration with VCL
42 | #
43 | # Listen on port 6081, administration on localhost:6082, and forward to
44 | # one content server selected by the vcl file, based on the request. Use a
45 | # fixed-size cache file.
46 | #
47 | #DAEMON_OPTS="-a :6081 \
48 | # -T localhost:6082 \
49 | # -f /etc/varnish/default.vcl \
50 | # -u varnish -g varnish \
51 | # -S /etc/varnish/secret \
52 | # -s file,/var/lib/varnish/varnish_storage.bin,1G"
53 |
54 |
55 | ## Alternative 3, Advanced configuration
56 | #
57 | # See varnishd(1) for more information.
58 | #
59 | # # Main configuration file. You probably want to change it :)
60 | VARNISH_VCL_CONF=/etc/varnish/default.vcl
61 | #
62 | # # Default address and port to bind to
63 | # # Blank address means all IPv4 and IPv6 interfaces, otherwise specify
64 | # # a host name, an IPv4 dotted quad, or an IPv6 address in brackets.
65 | # VARNISH_LISTEN_ADDRESS=
66 | VARNISH_LISTEN_PORT=8080
67 | #
68 | # # Telnet admin interface listen address and port
69 | VARNISH_ADMIN_LISTEN_ADDRESS=127.0.0.1
70 | VARNISH_ADMIN_LISTEN_PORT=6082
71 | #
72 | # # Shared secret file for admin interface
73 | VARNISH_SECRET_FILE=/etc/varnish/secret
74 | #
75 | # # The minimum number of worker threads to start
76 | VARNISH_MIN_THREADS=100
77 | #
78 | # # The Maximum number of worker threads to start
79 | VARNISH_MAX_THREADS=2000
80 | #
81 | # # Idle timeout for worker threads
82 | VARNISH_THREAD_TIMEOUT=120
83 | #
84 | # # Cache file location
85 | VARNISH_STORAGE_FILE=/data1/varnish/varnish_storage.bin
86 | #
87 | # # Cache file size: in bytes, optionally using k / M / G / T suffix,
88 | # # or in percentage of available disk space using the % suffix.
89 | VARNISH_STORAGE_SIZE=100G
90 | #
91 | # # Backend storage specification
92 | VARNISH_STORAGE="persistent,${VARNISH_STORAGE_FILE},${VARNISH_STORAGE_SIZE}"
93 | VARNISH_MEM="malloc,12G"
94 | #
95 | # # Default TTL used when the backend does not specify one
96 | VARNISH_TTL=120
97 | #
98 | # # DAEMON_OPTS is used by the init script. If you add or remove options, make
99 | # # sure you update this section, too.
100 | DAEMON_OPTS="-a ${VARNISH_LISTEN_ADDRESS}:${VARNISH_LISTEN_PORT} \
101 | -f ${VARNISH_VCL_CONF} \
102 | -T ${VARNISH_ADMIN_LISTEN_ADDRESS}:${VARNISH_ADMIN_LISTEN_PORT} \
103 | -t ${VARNISH_TTL} \
104 | -w ${VARNISH_MIN_THREADS},${VARNISH_MAX_THREADS},${VARNISH_THREAD_TIMEOUT} \
105 | -u varnish -g varnish \
106 | -S ${VARNISH_SECRET_FILE} \
107 | -s ${VARNISH_MEM} \
108 | -s ${VARNISH_STORAGE}"
109 | #
110 |
111 |
112 | ## Alternative 4, Do It Yourself. See varnishd(1) for more information.
113 | #
114 | # DAEMON_OPTS=""
115 |
--------------------------------------------------------------------------------
/salt/varnish/files/etc/varnish/default.vcl:
--------------------------------------------------------------------------------
1 | probe healthcheck {
2 | .request =
3 | "GET / HTTP/1.1"
4 | "Host: static.mall.com"
5 | "Connection: close";
6 | .interval = 60s;
7 | .timeout = 0.3 s;
8 | .window = 8;
9 | .threshold = 3;
10 | .initial = 3;
11 | .expected_response = 200;
12 | }
13 |
14 | backend static_01 {
15 | .host = "{{ pillar['varnish_static_01'] }}";
16 | .port = "80";
17 | .probe = healthcheck;
18 | }
19 |
20 | backend static_02 {
21 | .host = "{{ pillar['varnish_static_02'] }}";
22 | .port = "80";
23 | .probe = healthcheck;
24 | }
25 |
26 | backend static_03 {
27 | .host = "{{ pillar['varnish_static_03'] }}";
28 | .port = "80";
29 | .probe = healthcheck;
30 | }
31 |
32 | director static random {
33 | .retries = 5;
34 | {
35 | .backend = static_01;
36 | .weight = 5;
37 | }
38 | {
39 | .backend = static_02;
40 | .weight = 5;
41 | }
42 | {
43 | .backend = static_03;
44 | .weight = 5;
45 | }
46 | }
47 |
48 | acl purge {
49 | "localhost";
50 | "172.16.100.0"/24;
51 | }
52 |
53 | sub vcl_recv {
54 |     if (req.restarts == 0) {
55 |         if (req.http.x-forwarded-for) {
56 |             set req.http.X-Forwarded-For =
57 |                 req.http.X-Forwarded-For + ", " + client.ip;
58 |         } else {
59 |             set req.http.X-Forwarded-For = client.ip;
60 |         }
61 |     }
62 | 
63 |     if (req.request == "PURGE") {
64 |         if (!client.ip ~ purge) {
65 |             error 405 "Not allowed.";
66 |         }
67 |         return(lookup);
68 |     }
69 |     # Dots escaped so the host match is a literal ".mall.com" (an unescaped
70 |     # "." matched any character, e.g. "staticXmallYcom").
71 |     if (req.http.host ~ "(?i)^static[0-9]?\.mall\.com$") {
72 |         set req.backend = static;
73 |     }
74 |     if (req.request != "GET" && req.request != "HEAD") {
75 |         /* We only deal with GET and HEAD by default */
76 |         return (pass);
77 |     }
78 | 
79 |     # normalize Accept-Encoding to reduce vary
80 |     if (req.http.Accept-Encoding) {
81 |         if (req.url ~ "\.(jpg|jpeg|png|gif|ico|gz|tgz|bz2|tbz|mp3|ogg)$") {
82 |             unset req.http.Accept-Encoding;
83 |         } elsif (req.http.User-Agent ~ "MSIE 6") {
84 |             unset req.http.Accept-Encoding;
85 |         } elsif (req.http.Accept-Encoding ~ "gzip") {
86 |             set req.http.Accept-Encoding = "gzip";
87 |         } elsif (req.http.Accept-Encoding ~ "deflate") {
88 |             set req.http.Accept-Encoding = "deflate";
89 |         } else {
90 |             unset req.http.Accept-Encoding;
91 |         }
92 |     }
93 | 
94 |     if (req.url ~ "(?i)\.(js|css|ico|gif|jpg|jpeg|png|xml|htm|html|swf|flv)$") {
95 |         unset req.http.Cookie;
96 |     }
97 | 
98 |     if (req.http.referer ~ "http://.*" && req.url ~ "\.(js|css|ico|gif|jpg|jpeg|png|xml|htm|html|swf|flv)$") {
99 |         call daolian;
100 |     }
101 | 
102 |     return (lookup);
103 | }
104 |
105 | sub daolian {
106 | if ( !(req.http.referer ~ "http://.*\.mall\.com"
107 | || req.http.referer ~ "http://mall\.com"
108 | )) {
109 | error 404 "Not Found!";
110 | }
111 | }
112 |
113 | sub vcl_pipe {
114 | set req.http.connection = "close";
115 | }
116 |
117 | sub vcl_pass {
118 | return (pass);
119 | }
120 |
121 | sub vcl_hit {
122 | if (req.request == "PURGE") {
123 | purge;
124 | error 200 "Purged.";
125 | }
126 | return (deliver);
127 | }
128 |
129 | sub vcl_miss {
130 | if (req.request == "PURGE") {
131 | purge;
132 | error 404 "Not in cache.";
133 | }
134 | return (fetch);
135 | }
136 |
137 | sub vcl_fetch {
138 | if (beresp.ttl <= 0s ||
139 | # beresp.http.Set-Cookie ||
140 | beresp.http.Vary == "*") {
141 | set beresp.ttl = 3600 s;
142 | return (hit_for_pass);
143 | }
144 | if (beresp.status == 500 || beresp.status == 501 || beresp.status == 502 ||
145 | beresp.status == 503 || beresp.status == 504) {
146 | return (restart);
147 | }
148 | return (deliver);
149 | }
150 |
151 | sub vcl_deliver {
152 | set resp.http.x-hits = obj.hits;
153 | if (obj.hits > 0) {
154 | set resp.http.X-Cache = "HITS from {{ salt['grains.get']('fqdn', 'mall') }}";
155 | } else {
156 | set resp.http.X-Cache = "MISS from {{ salt['grains.get']('fqdn', 'mall') }}";
157 | }
158 | return (deliver);
159 | }
160 |
161 | sub vcl_error {
162 | return (deliver);
163 | }
164 |
165 | sub vcl_init {
166 | return (ok);
167 | }
168 |
169 | sub vcl_fini {
170 | return (ok);
171 | }
172 |
--------------------------------------------------------------------------------
/salt/varnish/files/etc/zabbix/zabbix_agentd.conf.d/varnish.conf:
--------------------------------------------------------------------------------
1 | UserParameter=varnish.stat[*],(test -f /usr/bin/varnishstat && varnishstat -1 -f $1 | awk '{print $$2}')
2 |
--------------------------------------------------------------------------------
/salt/varnish/init.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - salt.minion
4 | - varnish.monitor
5 |
6 | varnish:
7 | pkg.installed:
8 | - name: varnish
9 | service.running:
10 | - name: varnish
11 | - enable: True
12 | - require:
13 | - pkg: varnish
14 | - watch:
15 | - pkg: varnish
16 | - file: varnish
17 | - file: /etc/varnish/default.vcl
18 | file.managed:
19 | - name: /etc/sysconfig/varnish
20 | - source: salt://varnish/files/etc/sysconfig/varnish
21 | - template: jinja
22 | - user: root
23 | - group: root
24 | - mode: 644
25 | - require:
26 | - cmd: varnish
27 | cmd.run:
28 | - name: mkdir -p /data1/varnish
29 | - unless: test -d /data1/varnish
30 |
31 | /etc/varnish/default.vcl:
32 | file.managed:
33 | - source: salt://varnish/files/etc/varnish/default.vcl
34 | - template: jinja
35 | - user: root
36 | - group: root
37 | - mode: 644
38 |
39 | varnish-role:
40 | file.append:
41 | - name: /etc/salt/roles
42 | - text:
43 | - 'varnish'
44 | - require:
45 | - file: roles
46 | - service: varnish
47 | - service: salt-minion
48 | - watch_in:
49 | - module: sync_grains
50 |
--------------------------------------------------------------------------------
/salt/varnish/monitor.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - zabbix.agent
3 | - varnish
4 |
5 | varnish-monitor-config:
6 | file.managed:
7 | - name: /etc/zabbix/zabbix_agentd.conf.d/varnish.conf
8 | - source: salt://varnish/files/etc/zabbix/zabbix_agentd.conf.d/varnish.conf
9 | - require:
10 | - service: varnish
11 | - watch_in:
12 | - service: zabbix-agent
13 |
--------------------------------------------------------------------------------
/salt/zabbix/agent.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.minion
3 |
4 | zabbix-agent:
5 | pkg.installed:
6 | - name: zabbix22-agent
7 | file.managed:
8 | - name: /etc/zabbix_agentd.conf
9 | - source: salt://zabbix/files/etc/zabbix_agentd.conf
10 | - template: jinja
11 | - defaults:
12 | zabbix_server: {{ pillar['zabbix-agent']['Zabbix_Server'] }}
13 | - require:
14 | - pkg: zabbix-agent
15 | service.running:
16 | - enable: True
17 | - watch:
18 | - pkg: zabbix-agent
19 | - file: zabbix-agent
20 |
21 | zabbix-agent-role:
22 | file.append:
23 | - name: /etc/salt/roles
24 | - text:
25 | - 'zabbix-agent'
26 | - require:
27 | - file: roles
28 | - service: zabbix-agent
29 | - service: salt-minion
30 | - watch_in:
31 | - module: sync_grains
32 |
33 |
34 | zabbix_agentd_conf-link:
35 | file.symlink:
36 | - name: /etc/zabbix/zabbix_agentd.conf
37 | - target: /etc/zabbix_agentd.conf
38 | - require_in:
39 | - service: zabbix-agent
40 | - require:
41 | - pkg: zabbix-agent
42 | - file: zabbix-agent
43 |
44 | zabbix_agentd.conf.d:
45 | file.directory:
46 | - name: /etc/zabbix/zabbix_agentd.conf.d
47 | - watch_in:
48 | - service: zabbix-agent
49 | - require:
50 | - pkg: zabbix-agent
51 | - file: zabbix-agent
52 |
53 |
--------------------------------------------------------------------------------
/salt/zabbix/api.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.minion
3 |
4 | python-zabbix-zapi:
5 | file.recurse:
6 | - name: /usr/lib/python2.6/site-packages/zabbix
7 | - source: salt://zabbix/files/usr/lib/python2.6/site-packages/zabbix
8 | - include_empty: True
9 |
10 |
11 | zabbix-api-role:
12 | file.append:
13 | - name: /etc/salt/roles
14 | - text:
15 | - 'zabbix-api'
16 | - require:
17 | - file: roles
18 | - service: salt-minion
19 | - file: python-zabbix-zapi
20 | - watch_in:
21 | - module: sync_grains
22 |
23 | zabbix-api-config:
24 | file.managed:
25 | - name: /etc/zabbix/api/config.yaml
26 | - source: salt://zabbix/files/etc/zabbix/api/config.yaml
27 | - makedirs: True
28 | - template: jinja
29 | - defaults:
30 | Monitors_DIR: {{pillar['zabbix-api']['Monitors_DIR']}}
31 | Templates_DIR: {{pillar['zabbix-api']['Templates_DIR']}}
32 | Zabbix_User: {{pillar['zabbix-api']['Zabbix_User']}}
33 | Zabbix_Pass: {{pillar['zabbix-api']['Zabbix_Pass']}}
34 | Zabbix_URL: {{pillar['zabbix-api']['Zabbix_URL']}}
35 |
36 | zabbix-templates:
37 | file.recurse:
38 | - name: {{pillar['zabbix-api']['Templates_DIR']}}
39 | - source: salt://zabbix/files/etc/zabbix/api/templates
40 | - require:
41 | - file: python-zabbix-zapi
42 | - file: zabbix-api-config
43 |
44 | zabbix-add-monitors-script:
45 | file.managed:
46 | - name: /etc/zabbix/api/add_monitors.py
47 | - source: salt://zabbix/files/etc/zabbix/api/add_monitors.py
48 | - makedirs: True
49 | - mode: 755
50 | - require:
51 | - file: python-zabbix-zapi
52 | - file: zabbix-api-config
53 |
54 | {% for each_minion, each_mine in salt['mine.get']('*', 'grains.item').iteritems() %}
55 | monitor-{{each_minion}}:
56 | file.managed:
57 | - name: {{pillar['zabbix-api']['Monitors_DIR']}}/{{each_minion}}
58 | - source: salt://zabbix/files/etc/zabbix/api/monitors/minion
59 | - makedirs: True
60 | - template: jinja
61 | - defaults:
62 | IP: {{each_mine.ipv4[1]}}
63 | Hostgroup: {{each_mine.hostgroup}}
64 | Roles: {{each_mine.roles}}
65 | Templates: {{pillar['zabbix-templates']}}
66 | - order: last
67 | - require:
68 | - module: mine_update
69 | cmd.wait:
70 | - name: python /etc/zabbix/api/add_monitors.py {{each_minion}}
71 | - require:
72 | - file: zabbix-add-monitors-script
73 | - watch:
74 | - file: monitor-{{each_minion}}
75 | {% endfor %}
76 |
--------------------------------------------------------------------------------
/salt/zabbix/files/etc/zabbix/api/add_monitors.py:
--------------------------------------------------------------------------------
1 | #!/bin/env python
2 | #coding=utf8
3 |
4 | ##########################################################
5 | # Add Monitor To Zabbix
6 | ##########################################################
7 |
8 | import sys, os.path
9 | import yaml
10 |
11 | from zabbix.zapi import *
12 |
13 | def _config(config_file):
14 | '''get config'''
15 |
16 | config_fd = open(config_file)
17 | config = yaml.load(config_fd)
18 |
19 | return config
20 |
21 | def _get_templates(api_obj, templates_list):
22 | '''get templates ids'''
23 |
24 | templates_id = {}
25 | templates_result = api_obj.Template.getobjects({"host": templates_list})
26 |
27 | for each_template in templates_result:
28 | template_name = each_template['name']
29 | template_id = each_template['templateid']
30 | templates_id[template_name] = template_id
31 |
32 | return templates_id
33 |
34 | def _get_host_templates(api_obj, hostid):
35 | '''get the host has linked templates'''
36 |
37 | templates_id = []
38 | templates_result = api_obj.Template.get({'hostids': hostid})
39 |
40 | for each_template in templates_result:
41 | template_id = each_template['templateid']
42 | templates_id.append(template_id)
43 |
44 | return templates_id
45 |
46 |
47 | def _create_hostgroup(api_obj, group_name):
48 |     '''Return the groupid for group_name, creating the hostgroup when missing; exits(2) if creation fails.'''
49 | 
50 |     ##check hostgroup exists first so we reuse its id instead of failing on create
51 |     hostgroup_status = api_obj.Hostgroup.exists({"name": "%s" %(group_name)})
52 |     if hostgroup_status:
53 |         print "Hostgroup(%s) is already exists" %(group_name)
54 |         group_id = api_obj.Hostgroup.getobjects({"name": "%s" %(group_name)})[0]["groupid"]
55 |     else:
56 |         hostgroup_status = api_obj.Hostgroup.create({"name": "%s" %(group_name)})
57 |         if hostgroup_status:
58 |             print "Hostgroup(%s) create success" %(group_name)
59 |             group_id = hostgroup_status["groupids"][0]
60 |         else:
61 |             sys.stderr.write("Hostgroup(%s) create failed, please connect administrator\n" %(group_name))
62 |             exit(2)
63 | 
64 |     return group_id
65 |
66 | def _create_host(api_obj, hostname, hostip, group_ids):
67 |     '''Create the host (agent interface on hostip:10050) or merge group_ids into an existing host's groups; returns hostid, exits(3) on API failure.'''
68 | 
69 |     ##check host exists; existing hosts are updated in place, never recreated
70 |     host_status = api_obj.Host.exists({"name": "%s" %(hostname)})
71 |     if host_status:
72 |         print "Host(%s) is already exists" %(hostname)
73 |         hostid = api_obj.Host.getobjects({"name": "%s" %(hostname)})[0]["hostid"]
74 |         ##merge requested groups into the host's current group list (update only if something new)
75 |         groupids = [group['groupid'] for group in api_obj.Host.get({"output": ["hostid"], "selectGroups": "extend", "filter": {"host": ["%s" %(hostname)]}})[0]['groups']]
76 |         is_hostgroup_update = 0
77 |         for groupid in group_ids:
78 |             if groupid not in groupids:
79 |                 is_hostgroup_update = 1
80 |                 groupids.append(groupid)
81 |         if is_hostgroup_update == 1:
82 |             groups = []
83 |             for groupid in groupids:
84 |                 groups.append({"groupid": "%s" %(groupid)})
85 |             host_status = api_obj.Host.update({"hostid": "%s" %(hostid), "groups": groups})
86 |             if host_status:
87 |                 print "Host(%s) group update success" %(hostname)
88 |             else:
89 |                 sys.stderr.write("Host(%s) group update failed, please connect administrator\n" %(hostname))
90 |                 exit(3)
91 |     else:
92 |         groups = []
93 |         for groupid in group_ids:
94 |             groups.append({"groupid": "%s" %(groupid)})
95 |         host_status = api_obj.Host.create({"host": "%s" %(hostname), "interfaces": [{"type": 1, "main": 1, "useip": 1, "ip": "%s" %(hostip), "dns": "", "port": "10050"}], "groups": groups})
96 |         if host_status:
97 |             print "Host(%s) create success" %(hostname)
98 |             hostid = host_status["hostids"][0]
99 |         else:
100 |             sys.stderr.write("Host(%s) create failed, please connect administrator\n" %(hostname))
101 |             exit(3)
102 | 
103 |     return hostid
104 |
105 | def _create_host_usermacro(api_obj, hostname, usermacro):
106 | '''create host usermacro'''
107 |
108 | for macro in usermacro.keys():
109 | value = usermacro[macro]
110 |
111 | ##check host exists
112 | host_status = api_obj.Host.exists({"name": "%s" %(hostname)})
113 | if host_status:
114 | hostid = api_obj.Host.getobjects({"name": "%s" %(hostname)})[0]["hostid"]
115 | ##check usermacro exists
116 | usermacros = api_obj.Usermacro.get({"output": "extend", "hostids": "%s" %(hostid)})
117 | is_macro_exists = 0
118 | if usermacros:
119 | for usermacro in usermacros:
120 | if usermacro["macro"] == macro:
121 | is_macro_exists = 1
122 | if usermacro["value"] == str(value):
123 | print "Host(%s) usermacro(%s) is already exists" %(hostname, macro)
124 | hostmacroid = usermacro["hostmacroid"]
125 | else:
126 | ##usermacro exists, but value is not the same, update
127 | usermacro_status = api_obj.Usermacro.update({"hostmacroid": usermacro["hostmacroid"], "value": "%s" %(value)})
128 | if usermacro_status:
129 | print "Host(%s) usermacro(%s) update success" %(hostname, macro)
130 | hostmacroid = usermacro_status["hostmacroids"][0]
131 | else:
132 | sys.stderr.write("Host(%s) usermacro(%s) update failed, please connect administrator\n" %(hostname, macro))
133 | exit(3)
134 | break
135 | if is_macro_exists == 0:
136 | usermacro_status = api_obj.Usermacro.create({"hostid": "%s" %(hostid), "macro": "%s" %(macro), "value": "%s" %(value)})
137 | if usermacro_status:
138 | print "Host(%s) usermacro(%s) create success" %(hostname, macro)
139 | hostmacroid = usermacro_status["hostmacroids"][0]
140 | else:
141 | sys.stderr.write("Host(%s) usermacro(%s) create failed, please connect administrator\n" %(hostname, macro))
142 | exit(3)
143 | else:
144 | sys.stderr.write("Host(%s) is not exists" %(hostname))
145 | exit(3)
146 |
147 | return hostmacroid
148 |
149 | def _link_templates(api_obj, hostname, hostid, templates_list, donot_unlink_templates):
150 | '''link templates'''
151 |
152 | all_templates = []
153 | clear_templates = []
154 | ##get templates id
155 | if donot_unlink_templates is None:
156 | donot_unlink_templates_id = {}
157 | else:
158 | donot_unlink_templates_id = _get_templates(api_obj, donot_unlink_templates)
159 | templates_id = _get_templates(api_obj, templates_list)
160 | ##get the host currently linked tempaltes
161 | curr_linked_templates = _get_host_templates(api_obj, hostid)
162 |
163 | for each_template in templates_id:
164 | if templates_id[each_template] in curr_linked_templates:
165 | print "Host(%s) is already linked %s" %(hostname, each_template)
166 | else:
167 | print "Host(%s) will link %s" %(hostname, each_template)
168 | all_templates.append(templates_id[each_template])
169 |
170 | ##merge templates list
171 | for each_template in curr_linked_templates:
172 | if each_template not in all_templates:
173 | if each_template in donot_unlink_templates_id.values():
174 | all_templates.append(each_template)
175 | else:
176 | clear_templates.append(each_template)
177 |
178 |
179 | ##convert to zabbix api style
180 | templates_list = []
181 | clear_templates_list = []
182 | for each_template in all_templates:
183 | templates_list.append({"templateid": each_template})
184 | for each_template in clear_templates:
185 | clear_templates_list.append({"templateid": each_template})
186 |
187 |
188 | ##update host to link templates
189 | update_status = api_obj.Host.update({"hostid": hostid, "templates": templates_list})
190 |
191 | if update_status:
192 | print "Host(%s) link templates success" %(hostname)
193 | else:
194 | print "Host(%s) link templates failed, please contact administrator" %(hostname)
195 |
196 | ##host unlink templates
197 | if clear_templates_list != []:
198 | clear_status = api_obj.Host.update({"hostid": hostid, "templates_clear": clear_templates_list})
199 | if clear_status:
200 | print "Host(%s) unlink templates success" %(hostname)
201 | else:
202 | print "Host(%s) unlink templates failed, please contact administrator" %(hostname)
203 |
204 |
205 | def _main():
206 | '''main function'''
207 |
208 | hosts = []
209 | if len(sys.argv) > 1:
210 | hosts = sys.argv[1:]
211 |
212 | config_dir = os.path.dirname(sys.argv[0])
213 | if config_dir:
214 | config_file = config_dir+"/"+"config.yaml"
215 | else:
216 | config_file = "config.yaml"
217 |
218 | ###get config options
219 | config = _config(config_file)
220 | Monitor_DIR = config["Monitors_DIR"]
221 | Zabbix_URL = config["Zabbix_URL"]
222 | Zabbix_User = config["Zabbix_User"]
223 | Zabbix_Pass = config["Zabbix_Pass"]
224 | Zabbix_Donot_Unlink_Template = config["Zabbix_Donot_Unlink_Template"]
225 |
226 | if not hosts:
227 | hosts = os.listdir(Monitor_DIR)
228 |
229 | ###Login Zabbix
230 | zapi = ZabbixAPI(url=Zabbix_URL, user=Zabbix_User, password=Zabbix_Pass)
231 | zapi.login()
232 |
233 | for each_host in hosts:
234 | each_config_fd = open(Monitor_DIR+"/"+each_host)
235 | each_config = yaml.load(each_config_fd)
236 |
237 | ##Get config options
238 | each_ip = each_config["IP"]
239 | hostgroups = each_config["Hostgroup"]
240 | each_templates = each_config["Templates"]
241 | each_usermacros = each_config["Usermacros"]
242 |
243 | ###Create Hostgroup
244 | groupids = []
245 | for each_hostgroup in hostgroups:
246 | group_id = _create_hostgroup(zapi, each_hostgroup)
247 | groupids.append(group_id)
248 |
249 | ##Create Host
250 | hostid = _create_host(zapi, each_host, each_ip, groupids)
251 |
252 | if each_usermacros:
253 | ##Create Host Usermacros
254 | for usermacro in each_usermacros:
255 | if usermacro:
256 | usermacrosid = _create_host_usermacro(zapi, each_host, usermacro)
257 |
258 | if each_templates:
259 | ##Link tempaltes
260 | _link_templates(zapi, each_host, hostid, each_templates, Zabbix_Donot_Unlink_Template)
261 |
262 |
263 | if __name__ == "__main__":
264 | _main()
265 |
266 |
--------------------------------------------------------------------------------
/salt/zabbix/files/etc/zabbix/api/config.yaml:
--------------------------------------------------------------------------------
1 | Monitors_DIR: {{Monitors_DIR}}
2 | Templates_DIR: {{Templates_DIR}}
3 | Zabbix_URL: {{Zabbix_URL}}
4 | Zabbix_User: {{Zabbix_User}}
5 | Zabbix_Pass: {{Zabbix_Pass}}
6 | Zabbix_Donot_Unlink_Template:
7 | - 'Template OS Linux'
8 |
--------------------------------------------------------------------------------
/salt/zabbix/files/etc/zabbix/api/monitors/minion:
--------------------------------------------------------------------------------
1 | IP: {{IP}}
2 | Hostgroup:
3 | {% for each_hostgroup in Hostgroup -%}
4 | - {{each_hostgroup}}
5 | {% endfor %}
6 | Usermacros:
7 | - '{$MAX_CPULOAD}': {{ grains['num_cpus'] * 2 + 2 }}
8 | - '{$OK_CPULOAD}': {{ grains['num_cpus'] * 2 }}
9 | Templates:
10 | {% for each_template in pillar.get('zabbix-base-templates', []) -%}
11 | - {{ each_template }}
12 | {% endfor -%}
13 | {% for each_role in Roles -%}
14 | {% if each_role in Templates -%}
15 | - {{ pillar['zabbix-templates'][each_role] }}
16 | {% endif -%}
17 | {% endfor -%}
18 |
19 |
--------------------------------------------------------------------------------
/salt/zabbix/files/etc/zabbix/web/zabbix.conf.php:
--------------------------------------------------------------------------------
1 |
21 |
--------------------------------------------------------------------------------
/salt/zabbix/files/etc/zabbix_agentd.conf:
--------------------------------------------------------------------------------
1 | # This is a config file for the Zabbix agent daemon (Unix)
2 | # To get more information about Zabbix, visit http://www.zabbix.com
3 |
4 | ############ GENERAL PARAMETERS #################
5 |
6 | ### Option: PidFile
7 | # Name of PID file.
8 | #
9 | # Mandatory: no
10 | # Default:
11 | PidFile=/var/run/zabbix/zabbix_agentd.pid
12 |
13 | ### Option: LogFile
14 | # Name of log file.
15 | # If not set, syslog is used.
16 | #
17 | # Mandatory: no
18 | # Default:
19 | # LogFile=
20 |
21 | LogFile=/var/log/zabbix/zabbix_agentd.log
22 |
23 | ### Option: LogFileSize
24 | # Maximum size of log file in MB.
25 | # 0 - disable automatic log rotation.
26 | #
27 | # Mandatory: no
28 | # Range: 0-1024
29 | # Default:
30 | LogFileSize=0
31 |
32 | ### Option: DebugLevel
33 | # Specifies debug level
34 | # 0 - no debug
35 | # 1 - critical information
36 | # 2 - error information
37 | # 3 - warnings
38 | # 4 - for debugging (produces lots of information)
39 | #
40 | # Mandatory: no
41 | # Range: 0-4
42 | # Default:
43 | # DebugLevel=3
44 |
45 | ### Option: SourceIP
46 | # Source IP address for outgoing connections.
47 | #
48 | # Mandatory: no
49 | # Default:
50 | # SourceIP=
51 |
52 | ### Option: EnableRemoteCommands
53 | # Whether remote commands from Zabbix server are allowed.
54 | # 0 - not allowed
55 | # 1 - allowed
56 | #
57 | # Mandatory: no
58 | # Default:
59 | # EnableRemoteCommands=0
60 |
61 | ### Option: LogRemoteCommands
62 | # Enable logging of executed shell commands as warnings.
63 | # 0 - disabled
64 | # 1 - enabled
65 | #
66 | # Mandatory: no
67 | # Default:
68 | # LogRemoteCommands=0
69 |
70 | ##### Passive checks related
71 |
72 | ### Option: Server
73 | # List of comma delimited IP addresses (or hostnames) of Zabbix servers.
74 | # Incoming connections will be accepted only from the hosts listed here.
75 | # No spaces allowed.
76 | # If IPv6 support is enabled then '127.0.0.1', '::127.0.0.1', '::ffff:127.0.0.1' are treated equally.
77 | #
78 | # Mandatory: no
79 | # Default:
80 | # Server=
81 |
82 | Server={{zabbix_server}}
83 |
84 | ### Option: ListenPort
85 | # Agent will listen on this port for connections from the server.
86 | #
87 | # Mandatory: no
88 | # Range: 1024-32767
89 | # Default:
90 | # ListenPort=10050
91 |
92 | ### Option: ListenIP
93 | # List of comma delimited IP addresses that the agent should listen on.
94 | # First IP address is sent to Zabbix server if connecting to it to retrieve list of active checks.
95 | #
96 | # Mandatory: no
97 | # Default:
98 | # ListenIP=0.0.0.0
99 |
100 | ### Option: StartAgents
101 | # Number of pre-forked instances of zabbix_agentd that process passive checks.
102 | # If set to 0, disables passive checks and the agent will not listen on any TCP port.
103 | #
104 | # Mandatory: no
105 | # Range: 0-100
106 | # Default:
107 | # StartAgents=3
108 |
109 | ##### Active checks related
110 |
111 | ### Option: ServerActive
112 | # List of comma delimited IP:port (or hostname:port) pairs of Zabbix servers for active checks.
113 | # If port is not specified, default port is used.
114 | # IPv6 addresses must be enclosed in square brackets if port for that host is specified.
115 | # If port is not specified, square brackets for IPv6 addresses are optional.
116 | # If this parameter is not specified, active checks are disabled.
117 | # Example: ServerActive=127.0.0.1:20051,zabbix.domain,[::1]:30051,::1,[12fc::1]
118 | #
119 | # Mandatory: no
120 | # Default:
121 | # ServerActive=
122 |
123 | #ServerActive=127.0.0.1
124 |
125 | ### Option: Hostname
126 | # Unique, case sensitive hostname.
127 | # Required for active checks and must match hostname as configured on the server.
128 | # Value is acquired from HostnameItem if undefined.
129 | #
130 | # Mandatory: no
131 | # Default:
132 | # Hostname=
133 |
134 |
135 | ### Option: HostnameItem
136 | # Item used for generating Hostname if it is undefined.
137 | # Ignored if Hostname is defined.
138 | #
139 | # Mandatory: no
140 | # Default:
141 | # HostnameItem=system.hostname
142 |
143 | ### Option: RefreshActiveChecks
144 | # How often list of active checks is refreshed, in seconds.
145 | #
146 | # Mandatory: no
147 | # Range: 60-3600
148 | # Default:
149 | # RefreshActiveChecks=120
150 |
151 | ### Option: BufferSend
152 | # Do not keep data longer than N seconds in buffer.
153 | #
154 | # Mandatory: no
155 | # Range: 1-3600
156 | # Default:
157 | # BufferSend=5
158 |
159 | ### Option: BufferSize
160 | # Maximum number of values in a memory buffer. The agent will send
161 | # all collected data to Zabbix Server or Proxy if the buffer is full.
162 | #
163 | # Mandatory: no
164 | # Range: 2-65535
165 | # Default:
166 | # BufferSize=100
167 |
168 | ### Option: MaxLinesPerSecond
169 | # Maximum number of new lines the agent will send per second to Zabbix Server
170 | # or Proxy processing 'log' and 'logrt' active checks.
171 | # The provided value will be overridden by the parameter 'maxlines',
172 | # provided in 'log' or 'logrt' item keys.
173 | #
174 | # Mandatory: no
175 | # Range: 1-1000
176 | # Default:
177 | # MaxLinesPerSecond=100
178 |
179 | ### Option: AllowRoot
180 | # Allow the agent to run as 'root'. If disabled and the agent is started by 'root', the agent
181 | # will try to switch to user 'zabbix' instead. Has no effect if started under a regular user.
182 | # 0 - do not allow
183 | # 1 - allow
184 | #
185 | # Mandatory: no
186 | # Default:
187 | # AllowRoot=0
188 |
189 | ############ ADVANCED PARAMETERS #################
190 |
191 | ### Option: Alias
192 | # Sets an alias for parameter. It can be useful to substitute long and complex parameter name with a smaller and simpler one.
193 | #
194 | # Mandatory: no
195 | # Range:
196 | # Default:
197 |
198 | ### Option: Timeout
199 | # Spend no more than Timeout seconds on processing
200 | #
201 | # Mandatory: no
202 | # Range: 1-30
203 | # Default:
204 | # Timeout=3
205 |
206 | ### Option: Include
207 | # You may include individual files or all files in a directory in the configuration file.
208 | # Installing Zabbix will create include directory in /etc, unless modified during the compile time.
209 | #
210 | # Mandatory: no
211 | # Default:
212 | # Include=
213 |
214 | # Include=/etc/zabbix_agentd.userparams.conf
215 | # Include=/etc/zabbix_agentd.conf.d/
216 | Include=/etc/zabbix/zabbix_agentd.conf.d/
217 |
218 | ####### USER-DEFINED MONITORED PARAMETERS #######
219 |
220 | ### Option: UnsafeUserParameters
221 | # Allow all characters to be passed in arguments to user-defined parameters.
222 | # 0 - do not allow
223 | # 1 - allow
224 | #
225 | # Mandatory: no
226 | # Range: 0-1
227 | # Default:
228 | # UnsafeUserParameters=0
229 |
230 | ### Option: UserParameter
231 | # User-defined parameter to monitor. There can be several user-defined parameters.
232 | # Format: UserParameter=<key>,<shell command>
233 | # See 'zabbix_agentd' directory for examples.
234 | #
235 | # Mandatory: no
236 | # Default:
237 | # UserParameter=
238 |
--------------------------------------------------------------------------------
/salt/zabbix/files/etc/zabbix_server.conf:
--------------------------------------------------------------------------------
1 | # This is a configuration file for Zabbix Server process
2 | # To get more information about Zabbix,
3 | # visit http://www.zabbix.com
4 |
5 | ############ GENERAL PARAMETERS #################
6 |
7 | ### Option: NodeID
8 | # Unique NodeID in distributed setup.
9 | # 0 - standalone server
10 | #
11 | # Mandatory: no
12 | # Range: 0-999
13 | # Default:
14 | # NodeID=0
15 |
16 | ### Option: ListenPort
17 | # Listen port for trapper.
18 | #
19 | # Mandatory: no
20 | # Range: 1024-32767
21 | # Default:
22 | # ListenPort=10051
23 |
24 | ### Option: SourceIP
25 | # Source IP address for outgoing connections.
26 | #
27 | # Mandatory: no
28 | # Default:
29 | # SourceIP=
30 |
31 | ### Option: LogFile
32 | # Name of log file.
33 | # If not set, syslog is used.
34 | #
35 | # Mandatory: no
36 | # Default:
37 | # LogFile=
38 |
39 | LogFile=/var/log/zabbixsrv/zabbix_server.log
40 |
41 | ### Option: LogFileSize
42 | # Maximum size of log file in MB.
43 | # 0 - disable automatic log rotation.
44 | #
45 | # Mandatory: no
46 | # Range: 0-1024
47 | # Default:
48 | LogFileSize=0
49 |
50 | ### Option: DebugLevel
51 | # Specifies debug level
52 | # 0 - no debug
53 | # 1 - critical information
54 | # 2 - error information
55 | # 3 - warnings
56 | # 4 - for debugging (produces lots of information)
57 | #
58 | # Mandatory: no
59 | # Range: 0-4
60 | # Default:
61 | # DebugLevel=3
62 |
63 | ### Option: PidFile
64 | # Name of PID file.
65 | #
66 | # Mandatory: no
67 | # Default:
68 | PidFile=/var/run/zabbixsrv/zabbix_server.pid
69 |
70 | ### Option: DBHost
71 | # Database host name.
72 | # If set to localhost, socket is used for MySQL.
73 | # If set to empty string, socket is used for PostgreSQL.
74 | #
75 | # Mandatory: no
76 | # Default:
77 | # DBHost=localhost
78 | DBHost={{DBHost}}
79 |
80 | ### Option: DBName
81 | # Database name.
82 | # For SQLite3 path to database file must be provided. DBUser and DBPassword are ignored.
83 | #
84 | # Mandatory: yes
85 | # Default:
86 | # DBName=
87 |
88 | DBName={{DBName}}
89 |
90 | ### Option: DBSchema
91 | # Schema name. Used for IBM DB2.
92 | #
93 | # Mandatory: no
94 | # Default:
95 | # DBSchema=
96 |
97 | ### Option: DBUser
98 | # Database user. Ignored for SQLite.
99 | #
100 | # Mandatory: no
101 | # Default:
102 | # DBUser=
103 |
104 | DBUser={{DBUser}}
105 |
106 | ### Option: DBPassword
107 | # Database password. Ignored for SQLite.
108 | # Comment this line if no password is used.
109 | #
110 | # Mandatory: no
111 | # Default:
112 | # DBPassword=
113 | DBPassword={{DBPassword}}
114 |
115 | ### Option: DBSocket
116 | # Path to MySQL socket.
117 | #
118 | # Mandatory: no
119 | # Default:
120 | DBSocket={{DBSocket}}
121 |
122 | ### Option: DBPort
123 | # Database port when not using local socket. Ignored for SQLite.
124 | #
125 | # Mandatory: no
126 | # Range: 1024-65535
127 | # Default (for MySQL):
128 | # DBPort=3306
129 | DBPort={{DBPort}}
130 |
131 | ############ ADVANCED PARAMETERS ################
132 |
133 | ### Option: StartPollers
134 | # Number of pre-forked instances of pollers.
135 | #
136 | # Mandatory: no
137 | # Range: 0-1000
138 | # Default:
139 | # StartPollers=5
140 |
141 | ### Option: StartIPMIPollers
142 | # Number of pre-forked instances of IPMI pollers.
143 | #
144 | # Mandatory: no
145 | # Range: 0-1000
146 | # Default:
147 | # StartIPMIPollers=0
148 |
149 | ### Option: StartPollersUnreachable
150 | # Number of pre-forked instances of pollers for unreachable hosts (including IPMI).
151 | #
152 | # Mandatory: no
153 | # Range: 0-1000
154 | # Default:
155 | # StartPollersUnreachable=1
156 |
157 | ### Option: StartTrappers
158 | # Number of pre-forked instances of trappers.
159 | # Trappers accept incoming connections from Zabbix sender, active agents, active proxies and child nodes.
160 | # At least one trapper process must be running to display server availability in the frontend.
161 | #
162 | # Mandatory: no
163 | # Range: 0-1000
164 | # Default:
165 | # StartTrappers=5
166 |
167 | ### Option: StartPingers
168 | # Number of pre-forked instances of ICMP pingers.
169 | #
170 | # Mandatory: no
171 | # Range: 0-1000
172 | # Default:
173 | # StartPingers=1
174 |
175 | ### Option: StartDiscoverers
176 | # Number of pre-forked instances of discoverers.
177 | #
178 | # Mandatory: no
179 | # Range: 0-250
180 | # Default:
181 | # StartDiscoverers=1
182 |
183 | ### Option: StartHTTPPollers
184 | # Number of pre-forked instances of HTTP pollers.
185 | #
186 | # Mandatory: no
187 | # Range: 0-1000
188 | # Default:
189 | # StartHTTPPollers=1
190 |
191 | ### Option: JavaGateway
192 | # IP address (or hostname) of Zabbix Java gateway.
193 | # Only required if Java pollers are started.
194 | #
195 | # Mandatory: no
196 | # Default:
197 | # JavaGateway=
198 |
199 | ### Option: JavaGatewayPort
200 | # Port that Zabbix Java gateway listens on.
201 | #
202 | # Mandatory: no
203 | # Range: 1024-32767
204 | # Default:
205 | # JavaGatewayPort=10052
206 |
207 | ### Option: StartJavaPollers
208 | # Number of pre-forked instances of Java pollers.
209 | #
210 | # Mandatory: no
211 | # Range: 0-1000
212 | # Default:
213 | # StartJavaPollers=0
214 |
215 | ### Option: SNMPTrapperFile
216 | # Temporary file used for passing data from SNMP trap daemon to the server.
217 | # Must be the same as in zabbix_trap_receiver.pl or SNMPTT configuration file.
218 | #
219 | # Mandatory: no
220 | # Default:
221 | # SNMPTrapperFile=/tmp/zabbix_traps.tmp
222 |
223 | ### Option: StartSNMPTrapper
224 | # If 1, SNMP trapper process is started.
225 | #
226 | # Mandatory: no
227 | # Range: 0-1
228 | # Default:
229 | # StartSNMPTrapper=0
230 |
231 | ### Option: ListenIP
232 | # List of comma delimited IP addresses that the trapper should listen on.
233 | # Trapper will listen on all network interfaces if this parameter is missing.
234 | #
235 | # Mandatory: no
236 | # Default:
237 | # ListenIP=0.0.0.0
238 |
239 | # ListenIP=127.0.0.1
240 |
241 | ### Option: HousekeepingFrequency
242 | # How often Zabbix will perform housekeeping procedure (in hours).
243 | # Housekeeping is removing unnecessary information from history, alert, and alarms tables.
244 | #
245 | # Mandatory: no
246 | # Range: 1-24
247 | # Default:
248 | # HousekeepingFrequency=1
249 |
250 | ### Option: MaxHousekeeperDelete
251 | # The table "housekeeper" contains "tasks" for housekeeping procedure in the format:
252 | # [housekeeperid], [tablename], [field], [value].
253 | # No more than 'MaxHousekeeperDelete' rows (corresponding to [tablename], [field], [value])
254 | # will be deleted per one task in one housekeeping cycle.
255 | # SQLite3 does not use this parameter, deletes all corresponding rows without a limit.
256 | # If set to 0 then no limit is used at all. In this case you must know what you are doing!
257 | #
258 | # Mandatory: no
259 | # Range: 0-1000000
260 | # Default:
261 | # MaxHousekeeperDelete=500
262 |
263 | ### Option: DisableHousekeeping
264 | # If set to 1, disables housekeeping.
265 | #
266 | # Mandatory: no
267 | # Range: 0-1
268 | # Default:
269 | # DisableHousekeeping=0
270 |
271 | ### Option: SenderFrequency
272 | # How often Zabbix will try to send unsent alerts (in seconds).
273 | #
274 | # Mandatory: no
275 | # Range: 5-3600
276 | # Default:
277 | # SenderFrequency=30
278 |
279 | ### Option: CacheSize
280 | # Size of configuration cache, in bytes.
281 | # Shared memory size for storing host, item and trigger data.
282 | #
283 | # Mandatory: no
284 | # Range: 128K-2G
285 | # Default:
286 | # CacheSize=8M
287 |
288 | ### Option: CacheUpdateFrequency
289 | # How often Zabbix will perform update of configuration cache, in seconds.
290 | #
291 | # Mandatory: no
292 | # Range: 1-3600
293 | # Default:
294 | # CacheUpdateFrequency=60
295 |
296 | ### Option: StartDBSyncers
297 | # Number of pre-forked instances of DB Syncers
298 | #
299 | # Mandatory: no
300 | # Range: 1-100
301 | # Default:
302 | # StartDBSyncers=4
303 |
304 | ### Option: HistoryCacheSize
305 | # Size of history cache, in bytes.
306 | # Shared memory size for storing history data.
307 | #
308 | # Mandatory: no
309 | # Range: 128K-2G
310 | # Default
311 | # HistoryCacheSize=8M
312 |
313 | ### Option: TrendCacheSize
314 | # Size of trend cache, in bytes.
315 | # Shared memory size for storing trends data.
316 | #
317 | # Mandatory: no
318 | # Range: 128K-2G
319 | # Default:
320 | # TrendCacheSize=4M
321 |
322 | ### Option: HistoryTextCacheSize
323 | # Size of text history cache, in bytes.
324 | # Shared memory size for storing character, text or log history data.
325 | #
326 | # Mandatory: no
327 | # Range: 128K-2G
328 | # Default:
329 | # HistoryTextCacheSize=16M
330 |
331 | ### Option: NodeNoEvents
332 | # If set to '1' local events won't be sent to master node.
333 | # This won't impact ability of this node to propagate events from its child nodes.
334 | #
335 | # Mandatory: no
336 | # Range: 0-1
337 | # Default:
338 | # NodeNoEvents=0
339 |
340 | ### Option: NodeNoHistory
341 | # If set to '1' local history won't be sent to master node.
342 | # This won't impact ability of this node to propagate history from its child nodes.
343 | #
344 | # Mandatory: no
345 | # Range: 0-1
346 | # Default:
347 | # NodeNoHistory=0
348 |
349 | ### Option: Timeout
350 | # Specifies how long we wait for agent, SNMP device or external check (in seconds).
351 | #
352 | # Mandatory: no
353 | # Range: 1-30
354 | # Default:
355 | # Timeout=3
356 |
357 | ### Option: TrapperTimeout
358 | # Specifies how many seconds trapper may spend processing new data.
359 | #
360 | # Mandatory: no
361 | # Range: 1-300
362 | # Default:
363 | # TrapperTimeout=300
364 |
365 | ### Option: UnreachablePeriod
366 | # After how many seconds of unreachability treat a host as unavailable.
367 | #
368 | # Mandatory: no
369 | # Range: 1-3600
370 | # Default:
371 | # UnreachablePeriod=45
372 |
373 | ### Option: UnavailableDelay
374 | # How often host is checked for availability during the unavailability period, in seconds.
375 | #
376 | # Mandatory: no
377 | # Range: 1-3600
378 | # Default:
379 | # UnavailableDelay=60
380 |
381 | ### Option: UnreachableDelay
382 | # How often host is checked for availability during the unreachability period, in seconds.
383 | #
384 | # Mandatory: no
385 | # Range: 1-3600
386 | # Default:
387 | # UnreachableDelay=15
388 |
389 | ### Option: AlertScriptsPath
390 | # Full path to location of custom alert scripts.
391 | # Default depends on compilation options.
392 | #
393 | # Mandatory: no
394 | # Default:
395 | # AlertScriptsPath=/var/lib/zabbixsrv/alertscripts
396 | AlertScriptsPath=/usr/bin
397 |
398 | ### Option: ExternalScripts
399 | # Full path to location of external scripts.
400 | # Default depends on compilation options.
401 | #
402 | # Mandatory: no
403 | # Default:
404 | ExternalScripts=/var/lib/zabbixsrv/externalscripts
405 |
406 | ### Option: FpingLocation
407 | # Location of fping.
408 | # Make sure that fping binary has root ownership and SUID flag set.
409 | #
410 | # Mandatory: no
411 | # Default:
412 | # FpingLocation=/usr/sbin/fping
413 |
414 | ### Option: Fping6Location
415 | # Location of fping6.
416 | # Make sure that fping6 binary has root ownership and SUID flag set.
417 | # Make empty if your fping utility is capable to process IPv6 addresses.
418 | #
419 | # Mandatory: no
420 | # Default:
421 | # Fping6Location=/usr/sbin/fping6
422 |
423 | ### Option: SSHKeyLocation
424 | # Location of public and private keys for SSH checks and actions
425 | #
426 | # Mandatory: no
427 | # Default:
428 | # SSHKeyLocation=
429 |
430 | ### Option: LogSlowQueries
431 | # How long a database query may take before being logged (in milliseconds).
432 | # Only works if DebugLevel set to 3 or 4.
433 | # 0 - don't log slow queries.
434 | #
435 | # Mandatory: no
436 | # Range: 1-3600000
437 | # Default:
438 | # LogSlowQueries=0
439 |
440 | ### Option: TmpDir
441 | # Temporary directory.
442 | #
443 | # Mandatory: no
444 | # Default:
445 | # TmpDir=/tmp
446 |
447 | ### Option: Include
448 | # You may include individual files or all files in a directory in the configuration file.
449 | # Installing Zabbix will create include directory in /etc, unless modified during the compile time.
450 | #
451 | # Mandatory: no
452 | # Default:
453 | # Include=
454 |
455 | # Include=/etc/zabbix_server.general.conf
456 | # Include=/etc/zabbix_server.conf.d/
457 |
458 | ### Option: StartProxyPollers
459 | # Number of pre-forked instances of pollers for passive proxies.
460 | #
461 | # Mandatory: no
462 | # Range: 0-250
463 | # Default:
464 | # StartProxyPollers=1
465 |
466 | ### Option: ProxyConfigFrequency
467 | # How often Zabbix Server sends configuration data to a Zabbix Proxy in seconds.
468 | # This parameter is used only for proxies in the passive mode.
469 | #
470 | # Mandatory: no
471 | # Range: 1-3600*24*7
472 | # Default:
473 | # ProxyConfigFrequency=3600
474 |
475 | ### Option: ProxyDataFrequency
476 | # How often Zabbix Server requests history data from a Zabbix Proxy in seconds.
477 | # This parameter is used only for proxies in the passive mode.
478 | #
479 | # Mandatory: no
480 | # Range: 1-3600
481 | # Default:
482 | # ProxyDataFrequency=1
483 |
--------------------------------------------------------------------------------
/salt/zabbix/files/usr/lib/python2.6/site-packages/zabbix/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ist0ne/salt-states/2b9d971d573e3fce824f683591a28a5c79d0ac0d/salt/zabbix/files/usr/lib/python2.6/site-packages/zabbix/__init__.py
--------------------------------------------------------------------------------
/salt/zabbix/files/usr/lib/python2.6/site-packages/zabbix/zapi.py:
--------------------------------------------------------------------------------
1 | ################################################
2 | # Python API for Zabbix v0.5
3 | #
4 | # Author: frankyao @PPTV, Shanghai, China
5 | # Email: baniu.yao@gmail.com
6 | # Date: 24, May, 2012
7 | ################################################
8 |
9 | try:
10 | import json
11 | except ImportError:
12 | import simplejson as json
13 |
14 | import urllib2, subprocess, re, time
15 |
16 | class ZabbixAPIException(Exception):  # raised on login failure or when the server replies with an 'error' object
17 |     pass
18 |
19 | class ZabbixAPI(object):  # thin JSON-RPC client for the Zabbix web API (Python 2 / urllib2)
20 |     __auth = ''  # session token returned by user.login; empty string means "not logged in"
21 |     __id = 0     # JSON-RPC request id, incremented after every postRequest
22 |     _state = {}  # only referenced by the commented-out singleton __new__ below
23 |     # def __new__(cls, *args, **kw):
24 |     #     if not cls._state.has_key(cls):
25 |     #         cls._state[cls] = super(ZabbixAPI, cls).__new__(cls, *args, **kw)
26 |     #     return cls._state[cls]
27 |     def __init__(self, url, user, password):
28 |         self.__url = url.rstrip('/') + '/api_jsonrpc.php'  # normalize base URL into the JSON-RPC endpoint
29 |         self.__user = user
30 |         self.__password = password
31 |         self._zabbix_api_object_list = ('Action', 'Alert', 'APIInfo', 'Application', 'DCheck', 'DHost', 'DRule',
32 |             'DService', 'Event', 'Graph', 'Grahpitem', 'History', 'Host', 'Hostgroup', 'Image', 'Item',
33 |             'Maintenance', 'Map', 'Mediatype', 'Proxy', 'Screen', 'Script', 'Template', 'Trigger', 'User',
34 |             'Usergroup', 'Usermacro', 'Usermedia')
35 |     def __getattr__(self, name):  # lazily build and cache proxy objects: zapi.Host, zapi.Item, ...
36 |         if name not in self._zabbix_api_object_list:
37 |             raise ZabbixAPIException('No such API object: %s' % name)
38 |         if not self.__dict__.has_key(name):  # Python 2 dict API; cache one factory per object name
39 |             self.__dict__[name] = ZabbixAPIObjectFactory(self, name)
40 |         return self.__dict__[name]
41 |     def login(self):  # authenticate and remember the session token for subsequent requests
42 |         user_info = {'user' : self.__user,
43 |                      'password' : self.__password}
44 |         obj = self.json_obj('user.login', user_info)
45 |         content = self.postRequest(obj)
46 |         try:
47 |             self.__auth = content['result']
48 |         except KeyError, e:  # no 'result' key => server returned an error object instead
49 |             e = content['error']['data']
50 |             raise ZabbixAPIException(e)
51 |     def isLogin(self):  # True once login() has stored a session token
52 |         return self.__auth != ''
53 |     def __checkAuth__(self):  # raise unless login() succeeded earlier
54 |         if not self.isLogin():
55 |             raise ZabbixAPIException("NOT logged in")
56 |     def json_obj(self, method, params):  # serialize one JSON-RPC 2.0 request envelope
57 |         obj = { 'jsonrpc' : '2.0',
58 |                 'method' : method,
59 |                 'params' : params,
60 |                 'auth' : self.__auth,
61 |                 'id' : self.__id}
62 |         return json.dumps(obj)
63 |     def postRequest(self, json_obj):  # POST the envelope and return the decoded JSON reply as a dict
64 |         #print 'Post: %s' % json_obj
65 |         headers = { 'Content-Type' : 'application/json-rpc',
66 |                     'User-Agent' : 'python/zabbix_api'}
67 |         req = urllib2.Request(self.__url, json_obj, headers)
68 |         opener = urllib2.urlopen(req)  # NOTE(review): no timeout — a hung server blocks the caller indefinitely
69 |         content = json.loads(opener.read())
70 |         self.__id += 1
71 |         #print 'Receive: %s' % content
72 |         return content
73 | 
74 |     '''
75 |     /usr/local/zabbix/bin/zabbix_get is the default path to zabbix_get, it depends on the 'prefix' while install zabbix.
76 |     plus, the ip(computer run this script) must be put into the conf of agent.
77 |     '''
78 |     @staticmethod
79 |     def zabbixGet(ip, key):  # shell out to zabbix_get; returns stripped stdout, or 'ERROR' if anything hit stderr
80 |         zabbix_get = subprocess.Popen('/usr/local/zabbix/bin/zabbix_get -s %s -k %s' % (ip, key), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
81 |         result, err = zabbix_get.communicate()
82 |         if err:
83 |             return 'ERROR'
84 |         return result.strip()
85 | 
86 |     def createObject(self, object_name, *args, **kwargs):  # object_name is actually a class; instantiate it bound to this API
87 |         return object_name(self, *args, **kwargs)
88 | 
89 |     def getHostByHostid(self, hostids):  # accept one hostid or a list; return the matching host names
90 |         if not isinstance(hostids,list):
91 |             hostids = [hostids]
92 |         return [dict['host'] for dict in self.host.get({'hostids':hostids,'output':'extend'})]  # NOTE(review): lowercase 'host' is not in _zabbix_api_object_list, so __getattr__ rejects it — likely meant self.Host; also shadows builtin 'dict'
93 |
94 | #################################################
95 | # Decorate Method
96 | #################################################
97 |
98 | def checkAuth(func):  # decorator: require a successful login() before invoking func
99 |     def ret(self, *args):
100 |         self.__checkAuth__()
101 |         return func(self, args)  # NOTE(review): passes the tuple itself, not *args; also effectively dead — ZabbixAPIObjectMethod's wrapper never calls the wrapped func, so this auth check never runs
102 |     return ret
103 |
104 | def postJson(method_name):  # decorator factory: route a call straight to the given JSON-RPC method and unwrap 'result'
105 |     def decorator(func):
106 |         def wrapper(self, params):
107 |             try:
108 |                 content = self.postRequest(self.json_obj(method_name, params))
109 |                 return content['result']
110 |             except KeyError, e:  # no 'result' key: re-raise the server-side error message
111 |                 e = content['error']['data']
112 |                 raise ZabbixAPIException(e)
113 |         return wrapper
114 |     return decorator
115 |
116 | def ZabbixAPIObjectMethod(func):  # decorator: replace func entirely — post method_name/params as a JSON-RPC call and unwrap 'result' (func itself is never invoked)
117 |     def wrapper(self, method_name, params):
118 |         try:
119 |             content = self.postRequest(self.json_obj(method_name, params))
120 |             return content['result']
121 |         except KeyError, e:  # no 'result' key: surface the server-side error message instead
122 |             e = content['error']['data']
123 |             raise ZabbixAPIException(e)
124 |     return wrapper
125 |
126 | #################################################
127 | # Zabbix API Object (host, item...)
128 | #################################################
129 |
130 | class ZabbixAPIObjectFactory(object):  # per-object proxy: turns attribute access into '<Object>.<method>' RPC calls
131 |     def __init__(self, zapi, object_name=''):
132 |         self.__zapi = zapi               # owning ZabbixAPI instance (transport + auth)
133 |         self.__object_name = object_name # e.g. 'Host', 'Hostgroup'
134 |     def __checkAuth__(self):  # delegate the auth check to the owning ZabbixAPI
135 |         self.__zapi.__checkAuth__()
136 |     def postRequest(self, json_obj):  # delegate transport to the owning ZabbixAPI
137 |         return self.__zapi.postRequest(json_obj)
138 |     def json_obj(self, method, param):
139 |         return self.__zapi.json_obj(method, param)
140 |     def __getattr__(self, method_name):  # e.g. zapi.Host.get(params) -> proxyMethod('Host.get', params)
141 |         def method(params):
142 |             return self.proxyMethod('%s.%s' % (self.__object_name,method_name), params)
143 |         return method
144 |     # 'find' is a wrapper around 'get'; unlike 'get', it can create the requested object when it doesn't exist yet
145 |     def find(self, params, attr_name=None, to_create=False):
146 |         filtered_list = []
147 |         result = self.proxyMethod('%s.get' % self.__object_name, {'output':'extend','filter': params})
148 |         if to_create and len(result) == 0:
149 |             result = self.proxyMethod('%s.create' % self.__object_name, params)
150 |             return result.values()[0]  # Python 2: dict.values() is a list; returns the create-reply's id list
151 |         if attr_name is not None:  # project each returned element down to a single attribute
152 |             for element in result:
153 |                 filtered_list.append(element[attr_name])
154 |             return filtered_list
155 |         else:
156 |             return result
157 | 
158 | 
159 |     @ZabbixAPIObjectMethod
160 |     @checkAuth
161 |     def proxyMethod(self, method_name, params):  # body supplied by the decorators: performs the actual RPC
162 |         pass
163 |
164 | def testCase():  # ad-hoc usage examples against a live server, not an automated test
165 |     zapi = ZabbixAPI(url='http://your.zabbix.address', user='admin', password='zabbix')
166 |     zapi.login()
167 |     print zapi.Graph.find({'graphid':'49931'}, attr_name='graphid')[0]
168 |     hostid = zapi.Host.find({'ip':ip}, attr_name='hostid')[0]  # NOTE(review): 'ip' is undefined here — NameError if reached
169 |     print zapi.Host.exists({'filter':{'host':'BJBSJ-Zabbix-Proxy-82-225'}})
170 |     host = zapi.createObject(Host, 'HostToCreate')  # NOTE(review): no 'Host' class is defined in this module
171 |     item = host.getItem('444107')
172 |     zapi.host.get({'hostids':['16913','17411'],'output':'extend'})  # NOTE(review): lowercase 'host' is rejected by ZabbixAPI.__getattr__
173 |     group = zapi.createObject(Hostgroup, '926')  # NOTE(review): no 'Hostgroup' class is defined in this module
174 |     print zapi.getHostByHostid('16913')
175 | 
176 | if __name__ == '__main__':
177 |     testCase()
178 |
--------------------------------------------------------------------------------
/salt/zabbix/server.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - salt.minion
3 | - mysql.server
4 |
5 | zabbix-server:
6 | pkg.installed:
7 | - pkgs:
8 | - zabbix22-server
9 | - zabbix22-server-mysql
10 | file.managed:
11 | - name: /etc/zabbix_server.conf
12 | - source: salt://zabbix/files/etc/zabbix_server.conf
13 | - template: jinja
14 | - defaults:
15 | DBHost: localhost
16 | DBName: zabbix
17 | DBUser: zabbix
18 | DBPassword: zabbix_pass
19 | DBSocket: /var/lib/mysql/mysql.sock
20 | DBPort: 3306
21 | - require:
22 | - pkg: zabbix-server
23 | service.running:
24 | - enable: True
25 | - watch:
26 | - file: zabbix-server
27 |
28 | zabbix-server-role:
29 | file.append:
30 | - name: /etc/salt/roles
31 | - text:
32 | - 'zabbix-server'
33 | - require:
34 | - file: roles
35 | - service: zabbix-server
36 | - service: salt-minion
37 | - watch_in:
38 | - module: sync_grains
39 |
40 |
41 | zabbix_server.conf-link:
42 | file.symlink:
43 | - name: /etc/zabbix/zabbix_server.conf
44 | - target: /etc/zabbix_server.conf
45 | - require_in:
46 | - service: zabbix-server
47 | - require:
48 | - pkg: zabbix-server
49 | - file: zabbix-server
50 |
51 | zabbix_mysql:
52 | pkg.installed:
53 | - name: MySQL-python
54 | mysql_database.present:
55 | - name: zabbix
56 | - require:
57 | - pkg: zabbix_mysql
58 | - service: mysql-server
59 | mysql_user.present:
60 | - name: zabbix
61 | - host: localhost
62 | - password: zabbix_pass
63 | - require:
64 | - mysql_database: zabbix_mysql
65 | mysql_grants.present:
66 | - grant: all
67 | - database: zabbix.*
68 | - user: zabbix
69 | - host: localhost
70 | - require:
71 | - mysql_user: zabbix_mysql
72 | - require_in:
73 | - service: zabbix-server
74 |
75 |
76 | zabbix_mysql-init:
77 | cmd.run:
78 | - name: mysql -uroot zabbix < /usr/share/zabbix-mysql/schema.sql && mysql -uroot zabbix < /usr/share/zabbix-mysql/images.sql && mysql -uroot zabbix < /usr/share/zabbix-mysql/data.sql
79 | - unless: mysql -uroot -e "SELECT COUNT(*) from zabbix.users"
80 | - require:
81 | - pkg: zabbix-server
82 | - mysql_grants: zabbix_mysql
83 | - require_in:
84 | - file: zabbix-server
85 | - service: zabbix-server
86 |
--------------------------------------------------------------------------------
/salt/zabbix/web.sls:
--------------------------------------------------------------------------------
1 | include:
2 | - apache
3 | - php
4 | - salt.minion
5 |
6 | zabbix-web:
7 | pkg.installed:
8 | - pkgs:
9 | - zabbix22-web
10 | - zabbix22-web-mysql
11 | - watch_in:
12 | - service: apache
13 | file.managed:
14 | - name: /etc/zabbix/web/zabbix.conf.php
15 | - source: salt://zabbix/files/etc/zabbix/web/zabbix.conf.php
16 | - require:
17 | - pkg: zabbix-web
18 |
19 | zabbix-web-role:
20 | file.append:
21 | - name: /etc/salt/roles
22 | - text:
23 | - 'zabbix-web'
24 | - require:
25 | - file: roles
26 | - pkg: zabbix-web
27 | - service: salt-minion
28 | - watch_in:
29 | - module: sync_grains
30 |
--------------------------------------------------------------------------------