├── .gitignore ├── .gitmodules ├── 7zip └── init.sls ├── README.md ├── _modules └── additional.py ├── acestream ├── aceconfig.py.jinja ├── channels.tsv ├── getlogos.py ├── init.sls ├── makelist.py ├── monit.conf.jinja └── upstart.conf.jinja ├── apport └── init.sls ├── awesome ├── init.sls ├── lib │ └── scratch │ │ ├── drop.lua │ │ ├── init.lua │ │ └── pad.lua ├── rc.lua └── themes │ └── green_rabit │ ├── background.png │ ├── layouts │ ├── fairh.png │ ├── fairhw.png │ ├── fairv.png │ ├── fairvw.png │ ├── floating.png │ ├── floatingw.png │ ├── fullscreen.png │ ├── fullscreenw.png │ ├── magnifier.png │ ├── magnifierw.png │ ├── max.png │ ├── maxw.png │ ├── tile.png │ ├── tilebottom.png │ ├── tilebottomw.png │ ├── tileleft.png │ ├── tileleftw.png │ ├── tiletop.png │ ├── tiletopw.png │ └── tilew.png │ ├── submenu.png │ ├── taglist │ ├── squarefw.png │ └── squarew.png │ ├── tasklist │ ├── floating.png │ └── floatingw.png │ ├── theme.lua │ └── titlebar │ ├── close.png │ ├── close_focus.png │ ├── close_normal.png │ ├── closer.png │ ├── floating_focus_active.png │ ├── floating_focus_inactive.png │ ├── floating_normal_active.png │ ├── floating_normal_inactive.png │ ├── maximized_focus_active.png │ ├── maximized_focus_inactive.png │ ├── maximized_normal_active.png │ ├── maximized_normal_inactive.png │ ├── ontop_focus_active.png │ ├── ontop_focus_inactive.png │ ├── ontop_normal_active.png │ ├── ontop_normal_inactive.png │ ├── sticky_focus_active.png │ ├── sticky_focus_inactive.png │ ├── sticky_normal_active.png │ └── sticky_normal_inactive.png ├── backup ├── backup.sh.jinja └── init.sls ├── cash ├── cash-startup.sh ├── dnc-kassa │ ├── admin.sls │ ├── db.conf.jinja │ ├── dnc-run.sh │ ├── hw.conf │ ├── init.sls │ ├── prepare_dist.sh │ └── withoutFR.tab ├── init.sls ├── upstart.conf └── xwrapper.conf ├── collectd ├── battery │ ├── config.jinja │ └── init.sls ├── collectd.conf.jinja ├── init.sls ├── libvirt │ ├── config.jinja │ └── init.sls ├── nginx │ ├── config.jinja │ └── init.sls ├── notify_email │ ├── config.jinja │ └── init.sls ├── nut │ ├── config.jinja │ └── init.sls ├── openvpn │ ├── config.jinja │ └── init.sls ├── ping │ ├── config.jinja │ └── init.sls ├── postgresql │ ├── config.jinja │ └── init.sls ├── radeon │ ├── config.jinja │ ├── init.sls │ └── radeon_info.sh ├── server.sls └── upstart.conf.jinja ├── common └── init.sls ├── crypt ├── init.sls └── pam_mount.conf.xml.jinja ├── cryptsetup └── init.sls ├── dnsmasq ├── dnsmasq.conf.jinja └── init.sls ├── drone └── init.sls ├── fglrx └── init.sls ├── firefox └── init.sls ├── gawk └── init.sls ├── gimp └── init.sls ├── git └── init.sls ├── gogs ├── app.ini.jinja ├── init.sls ├── nginx-site.conf.jinja └── upstart.conf.jinja ├── htop └── init.sls ├── i2p └── init.sls ├── jarmon ├── init.sls ├── jarmon-11.8 │ ├── css │ │ ├── jquerytools.dateinput.skin1.css │ │ ├── style.css │ │ └── tabs-no-images.css │ ├── icons │ │ ├── calendar.png │ │ ├── loading.gif │ │ ├── next.gif │ │ └── prev.gif │ ├── index.html │ └── js │ │ ├── dependencies.js │ │ ├── jarmon.js │ │ ├── jarmon_modules.js │ │ └── jarmon_recipes.js └── nginx-site.conf.jinja ├── keepassx └── init.sls ├── kernel └── init.sls ├── kodi ├── addons │ └── script.securitycam │ │ ├── addon.xml │ │ ├── changelog.txt │ │ ├── default.py │ │ ├── icon.png │ │ └── resources │ │ ├── language │ │ └── English │ │ │ └── strings.xml │ │ └── settings.xml ├── advancedsettings.xml ├── init.sls ├── monit.conf.jinja ├── upstart.conf ├── xorg.conf └── xwrapper.conf ├── kvm ├── init.sls ├── interfaces └── vfio-bind ├── 
laptop └── init.sls ├── ldap-admin ├── config.php.jinja ├── init.sls └── nginx-site.conf.jinja ├── ldap ├── default-slapd ├── init.sls ├── ldif │ ├── index.ldif │ ├── ldaps.ldif │ ├── memberof.ldif │ ├── openssh-lpk.ldif │ ├── refint.ldif │ └── sudo.ldif └── server.sls ├── libcap2 └── init.sls ├── libva └── init.sls ├── libvirt └── init.sls ├── lightdm ├── init.sls ├── lightdm.conf └── login.sh ├── logwatch ├── init.sls └── logwatch.conf.jinja ├── lvm └── init.sls ├── mc └── init.sls ├── monit ├── init.sls ├── macros.sls ├── monitrc.jinja ├── nginx-site.conf.jinja ├── server.sls ├── server.xml.jinja └── upstart.conf.jinja ├── mpd ├── init.sls └── mpd.conf ├── net ├── hosts.jinja └── init.sls ├── nginx ├── common.conf.jinja ├── init.sls ├── monit.conf.jinja ├── nginx.conf ├── ssl.conf.jinja └── www-util.conf ├── nmap └── init.sls ├── ntp ├── init.sls └── ntp.conf ├── nut ├── init.sls ├── nut.conf ├── udev.rules ├── ups.conf ├── upsd.conf ├── upsd.users └── upsmon.conf ├── openssl ├── ca.config.jinja ├── ca.sls ├── certscheck.sh.jinja ├── init.sls └── vars.sls ├── openvpn ├── client.conf.jinja ├── init.sls ├── monit.conf.jinja ├── server.conf.jinja ├── server.sls └── vars.sls ├── php ├── init.sls ├── php-fpm.conf ├── php.ini ├── postgresql.sls └── www.pool.conf ├── polipo ├── config.jinja └── init.sls ├── postgresql ├── init.sls ├── pg_hba.conf.jinja └── postgresql.conf.jinja ├── prosody ├── init.sls ├── modules │ ├── mod_auth_ldap │ │ └── mod_auth_ldap.lua │ ├── mod_carbons │ │ └── mod_carbons.lua │ ├── mod_csi │ │ └── mod_csi.lua │ ├── mod_mam │ │ ├── mamprefs.lib.lua │ │ ├── mamprefsxml.lib.lua │ │ ├── mod_mam.lua │ │ └── rsm.lib.lua │ ├── mod_mam_muc │ │ └── mod_mam_muc.lua │ └── mod_smacks │ │ └── mod_smacks.lua ├── prosody.cfg.lua.jinja ├── sharedgroups.ini.jinja └── vhost.cfg.lua.jinja ├── python └── init.sls ├── rar └── init.sls ├── razerhydra ├── init.sls └── udev.rules ├── rstream ├── config.ini.jinja ├── init.sls ├── monit.conf.jinja ├── rstream_freespace.sh └── upstart.conf.jinja ├── rsync └── init.sls ├── rxvt-unicode └── init.sls ├── s3ql ├── init.sls └── upstart.conf.jinja ├── salt ├── init.sls ├── master.jinja ├── master.sls ├── minion.jinja ├── monit-master.conf.jinja └── monit.conf.jinja ├── schroot ├── fstab ├── init.sls ├── radeon.asound.conf ├── radeon.conf.jinja ├── radeon.sls ├── radeon.xorg.conf ├── radeon.xwrapper.conf ├── schroot.conf.jinja └── schroot.pam ├── seafile ├── ccnet_ccnet.conf.jinja ├── conf_seafdav.conf ├── init.sls ├── logrotate.conf.jinja ├── nginx-site.conf.jinja ├── seafile.conf.jinja ├── seahub_settings.py.jinja └── upstart.conf.jinja ├── sensors └── init.sls ├── simulator ├── init.sls └── udev.rules ├── skype └── init.sls ├── smart ├── default ├── init.sls └── smartd.conf.jinja ├── smplayer └── init.sls ├── squid ├── init.sls └── squid.conf.jinja ├── ssh ├── init.sls ├── monit.conf.jinja └── sshd_config ├── ssmtp ├── init.sls ├── revaliases.jinja └── ssmtp.conf.jinja ├── steam └── init.sls ├── sysstat └── init.sls ├── thunderbird └── init.sls ├── tinyproxy ├── init.sls └── tinyproxy.conf.jinja ├── tmux ├── disk.sh ├── init.sls ├── mem.sh ├── net.sh.jinja └── tmux.conf ├── tor └── init.sls ├── transmission ├── daemon │ ├── init.sls │ ├── script-done.sh.jinja │ └── settings.json.jinja └── init.sls ├── udev └── init.sls ├── users └── init.sls ├── vim ├── init.sls └── vimrc ├── virtualbox └── init.sls ├── vsftpd ├── init.sls └── vsftpd.conf.jinja ├── x86 └── init.sls ├── xen ├── init.sls └── interfaces ├── xonotic └── init.sls ├── zip └── init.sls └── 
zsh ├── init.sls └── zshrc /.gitignore: -------------------------------------------------------------------------------- 1 | cash/dnc-kassa/dnc/* 2 | cash/dnc-kassa/dist/* 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "rstream/src"] 2 | path = rstream/src 3 | url = https://github.com/rabits/rstream.git 4 | [submodule "acestream/aceproxy"] 5 | path = acestream/aceproxy 6 | url = https://github.com/ValdikSS/aceproxy 7 | [submodule "ldap-admin/src"] 8 | path = ldap-admin/src 9 | url = https://github.com/rabits/rldapadmin.git 10 | -------------------------------------------------------------------------------- /7zip/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # 7zip packer & unpacker 3 | # 4 | 5 | p7zip: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /_modules/additional.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | ''' 3 | Additional utils 4 | 5 | :maintainer: Rabit 6 | :maturity: new 7 | :depends: state 8 | :platform: all 9 | ''' 10 | 11 | def state_list(): 12 | ''' 13 | Returns list of current state on the node 14 | 15 | CLI Example:: 16 | 17 | salt '*' additional.state_list 18 | ''' 19 | import salt.state 20 | st_ = salt.state.HighState(__opts__) 21 | top = st_.get_top() 22 | return st_.top_matches(top) 23 | 24 | def state_in(check, env=None, **kwargs): 25 | ''' 26 | Return True if all check-modules in current state 27 | 28 | CLI Example:: 29 | 30 | salt '*' additional.state_in 'check={"nginx", "collectd"}' env=master 31 | ''' 32 | import salt.state 33 | from operator import add 34 | check = set(check) 35 | st_ = salt.state.HighState(__opts__) 36 | top = st_.get_top() 37 | match = st_.top_matches(top) 38 | if env != None: 39 | if env in match: 40 | return check.issubset(set(match[env])) 41 | else: 42 | for env in match: 43 | if check.issubset(set(match[env])): 44 | return True 45 | return False 46 | 47 | def sd_list(): 48 | ''' 49 | Return list of sd sata disk devices 50 | 51 | CLI Example:: 52 | 53 | salt '*' additional.sd_list 54 | ''' 55 | import os 56 | return [name for name in os.listdir('/sys/block') if name.startswith('sd') ] 57 | 58 | def substring_search(what, where): 59 | ''' 60 | Return True if substring is placed in array of gpus 61 | 62 | CLI Example:: 63 | 64 | salt '*' additional.substring_search 'Radeon' [{'model':'asd Radeon asdds'}] 65 | ''' 66 | return any(what in s['model'] for s in where) or any(what in s['vendor'] for s in where) 67 | 68 | def inverse(value, separator = '.'): 69 | ''' 70 | Will reverse seaprated value like 'sub.example.net' => 'net.example.sub' 71 | 72 | CLI Example:: 73 | 74 | salt '*' additional.inverse 'sub.example.net' 75 | ''' 76 | out = value.split(separator) 77 | out.reverse() 78 | return separator.join(out) 79 | -------------------------------------------------------------------------------- /acestream/channels.tsv: -------------------------------------------------------------------------------- 1 | # Title Group 2 | Cartoon Network 3 | Disney Channel 4 | Мужское кино 5 | Техно 24 6 | A-One 7 | Муз ТВ 8 | 112 Украина 9 | BBC World News 10 | CNN International 11 | Дождь 12 | РБК 13 | Россия 24 14 | BBC One 15 | BBC Two 16 | Домашний 17 | Звезда 18 | НТВ 19 | Первый канал 20 | Пятый канал 21 | РEН ТВ 22 | Россия 1 23 | ТВЦ 
24 | 24 Док 25 | Animаl Planеt 26 | Nationаl Geogrаphic 27 | Nаt Geо Wild 28 | 2x2 29 | СТС 30 | ТДК 31 | ТНТ 32 | ТНТ (резерв) 33 | 100 ТВ 34 | Санкт-Петербург 35 | Eurosport 36 | Eurospоrt 2 37 | НТВ+ Спорт 38 | НТВ+ Спорт плюс 39 | AMC 40 | Amedia 1 41 | Amedia 2 42 | Fоx 43 | Fоx Lifе 44 | HBO 2 45 | TV 1000 46 | TV 1000 Action East 47 | TV 1000 Русское кино 48 | Дом кино 49 | Иллюзион+ 50 | НТВ+ 3D 51 | НТВ+ Кино Союз 52 | НТВ+ Кино плюс 53 | НТВ+ Киноклуб 54 | НТВ+ Кинохит 55 | НТВ+ Наше кино 56 | НТВ+ Наше новое кино 57 | НТВ+ Премьера 58 | Наше любимое кино 59 | ТВ 3 60 | -------------------------------------------------------------------------------- /acestream/getlogos.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python2 2 | # -*- coding: UTF-8 -*- 3 | 4 | # Usage: 5 | # ./getlogos.py 'http://www.trambroid.com/playlist.xspf' 6 | 7 | import sys, os 8 | from xml.dom.minidom import parseString 9 | import urllib2 10 | 11 | logo_dir = 'getlogos' 12 | 13 | if not os.path.isdir(logo_dir): 14 | os.makedirs(logo_dir) 15 | 16 | # Start parsing 17 | data = parseString(urllib2.urlopen(sys.argv[1]).read()) 18 | 19 | for track in data.documentElement.getElementsByTagName('track'): 20 | track_data = {} 21 | track_data['title'] = track.getElementsByTagName('title')[0].firstChild.nodeValue 22 | 23 | print "Processing channel: %s" % track_data['title'].encode('utf-8') 24 | 25 | if track.getElementsByTagName('image'): 26 | image = track.getElementsByTagName('image')[0].firstChild.nodeValue 27 | image_file = logo_dir + '/' + (track_data['title'] + image.split('.')[-1]).replace('/', '_') 28 | if not os.path.isfile(image_file): 29 | print " Download logo: %s" % image 30 | try: 31 | u = urllib2.urlopen(image, None, 30) 32 | with open(image_file, 'wb') as outfile: 33 | size = int(u.info().getheaders('Content-Length')[0]) 34 | while True: 35 | b = u.read(8192) 36 | if not b: 37 | break 38 | outfile.write(b) 39 | except Exception as e: 40 | print " error: %s" % str(e) 41 | -------------------------------------------------------------------------------- /acestream/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process aceproxy matching "acehttp.py" 7 | group service 8 | group media 9 | group aceproxy 10 | start program = "/usr/sbin/service aceproxy start" 11 | stop program = "/usr/sbin/service aceproxy stop" 12 | if failed url http://127.0.0.1:8000/stat/ timeout 10 seconds for 3 cycles then restart 13 | if failed host 127.0.0.1 port 4212 type tcp for 3 cycles then restart # VLC 14 | if failed host 127.0.0.1 port 62062 type tcp for 3 cycles then restart # ACEStream 15 | if 5 restarts with 5 cycles then timeout 16 | -------------------------------------------------------------------------------- /acestream/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | description "AceProxy startup script" 7 | author "asavah" 8 | 9 | start on (filesystem and net-device-up IFACE!=lo) 10 | stop on runlevel [016] 11 | 12 | respawn 13 | respawn limit 10 5 14 | 15 | setuid acestream 16 | 17 | exec python2 /srv/aceproxy/acehttp.py 18 | -------------------------------------------------------------------------------- /apport/init.sls: 
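The aceproxy monit check and upstart job above are Jinja templates that the acestream state (acestream/init.sls in the tree, not reproduced in this excerpt) drops onto the node. A minimal sketch of how such a template pair is typically wired to a service in this repository's style — the target paths and the "aceproxy" service id are assumptions, not the actual contents of acestream/init.sls:

# Sketch only -- illustrates the file.managed + service.running pattern used
# throughout this repo; paths and ids are assumed, not taken from acestream/init.sls.

/etc/init/aceproxy.conf:
  file.managed:
    - source: salt://acestream/upstart.conf.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: 644
    - watch_in:
      - service: aceproxy

/etc/monit/conf.d/aceproxy.conf:
  file.managed:
    - source: salt://acestream/monit.conf.jinja
    - template: jinja
    - user: root
    - group: root
    - mode: 644

aceproxy:
  service.running:
    - enable: True

With this wiring a change to the rendered upstart job restarts aceproxy automatically, and monit (configured separately by the monit state) picks up the new check on its next reload.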
-------------------------------------------------------------------------------- 1 | # 2 | # Apport disable 3 | # 4 | 5 | apport: 6 | pkg.purged 7 | -------------------------------------------------------------------------------- /awesome/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Awesome - window manager 3 | # 4 | 5 | awesome: 6 | pkg.installed 7 | 8 | light-locker: 9 | pkg.installed 10 | 11 | /etc/xdg/awesome/rc.lua: 12 | file.managed: 13 | - source: salt://awesome/rc.lua 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - pkg: awesome 19 | 20 | /usr/share/awesome/lib/scratch: 21 | file.recurse: 22 | - source: salt://awesome/lib/scratch 23 | - user: root 24 | - group: root 25 | - clean: True 26 | - dir_mode: 755 27 | - file_mode: 644 28 | 29 | /usr/share/awesome/themes/green_rabit: 30 | file.recurse: 31 | - source: salt://awesome/themes/green_rabit 32 | - user: root 33 | - group: root 34 | - clean: True 35 | - dir_mode: 755 36 | - file_mode: 644 37 | -------------------------------------------------------------------------------- /awesome/lib/scratch/init.lua: -------------------------------------------------------------------------------- 1 | --------------------------------------------------------------- 2 | -- Drop-down applications and scratchpad manager for awesome wm 3 | --------------------------------------------------------------- 4 | -- Coded by: * Adrian C. (anrxc) 5 | -- Licensed under the WTFPL version 2 6 | -- * http://sam.zoy.org/wtfpl/COPYING 7 | --------------------------------------------------------------- 8 | 9 | local scratch = {} -- module scratch 10 | 11 | scratch.pad = require("scratch.pad") 12 | scratch.drop = require("scratch.drop") 13 | 14 | return scratch 15 | -------------------------------------------------------------------------------- /awesome/themes/green_rabit/background.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/background.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/fairh.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/fairh.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/fairhw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/fairhw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/fairv.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/fairv.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/fairvw.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/fairvw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/floating.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/floating.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/floatingw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/floatingw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/fullscreen.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/fullscreen.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/fullscreenw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/fullscreenw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/magnifier.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/magnifier.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/magnifierw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/magnifierw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/max.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/max.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/maxw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/maxw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tile.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tile.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tilebottom.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tilebottom.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tilebottomw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tilebottomw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tileleft.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tileleft.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tileleftw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tileleftw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tiletop.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tiletop.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tiletopw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tiletopw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/layouts/tilew.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/layouts/tilew.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/submenu.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/submenu.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/taglist/squarefw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/taglist/squarefw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/taglist/squarew.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/taglist/squarew.png -------------------------------------------------------------------------------- 
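The custom execution module _modules/additional.py near the top of this dump is what drives the conditional includes further down (collectd/init.sls calls additional.state_in and additional.substring_search). A short sketch of how those helpers are invoked from Jinja inside an .sls file — the state names are placeholders, and the module must be synced to the minion first (saltutil.sync_modules or a highstate run):

# Sketch: calling the helpers from _modules/additional.py in Jinja.
# 'nginx'/'collectd' are example arguments, not required state names.

{% if salt['additional.state_in'](['nginx', 'collectd']) %}
include:
  - collectd.nginx
{% endif %}

# collectd/init.sls below follows the same pattern and additionally uses
# salt['additional.substring_search']('Radeon', grains['gpus'])
# to pull in the Radeon plugin only on hosts with a Radeon GPU.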
/awesome/themes/green_rabit/tasklist/floating.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/tasklist/floating.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/tasklist/floatingw.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/tasklist/floatingw.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/close.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/close.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/close_focus.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/close_focus.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/close_normal.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/close_normal.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/closer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/closer.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/floating_focus_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/floating_focus_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/floating_focus_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/floating_focus_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/floating_normal_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/floating_normal_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/floating_normal_inactive.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/floating_normal_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/maximized_focus_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/maximized_focus_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/maximized_focus_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/maximized_focus_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/maximized_normal_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/maximized_normal_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/maximized_normal_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/maximized_normal_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/ontop_focus_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/ontop_focus_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/ontop_focus_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/ontop_focus_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/ontop_normal_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/ontop_normal_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/ontop_normal_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/ontop_normal_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/sticky_focus_active.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/sticky_focus_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/sticky_focus_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/sticky_focus_inactive.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/sticky_normal_active.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/sticky_normal_active.png -------------------------------------------------------------------------------- /awesome/themes/green_rabit/titlebar/sticky_normal_inactive.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/awesome/themes/green_rabit/titlebar/sticky_normal_inactive.png -------------------------------------------------------------------------------- /backup/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Crontab encrypted lvm backup 3 | # 4 | 5 | # To use backup you must setup environment: 6 | # 1. Create key file /home/backup.key (don't forget save password into encrypted storage and wallet) 7 | # # tr -dc A-Za-z0-9_ < /dev/urandom | head -c 64 > /home/backup.key 8 | # 2. Encrypt selected partition: 9 | # # cryptsetup luksFormat /dev/mapper/vg-backup /home/backup.key 10 | # 3. Open encrypted device: 11 | # # cryptsetup -d /home/backup.key luksOpen /dev/mapper/vg-backup backup_crypt 12 | # 4. Create volume group "backup" into encrypted partition 13 | # # vgcreate backup /dev/mapper/backup_crypt 14 | # 5. Create 2 logical volumes into volume group: 15 | # # lvcreate -l 32767 -n daily backup && lvcreate -l 32768 -n weekly backup 16 | # 6. Format this partitions: 17 | # # mkfs.ext4 /dev/backup/daily && mkfs.ext4 /dev/backup/weekly 18 | # 7. Detach partitions: 19 | # # vgchange -a n backup 20 | # # cryptsetup luksClose backup_crypt 21 | # 8. 
Write nodeid with parameters into pillar/backup.sls 22 | 23 | {% if grains['id'] in salt['pillar.get']('backup', {}) %} 24 | include: 25 | - rsync 26 | - lvm 27 | - cryptsetup 28 | 29 | /usr/local/bin/backup.sh: 30 | file.managed: 31 | - source: salt://backup/backup.sh.jinja 32 | - template: jinja 33 | - user: root 34 | - group: root 35 | - mode: 755 36 | - require: 37 | - pkg: rsync 38 | - pkg: lvm 39 | - pkg: cryptsetup 40 | 41 | /usr/local/bin/backup.sh 2>&1 | /usr/bin/logger -t BACKUP: 42 | cron.present: 43 | - user: root 44 | - minute: 0 45 | - hour: 22 46 | - require: 47 | - file: /usr/local/bin/backup.sh 48 | {% endif %} 49 | -------------------------------------------------------------------------------- /cash/cash-startup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Startup WM with Cash script 7 | 8 | matchbox-window-manager & 9 | /usr/local/bin/dnc-run.sh 10 | pkill matchbox-window 11 | -------------------------------------------------------------------------------- /cash/dnc-kassa/admin.sls: -------------------------------------------------------------------------------- 1 | # 2 | # DNC Admin soft 3 | # 4 | 5 | include: 6 | - cash.dnc-kassa 7 | 8 | {% for name in ['SetupLoadUnload', 'AccessRights'] %} 9 | /usr/bin/{{ name }}: 10 | file.managed: 11 | - source: salt://cash/dnc-kassa/dist/usr/bin_{{ grains['cpuarch'] }}/{{ name }} 12 | - user: root 13 | - group: dnc 14 | - mode: 750 15 | - require: 16 | - group: dnc 17 | - require_in: 18 | - file: dnc 19 | {% endfor %} 20 | -------------------------------------------------------------------------------- /cash/dnc-kassa/db.conf.jinja: -------------------------------------------------------------------------------- 1 | [DB] {{ dbname }} 2 | [USER] {{ salt['pillar.get']('cash:user', 'kassir') }} 3 | [PASS] {{ salt['pillar.get']('cash:password', '') }} 4 | [HOST] {{ salt['pillar.get']('cash:host', '') }} 5 | [PORT] {{ salt['pillar.get']('cash:port', '') }} 6 | [LOG] /tmp/dnc_{{ dbname }}.log 7 | -------------------------------------------------------------------------------- /cash/dnc-kassa/dnc-run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Run dnc kassa with specific library path 7 | 8 | export LD_LIBRARY_PATH=/usr/local/lib/dnc 9 | /usr/bin/reshka 10 | exit $? 11 | -------------------------------------------------------------------------------- /cash/dnc-kassa/hw.conf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/cash/dnc-kassa/hw.conf -------------------------------------------------------------------------------- /cash/dnc-kassa/prepare_dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # DNC DIST prepare script 3 | # How to use: 4 | # 1. Create dnc directory 5 | # $ mkdir -p dnc 6 | # 2. Unpack installer 1.2.5 into "dnc" dir 7 | # 3. Unpack with replace updater 1.2.8 into "dnc" dir 8 | # 4. Run prepare_dist.sh in current directory 9 | 10 | PREFIX="$(dirname "$0")/dist" 11 | INST_DIR="$(dirname "$0")/dnc" 12 | 13 | echo "Preparing of ${PREFIX} started..." 14 | if [ ! 
-d "${INST_DIR}" ] 15 | then 16 | echo "Couldn't find ${INST_DIR} directory" >&2 17 | exit 1 18 | fi 19 | mkdir -p "${PREFIX}" 20 | if [ $? -ne 0 ] 21 | then 22 | echo "Unable to create ${PREFIX} directory" >&2 23 | exit 1 24 | fi 25 | 26 | echo 'Copying config files...' 27 | mkdir -p "${PREFIX}/etc" 28 | cp -vr "${INST_DIR}/etc/dancy" "${PREFIX}/etc" 29 | cp -vr "${INST_DIR}/etc/hwsrv" "${PREFIX}/etc" 30 | mkdir -p "${PREFIX}/tmp/dancy" 31 | mkdir "${PREFIX}/tmp/dancy/upload_log" 32 | mkdir "${PREFIX}/tmp/dancy/unload_log" 33 | mkdir "${PREFIX}/tmp/dancy/postgres_log" 34 | mkdir "${PREFIX}/tmp/dancy/conf" 35 | mkdir -p "${PREFIX}/usr/share/dnc" 36 | cp -vr "${INST_DIR}/movie" "${PREFIX}/usr/share/dnc/" 37 | cp -vr "${INST_DIR}/Example_print_document" "${PREFIX}/usr/share/dnc/print_doc" 38 | 39 | for arch in i686 x86_64 40 | do 41 | case "${arch}" in 42 | x86_64) 43 | BIN="${INST_DIR}/bin_64" 44 | LIBS="${INST_DIR}/libs_64" 45 | ;; 46 | *) 47 | BIN="${INST_DIR}/bin" 48 | LIBS="${INST_DIR}/libs" 49 | ;; 50 | esac 51 | 52 | echo 'Copying programs & scripts ${arch}...' 53 | mkdir -p "${PREFIX}/usr/bin_${arch}" 54 | cp -v "${BIN}/Display" "${PREFIX}/usr/bin_${arch}" 55 | cp -v "${BIN}/RMK" "${PREFIX}/usr/bin_${arch}" 56 | cp -v "${BIN}/SetupLoadUnload" "${PREFIX}/usr/bin_${arch}" 57 | cp -v "${BIN}/WareProject" "${PREFIX}/usr/bin_${arch}" 58 | cp -v "${BIN}/AccessRights" "${PREFIX}/usr/bin_${arch}" 59 | cp -v "${BIN}/confGUI" "${PREFIX}/usr/bin_${arch}" 60 | cp -v "${BIN}/daemon_unload" "${PREFIX}/usr/bin_${arch}" 61 | cp -v "${BIN}/reshka" "${PREFIX}/usr/bin_${arch}" 62 | cp -v "${BIN}/upload" "${PREFIX}/usr/bin_${arch}" 63 | cp -v "${BIN}/run_reshka" "${PREFIX}/usr/bin_${arch}" 64 | cp -v "${BIN}/FindHardPath" "${PREFIX}/usr/bin_${arch}" 65 | cp -v "${BIN}/dnc_update" "${PREFIX}/usr/bin_${arch}" 66 | cp -v "${BIN}/update_from_cd" "${PREFIX}/usr/bin_${arch}" 67 | cp -v "${BIN}/reshkaver" "${PREFIX}/usr/bin_${arch}" 68 | chmod -R 755 "${PREFIX}/usr/bin_${arch}" 69 | 70 | echo 'Copying libraries ${arch}...' 
71 | cp -rv "${LIBS}" "${PREFIX}/lib_${arch}" 72 | chmod -R 755 "${PREFIX}/lib_${arch}" 73 | done 74 | 75 | cp -v "${INST_DIR}/addon_conf/qtrc" "${PREFIX}/etc/qtrc" 76 | 77 | echo "Preparing complete" 78 | -------------------------------------------------------------------------------- /cash/dnc-kassa/withoutFR.tab: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/cash/dnc-kassa/withoutFR.tab -------------------------------------------------------------------------------- /cash/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # DNC-Kassa frontend 3 | # 4 | 5 | include: 6 | - cash.dnc-kassa 7 | 8 | kassir: 9 | group: 10 | - present 11 | user.present: 12 | - gid_from_name: True 13 | - groups: 14 | - dnc 15 | - dialout 16 | - require: 17 | - group: kassir 18 | - group: dnc 19 | 20 | xinit: 21 | pkg.installed 22 | 23 | matchbox-window-manager: 24 | pkg.installed 25 | 26 | cash: 27 | file.managed: 28 | - name: /usr/local/bin/cash-startup.sh 29 | - source: salt://cash/cash-startup.sh 30 | - user: root 31 | - group: kassir 32 | - mode: 750 33 | - require: 34 | - file: dnc 35 | - pkg: matchbox-window-manager 36 | service.running: 37 | - require: 38 | - pkg: xinit 39 | - user: kassir 40 | - watch: 41 | - file: dnc 42 | - file: cash 43 | 44 | /etc/init/cash.conf: 45 | file.managed: 46 | - source: salt://cash/upstart.conf 47 | - user: root 48 | - group: root 49 | - mode: 644 50 | - require: 51 | - pkg: xinit 52 | - user: kassir 53 | - file: cash 54 | - watch_in: 55 | - service: cash 56 | 57 | /etc/X11/Xwrapper.config: 58 | file.managed: 59 | - source: salt://cash/xwrapper.conf 60 | - user: root 61 | - group: root 62 | - mode: 644 63 | - require_in: 64 | - service: cash 65 | - require: 66 | - file: dnc 67 | -------------------------------------------------------------------------------- /cash/upstart.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # cash-upstart 7 | # starts DNC-Kassa on startup by using xinit. 8 | # by default runs as kassir, to change edit below. 
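backup/init.sls above only activates when the minion id shows up under the backup pillar key, and cash/dnc-kassa/db.conf.jinja reads its connection settings from cash:* pillar values. A sketch of the matching pillar data — the key names under each backup host are assumptions (backup.sh.jinja, which consumes them, is not shown here), while the cash keys are exactly the ones the template queries; all values are placeholders:

# pillar/backup.sls (sketch): a mapping keyed by minion id, as tested in backup/init.sls.
backup:
  myhost:                       # grains['id'] of the node to back up
    keyfile: /home/backup.key   # assumed parameter name
    source: /home               # assumed parameter name

# Pillar for the cash role (sketch): keys read by cash/dnc-kassa/db.conf.jinja.
cash:
  user: kassir
  password: 'changeme'
  host: db.example.net
  port: 5432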
9 | env USER=kassir 10 | 11 | description "Cash upstart script" 12 | author "Rabit " 13 | 14 | start on (filesystem and stopped udevtrigger) 15 | stop on runlevel [016] 16 | 17 | # tell upstart to respawn the process if abnormal exit 18 | respawn 19 | 20 | script 21 | exec su -c "xinit /usr/local/bin/cash-startup.sh -- -nolisten tcp :0" $USER 22 | end script 23 | -------------------------------------------------------------------------------- /cash/xwrapper.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | allowed_users=anybody 7 | -------------------------------------------------------------------------------- /collectd/battery/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin battery 7 | -------------------------------------------------------------------------------- /collectd/battery/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: battery 3 | # 4 | 5 | /etc/collectd/collectd.d/battery.conf: 6 | file.managed: 7 | - source: salt://collectd/battery/config.jinja 8 | - template: jinja 9 | - user: root 10 | - group: root 11 | - mode: 644 12 | - require: 13 | - file: /etc/collectd/collectd.d 14 | - watch_in: 15 | - service: collectd 16 | 17 | -------------------------------------------------------------------------------- /collectd/collectd.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | FQDNLookup true 7 | 8 | Interval 60 9 | Timeout 20 10 | 11 | LoadPlugin syslog 12 | 13 | LogLevel info 14 | 15 | 16 | LoadPlugin network 17 | 18 | {%- if 'monitorserver' in salt['pillar.get']('net:hosts:%s'|format(grains['id']), {}) %} 19 | Listen "{{ salt['pillar.get']('net:hosts:%s:ip'|format(grains['id']), 'localhost') }}" "{{ salt['pillar.get']('monitoring:port', '4940') }}" 20 | 21 | 22 | LoadPlugin rrdtool 23 | 24 | DataDir "{{ salt['pillar.get']('monitoring:rrd_dir', '/srv/rrd') }}" 25 | CacheTimeout 1200 26 | CacheFlush 1800 27 | WritesPerSecond 40 28 | RandomTimeout 300 29 | {%- else %} 30 | Server "{{ salt['pillar.get']('openvpn:ip', '192.168.0.1') if 'client' == salt['pillar.get']('net:hosts:%s:vpn'|format(grains['id']), 'no') else 'stat.'+salt['pillar.get']('net:main_domain', 'localhost') }}" "{{ salt['pillar.get']('monitoring:port', '4940') }}" 31 | {%- endif %} 32 | 33 | 34 | LoadPlugin cpu 35 | LoadPlugin cpufreq 36 | LoadPlugin memory 37 | LoadPlugin vmem 38 | LoadPlugin load 39 | LoadPlugin conntrack 40 | LoadPlugin contextswitch 41 | LoadPlugin entropy 42 | LoadPlugin uptime 43 | LoadPlugin users 44 | 45 | LoadPlugin df 46 | 47 | FSType rootfs 48 | FSType sysfs 49 | FSType proc 50 | FSType devtmpfs 51 | FSType devpts 52 | FSType tmpfs 53 | FSType fusectl 54 | FSType cgroup 55 | IgnoreSelected true 56 | 57 | 58 | LoadPlugin disk 59 | # 60 | # Disk "hda" 61 | # Disk "/sda[23]/" 62 | # IgnoreSelected false 63 | # 64 | 65 | LoadPlugin swap 66 | LoadPlugin processes 67 | LoadPlugin interface 68 | LoadPlugin irq 69 | 70 | LoadPlugin sensors 71 | # 72 | # SensorConfigFile "/etc/sensors3.conf" 73 | # 74 | 75 | LoadPlugin tail 76 | 77 | 78 | Instance "auth" 79 | 80 | Regex 
"sshd[^:]*: input_userauth_request: invalid user" 81 | DSType "GaugeInc" 82 | Type "gauge" 83 | Instance "sshd-invalid_user" 84 | 85 | 86 | Regex "sshd[^:]*: Accepted password for" 87 | DSType "GaugeInc" 88 | Type "gauge" 89 | Instance "sshd-accepted_password" 90 | 91 | 92 | Regex "sshd[^:]*: Accepted publickey for" 93 | DSType "GaugeInc" 94 | Type "gauge" 95 | Instance "sshd-accepted_publickey" 96 | 97 | 98 | Regex "sshd[^:]*: Failed password for" 99 | DSType "GaugeInc" 100 | Type "gauge" 101 | Instance "sshd-failed_password" 102 | 103 | 104 | 105 | 106 | LoadPlugin tcpconns 107 | 108 | LocalPort "80" 109 | LocalPort "443" 110 | LocalPort "{{ salt['pillar.get']('net:ssh_port', '22') }}" 111 | 112 | 113 | Include "/etc/collectd/collectd.d/*.conf" 114 | -------------------------------------------------------------------------------- /collectd/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd monitor client 3 | # 4 | 5 | include: 6 | - sensors 7 | {% if salt['additional.state_in'](['nginx']) %} 8 | - collectd.nginx 9 | {% endif %} 10 | {% if salt['additional.state_in'](['openvpn.server']) %} 11 | - collectd.openvpn 12 | {% endif %} 13 | {% if salt['additional.state_in'](['nut']) %} 14 | - collectd.nut 15 | {% endif %} 16 | {% if salt['additional.state_in'](['libvirt']) %} 17 | - collectd.libvirt 18 | {% endif %} 19 | {% if salt['additional.state_in'](['postgresql']) %} 20 | - collectd.postgresql 21 | {% endif %} 22 | {% if salt['additional.substring_search']('Radeon', grains['gpus']) %} 23 | - collectd.radeon 24 | {% endif %} 25 | 26 | 27 | collectd5-repo: 28 | pkgrepo.managed: 29 | - name: deb http://ppa.launchpad.net/vbulax/collectd5/ubuntu precise main 30 | - keyid: 013B9839 31 | - keyserver: keyserver.ubuntu.com 32 | - require_in: 33 | - pkg: collectd-core 34 | 35 | collectd-core: 36 | pkg.installed 37 | 38 | /etc/collectd/collectd.d: 39 | file.directory: 40 | - user: root 41 | - group: root 42 | - mode: 755 43 | - makedirs: True 44 | - require: 45 | - pkg: collectd-core 46 | 47 | /etc/collectd/collectd.conf: 48 | file.managed: 49 | - source: salt://collectd/collectd.conf.jinja 50 | - template: jinja 51 | - user: root 52 | - group: root 53 | - mode: 644 54 | - watch_in: 55 | - service: collectd 56 | - require: 57 | - pkg: collectd-core 58 | - file: /etc/collectd/collectd.d 59 | 60 | /etc/init/collectd.conf: 61 | file.managed: 62 | - source: salt://collectd/upstart.conf.jinja 63 | - template: jinja 64 | - user: root 65 | - group: root 66 | - mode: 644 67 | - watch_in: 68 | - service: collectd 69 | - require: 70 | - pkg: collectd-core 71 | 72 | collectd: 73 | service.running: 74 | - require: 75 | - pkg: collectd-core 76 | - pkg: libsensors4 77 | - user: collectd 78 | user.present: 79 | - gid: {{ salt['file.group_to_gid']('nogroup') }} 80 | - shell: /bin/false 81 | - createhome: False 82 | - system: True 83 | -------------------------------------------------------------------------------- /collectd/libvirt/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin libvirt 7 | 8 | Connection "qemu-kvm:///" 9 | RefreshInterval 60 10 | # Domain "name" 11 | # BlockDevice "name:device" 12 | # InterfaceDevice "name:device" 13 | # IgnoreSelected false 14 | # HostnameFormat name 15 | # InterfaceFormat name 16 | 17 | 
-------------------------------------------------------------------------------- /collectd/libvirt/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: libvirt 3 | # 4 | 5 | /etc/collectd/collectd.d/libvirt.conf: 6 | file.managed: 7 | - source: salt://collectd/libvirt/config.jinja 8 | - template: jinja 9 | - user: root 10 | - group: root 11 | - mode: 644 12 | - require: 13 | - file: /etc/collectd/collectd.d 14 | - watch_in: 15 | - service: collectd 16 | 17 | -------------------------------------------------------------------------------- /collectd/nginx/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin nginx 7 | 8 | URL "http://localhost/status?auto" 9 | User "www-user" 10 | 11 | -------------------------------------------------------------------------------- /collectd/nginx/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: nginx 3 | # 4 | 5 | /etc/collectd/collectd.d/nginx.conf: 6 | file.managed: 7 | - source: salt://collectd/nginx/config.jinja 8 | - template: jinja 9 | - user: root 10 | - group: root 11 | - mode: 644 12 | - require: 13 | - file: /etc/collectd/collectd.d 14 | - watch_in: 15 | - service: collectd 16 | 17 | -------------------------------------------------------------------------------- /collectd/notify_email/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin notify_email 7 | # 8 | # SMTPServer "localhost" 9 | # SMTPPort 25 10 | # SMTPUser "my-username" 11 | # SMTPPassword "my-password" 12 | # From "collectd@main0server.com" 13 | # # on . 14 | # # Beware! Do not use not more than two placeholders (%)! 15 | # Subject "[collectd] %s on %s!" 
16 | # Recipient "email1@domain1.net" 17 | # Recipient "email2@domain2.com" 18 | # 19 | -------------------------------------------------------------------------------- /collectd/notify_email/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: notify_email 3 | # 4 | 5 | libesmtp6: 6 | pkg.installed: 7 | - watch_in: 8 | - service: collectd 9 | 10 | /etc/collectd/collectd.d/notify_email.conf: 11 | file.managed: 12 | - source: salt://collectd/notify_email/config.jinja 13 | - template: jinja 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - file: /etc/collectd/collectd.d 19 | - watch_in: 20 | - service: collectd 21 | 22 | -------------------------------------------------------------------------------- /collectd/nut/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin nut 7 | 8 | UPS "ups@localhost:3493" 9 | 10 | -------------------------------------------------------------------------------- /collectd/nut/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: nut 3 | # 4 | 5 | /etc/collectd/collectd.d/nut.conf: 6 | file.managed: 7 | - source: salt://collectd/nut/config.jinja 8 | - template: jinja 9 | - user: root 10 | - group: root 11 | - mode: 644 12 | - require: 13 | - file: /etc/collectd/collectd.d 14 | - watch_in: 15 | - service: collectd 16 | -------------------------------------------------------------------------------- /collectd/openvpn/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin openvpn 7 | 8 | StatusFile "/etc/openvpn/openvpn-status.log" 9 | 10 | -------------------------------------------------------------------------------- /collectd/openvpn/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: openvpn 3 | # 4 | 5 | include: 6 | - collectd.ping 7 | 8 | /etc/collectd/collectd.d/openvpn.conf: 9 | file.managed: 10 | - source: salt://collectd/openvpn/config.jinja 11 | - template: jinja 12 | - user: root 13 | - group: root 14 | - mode: 644 15 | - require: 16 | - file: /etc/collectd/collectd.d 17 | - watch_in: 18 | - service: collectd 19 | 20 | -------------------------------------------------------------------------------- /collectd/ping/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin ping 7 | 8 | {%- for host in salt['pillar.get']('net:hosts', {})|sort %} 9 | Host "{{ host }}.vpn" 10 | {%- endfor %} 11 | Interval 1.0 12 | Timeout 0.9 13 | TTL 255 14 | 15 | -------------------------------------------------------------------------------- /collectd/ping/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: ping 3 | # 4 | 5 | liboping0: 6 | pkg.installed: 7 | - watch_in: 8 | - service: collectd 9 | 10 | /etc/collectd/collectd.d/ping.conf: 11 | file.managed: 12 | - source: salt://collectd/ping/config.jinja 13 | - template: jinja 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - file: 
/etc/collectd/collectd.d 19 | - watch_in: 20 | - service: collectd 21 | 22 | -------------------------------------------------------------------------------- /collectd/postgresql/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin postgresql 7 | # 8 | # 9 | # Statement "SELECT magic FROM wizard WHERE host = $1;" 10 | # Param hostname 11 | # 12 | # 13 | # Type gauge 14 | # InstancePrefix "magic" 15 | # ValuesFrom "magic" 16 | # 17 | # 18 | # 19 | # 20 | # Statement "SELECT COUNT(type) AS count, type \ 21 | # FROM (SELECT CASE \ 22 | # WHEN resolved = 'epoch' THEN 'open' \ 23 | # ELSE 'resolved' END AS type \ 24 | # FROM tickets) type \ 25 | # GROUP BY type;" 26 | # 27 | # 28 | # Type counter 29 | # InstancePrefix "rt36_tickets" 30 | # InstancesFrom "type" 31 | # ValuesFrom "count" 32 | # 33 | # 34 | # 35 | # 36 | # # See /usr/share/doc/collectd-core/examples/postgresql/collectd_insert.sql for details 37 | # Statement "SELECT collectd_insert($1, $2, $3, $4, $5, $6, $7, $8, $9);" 38 | # StoreRates true 39 | # 40 | # 41 | # 42 | # Host "hostname" 43 | # Port 5432 44 | # User "username" 45 | # Password "secret" 46 | # 47 | # SSLMode "prefer" 48 | # KRBSrvName "kerberos_service_name" 49 | # 50 | # Query magic 51 | # 52 | # 53 | # 54 | # Interval 60 55 | # Service "service_name" 56 | # 57 | # Query backend # predefined 58 | # Query rt36_tickets 59 | # 60 | # 61 | # 62 | # Service "collectd_store" 63 | # Writer sqlstore 64 | # # see collectd.conf(5) for details 65 | # CommitInterval 30 66 | # 67 | # 68 | -------------------------------------------------------------------------------- /collectd/postgresql/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: postgresql 3 | # 4 | 5 | /etc/collectd/collectd.d/postgresql.conf: 6 | file.managed: 7 | - source: salt://collectd/postgresql/config.jinja 8 | - template: jinja 9 | - user: root 10 | - group: root 11 | - mode: 644 12 | - require: 13 | - file: /etc/collectd/collectd.d 14 | - watch_in: 15 | - service: collectd 16 | 17 | -------------------------------------------------------------------------------- /collectd/radeon/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LoadPlugin exec 7 | 8 | Exec "collectd" "/usr/local/sbin/radeon_info.sh" 9 | 10 | -------------------------------------------------------------------------------- /collectd/radeon/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd plugin: radeon 3 | # 4 | 5 | include: 6 | - gawk 7 | 8 | /usr/local/sbin/radeon_info.sh: 9 | file.managed: 10 | - source: salt://collectd/radeon/radeon_info.sh 11 | - user: root 12 | - group: root 13 | - mode: 755 14 | - require: 15 | - file: /etc/sudoers.d/collectd_radeon 16 | - pkg: gawk 17 | 18 | /etc/collectd/collectd.d/radeon.conf: 19 | file.managed: 20 | - source: salt://collectd/radeon/config.jinja 21 | - template: jinja 22 | - user: root 23 | - group: root 24 | - mode: 644 25 | - require: 26 | - file: /etc/collectd/collectd.d 27 | - file: /usr/local/sbin/radeon_info.sh 28 | - watch_in: 29 | - service: collectd 30 | 31 | /etc/sudoers.d/collectd_radeon: 32 | file.managed: 33 | - user: root 34 | - group: root 35 | - 
mode: 640 36 | - contents: "collectd ALL=NOPASSWD: /usr/bin/aticonfig --odgc --odgt --adapter=all, /usr/bin/xhost +local\\:localuser\\:collectd\n" 37 | - require: 38 | - user: collectd 39 | 40 | -------------------------------------------------------------------------------- /collectd/radeon/radeon_info.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Collectd radeon videocard Temp, Freq and Load report 7 | 8 | HOSTNAME="${COLLECTD_HOSTNAME:-`hostname -f`}" 9 | INTERVAL="${COLLECTD_INTERVAL:-60}" 10 | 11 | for display in $(ls /tmp/.X11-unix | grep -o '[0-9]\+') 12 | do 13 | export DISPLAY=:${display} 14 | sudo /usr/bin/xhost +local:localuser:collectd 1>/dev/null 2>&1 15 | data=$(sudo aticonfig --odgc --odgt --adapter=all | awk ' 16 | /^Adapter/ {match($0, "^Adapter ([0-9])+ - (.+)", a); number=a[1]; gsub(/ /, "_", a[2]); print "name"a[1]"=\""a[2]"("a[1]")\""} 17 | /Current Clocks/ {match($0, " *([0-9]+) *([0-9]+)", a); print "freqcpu"number"="a[1]"000000"; print "freqmem"number"="a[2]"000000"} 18 | /GPU load/ {match($0, "([0-9]+)", a); print "load"number"="a[1]} 19 | /Temperature/ {match($0, "Temperature - ([0-9]+)", a); print "temp"number"="a[1]}' | sort -u) 20 | eval "${data}" 21 | 22 | for i in `seq 0 9` 23 | do 24 | eval name="\$name$i" freqcpu="\$freqcpu$i" freqmem="\$freqmem$i" load="\$load$i" temp="\$temp$i" 25 | [ "$name" ] || continue 26 | echo "PUTVAL ${HOSTNAME}/video_freq-${name}/gauge-cpu interval=${INTERVAL} N:${freqcpu:-'U'}" 27 | echo "PUTVAL ${HOSTNAME}/video_freq-${name}/gauge-mem interval=${INTERVAL} N:${freqmem:-'U'}" 28 | echo "PUTVAL ${HOSTNAME}/video_load-${name}/gauge-load interval=${INTERVAL} N:${load:-'U'}" 29 | echo "PUTVAL ${HOSTNAME}/video_temp-${name}/temperature interval=${INTERVAL} N:${temp:-'U'}" 30 | eval "name$i=" "freqcpu$i=" "freqmem$i=" "load$i=" "temp$i=" 31 | done 32 | done 33 | -------------------------------------------------------------------------------- /collectd/server.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Collectd monitor server 3 | # 4 | 5 | include: 6 | - collectd 7 | - collectd.notify_email 8 | 9 | {{ salt['pillar.get']('monitoring:rrd_dir', '/srv/rrd') }}: 10 | file.directory: 11 | - user: root 12 | - group: www-data 13 | - mode: 750 14 | - makedirs: True 15 | - required_in: 16 | - service: collectd 17 | -------------------------------------------------------------------------------- /collectd/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # http://collectd.org/ 7 | # Upstart is the replacement init system used in Debian, Ubuntu, 8 | # and in Fedora. Refer to http://upstart.ubuntu.com/cookbook/ 9 | # 10 | # Normally this file will live as `/etc/init/collectd.conf` 11 | 12 | usage "initctl collectd" 13 | 14 | author "Dave Cottlehuber " 15 | description "start/stop/control collectd" 16 | 17 | version "1.1" 18 | 19 | # There are a number of alternative start sequences however 20 | # most of those do not work on all Ubuntu flavours and releases. 21 | start on (local-filesystems and net-device-up IFACE!=lo) 22 | stop on runlevel [!2345] 23 | 24 | # collectd itself will run with reduced privileges, but not 25 | # all plugins will. Test and edit as required. 
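# (If every plugin you load runs happily without root, uncommenting the
# setuid/setgid "nobody" lines further down is the simplest way to actually
# drop those privileges.)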
26 | # An alternative configuration is as a user script in ~/.init/ however 27 | # these cannot be started at boot time by the system without 28 | # arcane trickery. Also a root user will not see these tasks/jobs 29 | # by default. set*id is a reasonable and secure compromise. 30 | #setuid nobody 31 | #setgid nobody 32 | 33 | # Other parameters such as the path to the configuration file 34 | # will have been compiled into the binary. These are trivially 35 | # added as environment variables below, and then into both 36 | # `pre-start` command check before collectd runs, and subsequent 37 | # `exec` command parameters below. Remember that upstart runs all 38 | # shell commands via `sh -e`. 39 | env DAEMON=/usr/sbin/collectd 40 | 41 | # Tell upstart to watch for forking when tracking the pid for us. 42 | expect fork 43 | 44 | # prevent thrashing - 10 restarts in 5 seconds 45 | respawn 46 | respawn limit 10 5 47 | 48 | # Make a log available in /var/log/upstart/collectd.log 49 | console log 50 | 51 | # The daemon will not start if the configuration is invalid. 52 | pre-start exec $DAEMON -t 53 | # Let's Fork! 54 | exec $DAEMON 55 | -------------------------------------------------------------------------------- /common/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Common states 3 | # 4 | 5 | /srv/bin: 6 | file.directory: 7 | - user: root 8 | - group: root 9 | - mode: 755 10 | - makedirs: True 11 | -------------------------------------------------------------------------------- /crypt/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Crypt partitions 3 | # Decrypt partitions on login 4 | # 5 | # Do not forget to change configuration in ../../pillar/crypt.sls 6 | # 7 | # HowTO: 8 | # 1. Prepare home partition 9 | # # cryptsetup luksFormat /dev/md1 10 | # 2. Open encrypted device: 11 | # # cryptsetup luksOpen /dev/md1 home 12 | # 3. Format this partitions: 13 | # # mkfs.ext4 /dev/mapper/home 14 | # 4. Mount partition in some place 15 | # # mount /dev/mapper/home /mnt 16 | # 5. Copy homedir with your user 17 | # # cp -a /home/USER /mnt 18 | # 6. 
Umount & detach partition 19 | # # umount /dev/mapper/home && cryptsetup luksClose /dev/mapper/home 20 | # 21 | 22 | include: 23 | - cryptsetup 24 | 25 | libpam-mount: 26 | pkg.installed 27 | 28 | /etc/security/pam_mount.conf.xml: 29 | file.managed: 30 | - source: salt://crypt/pam_mount.conf.xml.jinja 31 | - template: jinja 32 | - user: root 33 | - group: root 34 | - mode: 644 35 | - require: 36 | - pkg: libpam-mount 37 | -------------------------------------------------------------------------------- /crypt/pam_mount.conf.xml.jinja: -------------------------------------------------------------------------------- 1 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | {% for user, mounts in salt['pillar.get']('crypt:%s'|format(grains['id']), {}).items() %} 18 | {%- for mountpoint, encrypteddisk in mounts.items() %} 19 | 20 | {%- endfor %} 21 | {%- endfor %} 22 | 23 | 24 | -------------------------------------------------------------------------------- /cryptsetup/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Encryption tools 3 | # 4 | 5 | cryptsetup: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /dnsmasq/dnsmasq.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Disable dhcp for all interfaces 7 | no-dhcp-interface={{ salt['grains.get']('ip_interfaces', ['lo'])|join(' ') }} 8 | 9 | listen-address=127.0.0.1 10 | {% for ip in salt['pillar.get']('net:hosts:%s:dnsmasq:listen'|format(grains['id']), {})|sort -%} 11 | listen-address={{ ip }} 12 | {% endfor -%} 13 | 14 | no-resolv 15 | no-poll 16 | 17 | # Local VPN network 18 | local=/vpn/ 19 | 20 | # Internal network routers 21 | {% for name in salt['pillar.get']('net:hosts', {})|sort -%} 22 | {% set router = salt['pillar.get']('net:hosts:%s'|format(name), {}) -%} 23 | {% if 'route' in router and 'ip' in router -%} 24 | # Router {{ name }} subnet 25 | server=/{{ name.split('.')[1:]|join('.') }}/{{ router['ip'] }} 26 | server=/{{ router['route'].split('.')[-2::-1]|join('.') }}.in-addr.arpa/{{ router['ip'] }} 27 | 28 | {% endif -%} 29 | {% endfor -%} 30 | 31 | # Other servers (ex. 
net provider dns nameservers) 32 | {% for ip in salt['pillar.get']('net:hosts:%s:dnsmasq:servers'|format(grains['id']), {})|sort -%} 33 | server={{ ip }} 34 | {% endfor %} 35 | {% for domain, ip in salt['pillar.get']('net:hosts:%s:dnsmasq:addresses'|format(grains['id']), {})|dictsort -%} 36 | address=/.{{ domain }}/{{ ip }} 37 | {% endfor %} 38 | -------------------------------------------------------------------------------- /dnsmasq/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Dnsmasq - DNS & DHCP service 3 | # 4 | 5 | dnsmasq: 6 | pkg: 7 | - installed 8 | service.running: 9 | - watch: 10 | - file: /etc/hosts 11 | - require: 12 | - pkg: dnsmasq 13 | 14 | /etc/dnsmasq.conf: 15 | file.managed: 16 | - source: salt://dnsmasq/dnsmasq.conf.jinja 17 | - template: jinja 18 | - mode: 0644 19 | - user: root 20 | - group: root 21 | - require: 22 | - pkg: dnsmasq 23 | - watch_in: 24 | - service: dnsmasq 25 | 26 | /etc/resolv.conf: 27 | file.managed: 28 | - mode: 0644 29 | - user: root 30 | - group: root 31 | - contents: "nameserver 127.0.0.1\n" 32 | - require: 33 | - pkg: dnsmasq 34 | - service: dnsmasq 35 | -------------------------------------------------------------------------------- /drone/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Drone - continuous integration server based on docker 3 | # 4 | 5 | {% set dist_def = 'http://downloads.drone.io/master/drone.deb' %} 6 | {% set dist = salt['pillar.get']('drone:dist', dist_def) %} 7 | 8 | docker.io: 9 | pkg.installed 10 | 11 | drone-package: 12 | pkg.installed: 13 | - sources: 14 | - drone: 15 | - '{{ dist }}' 16 | - '{{ dist_def }}' 17 | - require: 18 | - pkg: docker.io 19 | 20 | drone: 21 | service.running: 22 | - watch: 23 | - pkg: drone-package 24 | -------------------------------------------------------------------------------- /fglrx/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # FGLRX Radeon driver 3 | # 4 | 5 | fglrx: 6 | pkg.installed: 7 | - version: 2:9.000-0ubuntu3 # Lastest non-broken version for xorg 1.13 8 | 9 | fglrx xorg.conf: 10 | cmd.run: 11 | - name: aticonfig --initial -f 12 | - unless: test -f /etc/X11/xorg.conf 13 | 14 | amdconfig --set-pcs-str="DDX,EnableRandR12,FALSE": 15 | cmd.wait: 16 | - watch: 17 | - cmd: fglrx xorg.conf 18 | 19 | aticonfig --set-pcs-u32=MCIL,HWUVD_H264Level51Support,1: 20 | cmd.wait: 21 | - watch: 22 | - cmd: fglrx xorg.conf 23 | 24 | aticonfig --sync-vsync=on: 25 | cmd.wait: 26 | - watch: 27 | - cmd: fglrx xorg.conf 28 | 29 | aticonfig --set-pcs-val=MCIL,DigitalHDTVDefaultUnderscan,0: 30 | cmd.wait: 31 | - watch: 32 | - cmd: fglrx xorg.conf 33 | 34 | -------------------------------------------------------------------------------- /firefox/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Firefox - best browser 3 | # 4 | 5 | firefox: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /gawk/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # GAWK - AWK parser 3 | # 4 | 5 | gawk: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /gimp/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # GIMP - photoeditor 3 | # 4 | 5 | gimp: 6 | pkg.installed 7 | 
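The dnsmasq.conf.jinja template above is driven entirely by the net pillar tree: per-minion dnsmasq:listen, dnsmasq:servers and dnsmasq:addresses keys under the minion's own entry in net:hosts, plus ip/route on router entries for the per-subnet forwarders. A minimal pillar sketch that would satisfy those lookups (the host name, subnet and addresses are illustrative assumptions, not values taken from this repository):

net:
  hosts:
    gw.home:
      ip: 10.0.0.1
      route: 192.168.0.0
      dnsmasq:
        listen:
          - 192.168.0.1
        servers:
          - 8.8.8.8
        addresses:
          lan.example.org: 192.168.0.1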
-------------------------------------------------------------------------------- /git/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # GIT scm 3 | # 4 | 5 | git-package: 6 | pkg.installed: 7 | - name: git 8 | -------------------------------------------------------------------------------- /gogs/nginx-site.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | server { 7 | listen 80; 8 | 9 | server_name {{ nginx_server_name }}; 10 | 11 | return 302 https://{{ nginx_server_name }}$request_uri; 12 | } 13 | 14 | server { 15 | listen 443 ssl spdy; 16 | 17 | server_name {{ nginx_server_name }}; 18 | 19 | ssl_certificate {{ ssl_cert }}; 20 | ssl_certificate_key {{ ssl_key }}; 21 | 22 | include /etc/nginx/ssl.conf; 23 | 24 | ssl_verify_client optional; 25 | ssl_verify_depth 1; 26 | 27 | charset utf8; 28 | limit_conn perip 10; 29 | limit_conn perserver 150; 30 | 31 | client_max_body_size 500m; 32 | client_body_buffer_size 128k; 33 | 34 | access_log /var/log/nginx/access.{{ nginx_conf_name }}.log; 35 | error_log /var/log/nginx/error.{{ nginx_conf_name }}.log; 36 | 37 | location / { 38 | proxy_pass http://localhost:3000; 39 | 40 | proxy_connect_timeout 36000s; 41 | proxy_read_timeout 36000s; 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /gogs/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | description "Gogs upstart script" 7 | author "Rabit " 8 | 9 | start on (local-filesystems and net-device-up) 10 | stop on shutdown 11 | 12 | respawn 13 | respawn limit 10 5 14 | 15 | setuid {{ username }} 16 | setgid {{ username }} 17 | 18 | env HOME="{{ home_dir }}" 19 | env USER="{{ username }}" 20 | 21 | chdir {{ dist_dir }}/gogs 22 | 23 | exec {{ dist_dir }}/gogs/gogs web 24 | -------------------------------------------------------------------------------- /htop/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # HTOP tool 3 | # 4 | 5 | htop: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /i2p/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # I2P - i2p network access 3 | # 4 | 5 | include: 6 | - tinyproxy 7 | 8 | i2p-repo: 9 | pkgrepo.managed: 10 | - ppa: i2p-maintainers/i2p 11 | - required_in: 12 | - pkg: i2p 13 | 14 | i2p: 15 | pkg: 16 | - installed 17 | service.running: 18 | - require: 19 | - pkg: i2p 20 | -------------------------------------------------------------------------------- /jarmon/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Jarmon RRD web analyzer 3 | # 4 | 5 | include: 6 | - nginx 7 | 8 | {% set nginx_listen = salt['pillar.get']('net:hosts:%s:ip'|format(grains['id']), '127.0.0.1') %} 9 | {% set nginx_server_name = 'stat.' 
+ salt['pillar.get']('net:main_domain', 'localhost') %} 10 | {% set nginx_conf_name = salt['additional.inverse'](nginx_server_name) %} 11 | 12 | {% set home_dir = salt['pillar.get']('monitoring:stat_dir', '/srv/www/' + nginx_conf_name) %} 13 | 14 | {{ home_dir }}: 15 | file.recurse: 16 | - source: salt://jarmon/jarmon-11.8 17 | - user: root 18 | - group: www-data 19 | - clean: True 20 | - dir_mode: 750 21 | - file_mode: 640 22 | 23 | /etc/nginx/sites-available/{{ nginx_conf_name }}.conf: 24 | file.managed: 25 | - source: salt://jarmon/nginx-site.conf.jinja 26 | - template: jinja 27 | - user: root 28 | - group: root 29 | - mode: 644 30 | - context: 31 | nginx_listen: {{ nginx_listen }} 32 | nginx_server_name: {{ nginx_server_name }} 33 | nginx_conf_name: {{ nginx_conf_name }} 34 | home_dir: {{ home_dir }} 35 | - require: 36 | - pkg: nginx-full 37 | - watch_in: 38 | - service: nginx 39 | 40 | /etc/nginx/sites-enabled/{{ nginx_conf_name }}.conf: 41 | file.symlink: 42 | - target: ../sites-available/{{ nginx_conf_name }}.conf 43 | - watch_in: 44 | - service: nginx 45 | -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/css/jquerytools.dateinput.skin1.css: -------------------------------------------------------------------------------- 1 | /* For the details, see: http://flowplayer.org/tools/dateinput/index.html#skinning */ 2 | 3 | /* the input field */ 4 | .date { 5 | border:1px solid #ccc; 6 | font-size:18px; 7 | padding:4px; 8 | text-align:center; 9 | width:194px; 10 | -moz-box-shadow:0 0 10px #eee inset; 11 | } 12 | 13 | /* calendar root element */ 14 | #calroot { 15 | margin-top:-1px; 16 | width:198px; 17 | padding:2px; 18 | background-color:#fff; 19 | font-size:11px; 20 | border:1px solid #ccc; 21 | -moz-border-radius:5px; 22 | -webkit-border-radius:5px; 23 | -moz-box-shadow: 0 0 15px #666; 24 | -webkit-box-shadow: 0 0 15px #666; 25 | } 26 | 27 | /* head. 
contains title, prev/next month controls and possible month/year selectors */ 28 | #calhead { 29 | padding:2px 0; 30 | height:22px; 31 | } 32 | 33 | #caltitle { 34 | font-size:14px; 35 | color:#0150D1; 36 | float:left; 37 | text-align:center; 38 | width:155px; 39 | line-height:20px; 40 | text-shadow:0 1px 0 #ddd; 41 | } 42 | 43 | #calnext, #calprev { 44 | display:block; 45 | width:20px; 46 | height:20px; 47 | background:transparent url(../icons/prev.gif) no-repeat scroll center center; 48 | float:left; 49 | cursor:pointer; 50 | } 51 | 52 | #calnext { 53 | background-image:url(../icons/next.gif); 54 | float:right; 55 | } 56 | 57 | #calprev.caldisabled, #calnext.caldisabled { 58 | visibility:hidden; 59 | } 60 | 61 | /* year/month selector */ 62 | #caltitle select { 63 | font-size:10px; 64 | } 65 | 66 | /* names of the days */ 67 | #caldays { 68 | height:14px; 69 | border-bottom:1px solid #ddd; 70 | } 71 | 72 | #caldays span { 73 | display:block; 74 | float:left; 75 | width:28px; 76 | text-align:center; 77 | } 78 | 79 | /* container for weeks */ 80 | #calweeks { 81 | background-color:#fff; 82 | margin-top:4px; 83 | } 84 | 85 | /* single week */ 86 | .calweek { 87 | clear:left; 88 | height:22px; 89 | } 90 | 91 | /* single day */ 92 | .calweek a { 93 | display:block; 94 | float:left; 95 | width:27px; 96 | height:20px; 97 | text-decoration:none; 98 | font-size:11px; 99 | margin-left:1px; 100 | text-align:center; 101 | line-height:20px; 102 | color:#666; 103 | -moz-border-radius:3px; 104 | -webkit-border-radius:3px; 105 | } 106 | 107 | /* different states */ 108 | .calweek a:hover, .calfocus { 109 | background-color:#ddd; 110 | } 111 | 112 | /* sunday */ 113 | a.calsun { 114 | color:red; 115 | } 116 | 117 | /* offmonth day */ 118 | a.caloff { 119 | color:#ccc; 120 | } 121 | 122 | a.caloff:hover { 123 | background-color:rgb(245, 245, 250); 124 | } 125 | 126 | 127 | /* unselecteble day */ 128 | a.caldisabled { 129 | background-color:#efefef !important; 130 | color:#ccc !important; 131 | cursor:default; 132 | } 133 | 134 | /* current day */ 135 | #calcurrent { 136 | background-color:#498CE2; 137 | color:#fff; 138 | } 139 | 140 | /* today */ 141 | #caltoday { 142 | background-color:#333; 143 | color:#fff; 144 | } 145 | -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/css/style.css: -------------------------------------------------------------------------------- 1 | body { 2 | font-family: sans; 3 | width: 960px; 4 | margin: 20px auto 10px auto; 5 | } 6 | 7 | form div { 8 | text-align: center; 9 | } 10 | 11 | h2 { 12 | font-size: 14px; 13 | } 14 | 15 | p, li, dt, dd, td, th, div { 16 | font-size: 12px; 17 | } 18 | 19 | .loading .title { 20 | background-repeat: no-repeat; 21 | background-position: 0 50%; 22 | background-image: url(/icons/loading.gif); 23 | } 24 | 25 | .range-preview { 26 | height:50px; 27 | margin: 0 auto 10px auto; 28 | position: relative; 29 | } 30 | 31 | .chart { 32 | height:200px; 33 | width: 850px; 34 | margin: 0 auto 0 auto; 35 | clear: both; 36 | } 37 | 38 | .tickLabel { 39 | width:50px; 40 | overflow:hidden; 41 | } 42 | 43 | .graph-legend { 44 | width: 790px; 45 | padding: 5px 0 5px 0; 46 | margin: 10px auto 0 auto; 47 | background-color: #f7f7f7; 48 | position: relative; 49 | left: 25px; 50 | } 51 | 52 | .graph-legend .legendItem { 53 | float: left; 54 | cursor: pointer; 55 | margin-right: 20px; 56 | margin-top: 5px; 57 | margin-left: 5px; 58 | } 59 | 60 | .graph-legend .disabled { 61 | text-decoration: line-through; 62 | } 
63 | 64 | .graph-legend .legendColorBox { 65 | float: left; 66 | margin-right: 5px; 67 | } 68 | 69 | input[type=checkbox] { 70 | margin: 0; 71 | padding: 0; 72 | border: none; 73 | } 74 | 75 | input[type=text] { 76 | padding: 3px; 77 | border: 1px solid #EEE; 78 | } 79 | 80 | .notice { 81 | border: 1px solid Green; 82 | background: #FFDDFF; 83 | margin-bottom: 20px; 84 | padding: 5px; 85 | } 86 | 87 | #calroot { 88 | z-index: 2; 89 | } 90 | 91 | .chart-header { 92 | width: 790px; 93 | padding: 5px 0 5px 0; 94 | margin: 20px auto 0 auto; 95 | position: relative; 96 | left: 25px; 97 | } 98 | 99 | .chart-header:AFTER { 100 | content: '' 101 | } 102 | 103 | .chart-container h2{ 104 | float: left; 105 | margin: 0; 106 | } 107 | 108 | .chart-container .chart-controls{ 109 | float: right; 110 | margin: 0; 111 | } 112 | 113 | .tab-controls { 114 | width: 790px; 115 | padding: 5px 0 5px 0; 116 | margin: 20px auto 0 auto; 117 | text-align: right; 118 | position: relative; 119 | left: 25px; 120 | } 121 | 122 | #host_list { 123 | position: absolute; 124 | left: 0px; 125 | top: 0px; 126 | border: 1px solid #EEE; 127 | } 128 | 129 | #host_list li { 130 | padding-right: 20px; 131 | } 132 | -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/css/tabs-no-images.css: -------------------------------------------------------------------------------- 1 | 2 | /* root element for tabs */ 3 | ul.css-tabs { 4 | margin:0 !important; 5 | padding:0; 6 | height:30px; 7 | border-bottom:1px solid #666; 8 | } 9 | 10 | /* single tab */ 11 | ul.css-tabs li { 12 | float:left; 13 | padding:0; 14 | margin:0; 15 | list-style-type:none; 16 | } 17 | 18 | /* link inside the tab. uses a background image */ 19 | ul.css-tabs a { 20 | float:left; 21 | font-size:13px; 22 | display:block; 23 | padding:5px 30px; 24 | text-decoration:none; 25 | border:1px solid #666; 26 | border-bottom:0px; 27 | height:18px; 28 | background-color:#efefef; 29 | color:#777; 30 | margin-right:2px; 31 | -moz-border-radius-topleft: 4px; 32 | -moz-border-radius-topright:4px; 33 | position:relative; 34 | top:1px; 35 | } 36 | 37 | ul.css-tabs a:hover { 38 | background-color:#F7F7F7; 39 | color:#333; 40 | } 41 | 42 | /* selected tab */ 43 | ul.css-tabs a.current { 44 | background-color:#ddd; 45 | border-bottom:2px solid #ddd; 46 | color:#000; 47 | cursor:default; 48 | } 49 | 50 | 51 | /* tab pane */ 52 | .css-panes > div { 53 | display:none; 54 | border:1px solid #666; 55 | border-width:0 1px 1px 1px; 56 | min-height:150px; 57 | padding:15px 20px; 58 | background-color:#ddd; 59 | } 60 | 61 | 62 | 63 | -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/icons/calendar.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/jarmon/jarmon-11.8/icons/calendar.png -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/icons/loading.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/jarmon/jarmon-11.8/icons/loading.gif -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/icons/next.gif: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/jarmon/jarmon-11.8/icons/next.gif -------------------------------------------------------------------------------- /jarmon/jarmon-11.8/icons/prev.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/jarmon/jarmon-11.8/icons/prev.gif -------------------------------------------------------------------------------- /jarmon/nginx-site.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | server { 7 | listen {{ nginx_listen }}:80; 8 | 9 | server_name {{ nginx_server_name }}; 10 | 11 | allow 192.168.0.0/20; 12 | deny all; 13 | 14 | root {{ home_dir }}; 15 | charset utf8; 16 | limit_conn perip 20; 17 | limit_conn perserver 150; 18 | 19 | client_max_body_size 1m; 20 | client_body_buffer_size 128k; 21 | 22 | access_log /var/log/nginx/access.{{ nginx_conf_name }}.log; 23 | error_log /var/log/nginx/error.{{ nginx_conf_name }}.log; 24 | 25 | location / { 26 | } 27 | 28 | location /rrd { 29 | alias {{ salt['pillar.get']('monitoring:rrd_dir', '/srv/rrd') }}/; 30 | autoindex on; 31 | } 32 | 33 | include /etc/nginx/common.conf; 34 | } 35 | 36 | -------------------------------------------------------------------------------- /keepassx/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # KeePassX - good password storage 3 | # 4 | 5 | keepassx: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /kernel/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Kernel options 3 | # 4 | 5 | # Swap usage if ram free < 5% 6 | vm.swappiness: 7 | sysctl: 8 | - present 9 | - value: 5 10 | -------------------------------------------------------------------------------- /kodi/addons/script.securitycam/addon.xml: -------------------------------------------------------------------------------- 1 | 2 | 6 | 7 | 8 | 9 | 10 | 11 | all 12 | A script to support the overlay of a security camera image feed. 13 | A script to support the overlay of a security camera image feed. 14 | 15 | 16 | -------------------------------------------------------------------------------- /kodi/addons/script.securitycam/changelog.txt: -------------------------------------------------------------------------------- 1 | v0.0.9 2 | - Change image update method to prevent caching (should eliminate issue some users were seeing where image didn't update properly) 3 | - Eliminate dependency on urllib (now uses urllib2 exclusively) 4 | - Bumped python import version to 2.14.0 (per http://wiki.xbmc.org/index.php?title=Addon.xml#addon_attribute) 5 | 6 | v0.0.7 7 | - Fixed boolean bug that caused script to crash 8 | 9 | v0.0.6 10 | - Bump xmbc.python requirement from version 2.0 to version 2.1 11 | - Remove script.module.simplejson requirement 12 | - Add argument to ControlImage.setImage method to prevent image caching (http://mirrors.xbmc.org/docs/python-docs/13.0-gotham/xbmcgui.html#ControlImage) 13 | 14 | v0.0.5 15 | - Fixed bug causing cam image to download indefinitely when "Enable auto-close after duration" was set to disabled. 
16 | - Added debug logging 17 | 18 | v0.0.4 19 | - Added support for url placeholders which can be passed to the Add-On (see http://wiki.xbmc.org/index.php?title=HOW-TO:Write_Python_Scripts#Passing_Arguments_to_a_Script) ex. http://localhost/{0}/{1}.jpg?size={2} 20 | 21 | v0.0.3 22 | - Added new method for authentication handling, previous method was failing 23 | 24 | v0.0.2 25 | - Added option to disable auto-close 26 | 27 | v0.0.1 28 | - Initial version 29 | 30 | -------------------------------------------------------------------------------- /kodi/addons/script.securitycam/icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rabits/salt-stack-modules/64410e80722b60f095a87f09d196afa1db728e5f/kodi/addons/script.securitycam/icon.png -------------------------------------------------------------------------------- /kodi/addons/script.securitycam/resources/language/English/strings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | Camera 6 | Image Url: 7 | 8 | Username: 9 | Password: 10 | Behavior 11 | Window Width: 12 | Window Height: 13 | Refresh Interval (in milliseconds): 14 | 15 | 16 | Enable auto-close after duration: 17 | Duration (in seconds): 18 | 19 | 20 | 21 | -------------------------------------------------------------------------------- /kodi/addons/script.securitycam/resources/settings.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | -------------------------------------------------------------------------------- /kodi/advancedsettings.xml: -------------------------------------------------------------------------------- 1 | 2 | 0 3 | 4 | 30 5 | 6 | true 7 | sensors|sed -ne "s/CPUTIN: \+[-+]\([0-9]\+\).*/\1 C/p" 8 | sensors|grep -A3 radeon|sed -ne "s/temp1: \+[-+]\([0-9]\+\).*/\1 C/p" 9 | 10 | 3 11 | 0 12 | 13 | 23 | 24 | -------------------------------------------------------------------------------- /kodi/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process kodi matching "kodi.bin" 7 | group service 8 | group media 9 | group kodi 10 | start program = "/usr/sbin/service kodi start" 11 | stop program = "/usr/sbin/service kodi stop" 12 | if failed host 127.0.0.1 port 8080 13 | send "GET / HTTP/1.1\r\n\r\n" 14 | expect "HTTP/1.1 401 Unauthorized.*" 15 | timeout 10 seconds for 3 cycles then restart 16 | if failed host 127.0.0.1 port 9090 17 | send '{"jsonrpc":"2.0","id":0,"method":"Player.GetActivePlayers"}' 18 | expect '\{".+' 19 | timeout 10 seconds for 3 cycles then restart 20 | if 5 restarts with 5 cycles then timeout 21 | -------------------------------------------------------------------------------- /kodi/upstart.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # kodi-upstart 7 | # starts KODI on startup by using xinit. 8 | # by default runs as kodi, to change edit below. 
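# The exec stanza at the bottom launches a bare X session through xinit:
# "-nolisten tcp" stops the X server from accepting network clients and
# "-nocursor" hides the pointer on the HTPC screen.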
9 | env USER=kodi 10 | 11 | description "KODI-barebones-upstart-script" 12 | author "Matt Filetto" 13 | 14 | start on (filesystem and stopped udevtrigger) 15 | stop on runlevel [016] 16 | 17 | # tell upstart to respawn the process if abnormal exit 18 | respawn 19 | respawn limit 10 5 20 | limit nice 21 21 21 | 22 | script 23 | exec su -c "xinit /srv/bin/kodi -- -nolisten tcp -nocursor :0" $USER 24 | end script 25 | -------------------------------------------------------------------------------- /kodi/xorg.conf: -------------------------------------------------------------------------------- 1 | Section "ServerLayout" 2 | Identifier "X.org Configured" 3 | Screen 0 "Screen0" 0 0 4 | EndSection 5 | 6 | Section "Files" 7 | ModulePath "/usr/lib/xorg/modules" 8 | FontPath "/usr/share/fonts/X11/misc" 9 | FontPath "/usr/share/fonts/X11/cyrillic" 10 | FontPath "/usr/share/fonts/X11/100dpi/:unscaled" 11 | FontPath "/usr/share/fonts/X11/75dpi/:unscaled" 12 | FontPath "/usr/share/fonts/X11/Type1" 13 | FontPath "/usr/share/fonts/X11/100dpi" 14 | FontPath "/usr/share/fonts/X11/75dpi" 15 | FontPath "built-ins" 16 | EndSection 17 | 18 | Section "Module" 19 | Load "glx" 20 | EndSection 21 | 22 | Section "Monitor" 23 | Identifier "Monitor0" 24 | VendorName "Monitor Vendor" 25 | ModelName "Monitor Model" 26 | Option "Enable" "true" 27 | Option "DPMS" "false" 28 | Horizsync 15-81 29 | VertRefresh 24-75 30 | Modeline "1920x1080_sm" 148.50 1920 2008 2052 2200 1080 1084 1089 1125 +hsync +vsync 31 | EndSection 32 | 33 | Section "Device" 34 | Identifier "Card0" 35 | Driver "radeon" 36 | BusID "PCI:1:0:0" 37 | EndSection 38 | 39 | Section "Screen" 40 | Identifier "Screen0" 41 | Device "Card0" 42 | Monitor "Monitor0" 43 | Option "ConnectedMonitor" "HDMI-0" 44 | SubSection "Display" 45 | Viewport 0 0 46 | Depth 24 47 | Modes "1920x1080_sm" 48 | EndSubSection 49 | EndSection 50 | -------------------------------------------------------------------------------- /kodi/xwrapper.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | allowed_users=anybody 7 | -------------------------------------------------------------------------------- /kvm/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # KVM hypervisor 3 | # 4 | # VGA Passthrough: i7 3770 Asrock Extreme 4 5 | # - BIOS: 6 | # - Enable IOMMU 7 | # - Enable default video: pci-express (not onboard) 8 | # - Enable IGPU MultiMonitor 9 | # - Host: 10 | # - Create /etc/X11/xorg.conf.d/intel.conf: 11 | # - Driver "intel" 12 | # - BusID "PCI:0:2:0" 13 | # - 3.9x kernel: git://github.com/awilliam/linux-vfio.git vfio-vga-reset 14 | # - Qemu 1.5.0: git://github.com/awilliam/qemu-vfio.git vfio-vga-reset 15 | # - SeaBios: git://git.seabios.org/seabios.git (build it with `LANG="en_US.utf8" make`) 16 | # 17 | # Testing: 18 | # - Bind VGA to vfio-pci: 19 | # $ sudo vfio-bind 0000:01:00.0 0000:01:00.1 20 | # - Run: 21 | # $ sudo qemu-system-x86_64 -enable-kvm -M q35 -m 4096 -cpu host -smp 4,sockets=1,cores=2,threads=2 -bios /srv/kvm/seabios/bios.bin -vga none -nographic -device ioh3420,bus=pcie.0,addr=1c.0,multifunction=on,port=1,chassis=1,id=root.1 -device vfio-pci,host=01:00.0,bus=root.1,addr=00.0,multifunction=on,x-vga=on -boot menu=on -monitor stdio 22 | # - Switch to your second VGA and you should see bios 23 | # 24 | 25 | include: 26 | - libvirt 27 | 28 | kvm-pkgs: 29 | pkg.installed: 30 | - 
pkgs: 31 | - qemu-kvm 32 | - qemu-system 33 | - bridge-utils 34 | - python-spice-client-gtk 35 | - pm-utils 36 | 37 | /srv/kvm: 38 | file.directory: 39 | - user: root 40 | - group: libvirtd 41 | - mode: 755 42 | - require: 43 | - pkg: kvm-pkgs 44 | 45 | /usr/local/bin/vfio-bind: 46 | file.managed: 47 | - user: root 48 | - group: root 49 | - mode: 755 50 | - source: salt://kvm/vfio-bind 51 | - require: 52 | - pkg: kvm-pkgs 53 | 54 | /etc/default/grub: 55 | file.replace: 56 | - pattern: '^GRUB_HIDDEN_TIMEOUT=.*' 57 | - repl: 'GRUB_HIDDEN_TIMEOUT=5' 58 | # file.replace: 59 | # - pattern: '^GRUB_CMDLINE_LINUX=""$' 60 | # - repl: 'GRUB_CMDLINE_LINUX="intel_iommu=on"' 61 | 62 | #/etc/modprobe.d/vfio_iommu_type1.conf: 63 | # file.managed: 64 | # - user: root 65 | # - group: root 66 | # - mode: 644 67 | # - contents: options vfio_iommu_type1 allow_unsafe_interrupts=1 68 | # - require: 69 | # - pkg: kvm-pkgs 70 | 71 | /etc/network/interfaces: 72 | file.managed: 73 | - source: salt://kvm/interfaces 74 | - user: root 75 | - group: root 76 | - mode: 644 77 | - replace: False 78 | - require: 79 | - pkg: kvm-pkgs 80 | 81 | #network-manager: 82 | # pkg.removed 83 | -------------------------------------------------------------------------------- /kvm/interfaces: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | auto lo 7 | iface lo inet loopback 8 | 9 | auto eth0 10 | iface eth0 inet manual 11 | 12 | auto br0 13 | iface br0 inet dhcp 14 | bridge_ports eth0 15 | bridge_stp off 16 | bridge_fd 0 17 | bridge_maxwait 0 18 | -------------------------------------------------------------------------------- /kvm/vfio-bind: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | 7 | modprobe vfio-pci 8 | 9 | for var in "$@"; do 10 | for dev in $(ls /sys/bus/pci/devices/$var/iommu_group/devices); do 11 | vendor=$(cat /sys/bus/pci/devices/$dev/vendor) 12 | device=$(cat /sys/bus/pci/devices/$dev/device) 13 | if [ -e /sys/bus/pci/devices/$dev/driver ]; then 14 | echo $dev > /sys/bus/pci/devices/$dev/driver/unbind 15 | fi 16 | echo $vendor $device > /sys/bus/pci/drivers/vfio-pci/new_id 17 | done 18 | done 19 | -------------------------------------------------------------------------------- /laptop/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Laptop tweaks 3 | # 4 | 5 | cpufrequtils: 6 | pkg.installed 7 | 8 | powertop: 9 | pkg.installed 10 | 11 | laptop-mode-tools: 12 | pkg.installed 13 | -------------------------------------------------------------------------------- /ldap-admin/config.php.jinja: -------------------------------------------------------------------------------- 1 | 7 | # 8 | # Based on the proposal of : Mark Ruijter 9 | # 10 | # octetString SYNTAX 11 | olcAttributeTypes: ( 1.3.6.1.4.1.24552.500.1.1.1.13 12 | NAME 'sshPublicKey' 13 | DESC 'MANDATORY: OpenSSH Public key' 14 | EQUALITY octetStringMatch 15 | SYNTAX 1.3.6.1.4.1.1466.115.121.1.40 ) 16 | # printableString SYNTAX yes|no 17 | olcObjectClasses: ( 1.3.6.1.4.1.24552.500.1.1.2.0 18 | NAME 'ldapPublicKey' SUP top AUXILIARY 19 | DESC 'MANDATORY: OpenSSH LPK objectclass' 20 | MAY ( sshPublicKey $ uid ) 21 | ) 22 | -------------------------------------------------------------------------------- /ldap/ldif/refint.ldif: 
-------------------------------------------------------------------------------- 1 | dn: cn=module,cn=config 2 | cn: module 3 | objectclass: olcModuleList 4 | objectclass: top 5 | olcmoduleload: refint.la 6 | olcmodulepath: /usr/lib/ldap 7 | 8 | dn: olcOverlay={1}refint,olcDatabase={1}hdb,cn=config 9 | objectClass: olcConfig 10 | objectClass: olcOverlayConfig 11 | objectClass: olcRefintConfig 12 | objectClass: top 13 | olcOverlay: {1}refint 14 | olcRefintAttribute: memberof member manager owner 15 | -------------------------------------------------------------------------------- /ldap/ldif/sudo.ldif: -------------------------------------------------------------------------------- 1 | dn: cn=sudo,cn=schema,cn=config 2 | objectClass: olcSchemaConfig 3 | cn: sudo 4 | olcAttributeTypes: {0}( 1.3.6.1.4.1.15953.9.1.1 NAME 'sudoUser' DESC 'User(s) 5 | who may run sudo' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMa 6 | tch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) 7 | olcAttributeTypes: {1}( 1.3.6.1.4.1.15953.9.1.2 NAME 'sudoHost' DESC 'Host(s) 8 | who may run sudo' EQUALITY caseExactIA5Match SUBSTR caseExactIA5SubstringsMat 9 | ch SYNTAX 1.3.6.1.4.1.1466.115.121.1.26 ) 10 | olcAttributeTypes: {2}( 1.3.6.1.4.1.15953.9.1.3 NAME 'sudoCommand' DESC 'Comma 11 | nd(s) to be executed by sudo' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1 12 | 466.115.121.1.26 ) 13 | olcAttributeTypes: {3}( 1.3.6.1.4.1.15953.9.1.4 NAME 'sudoRunAs' DESC 'User(s) 14 | impersonated by sudo (deprecated)' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1 15 | .4.1.1466.115.121.1.26 ) 16 | olcAttributeTypes: {4}( 1.3.6.1.4.1.15953.9.1.5 NAME 'sudoOption' DESC 'Option 17 | s(s) followed by sudo' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466.115 18 | .121.1.26 ) 19 | olcAttributeTypes: {5}( 1.3.6.1.4.1.15953.9.1.6 NAME 'sudoRunAsUser' DESC 'Use 20 | r(s) impersonated by sudo' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.1466 21 | .115.121.1.26 ) 22 | olcAttributeTypes: {6}( 1.3.6.1.4.1.15953.9.1.7 NAME 'sudoRunAsGroup' DESC 'Gr 23 | oup(s) impersonated by sudo' EQUALITY caseExactIA5Match SYNTAX 1.3.6.1.4.1.14 24 | 66.115.121.1.26 ) 25 | olcObjectClasses: {0}( 1.3.6.1.4.1.15953.9.2.1 NAME 'sudoRole' DESC 'Sudoer En 26 | tries' SUP top STRUCTURAL MUST cn MAY ( sudoUser $ sudoHost $ sudoCommand $ s 27 | udoRunAs $ sudoRunAsUser $ sudoRunAsGroup $ sudoOption $ description ) ) 28 | 29 | 30 | -------------------------------------------------------------------------------- /ldap/server.sls: -------------------------------------------------------------------------------- 1 | # 2 | # LDAP server - central authentificator 3 | # 4 | 5 | include: 6 | - ldap 7 | 8 | /etc/default/slapd: 9 | file.managed: 10 | - source: salt://ldap/default-slapd 11 | - user: root 12 | - group: root 13 | - mode: 644 14 | - require: 15 | - pkg: slapd 16 | - watch_in: 17 | - service: slapd 18 | 19 | slapd: 20 | pkg: 21 | - installed 22 | service.running: 23 | - require: 24 | - pkg: ldap-utils 25 | -------------------------------------------------------------------------------- /libcap2/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # libcap2 - capabilities library & binaries 3 | # 4 | 5 | libcap2: 6 | pkg.installed: 7 | - pkgs: 8 | - libcap2-bin 9 | -------------------------------------------------------------------------------- /libva/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Video accelerating libraries 3 | # 4 | 5 | videoaccel: 6 | 
pkg.installed: 7 | - names: 8 | - vainfo 9 | - libva-glx1 10 | {% if salt['additional.substring_search']('Radeon', grains['gpus']) %} 11 | # Radeon XvBA 12 | # We need xvba-va-driver (fglrx driver) or vdpau-va-driver (radeon_si driver) 13 | {% elif salt['additional.substring_search']('intel', grains['gpus']) %} 14 | # Intel vaapi 15 | - libva-intel-vaapi-driver 16 | {% elif salt['additional.substring_search']('GeForce', grains['gpus']) %} 17 | # Nvidia vdpau 18 | - vdpau-va-driver 19 | {% endif %} 20 | -------------------------------------------------------------------------------- /libvirt/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # LibVirt VM control 3 | # 4 | 5 | include: 6 | - users 7 | 8 | libvirt-pkgs: 9 | pkg.installed: 10 | - pkgs: 11 | - libvirt-bin 12 | - virt-manager 13 | - virtinst 14 | - virt-viewer 15 | 16 | {% for user in salt['pillar.get']('users', {}) %} 17 | {% if salt['pillar.get']('users:%s:admin'|format(user), False) == True %} 18 | extend: 19 | {{ user }}: 20 | user.present: 21 | - optional_groups: 22 | - libvirtd 23 | {% endif %} 24 | {% endfor %} 25 | -------------------------------------------------------------------------------- /lightdm/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # LightDM display manager 3 | # 4 | 5 | lightdm: 6 | pkg: 7 | - installed 8 | service.running: 9 | # We do not need to restart lightdm if file changes 10 | - require: 11 | - file: /etc/lightdm/lightdm.conf 12 | 13 | /etc/lightdm/login.sh: 14 | file.managed: 15 | - source: salt://lightdm/login.sh 16 | - mode: 755 17 | - user: root 18 | - group: root 19 | - replace: False 20 | 21 | /etc/lightdm/lightdm.conf: 22 | file.managed: 23 | - source: salt://lightdm/lightdm.conf 24 | - mode: 644 25 | - user: root 26 | - group: root 27 | - require: 28 | - pkg: lightdm 29 | - file: /etc/lightdm/login.sh 30 | -------------------------------------------------------------------------------- /lightdm/lightdm.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [SeatDefaults] 7 | allow-guest=false 8 | greeter-allow-guest=false 9 | greeter-show-remote-login=false 10 | 11 | display-setup-script=/etc/lightdm/login.sh 12 | session-setup-script=/etc/lightdm/login.sh 13 | -------------------------------------------------------------------------------- /lightdm/login.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | #xrandr --output VGA1 --primary --output eDP1 --right-of VGA1 7 | #setxkbmap -symbols "pc+us+ru:2+inet(evdev)+group(lctrl_toggle)+ctrl(nocaps)+altwin(swap_lalt_lwin)" 8 | -------------------------------------------------------------------------------- /logwatch/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Awesome - window manager 3 | # 4 | 5 | logwatch: 6 | pkg.installed 7 | 8 | /etc/logwatch/conf/logwatch.conf: 9 | file.managed: 10 | - source: salt://logwatch/logwatch.conf.jinja 11 | - template: jinja 12 | - user: root 13 | - group: root 14 | - mode: 644 15 | - require: 16 | - pkg: logwatch 17 | 18 | /var/cache/logwatch: 19 | file.directory: 20 | - user: root 21 | - group: root 22 | - mode: 750 23 | - require: 24 | - pkg: logwatch 25 | 26 | 
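# The rename below moves the stock daily logwatch cron job into cron.weekly,
# which matches the "Range = between -7 days and -1 days" window configured
# in logwatch.conf.jinja.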
/etc/cron.weekly/00logwatch: 27 | file.rename: 28 | - source: /etc/cron.daily/00logwatch 29 | - require: 30 | - pkg: logwatch 31 | -------------------------------------------------------------------------------- /logwatch/logwatch.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LogDir = /var/log 7 | TmpDir = /var/cache/logwatch 8 | 9 | Output = mail 10 | Format = html 11 | Encode = none 12 | 13 | MailTo = root 14 | MailFrom = Logwatch 15 | mailer = "/usr/sbin/sendmail -t" 16 | 17 | Range = between -7 days and -1 days 18 | Detail = Low 19 | 20 | Service = All 21 | Service = "-zz-network" 22 | Service = "-zz-sys" 23 | Service = "-eximstats" 24 | -------------------------------------------------------------------------------- /lvm/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Logical Volume Manager utilities 3 | # 4 | 5 | lvm: 6 | pkg.installed: 7 | - pkgs: 8 | - lvm2 9 | -------------------------------------------------------------------------------- /mc/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # MC - twice panel midnight commander 3 | # 4 | 5 | mc: 6 | pkg: 7 | - installed 8 | -------------------------------------------------------------------------------- /monit/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Monit - system & services monitor client 3 | # 4 | 5 | include: 6 | - sensors 7 | 8 | {% set mmonit_address = salt['pillar.get']('mmonit:address', '127.0.0.1') %} 9 | {% set mmonit_user = salt['pillar.get']('mmonit:user', 'monit') %} 10 | {% set mmonit_password = salt['pillar.get']('mmonit:password', '') %} 11 | 12 | monit: 13 | pkg: 14 | - installed 15 | service.running: 16 | - reload: True 17 | - require: 18 | - pkg: monit 19 | 20 | /etc/monit/monitrc: 21 | file.managed: 22 | - source: salt://monit/monitrc.jinja 23 | - template: jinja 24 | - user: root 25 | - group: root 26 | - mode: 600 27 | - context: 28 | mmonit_address: {{ mmonit_address }} 29 | mmonit_user: {{ mmonit_user }} 30 | mmonit_password: {{ mmonit_password }} 31 | - watch_in: 32 | - service: monit 33 | - require: 34 | - pkg: lm-sensors 35 | - pkg: monit 36 | -------------------------------------------------------------------------------- /monit/macros.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Monit module macro 3 | # 4 | 5 | {% macro monit(module, suffix, subname) -%} 6 | /etc/monit/conf.d/{{ module }}{{ subname|default('') }}{{ suffix|default('') }}.conf: 7 | file.managed: 8 | - source: salt://{{ module }}/monit{{ suffix|default('') }}.conf.jinja 9 | - template: jinja 10 | - user: root 11 | - group: root 12 | - mode: 600 13 | - context: 14 | subname: {{ subname|default('') }} 15 | - require: 16 | - pkg: monit 17 | - watch_in: 18 | - service: monit 19 | {%- endmacro %} 20 | -------------------------------------------------------------------------------- /monit/monitrc.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | set daemon 120 7 | 8 | set logfile /var/log/monit.log 9 | set idfile /var/lib/monit/id 10 | set statefile /var/lib/monit/state 11 | 12 | set eventqueue 13 | basedir /var/lib/monit/events 
14 | slots 1000 15 | 16 | set mmonit https://{{ mmonit_user }}:{{ mmonit_password }}@{{ mmonit_address }}/collector 17 | set httpd port 2812 18 | allow 127.0.0.1 19 | allow {{ mmonit_address }} 20 | 21 | include /etc/monit/conf.d/* 22 | 23 | {% if grains['get']('virtual') == 'physical' %} 24 | # Sensors 25 | check program sensors with path /usr/bin/sensors 26 | group system 27 | if status != 0 then alert 28 | {%- endif %} 29 | 30 | # RAID 31 | check file raid with path /proc/mdstat 32 | group system 33 | group filesystem 34 | if match "\[.*_.*\]" then alert 35 | 36 | # Root filesystem 37 | check device rootfs with path / 38 | group system 39 | group filesystem 40 | start program = "/bin/mount /" 41 | stop program = "/bin/mount -o remount,ro /" 42 | if failed permission 755 then unmonitor 43 | if failed uid root then unmonitor 44 | if space usage > 80% for 5 times within 15 cycles then alert 45 | if space usage > 99% then stop 46 | if inode usage > 80% then alert 47 | if inode usage > 99% then stop 48 | -------------------------------------------------------------------------------- /monit/nginx-site.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | server { 7 | listen 80; 8 | 9 | server_name {{ nginx_server_name }}; 10 | 11 | return 302 https://{{ nginx_server_name }}$request_uri; 12 | } 13 | 14 | server { 15 | listen 443 ssl spdy; 16 | 17 | server_name {{ nginx_server_name }}; 18 | 19 | ssl_certificate {{ ssl_cert }}; 20 | ssl_certificate_key {{ ssl_key }}; 21 | 22 | include /etc/nginx/ssl.conf; 23 | 24 | charset utf8; 25 | limit_conn perip 10; 26 | limit_conn perserver 150; 27 | 28 | client_max_body_size 500m; 29 | client_body_buffer_size 128k; 30 | 31 | access_log /var/log/nginx/access.{{ nginx_conf_name }}.log; 32 | error_log /var/log/nginx/error.{{ nginx_conf_name }}.log; 33 | 34 | location / { 35 | proxy_ignore_client_abort on; 36 | proxy_pass http://127.0.0.1:{{ service_port }}; 37 | } 38 | location /collector { 39 | allow 192.168.0.0/20; 40 | deny all; 41 | proxy_ignore_client_abort on; 42 | proxy_pass http://127.0.0.1:{{ service_port }}/collector; 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /monit/server.xml.jinja: -------------------------------------------------------------------------------- 1 | 5 | 6 | 7 | 8 | 9 | 10 | {%- if db_type == 'sqlite' %} 11 | 18 | 19 | 20 | 21 | 23 | 24 | 25 | 26 | 27 | 28 | {{ license_data | indent(8) }} 29 | 30 | 31 | -------------------------------------------------------------------------------- /monit/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | description "M/Monit system monitoring" 7 | author "Rabit " 8 | 9 | limit core unlimited unlimited 10 | 11 | start on runlevel [2345] 12 | stop on runlevel [!2345] 13 | 14 | setuid {{ username }} 15 | setgid {{ username }} 16 | 17 | expect daemon 18 | respawn 19 | respawn limit 10 5 20 | 21 | env USER="{{ username }}" 22 | 23 | exec {{ dist_dir }}/current/bin/mmonit -c {{ dist_dir }}/conf/server.xml -p {{ dist_dir }}/log 24 | 25 | pre-stop exec {{ dist_dir }}/current/bin/mmonit -c {{ dist_dir }}/conf/server.xml -p {{ dist_dir }}/log stop 26 | -------------------------------------------------------------------------------- /mpd/init.sls: 
-------------------------------------------------------------------------------- 1 | # 2 | # MPD audio server 3 | # 4 | 5 | mpd: 6 | pkg: 7 | - installed 8 | service: 9 | - running 10 | 11 | /etc/mpd.conf: 12 | file.managed: 13 | - source: salt://mpd/mpd.conf 14 | - user: root 15 | - group: audio 16 | - mode: 640 17 | - require: 18 | - pkg: mpd 19 | - watch_in: 20 | - service: mpd 21 | -------------------------------------------------------------------------------- /mpd/mpd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | music_directory "/srv/user/music" 7 | playlist_directory "/var/lib/mpd/playlists" 8 | db_file "/var/lib/mpd/tag_cache" 9 | log_file "/var/log/mpd/mpd.log" 10 | pid_file "/run/mpd/pid" 11 | state_file "/var/lib/mpd/state" 12 | sticker_file "/var/lib/mpd/sticker.sql" 13 | 14 | user "mpd" 15 | bind_to_address "localhost" 16 | 17 | input { 18 | plugin "curl" 19 | } 20 | 21 | audio_output { 22 | type "pulse" 23 | name "Local Music Player Daemon" 24 | server "127.0.0.1" 25 | } 26 | 27 | filesystem_charset "UTF-8" 28 | id3v1_encoding "UTF-8" 29 | metadata_to_use "none" 30 | -------------------------------------------------------------------------------- /net/hosts.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | 127.0.0.1 localhost 7 | {{ salt['grains.get']('fqdn_ip4', ['127.0.1.1'])|first() }} {{ grains['id'] }} 8 | 9 | {% if salt['additional.state_in'](['openvpn.server']) -%} 10 | # VPN hosts: 11 | {%- for host, args in salt['pillar.get']('net:hosts', {})|dictsort %}{% if not 'hidden' in args %} 12 | {{ args['ip'] }} {{ host }}.vpn{% if 'aliases' in args %}{% for alias in args['aliases'] %} {{ alias }}{% endfor %}{% endif %} 13 | {%- endif %}{% endfor %} 14 | {%- endif %} 15 | -------------------------------------------------------------------------------- /net/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Network configuration 3 | # 4 | 5 | /etc/hosts: 6 | file.managed: 7 | - source: salt://net/hosts.jinja 8 | - template: jinja 9 | - mode: 0644 10 | - user: root 11 | - group: root 12 | -------------------------------------------------------------------------------- /nginx/common.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Common libs 7 | location ^~ /lib { 8 | root /srv/www; 9 | gzip_static on; 10 | access_log off; 11 | log_not_found off; 12 | expires 90d; 13 | } 14 | 15 | # Cache control for static files 16 | location ~* \.(jpg|jpeg|gif|png|css|js|ico|xml)$ { 17 | gzip_static on; 18 | access_log off; 19 | log_not_found off; 20 | expires 90d; 21 | } 22 | 23 | # Prevent access to hidden files & subdirs (.git, .svn etc.) 24 | location ~ /\. 
{ return 403; } 25 | 26 | # Errors 27 | error_page 400 /error/400.html; 28 | error_page 401 /error/401.html; 29 | error_page 403 /error/403.html; 30 | error_page 404 /error/404.html; 31 | error_page 500 /error/500.html; 32 | error_page 502 /error/502.html; 33 | error_page 503 /error/503.html; 34 | 35 | location /error { 36 | root /srv/www; 37 | internal; 38 | } 39 | -------------------------------------------------------------------------------- /nginx/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Nginx webserver 3 | # 4 | 5 | {% import 'openssl/vars.sls' as ssl with context %} 6 | {% from 'monit/macros.sls' import monit with context %} 7 | 8 | nginx-full: 9 | pkg.installed 10 | 11 | /etc/nginx/nginx.conf: 12 | file.managed: 13 | - source: salt://nginx/nginx.conf 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - pkg: nginx-full 19 | - watch_in: 20 | - service: nginx 21 | 22 | /etc/nginx/common.conf: 23 | file.managed: 24 | - source: salt://nginx/common.conf.jinja 25 | - template: jinja 26 | - user: root 27 | - group: root 28 | - mode: 644 29 | - require: 30 | - pkg: nginx-full 31 | - watch_in: 32 | - service: nginx 33 | 34 | /etc/nginx/ssl.conf: 35 | file.managed: 36 | - source: salt://nginx/ssl.conf.jinja 37 | - template: jinja 38 | - user: root 39 | - group: root 40 | - mode: 644 41 | - context: 42 | ssl_ca_crt: {{ ssl.ca_crt }} 43 | - require: 44 | - pkg: nginx-full 45 | - watch_in: 46 | - service: nginx 47 | 48 | /etc/nginx/sites-available/www-util.conf: 49 | file.managed: 50 | - source: salt://nginx/www-util.conf 51 | - user: root 52 | - group: root 53 | - mode: 644 54 | - require: 55 | - pkg: nginx-full 56 | - watch_in: 57 | - service: nginx 58 | 59 | /etc/nginx/sites-enabled/www-util.conf: 60 | file.symlink: 61 | - target: /etc/nginx/sites-available/www-util.conf 62 | - watch_in: 63 | - service: nginx 64 | 65 | /etc/nginx/sites-enabled/default: 66 | file.absent: 67 | - require_in: 68 | - service: nginx 69 | 70 | nginx: 71 | service.running: 72 | - reload: True 73 | - watch: 74 | - pkg: nginx-full 75 | 76 | {{ monit('nginx') }} 77 | -------------------------------------------------------------------------------- /nginx/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process nginx with pidfile /var/run/nginx.pid 7 | group www 8 | group nginx 9 | start program = "/usr/sbin/service nginx start" 10 | stop program = "/usr/sbin/service nginx stop" 11 | if 5 restarts with 5 cycles then timeout 12 | depend nginx_bin 13 | depend nginx_rc 14 | 15 | check file nginx_bin with path /usr/sbin/nginx 16 | group nginx 17 | include /etc/monit/templates/rootbin 18 | 19 | check file nginx_rc with path /etc/init.d/nginx 20 | group nginx 21 | include /etc/monit/templates/rootbin 22 | -------------------------------------------------------------------------------- /nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | user www-data; 7 | pid /var/run/nginx.pid; 8 | 9 | # This number should be, at maximum, the number of CPU cores on your system. 
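# (nproc or grep -c ^processor /proc/cpuinfo reports that count on Linux.)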
10 | worker_processes 2; 11 | worker_priority -5; 12 | timer_resolution 100ms; 13 | worker_rlimit_nofile 8192; 14 | 15 | events { 16 | # Determines how many clients will be served by each worker process. 17 | worker_connections 1024; 18 | # The effective method, used on Linux 2.6+, optimized to serve many clients with each thread. 19 | use epoll; 20 | } 21 | 22 | http { 23 | ## 24 | # Basic Settings 25 | ## 26 | # Sendfile copies data between one FD and another from within the kernel. 27 | sendfile on; 28 | # Causes nginx to attempt to send its HTTP response headers in one packet, instead of using partial frames. 29 | tcp_nopush on; 30 | # Don't buffer data-sends (disable Nagle algorithm). 31 | tcp_nodelay on; 32 | # Timeout for keep-alive connections. Server will close connections after this time. 33 | keepalive_timeout 30; 34 | # Number of requests a client can make over the keep-alive connection. 35 | keepalive_requests 1000; 36 | # Allow the server to close the connection after a client stops responding. 37 | reset_timedout_connection on; 38 | 39 | include /etc/nginx/mime.types; 40 | default_type application/octet-stream; 41 | 42 | ## 43 | # Caching 44 | ## 45 | # Caches information about open FDs, frequently accessed files. 46 | open_file_cache max=2000 inactive=20s; 47 | open_file_cache_valid 30s; 48 | open_file_cache_min_uses 2; 49 | open_file_cache_errors on; 50 | 51 | ## 52 | # Logging Settings 53 | ## 54 | log_format main '$remote_addr - $remote_user [$time_local] $request ' 55 | '"$status" $body_bytes_sent "$http_referer" ' 56 | '"$http_user_agent" "$http_x_forwarded_for"'; 57 | 58 | access_log /var/log/nginx/access.log; 59 | error_log /var/log/nginx/error.log; 60 | 61 | ## 62 | # Gzip Settings 63 | ## 64 | gzip on; 65 | gzip_disable "msie6"; 66 | gzip_min_length 10240; 67 | gzip_buffers 64 8k; 68 | gzip_comp_level 3; 69 | gzip_http_version 1.1; 70 | gzip_proxied expired no-cache no-store private auth; 71 | gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; 72 | 73 | ## 74 | # Optimizations 75 | ## 76 | server_tokens off; 77 | limit_conn_zone $binary_remote_addr zone=perip:10m; 78 | limit_conn_zone $server_name zone=perserver:10m; 79 | types_hash_max_size 2048; 80 | 81 | ## 82 | # Virtual Host Configs 83 | ## 84 | include /etc/nginx/conf.d/*.conf; 85 | include /etc/nginx/sites-enabled/*; 86 | } 87 | 88 | -------------------------------------------------------------------------------- /nginx/ssl.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | keepalive_timeout 70; 7 | 8 | ssl_client_certificate {{ ssl_ca_crt }}; 9 | ssl_protocols TLSv1 TLSv1.1 TLSv1.2; 10 | ssl_ciphers kEECDH+AES128:kEECDH:kEDH:-3DES:kRSA+AES128:kEDH+3DES:DES-CBC3-SHA:!RC4:!aNULL:!eNULL:!MD5:!EXPORT:!LOW:!SEED:!CAMELLIA:!IDEA:!PSK:!SRP:!SSLv2; 11 | ssl_prefer_server_ciphers on; 12 | ssl_session_cache shared:SSL:10m; 13 | ssl_session_timeout 24h; 14 | 15 | ssl_stapling on; 16 | ssl_stapling_verify on; 17 | resolver 127.0.0.1; 18 | 19 | add_header Strict-Transport-Security "max-age=63072000; includeSubDomains"; 20 | add_header X-Frame-Options DENY; 21 | add_header X-Content-Type-Options nosniff; 22 | resolver_timeout 5s; 23 | -------------------------------------------------------------------------------- /nginx/www-util.conf:
-------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | upstream phpfpm_backend { 7 | server unix:/var/run/php5-fpm.sock; 8 | } 9 | 10 | server { 11 | listen 80; 12 | 13 | location / { 14 | deny all; 15 | } 16 | } 17 | 18 | server { 19 | listen 80; 20 | server_name localhost 127.0.0.1; 21 | 22 | location / { 23 | deny all; 24 | } 25 | 26 | location /status { 27 | stub_status on; 28 | access_log off; 29 | allow 127.0.0.1; 30 | deny all; 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /nmap/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # NMAP network scanner 3 | # 4 | 5 | nmap: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /ntp/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # NTP time server 3 | # 4 | 5 | ntp: 6 | pkg: 7 | - installed 8 | service: 9 | - running 10 | - require: 11 | - pkg: ntp 12 | 13 | /etc/ntp.conf: 14 | file.managed: 15 | - source: salt://ntp/ntp.conf 16 | - user: root 17 | - group: root 18 | - mode: 440 19 | - require: 20 | - pkg: ntp 21 | - watch_in: 22 | - service: ntp 23 | -------------------------------------------------------------------------------- /ntp/ntp.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | driftfile /var/lib/ntp/ntp.drift 7 | 8 | statistics loopstats peerstats clockstats 9 | filegen loopstats file loopstats type day enable 10 | filegen peerstats file peerstats type day enable 11 | filegen clockstats file clockstats type day enable 12 | 13 | server 0.ubuntu.pool.ntp.org 14 | server 1.ubuntu.pool.ntp.org 15 | server 2.ubuntu.pool.ntp.org 16 | server 3.ubuntu.pool.ntp.org 17 | 18 | server ntp.ubuntu.com 19 | 20 | restrict -4 default kod notrap nomodify nopeer noquery 21 | restrict -6 default kod notrap nomodify nopeer noquery 22 | 23 | restrict 127.0.0.1 24 | restrict ::1 25 | -------------------------------------------------------------------------------- /nut/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # NUT UPS monitoring 3 | # 4 | 5 | include: 6 | - udev 7 | 8 | nut: 9 | pkg.installed 10 | 11 | /etc/udev/rules.d/10-nut-ups.rules: 12 | file.managed: 13 | - source: salt://nut/udev.rules 14 | - user: root 15 | - group: root 16 | - mode: 644 17 | - require: 18 | - pkg: nut 19 | - watch_in: 20 | - cmd: udev_trigger 21 | 22 | {% for conf in ['nut.conf', 'ups.conf', 'upsd.conf', 'upsmon.conf', 'upsd.users'] %} 23 | /etc/nut/{{ conf }}: 24 | file.managed: 25 | - source: salt://nut/{{ conf }} 26 | - user: root 27 | - group: nut 28 | - mode: 640 29 | - require: 30 | - pkg: nut 31 | - watch_in: 32 | - service: nut-server 33 | - service: nut-client 34 | {% endfor %} 35 | 36 | nut-server: 37 | service.running 38 | 39 | nut-client: 40 | service.running 41 | -------------------------------------------------------------------------------- /nut/nut.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | MODE=standalone 7 |
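# Note (editorial aside, not from the original repo - hedged summary of NUT behaviour):
# MODE here accepts none, standalone, netserver or netclient; standalone runs the driver,
# upsd and upsmon together on this single host, which is consistent with the localhost-only
# LISTEN in upsd.conf and the "upsmon master" entry in upsd.users below.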
-------------------------------------------------------------------------------- /nut/udev.rules: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | ATTRS{product}=="USB to Serial", GROUP="nut", MODE="0664" 7 | -------------------------------------------------------------------------------- /nut/ups.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [ups] 7 | driver = blazer_usb 8 | port = auto 9 | desc = "Ipponi Smart Power Pro UPS" 10 | offdelay = 12 11 | ondelay = 7 12 | -------------------------------------------------------------------------------- /nut/upsd.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | LISTEN 127.0.0.1 3493 7 | -------------------------------------------------------------------------------- /nut/upsd.users: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [upsmon] 7 | password = MonPasswD14129 8 | upsmon master 9 | -------------------------------------------------------------------------------- /nut/upsmon.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | MONITOR ups@localhost:3493 1 upsmon MonPasswD14129 master 7 | 8 | MINSUPPLIES 1 9 | POLLFREQ 5 10 | POLLFREQALERT 5 11 | HOSTSYNC 15 12 | DEADTIME 15 13 | POWERDOWNFLAG /etc/killpower 14 | SHUTDOWNCMD "/sbin/shutdown -h +0" 15 | 16 | RBWARNTIME 43200 17 | NOCOMMWARNTIME 300 18 | FINALDELAY 5 19 | -------------------------------------------------------------------------------- /openssl/certscheck.sh.jinja: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Check certificates on master server and send email to admin on: 7 | # - Invalid 8 | # - End date < 90 days 9 | # 10 | # Usage: 11 | # # certscheck.sh 12 | # 13 | # Root Crontab: 14 | # # Certscheck 15 | # 0 23 * * * /usr/local/bin/certscheck.sh 2>&1 | /usr/bin/logger -t CERTSCHECK 16 | 17 | DEBUG=false 18 | warning_days=90 # Number of days to warn about soon-to-expire certs 19 | recipients="{{ salt['pillar.get']('mail:admin', '') }}" 20 | 21 | PATH="/bin:/usr/bin:/sbin:/usr/sbin" 22 | 23 | ERROR=0 24 | subj="{{ grains['id'] }} CERTSCHECK error" 25 | msg="Hello from CERTSCHECK:\n" 26 | 27 | msg() { 28 | echo "${1}" 29 | msg="${msg}\n${1}" 30 | } 31 | 32 | err() { 33 | echo "${1}" 34 | msg="${msg}\n${1}" 35 | ERROR=1 36 | } 37 | 38 | for cert in {{ ssl_certs }}/* 39 | do 40 | msg "${cert}:" 41 | output=$(openssl x509 -noout -in "${cert}" -dates 2>/dev/null) 42 | 43 | if [ "$?" -ne 0 ]; then 44 | err " ! 
Error getting info for cert" 45 | continue 46 | fi 47 | 48 | start_date=$(echo "${output}" | tr -d '\n' | sed 's/.*notBefore=\(.*\).*not.*/\1/g') 49 | end_date=$(echo "${output}" | tr -d '\n' | sed 's/.*notAfter=\(.*\)$/\1/g') 50 | 51 | start_epoch=$(date +%s -d "$start_date") 52 | end_epoch=$(date +%s -d "$end_date") 53 | 54 | epoch_now=$(date +%s) 55 | 56 | if [ "$start_epoch" -gt "$epoch_now" ]; then 57 | err " ! is not yet valid" 58 | fi 59 | 60 | seconds_to_expire=$(($end_epoch - $epoch_now)) 61 | days_to_expire=$(($seconds_to_expire / 86400)) 62 | 63 | warning_seconds=$((86400 * $warning_days)) 64 | 65 | if [ "$seconds_to_expire" -lt "$warning_seconds" ]; then 66 | err " ! is soon to expire: $days_to_expire days" 67 | else 68 | msg " * days to expire: $days_to_expire days" 69 | fi 70 | done 71 | 72 | if [ ${ERROR} -gt 0 ] 73 | then 74 | # echo "${msg}" | mail -s "${subj}" ${recipients} 75 | cat < 81 | $(echo "${msg}" | sed ':a;N;$!ba;s/\n/\n/g') 82 | 83 | EOF 84 | if [ "$?" -ne 0 ]; then 85 | echo "Error: mail was not sent" 86 | else 87 | echo "Mail was sent" 88 | fi 89 | fi 90 | -------------------------------------------------------------------------------- /openssl/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # OpenSSL crypto library 3 | # 4 | 5 | {% import 'openssl/vars.sls' as ssl with context %} 6 | 7 | openssl: 8 | pkg.installed 9 | 10 | {{ ssl.home }}: 11 | file.directory: 12 | - user: root 13 | - group: root 14 | - mode: 755 15 | 16 | {{ ssl.ca }}: 17 | file.directory: 18 | - user: root 19 | - group: root 20 | - mode: 755 21 | - require: 22 | - file: {{ ssl.home }} 23 | 24 | {{ ssl.certs }}: 25 | file.directory: 26 | - user: root 27 | - group: root 28 | - mode: 755 29 | - require: 30 | - file: {{ ssl.home }} 31 | 32 | {{ ssl.keys }}: 33 | file.directory: 34 | - user: root 35 | - group: root 36 | - mode: 755 37 | - require: 38 | - file: {{ ssl.home }} 39 | 40 | {% if 'server' != salt['pillar.get']('net:hosts:%s:vpn'|format(grains['id']), {}) %} 41 | {{ ssl.key }}: 42 | file.managed: 43 | - source: salt://keys/{{ ssl.key.split('/')[-1] }} 44 | - user: root 45 | - group: root 46 | - mode: 640 47 | - makedirs: True 48 | 49 | {{ ssl.cert }}: 50 | file.managed: 51 | - source: salt://certs/{{ ssl.cert.split('/')[-1] }} 52 | - user: root 53 | - group: root 54 | - mode: 644 55 | - makedirs: True 56 | {% endif %} 57 | 58 | {{ ssl.ca_crt }}: 59 | file.managed: 60 | - source: salt://ca/ca.crt 61 | - user: root 62 | - group: root 63 | - mode: 644 64 | - makedirs: True 65 | -------------------------------------------------------------------------------- /openssl/vars.sls: -------------------------------------------------------------------------------- 1 | {% set home = salt['pillar.get']('ssl:home', '/srv/ssl') -%} 2 | {% set ca = salt['pillar.get']('ssl:ca', home+'/ca') -%} 3 | {% set ca_key = salt['pillar.get']('ssl:ca_key', ca+'/ca.key') -%} 4 | {% set ca_crt = salt['pillar.get']('ssl:ca_crt', ca+'/ca.crt') -%} 5 | {% set ca_config = salt['pillar.get']('ssl:ca_config', ca+'/ca.config') -%} 6 | {% set crl = salt['pillar.get']('ssl:crl', ca+'/crl.pem') -%} 7 | {% set keys = salt['pillar.get']('ssl:keys', home+'/keys') -%} 8 | {% set key = keys + "/internal_" + salt['additional.inverse'](grains['id']) + '.key' %} 9 | {% set certs = salt['pillar.get']('ssl:certs', home+'/certs') -%} 10 | {% set cert = certs + "/internal_" + salt['additional.inverse'](grains['id']) + '.crt' %} 11 | {% set newcerts = salt['pillar.get']('ssl:newcerts', 
home+'/newcerts') -%} 12 | {% set csrs = salt['pillar.get']('ssl:csrs', home+'/csrs') -%} 13 | {% set crls = salt['pillar.get']('ssl:crls', home+'/crls') -%} 14 | {% set dh = home + '/dh2048.pem' -%} 15 | -------------------------------------------------------------------------------- /openvpn/client.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | client 7 | proto udp 8 | dev tun 9 | 10 | remote {{ vpn_host }} 11 | port {{ vpn_port }} 12 | 13 | ca {{ ssl_ca_crt }} 14 | cert {{ ssl_cert }} 15 | key {{ ssl_key }} 16 | 17 | tls-client 18 | tls-auth {{ vpn_ta }} 1 19 | auth MD5 20 | cipher BF-CBC 21 | 22 | ns-cert-type server 23 | comp-lzo 24 | 25 | persist-key 26 | persist-tun 27 | 28 | status openvpn-status.log 29 | log /var/log/openvpn.log 30 | verb 3 31 | mute 10 32 | -------------------------------------------------------------------------------- /openvpn/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # OpenVPN - virtual private network client 3 | # 4 | # Notice: Disabled by default - to enable add net:hosts::vpn: client 5 | # 6 | 7 | {% import 'openssl/vars.sls' as ssl with context %} 8 | {% import 'openvpn/vars.sls' as vpn with context %} 9 | 10 | include: 11 | - openssl 12 | 13 | openvpn: 14 | pkg: 15 | - installed 16 | service: 17 | {%- if vpn.autorun != 'no' %} 18 | - running 19 | {%- else %} 20 | - disabled 21 | {%- endif %} 22 | 23 | {% if vpn.instance != 'none' -%} 24 | /etc/openvpn/{{ vpn.instance }}.conf: 25 | file.managed: 26 | - source: salt://openvpn/{{ vpn.instance }}.conf.jinja 27 | - user: root 28 | - group: root 29 | - mode: 640 30 | - template: jinja 31 | - context: 32 | vpn_host: {{ vpn.host }} 33 | vpn_port: {{ vpn.port }} 34 | vpn_cert: {{ vpn.cert }} 35 | vpn_key: {{ vpn.key }} 36 | vpn_ta: {{ vpn.ta }} 37 | vpn_ccd: {{ vpn.ccd }} 38 | vpn_ip: {{ vpn.ip }} 39 | vpn_net: {{ vpn.net }} 40 | vpn_mask: {{ vpn.mask }} 41 | ssl_ca_crt: {{ ssl.ca_crt }} 42 | ssl_cert: {{ ssl.cert }} 43 | ssl_key: {{ ssl.key }} 44 | ssl_dh: {{ ssl.dh }} 45 | - require: 46 | - pkg: openvpn 47 | - watch_in: 48 | - service: openvpn 49 | {%- endif %} 50 | 51 | /etc/default/openvpn: 52 | file.replace: 53 | - pattern: '^AUTOSTART=.+$' 54 | {%- if vpn.autorun == 'no' %} 55 | - repl: AUTOSTART="" 56 | {%- else %} 57 | - repl: AUTOSTART="{{ vpn.instance }}" 58 | {%- endif %} 59 | - append_if_not_found: True 60 | - watch_in: 61 | - service: openvpn 62 | - require: 63 | - pkg: openvpn 64 | {%- if vpn.instance != 'none' %} 65 | - file: /etc/openvpn/{{ vpn.instance }}.conf 66 | {%- endif %} 67 | 68 | {{ vpn.ta }}: 69 | file.managed: 70 | {%- if vpn.instance != 'server' %} 71 | - source: salt://openvpn_ta.key 72 | - group: root 73 | {%- else %} 74 | - group: salt-stack 75 | {% endif %} 76 | - user: root 77 | - mode: 640 78 | - makedirs: True 79 | - watch_in: 80 | - service: openvpn 81 | -------------------------------------------------------------------------------- /openvpn/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process openvpn-{{ subname }} with pidfile /var/run/openvpn/server.pid 7 | group system 8 | group network 9 | group openvpn 10 | start program = "/usr/sbin/service openvpn start {{ subname }}" 11 | stop program = 
"/usr/sbin/service openvpn stop {{ subname }}" 12 | -------------------------------------------------------------------------------- /openvpn/server.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | port {{ vpn_port }} 7 | proto udp 8 | dev tun 9 | 10 | ca {{ ssl_ca_crt }} 11 | cert {{ vpn_cert }} 12 | key {{ vpn_key }} 13 | dh {{ ssl_dh }} 14 | 15 | client-config-dir {{ vpn_ccd }} 16 | 17 | # Local networks from 192.168.0.0 to 192.168.15.0 18 | server {{ vpn_net }} 255.255.240.0 19 | 20 | # Client-Router networks: 21 | {%- for host, args in salt['pillar.get']('net:hosts', {})|dictsort %}{% if 'route' in args %} 22 | route {{ args['route'] }} {{ vpn_mask }} {{ vpn_ip }} # {{ host }} subnetwork 23 | {%- endif %}{% endfor %} 24 | 25 | tls-server 26 | tls-auth {{ vpn_ta }} 0 27 | tls-timeout 120 28 | auth MD5 29 | cipher BF-CBC 30 | 31 | client-to-client 32 | 33 | keepalive 10 120 34 | 35 | comp-lzo 36 | 37 | max-clients 100 38 | 39 | user nobody 40 | group nogroup 41 | 42 | persist-key 43 | persist-tun 44 | 45 | status openvpn-status.log 46 | log /var/log/openvpn.log 47 | verb 3 48 | mute 10 49 | -------------------------------------------------------------------------------- /openvpn/server.sls: -------------------------------------------------------------------------------- 1 | # 2 | # OpenVPN - master 3 | # 4 | # Do not forget to sign openvpn.csr! 5 | # 6 | 7 | {% import 'openssl/vars.sls' as ssl with context %} 8 | {% import 'openvpn/vars.sls' as vpn with context %} 9 | {% from 'monit/macros.sls' import monit with context %} 10 | 11 | include: 12 | - openvpn 13 | {%- if vpn.port <= 1024 %} 14 | - libcap2 15 | {%- endif %} 16 | 17 | 18 | openvpn --genkey --secret {{ vpn.ta }}: 19 | cmd.run: 20 | - unless: test -f {{ vpn.ta }} 21 | - require: 22 | - pkg: openvpn 23 | - require_in: 24 | - file: /etc/openvpn/server.conf 25 | - file: {{ vpn.ta }} 26 | 27 | openssl req -config {{ ssl.ca_config }} -extensions server -new -newkey 'rsa:2048' -nodes -keyout {{ vpn.key }} -out {{ vpn.csr }} -subj "/CN=vpn": 28 | cmd.run: 29 | - unless: test -f {{ vpn.key }} 30 | - require: 31 | - pkg: openssl 32 | - file: {{ ssl.ca_config }} 33 | - require_in: 34 | - file: /etc/openvpn/server.conf 35 | 36 | {% if vpn.port <= 1024 -%} 37 | setcap 'cap_net_bind_service=+ep' /usr/sbin/openvpn: 38 | cmd.run: 39 | - unless: getcap /usr/sbin/openvpn | grep -q 'cap_net_bind_service+ep' 40 | - require: 41 | - pkg: openssl 42 | - pkg: libcap2 43 | - require_in: 44 | - service: openvpn 45 | {%- endif %} 46 | 47 | {{ vpn.ccd }}: 48 | file.directory: 49 | - user: root 50 | - group: root 51 | - mode: 755 52 | 53 | {% for host, args in salt['pillar.get']('net:hosts', {}).items() %}{% if 'server' != args.vpn|default('none') %} 54 | {{ vpn.ccd }}/{{ host }}: 55 | file.managed: 56 | - contents: | 57 | ifconfig-push {{ args['ip'] }} {{ vpn.ip }}{% if 'route' in args %} 58 | iroute {{ args['route'] }} {{ vpn.mask }}{% endif %} 59 | - user: root 60 | - group: root 61 | - mode: 644 62 | {% endif %}{% endfor %} 63 | 64 | {{ monit('openvpn', '', 'server') }} 65 | -------------------------------------------------------------------------------- /openvpn/vars.sls: -------------------------------------------------------------------------------- 1 | {% import 'openssl/vars.sls' as ssl with context %} 2 | 3 | {% set autorun = salt['pillar.get']('net:hosts:%s:vpn'|format(grains['id']), 'no') -%} 
4 | {% set instance = salt['pillar.get']('net:hosts:%s:vpn'|format(grains['id']), 'client') -%} 5 | 6 | {% set host = salt['pillar.get']('openvpn:host', 'localhost') -%} 7 | {% set port = salt['pillar.get']('openvpn:port', '1194') -%} 8 | {% set ccd = salt['pillar.get']('openvpn:ccd', '/etc/openvpn/ccd') -%} 9 | 10 | {% set ip = salt['pillar.get']('openvpn:ip', '192.168.0.1') %} 11 | {% set net = salt['pillar.get']('openvpn:net', '192.168.0.0') %} 12 | {% set mask = salt['pillar.get']('openvpn:mask', '255.255.255.0') %} 13 | 14 | {% set ta = ssl.home + '/openvpn_ta.key' -%} 15 | {% set key = ssl.keys + '/openvpn.key' -%} 16 | {% set cert = ssl.certs + '/openvpn.crt' -%} 17 | {% set csr = ssl.csrs + '/openvpn.csr' -%} 18 | -------------------------------------------------------------------------------- /php/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # PHP FPM server 3 | # 4 | 5 | {% if salt['additional.state_in'](['postgresql']) %} 6 | include: 7 | - php.postgresql 8 | {% endif %} 9 | 10 | php5: 11 | pkg.installed: 12 | - pkgs: 13 | - php5-fpm 14 | - php5-common 15 | - php-apc 16 | service.running: 17 | - name: php5-fpm 18 | - watch: 19 | - pkg: php5 20 | 21 | /etc/php5/fpm/php-fpm.conf: 22 | file.managed: 23 | - source: salt://php/php-fpm.conf 24 | - user: root 25 | - group: root 26 | - mode: 644 27 | - require: 28 | - pkg: php5 29 | - watch_in: 30 | - service: php5 31 | 32 | /etc/php5/fpm/pool.d/www.conf: 33 | file.managed: 34 | - source: salt://php/www.pool.conf 35 | - user: root 36 | - group: root 37 | - mode: 644 38 | - require: 39 | - pkg: php5 40 | - watch_in: 41 | - service: php5 42 | 43 | /etc/php5/fpm/php.ini: 44 | file.managed: 45 | - source: salt://php/php.ini 46 | - user: root 47 | - group: root 48 | - mode: 644 49 | - require: 50 | - pkg: php5 51 | - watch_in: 52 | - service: php5 53 | -------------------------------------------------------------------------------- /php/php-fpm.conf: -------------------------------------------------------------------------------- 1 | ; 2 | ; WARNING: 3 | ; This file is under CM control - all manual changes will be removed 4 | ; 5 | 6 | pid = /var/run/php5-fpm.pid 7 | error_log = /var/log/php5-fpm.log 8 | 9 | emergency_restart_threshold = 10 10 | emergency_restart_interval = 1m 11 | process_control_timeout = 10s 12 | 13 | include=/etc/php5/fpm/pool.d/*.conf 14 | -------------------------------------------------------------------------------- /php/postgresql.sls: -------------------------------------------------------------------------------- 1 | # 2 | # PgSQL php5 module 3 | # 4 | 5 | php5-pgsql: 6 | pkg.installed: 7 | - require: 8 | - pkg: php5 9 | - watch_in: 10 | - service: php5 11 | -------------------------------------------------------------------------------- /php/www.pool.conf: -------------------------------------------------------------------------------- 1 | ; 2 | ; WARNING: 3 | ; This file is under CM control - all manual changes will be removed 4 | ; 5 | 6 | [www] 7 | user = www-data 8 | group = www-data 9 | 10 | listen = /var/run/php5-fpm.sock 11 | listen.owner = www-data 12 | listen.group = www-data 13 | 14 | pm = dynamic 15 | pm.max_children = 5 16 | pm.start_servers = 2 17 | pm.min_spare_servers = 1 18 | pm.max_spare_servers = 3 19 | pm.max_requests = 200 20 | 21 | chdir = / 22 | -------------------------------------------------------------------------------- /polipo/config.jinja: -------------------------------------------------------------------------------- 1 | # 2 | 
# WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | logSyslog = true 7 | logFile = /var/log/polipo/polipo.log 8 | 9 | socksParentProxy = "127.0.0.1:9050" 10 | socksProxyType = socks5 11 | allowedClients = 127.0.0.1 12 | proxyPort = 8124 13 | -------------------------------------------------------------------------------- /polipo/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Polipo - caching proxy for Tor 3 | # 4 | 5 | polipo: 6 | pkg: 7 | - installed 8 | service.running: 9 | - watch: 10 | - file: /etc/polipo/config 11 | - require: 12 | - pkg: polipo 13 | 14 | /etc/polipo/config: 15 | file.managed: 16 | - source: salt://polipo/config.jinja 17 | - template: jinja 18 | - user: root 19 | - group: root 20 | - mode: 644 21 | - require: 22 | - pkg: polipo 23 | -------------------------------------------------------------------------------- /postgresql/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # PostgreSQL relational database 3 | # 4 | 5 | {% set version = salt['pillar.get']('postgresql:'+grains['id']+':version', '9.3') %} 6 | {% set listen = salt['pillar.get']('postgresql:'+grains['id']+':listen', 'localhost') %} 7 | {% set access = salt['pillar.get']('postgresql:'+grains['id']+':access', '127.0.0.1/32') %} 8 | 9 | postgresql: 10 | pkg: 11 | - latest 12 | - names: 13 | - postgresql-{{ version }} 14 | - postgresql-client-{{ version }} 15 | service.running: 16 | - reload: True 17 | - watch: 18 | - pkg: postgresql 19 | 20 | /etc/postgresql/{{ version }}/main/postgresql.conf: 21 | file.managed: 22 | - source: salt://postgresql/postgresql.conf.jinja 23 | - template: jinja 24 | - context: 25 | version: {{ version }} 26 | listen: {{ listen }} 27 | - user: root 28 | - group: postgres 29 | - mode: 640 30 | - require: 31 | - pkg: postgresql 32 | - watch_in: 33 | - service: postgresql 34 | 35 | /etc/postgresql/{{ version }}/main/pg_hba.conf: 36 | file.managed: 37 | - source: salt://postgresql/pg_hba.conf.jinja 38 | - template: jinja 39 | - context: 40 | access: {{ access }} 41 | - user: root 42 | - group: postgres 43 | - mode: 640 44 | - require: 45 | - pkg: postgresql 46 | - watch_in: 47 | - service: postgresql 48 | -------------------------------------------------------------------------------- /postgresql/pg_hba.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | local all postgres peer 7 | 8 | # TYPE DATABASE USER ADDRESS METHOD 9 | local all all peer 10 | 11 | host all all {{ access }} md5 12 | -------------------------------------------------------------------------------- /postgresql/postgresql.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | data_directory = '/var/lib/postgresql/{{ version }}/main' 7 | hba_file = '/etc/postgresql/{{ version }}/main/pg_hba.conf' 8 | ident_file = '/etc/postgresql/{{ version }}/main/pg_ident.conf' 9 | 10 | external_pid_file = '/var/run/postgresql/{{ version }}-main.pid' 11 | 12 | # --- CONNECTIONS AND AUTHENTICATION 13 | 14 | listen_addresses = '{{ listen }}' 15 | port = 5432 16 | max_connections = 100 17 | unix_socket_directories = '/var/run/postgresql' 18 | 19 | ssl = true 20 | 21 | # --- RESOURCE USAGE (except WAL)
22 | 23 | shared_buffers = 24MB 24 | 25 | # --- CLIENT CONNECTION DEFAULTS 26 | 27 | datestyle = 'iso, mdy' 28 | lc_messages = 'en_US.UTF-8' 29 | lc_monetary = 'en_US.UTF-8' 30 | lc_numeric = 'en_US.UTF-8' 31 | lc_time = 'en_US.UTF-8' 32 | default_text_search_config = 'pg_catalog.english' 33 | 34 | -------------------------------------------------------------------------------- /prosody/modules/mod_csi/mod_csi.lua: -------------------------------------------------------------------------------- 1 | local st = require "util.stanza"; 2 | local xmlns_csi = "urn:xmpp:csi:0"; 3 | local csi_feature = st.stanza("csi", { xmlns = xmlns_csi }); 4 | 5 | module:hook("stream-features", function (event) 6 | if event.origin.username then 7 | event.features:add_child(csi_feature); 8 | end 9 | end); 10 | 11 | function refire_event(name) 12 | return function (event) 13 | if event.origin.username then 14 | module:fire_event(name, event); 15 | return true; 16 | end 17 | end; 18 | end 19 | 20 | module:hook("stanza/"..xmlns_csi..":active", refire_event("csi-client-active")); 21 | module:hook("stanza/"..xmlns_csi..":inactive", refire_event("csi-client-inactive")); 22 | -------------------------------------------------------------------------------- /prosody/modules/mod_mam/mamprefs.lib.lua: -------------------------------------------------------------------------------- 1 | -- XEP-0313: Message Archive Management for Prosody 2 | -- Copyright (C) 2011-2013 Kim Alvefur 3 | -- 4 | -- This file is MIT/X11 licensed. 5 | 6 | local global_default_policy = module:get_option("default_archive_policy", false); 7 | 8 | do 9 | local prefs_format = { 10 | [false] = "roster", 11 | -- default ::= true | false | "roster" 12 | -- true = always, false = never, nil = global default 13 | ["romeo@montague.net"] = true, -- always 14 | ["montague@montague.net"] = false, -- newer 15 | }; 16 | end 17 | 18 | local sessions = hosts[module.host].sessions; 19 | local prefs = module:open_store("archive2_prefs"); 20 | 21 | local function get_prefs(user) 22 | local user_sessions = sessions[user]; 23 | local user_prefs = user_sessions and user_sessions.archive_prefs 24 | if not user_prefs and user_sessions then 25 | user_prefs = prefs:get(user); 26 | user_sessions.archive_prefs = user_prefs; 27 | end 28 | return user_prefs or { [false] = global_default_policy }; 29 | end 30 | local function set_prefs(user, user_prefs) 31 | local user_sessions = sessions[user]; 32 | if user_sessions then 33 | user_sessions.archive_prefs = user_prefs; 34 | end 35 | return prefs:set(user, user_prefs); 36 | end 37 | 38 | return { 39 | get = get_prefs, 40 | set = set_prefs, 41 | } 42 | -------------------------------------------------------------------------------- /prosody/modules/mod_mam/mamprefsxml.lib.lua: -------------------------------------------------------------------------------- 1 | -- XEP-0313: Message Archive Management for Prosody 2 | -- Copyright (C) 2011-2013 Kim Alvefur 3 | -- 4 | -- This file is MIT/X11 licensed. 
5 | 6 | local st = require"util.stanza"; 7 | local xmlns_mam = "urn:xmpp:mam:0"; 8 | 9 | local global_default_policy = module:get_option("default_archive_policy", false); 10 | 11 | local default_attrs = { 12 | always = true, [true] = "always", 13 | never = false, [false] = "never", 14 | roster = "roster", 15 | } 16 | 17 | local function tostanza(prefs) 18 | local default = prefs[false]; 19 | default = default ~= nil and default_attrs[default] or global_default_policy; 20 | local prefstanza = st.stanza("prefs", { xmlns = xmlns_mam, default = default }); 21 | local always = st.stanza("always"); 22 | local never = st.stanza("never"); 23 | for jid, choice in pairs(prefs) do 24 | if jid then 25 | (choice and always or never):tag("jid"):text(jid):up(); 26 | end 27 | end 28 | prefstanza:add_child(always):add_child(never); 29 | return prefstanza; 30 | end 31 | local function fromstanza(prefstanza) 32 | local prefs = {}; 33 | local default = prefstanza.attr.default; 34 | if default then 35 | prefs[false] = default_attrs[default]; 36 | end 37 | 38 | local always = prefstanza:get_child("always"); 39 | if always then 40 | for rule in always:childtags("jid") do 41 | local jid = rule:get_text(); 42 | prefs[jid] = true; 43 | end 44 | end 45 | 46 | local never = prefstanza:get_child("never"); 47 | if never then 48 | for rule in never:childtags("jid") do 49 | local jid = rule:get_text(); 50 | prefs[jid] = false; 51 | end 52 | end 53 | 54 | return prefs; 55 | end 56 | 57 | return { 58 | tostanza = tostanza; 59 | fromstanza = fromstanza; 60 | } 61 | -------------------------------------------------------------------------------- /prosody/modules/mod_mam/rsm.lib.lua: -------------------------------------------------------------------------------- 1 | local stanza = require"util.stanza".stanza; 2 | local tostring, tonumber = tostring, tonumber; 3 | local type = type; 4 | local pairs = pairs; 5 | 6 | local xmlns_rsm = 'http://jabber.org/protocol/rsm'; 7 | 8 | local element_parsers = {}; 9 | 10 | do 11 | local parsers = element_parsers; 12 | local function xs_int(st) 13 | return tonumber((st:get_text())); 14 | end 15 | local function xs_string(st) 16 | return st:get_text(); 17 | end 18 | 19 | parsers.after = xs_string; 20 | parsers.before = function(st) 21 | local text = st:get_text(); 22 | return text == "" or text; 23 | end; 24 | parsers.max = xs_int; 25 | parsers.index = xs_int; 26 | 27 | parsers.first = function(st) 28 | return { index = tonumber(st.attr.index); st:get_text() }; 29 | end; 30 | parsers.last = xs_string; 31 | parsers.count = xs_int; 32 | end 33 | 34 | local element_generators = setmetatable({ 35 | first = function(st, data) 36 | if type(data) == "table" then 37 | st:tag("first", { index = data.index }):text(data[1]):up(); 38 | else 39 | st:tag("first"):text(tostring(data)):up(); 40 | end 41 | end; 42 | before = function(st, data) 43 | if data == true then 44 | st:tag("before"):up(); 45 | else 46 | st:tag("before"):text(tostring(data)):up(); 47 | end 48 | end 49 | }, { 50 | __index = function(_, name) 51 | return function(st, data) 52 | st:tag(name):text(tostring(data)):up(); 53 | end 54 | end; 55 | }); 56 | 57 | 58 | local function parse(set) 59 | local rs = {}; 60 | for tag in set:childtags() do 61 | local name = tag.name; 62 | local parser = name and element_parsers[name]; 63 | if parser then 64 | rs[name] = parser(tag); 65 | end 66 | end 67 | return rs; 68 | end 69 | 70 | local function generate(t) 71 | local st = stanza("set", { xmlns = xmlns_rsm }); 72 | for k,v in pairs(t) do 73 | if 
element_parsers[k] then 74 | element_generators[k](st, v); 75 | end 76 | end 77 | return st; 78 | end 79 | 80 | local function get(st) 81 | local set = st:get_child("set", xmlns_rsm); 82 | if set and #set.tags > 0 then 83 | return parse(set); 84 | end 85 | end 86 | 87 | return { parse = parse, generate = generate, get = get }; 88 | -------------------------------------------------------------------------------- /prosody/prosody.cfg.lua.jinja: -------------------------------------------------------------------------------- 1 | --# 2 | --# WARNING: 3 | --# This file is under CM control - all manual changes will be removed 4 | --# 5 | 6 | -- Prosody XMPP Server Configuration 7 | 8 | admins = { '{{ admin_user }}' } 9 | 10 | daemonize = true 11 | use_libevent = true; 12 | 13 | plugin_paths = { "/etc/prosody/modules" } 14 | pidfile = "/var/run/prosody/prosody.pid" 15 | 16 | modules_enabled = { 17 | "roster"; -- Allow users to have a roster. Recommended ;) 18 | "saslauth"; -- Authentication for clients and servers. Recommended if you want to log in. 19 | "tls"; -- Add support for secure TLS on c2s/s2s connections 20 | "dialback"; -- s2s dialback support 21 | "disco"; -- Service discovery 22 | "posix"; -- POSIX functionality, sends server to background, enables syslog, etc. 23 | 24 | "private"; -- Private XML storage (for room bookmarks, etc.) 25 | "vcard"; -- Allow users to set vCards 26 | 27 | "compression"; -- Stream compression (requires the lua-zlib package installed) 28 | 29 | "version"; -- Replies to server version requests 30 | "uptime"; -- Report how long server has been running 31 | "time"; -- Let others know the time here on this server 32 | "ping"; -- Replies to XMPP pings with pongs 33 | "pep"; -- Enables users to publish their mood, activity, playing music and more 34 | "register"; -- Allow users to register on this server using a client and change passwords 35 | 36 | "admin_adhoc"; -- Allows administration via an XMPP client that supports ad-hoc commands 37 | 38 | "auth_ldap"; -- Ldap authentificator 39 | "groups"; -- Shared roster support 40 | "watchregistrations"; -- Alert admins of registrations 41 | "proxy65"; 42 | "mam"; 43 | "smacks"; 44 | "carbons"; 45 | "offline"; -- Store offline messages 46 | "lastactivity"; 47 | "pubsub"; 48 | "csi"; -- Mobile clients states 49 | } 50 | 51 | modules_disabled = { 52 | } 53 | 54 | groups_file = "/etc/prosody/sharedgroups.ini" 55 | default_archive_policy = "roster"; 56 | 57 | allow_registration = false; 58 | 59 | c2s_require_encryption = true 60 | s2s_secure_auth = true 61 | --s2s_secure_domains = { "jabber.org" } 62 | s2s_insecure_domains = { 63 | } 64 | 65 | -- Listen default jabber port & icq port 66 | c2s_ports = { 5222, 5190 } 67 | 68 | authentication = "ldap" 69 | 70 | ldap_server = "{{ ldap_server }}" 71 | ldap_tls = true 72 | ldap_base = "{{ ldap_userbase }}" 73 | ldap_filter = "{{ ldap_filter }}" 74 | 75 | storage = "sql2" 76 | 77 | -- Available: SQLite3, MySQL, PostgreSQL 78 | {% if db_type == 'SQLite3' -%} 79 | sql = { driver = "{{ db_type }}", database = "{{ db_path }}" } 80 | {%- else %} 81 | sql = { driver = "{{ db_type }}", database = "{{ db_name }}", username = "{{ db_user }}", password = "{{ db_password }}", host = "{{ db_path }}" } 82 | {%- endif %} 83 | 84 | log = { 85 | --debug = "/var/log/prosody/prosody.dbg"; 86 | info = "/var/log/prosody/prosody.log"; 87 | error = "/var/log/prosody/prosody.err"; 88 | -- Syslog: 89 | { levels = { "error" }; to = "syslog"; }; 90 | } 91 | 92 | Include "conf.d/*.cfg.lua" 93 | 
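-- Note (illustrative sketch by the editor, values are assumptions and not part of the
-- original template): with db_type set to "PostgreSQL" the sql line above renders roughly as
--   sql = { driver = "PostgreSQL", database = "prosody", username = "prosody", password = "secret", host = "localhost" }
-- whereas the SQLite3 branch only needs the driver and database fields.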
-------------------------------------------------------------------------------- /prosody/sharedgroups.ini.jinja: -------------------------------------------------------------------------------- 1 | {% for group, users in salt['pillar.get']('prosody:groups', {})|dictsort -%} 2 | [{{ group }}] 3 | {%- for login, name in users|dictsort %} 4 | {{ login }}={{ name.decode('unicode-escape') }} 5 | {%- endfor %} 6 | {%- endfor %} 7 | -------------------------------------------------------------------------------- /prosody/vhost.cfg.lua.jinja: -------------------------------------------------------------------------------- 1 | --# 2 | --# WARNING: 3 | --# This file is under CM control - all manual changes will be removed 4 | --# 5 | 6 | -- Section for {{ host }} 7 | 8 | VirtualHost "{{ host }}" 9 | enabled = true 10 | ssl = { 11 | key = "{{ ssl_key }}"; 12 | certificate = "{{ ssl_cert }}"; 13 | } 14 | 15 | Component "proxy.{{ host }}" "proxy65" 16 | Component "conference.{{ host }}" "muc" 17 | modules_enabled = { 18 | "mam_muc"; 19 | } 20 | -------------------------------------------------------------------------------- /python/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Python virtualenv utils 3 | # 4 | 5 | python-dev: 6 | pkg.installed 7 | 8 | python-virtualenv: 9 | pkg.installed: 10 | - require: 11 | - pkg: python-dev 12 | -------------------------------------------------------------------------------- /rar/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # RAR unpacker 3 | # 4 | 5 | rar-packages: 6 | pkg.installed: 7 | - pkgs: 8 | - unrar-free 9 | -------------------------------------------------------------------------------- /razerhydra/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Razer Hydra controller drivers 3 | # 4 | 5 | /etc/udev/rules.d/99-sixense-libusb.rules: 6 | file.managed: 7 | - source: salt://razerhydra/udev.rules 8 | - user: root 9 | - group: root 10 | - mode: 644 11 | -------------------------------------------------------------------------------- /razerhydra/udev.rules: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | ATTRS{manufacturer}=="Razer", ATTRS{product}=="Razer Hydra", MODE="0666" 7 | -------------------------------------------------------------------------------- /rstream/config.ini.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # {{ stream }} configuration file 7 | [rstream] 8 | {%- for var, val in salt['pillar.get']('rstream:streams:%s'|format(stream), {})|dictsort %} 9 | {%- if var in ['stream-from', 'stream-to', 'audio', 'output-dir', 'file-name', 'duration-limit', 'reset-url', 'log-file'] %} 10 | {{ var }}: {{ val }} 11 | {%- endif %} 12 | {%- endfor %} 13 | -------------------------------------------------------------------------------- /rstream/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process rstream-{{ subname }} matching "rstream-{{ subname }}.ini" 7 | group service 8 | group rstream 9 | group subname 10 | start program = 
"/usr/sbin/service rstream-{{ subname }} start" 11 | stop program = "/usr/sbin/service rstream-{{ subname }} stop" 12 | if 5 restarts with 5 cycles then timeout 13 | 14 | check program rstream-{{ subname }}-archive with path "/srv/bin/rstream-check-{{ subname }}-archive.sh" 15 | group rstream 16 | group subname 17 | if status != 0 then alert 18 | -------------------------------------------------------------------------------- /rstream/rstream_freespace.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # RStream archive cleaner if we have < 200GB in /srv partition 7 | 8 | export PATH="/bin:/usr/bin:/sbin:/usr/sbin" 9 | export ARCHIVE="/srv/streams/archive" 10 | 11 | # Find oldest directories in the archive 12 | # If free space < 150GB - we need to clean some old streams 13 | find "$ARCHIVE" -mindepth 1 -type d -printf '%T@\t%p\n' | sort -g | head -n6 | cut -f2- \ 14 | | while IFS= read -r item 15 | do 16 | available_space=$(df --block-size=1G -P "${ARCHIVE}" | tail -1 | tr -s ' ' | cut -d' ' -f4) 17 | if [ "${available_space}" -lt "150" ] 18 | then 19 | echo "RSTREAM: Available space in $ARCHIVE: ${available_space}GB, we need to remove old data '$item'" 20 | rm -rf "$item" 21 | else 22 | echo "RSTREAM: Available space in $ARCHIVE: ${available_space}GB, it's ok" 23 | break 24 | fi 25 | done 26 | -------------------------------------------------------------------------------- /rstream/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | description "Stream server starting" 7 | author "Rabit " 8 | 9 | start on net-device-up 10 | stop on runlevel [!2345] 11 | 12 | kill signal INT 13 | 14 | respawn 15 | respawn limit 10 5 16 | 17 | setuid {{ run_user }} 18 | setgid {{ run_group }} 19 | 20 | {% if salt['pillar.get']('rstream:streams:%s:debug'|format(stream), False) -%} 21 | env GST_DEBUG=*:4 22 | export GST_DEBUG 23 | 24 | {% endif -%} 25 | exec /usr/local/bin/rstream.py -v --config-file /srv/streams/conf/rstream-{{ stream }}.ini 26 | -------------------------------------------------------------------------------- /rsync/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Rsync synchronisation utility 3 | # 4 | 5 | rsync: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /rxvt-unicode/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # RXVT Terminal emulator + terminus font 3 | # 4 | 5 | rxvt-unicode-256color: 6 | pkg.installed 7 | 8 | xfonts-terminus: 9 | pkg.installed 10 | 11 | x-terminal-emulator: 12 | alternatives.install: 13 | - link: /usr/bin/x-terminal-emulator 14 | - path: /usr/bin/urxvt 15 | - priority: 100 16 | - require: 17 | - pkg: rxvt-unicode-256color 18 | -------------------------------------------------------------------------------- /s3ql/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # S3QL - aws s3 fuse file system 3 | # 4 | 5 | s3ql-repo: 6 | pkgrepo.managed: 7 | - ppa: nikratio/s3ql 8 | - required_in: 9 | - pkg: s3ql 10 | 11 | s3ql: 12 | pkg.installed 13 | 14 | {% for name, args in salt['pillar.get']('s3ql:%s'|format(grains['id']), {}).items() -%} 15 | {{ args.auth.file 
}}: 16 | file.managed: 17 | - user: {{ args.user }} 18 | - group: {{ args.group }} 19 | - mode: 600 20 | - contents: | 21 | [{{ name }}] 22 | storage-url: {{ args.url }} 23 | backend-login: {{ args.auth.login }} 24 | backend-password: {{ args.auth.password }} 25 | - require: 26 | - pkg: s3ql 27 | 28 | {{ args.cachedir }}: 29 | file.directory: 30 | - user: {{ args.user }} 31 | - group: {{ args.group }} 32 | - mode: 700 33 | - makedirs: True 34 | - require: 35 | - pkg: s3ql 36 | 37 | /etc/init/s3ql-{{ name }}.conf: 38 | file.managed: 39 | - source: salt://s3ql/upstart.conf.jinja 40 | - template: jinja 41 | - user: root 42 | - group: root 43 | - mode: 644 44 | - context: 45 | required_for: {{ args.required_for|default(None) }} 46 | runuser: {{ args.user }} 47 | rungroup: {{ args.group }} 48 | url: {{ args.url }} 49 | dir: {{ args.dir }} 50 | cachedir: {{ args.cachedir }} 51 | authfile: {{ args.auth.file }} 52 | - require: 53 | - pkg: s3ql 54 | - file: {{ args.auth.file }} 55 | - file: {{ args.cachedir }} 56 | - require_in: 57 | - service: s3ql-{{ name }} 58 | 59 | s3ql-{{ name }}: 60 | service.running 61 | {% endfor %} 62 | -------------------------------------------------------------------------------- /s3ql/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | description "S3QL S3 filesystem upstart script" 7 | author "Rabit " 8 | 9 | start on (local-filesystems and net-device-up IFACE!=lo{% if required_for != None %} or starting {{ required_for }}{% endif %}) 10 | stop on shutdown{% if required_for != None %} or stopped {{ required_for }}{% endif %} 11 | 12 | kill timeout 300 13 | expect stop 14 | 15 | respawn 16 | respawn limit 10 5 17 | 18 | setuid {{ runuser }} 19 | setgid {{ rungroup }} 20 | 21 | env url={{ url }} 22 | env dir={{ dir }} 23 | env cachedir={{ cachedir }} 24 | env authfile={{ authfile }} 25 | 26 | pre-start script 27 | mkdir -p "$dir" 28 | fsck.s3ql --cachedir "$cachedir" --authfile "$authfile" --log syslog --batch "$url" 29 | end script 30 | 31 | exec mount.s3ql --allow-root --cachedir "$cachedir" --authfile "$authfile" --cachesize 10485760 --upstart --log syslog "$url" "$dir" 32 | 33 | pre-stop script 34 | umount.s3ql "$dir" 35 | end script 36 | -------------------------------------------------------------------------------- /salt/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Salt client 3 | # 4 | 5 | {% from 'monit/macros.sls' import monit with context %} 6 | 7 | # Salt-Stack repo 8 | salt-stack-repo: 9 | pkgrepo.managed: 10 | - ppa: saltstack/salt 11 | - require_in: 12 | - pkg: salt-minion 13 | 14 | /etc/init/salt-minion.conf: 15 | file.replace: 16 | - pattern: '^#respawn$' 17 | - repl: 'respawn' 18 | 19 | salt-minion: 20 | pkg: 21 | - installed 22 | service: 23 | - running 24 | 25 | /etc/salt/minion: 26 | file.managed: 27 | - source: salt://salt/minion.jinja 28 | - template: jinja 29 | - user: root 30 | - group: root 31 | - mode: 600 32 | 33 | {{ monit('salt') }} 34 | -------------------------------------------------------------------------------- /salt/master.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | interface: 0.0.0.0 7 | publish_port: {{ salt['pillar.get']('saltstack:publish_port', '4505') }} 8 |
ret_port: {{ salt['pillar.get']('saltstack:ret_port', '4506') }} 9 | 10 | user: salt-stack 11 | max_open_files: 100000 12 | worker_threads: 2 13 | 14 | verify_env: True 15 | keep_jobs: 24 16 | 17 | timeout: 5 18 | loop_interval: 60 19 | 20 | job_cache: True 21 | minion_data_cache: True 22 | 23 | open_mode: False 24 | auto_accept: False 25 | 26 | yaml_utf8: True 27 | 28 | client_acl: 29 | www-data: 30 | - test.ping 31 | 32 | state_top: top.sls 33 | renderer: yaml_jinja 34 | state_verbose: False 35 | state_output: changes 36 | 37 | file_roots: 38 | base: 39 | - /srv/salt/ 40 | master: 41 | - /srv/salt/master 42 | - /srv/ssl 43 | 44 | file_ignore_regex: 45 | - '/\.git($|/)' 46 | 47 | pillar_roots: 48 | base: 49 | - /srv/salt/pillar 50 | 51 | log_level: warning 52 | 53 | log_file: file:///dev/log 54 | log_level_logfile: info 55 | -------------------------------------------------------------------------------- /salt/master.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Salt master server 3 | # 4 | 5 | {% from 'monit/macros.sls' import monit with context %} 6 | 7 | include: 8 | - salt 9 | 10 | salt-stack: 11 | user.present: 12 | - home: /srv/salt 13 | - shell: /bin/nologin 14 | - system: True 15 | 16 | salt-master: 17 | pkg: 18 | - installed 19 | service.running: 20 | - watch: 21 | - pkg: salt-master 22 | - file: /etc/salt/master 23 | 24 | /etc/salt/master: 25 | file.managed: 26 | - source: salt://salt/master.jinja 27 | - template: jinja 28 | - user: root 29 | - group: salt-stack 30 | - mode: 640 31 | - require: 32 | - pkg: salt-master 33 | 34 | /etc/salt/pki/master: 35 | file.directory: 36 | - user: salt-stack 37 | - require: 38 | - pkg: salt-master 39 | - recurse: 40 | - user 41 | 42 | {{ monit('salt', '-master') }} 43 | -------------------------------------------------------------------------------- /salt/minion.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | master: {{ salt['config.option']('master') }} 7 | master_port: {{ salt['config.option']('master_port') }} 8 | 9 | id: {{ salt['grains.get']('id', 'noname') }} 10 | 11 | user: root 12 | 13 | cache_jobs: True 14 | 15 | acceptance_wait_time: 10 16 | 17 | loop_interval: 60 18 | 19 | failhard: False 20 | 21 | startup_states: 'highstate' 22 | 23 | renderer: yaml_jinja 24 | 25 | multiprocessing: True 26 | 27 | log_level: warning 28 | 29 | log_file: file:///dev/log 30 | log_level_logfile: info 31 | 32 | tcp_keepalive: True 33 | tcp_keepalive_idle: 300 34 | tcp_keepalive_cnt: -1 35 | tcp_keepalive_intvl: -1 36 | 37 | ext_job_cache: '' 38 | -------------------------------------------------------------------------------- /salt/monit-master.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process salt-master with pidfile /var/run/salt-master.pid 7 | group system 8 | start program = "/usr/sbin/service salt-master start" 9 | stop program = "/usr/sbin/service salt-master stop" 10 | if 5 restarts with 5 cycles then timeout 11 | -------------------------------------------------------------------------------- /salt/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 
4 | # 5 | 6 | check process salt-minion with pidfile /var/run/salt-minion.pid 7 | group system 8 | start program = "/usr/sbin/service salt-minion start" 9 | stop program = "/usr/sbin/service salt-minion stop" 10 | if 5 restarts with 5 cycles then timeout 11 | -------------------------------------------------------------------------------- /schroot/fstab: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # 7 | /proc /proc none rw,bind 0 0 8 | /sys /sys none rw,bind 0 0 9 | /dev/pts /dev/pts none rw,bind 0 0 10 | tmpfs /dev/shm tmpfs defaults 0 0 11 | /srv /srv none rw,bind 0 0 12 | -------------------------------------------------------------------------------- /schroot/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SChroot - simple chroot for user 3 | # 4 | 5 | schroot-pkgs: 6 | pkg.installed: 7 | - pkgs: 8 | - schroot 9 | - debootstrap 10 | 11 | /srv/schroot: 12 | file.directory: 13 | - user: root 14 | - group: root 15 | - mode: 750 16 | - require: 17 | - pkg: schroot-pkgs 18 | 19 | debootstrap --variant=buildd --arch=amd64 raring /srv/schroot/raring64 'http://archive.ubuntu.com/ubuntu/': 20 | cmd.run: 21 | - unless: test -d /srv/schroot/raring64 22 | - require: 23 | - pkg: schroot-pkgs 24 | - file: /srv/schroot 25 | 26 | /etc/schroot/chroot.d/raring64.conf: 27 | file.managed: 28 | - source: salt://schroot/schroot.conf.jinja 29 | - user: root 30 | - group: root 31 | - mode: 644 32 | - template: jinja 33 | - makedirs: True 34 | - require: 35 | - pkg: schroot-pkgs 36 | 37 | /etc/schroot/buildd/fstab: 38 | file.managed: 39 | - source: salt://schroot/fstab 40 | - require: 41 | - pkg: schroot-pkgs 42 | 43 | /etc/pam.d/schroot: 44 | file.managed: 45 | - source: salt://schroot/schroot.pam 46 | - require: 47 | - pkg: schroot-pkgs 48 | -------------------------------------------------------------------------------- /schroot/radeon.asound.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | pcm.!default { 7 | type plug 8 | slave { 9 | # ATI hdmi (get info by aplay -l) 10 | pcm "hw:0,3" 11 | rate 48000 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /schroot/radeon.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [radeon] 7 | description=Ubuntu Raring 13.04 radeon 8 | directory=/srv/schroot/radeon 9 | root-users={% for user in salt['pillar.get']('users', {}) %}{% if salt['pillar.get']('users:%s:admin'|format(user), False) == True %}{{ user }},{% endif %}{% endfor %} 10 | type=directory 11 | users={{ salt['pillar.get']('users', {})|join(',') }} 12 | profile=desktop 13 | -------------------------------------------------------------------------------- /schroot/radeon.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SChroot radeon runplace 3 | # 4 | # You need: 5 | # - Install fglrx to master system and disable it by using intel drivers 6 | # - Init ati in radeon schroot 7 | # # aticonfig --init 8 | # - Copy libs in radeon schroot (maybe bug in ati?) 
9 | # # cp -a /usr/lib/x86_64-linux-gnu/xorg/extra-modules/modules/* /usr/lib/xorg/modules/ 10 | 11 | include: 12 | - schroot 13 | 14 | schroot-radeon-debootstrap: 15 | cmd.run: 16 | - name: debootstrap --variant=minbase --arch=amd64 --include=xserver-xorg,libfontconfig1,libqtcore4,libxcursor1,libxfixes3,libxxf86vm1,xinit,libc6-i386,dkms,lib32gcc1,linux-headers-generic,python,curl,linux-sound-base,alsa-utils,software-properties-common raring /srv/schroot/radeon 'http://archive.ubuntu.com/ubuntu/' 17 | - unless: test -d /srv/schroot/radeon 18 | - require: 19 | - pkg: schroot-pkgs 20 | - file: /srv/schroot 21 | 22 | /etc/schroot/chroot.d/radeon.conf: 23 | file.managed: 24 | - source: salt://schroot/radeon.conf.jinja 25 | - user: root 26 | - group: root 27 | - mode: 644 28 | - template: jinja 29 | - makedirs: True 30 | - require: 31 | - pkg: schroot-pkgs 32 | 33 | schroot-radeon-install-fglrx: 34 | cmd.wait: 35 | - name: mount --bind /dev /srv/schroot/radeon/dev && chroot /srv/schroot/radeon sh -cx 'dpkg --add-architecture i386 ; echo "deb http://archive.ubuntu.com/ubuntu raring main restricted universe multiverse" > /etc/apt/sources.list ; add-apt-repository -y ppa:xorg-edgers/ppa ; apt-get update ; apt-get install -y --no-install-recommends fglrx-13 x2x' ; umount /srv/schroot/radeon/dev 36 | - watch: 37 | - cmd: schroot-radeon-debootstrap 38 | 39 | /srv/schroot/radeon/etc/X11/xorg.conf: 40 | file.managed: 41 | - source: salt://schroot/radeon.xorg.conf 42 | - user: root 43 | - group: root 44 | - mode: 644 45 | - require: 46 | - cmd: schroot-radeon-debootstrap 47 | 48 | /srv/schroot/radeon/etc/X11/Xwrapper.config: 49 | file.managed: 50 | - source: salt://schroot/radeon.xwrapper.conf 51 | - user: root 52 | - group: root 53 | - mode: 644 54 | - require: 55 | - cmd: schroot-radeon-debootstrap 56 | 57 | /srv/schroot/radeon/etc/asound.conf: 58 | file.managed: 59 | - source: salt://schroot/radeon.asound.conf 60 | - user: root 61 | - group: root 62 | - mode: 644 63 | - require: 64 | - cmd: schroot-radeon-debootstrap 65 | -------------------------------------------------------------------------------- /schroot/radeon.xorg.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | Section "ServerLayout" 7 | Identifier "X.org Configured" 8 | Screen 0 "screen0" 0 0 9 | # InputDevice "mouse0" "CorePointer" 10 | # InputDevice "keyboard0" "CoreKeyboard" 11 | EndSection 12 | 13 | Section "Files" 14 | ModulePath "/usr/lib/xorg/modules" 15 | FontPath "/usr/share/fonts/X11/misc" 16 | FontPath "/usr/share/fonts/X11/cyrillic" 17 | FontPath "/usr/share/fonts/X11/100dpi/:unscaled" 18 | FontPath "/usr/share/fonts/X11/75dpi/:unscaled" 19 | FontPath "/usr/share/fonts/X11/Type1" 20 | FontPath "/usr/share/fonts/X11/100dpi" 21 | FontPath "/usr/share/fonts/X11/75dpi" 22 | FontPath "/var/lib/defoma/x-ttcidfont-conf.d/dirs/TrueType" 23 | FontPath "built-ins" 24 | EndSection 25 | 26 | Section "Module" 27 | Load "glx" 28 | EndSection 29 | 30 | Section "Monitor" 31 | Identifier "monitor0" 32 | Option "VendorName" "ATI Proprietary Driver" 33 | Option "ModelName" "Generic Autodetecting Monitor" 34 | Option "DPMS" "true" 35 | EndSection 36 | 37 | Section "Device" 38 | Identifier "device0" 39 | Driver "fglrx" 40 | Option "Capabilities" "0x00000800" 41 | BusID "PCI:1:0:0" 42 | EndSection 43 | 44 | Section "Screen" 45 | Identifier "screen0" 46 | Device "device0" 47 | Monitor "monitor0" 48 | 
DefaultDepth 24 49 | SubSection "Display" 50 | Viewport 0 0 51 | Depth 24 52 | EndSubSection 53 | EndSection 54 | 55 | #Section "InputDevice" 56 | # Identifier "mouse0" 57 | # Driver "evdev" 58 | # Option "Device" "/dev/input/event4" 59 | #EndSection 60 | 61 | #Section "InputDevice" 62 | # Identifier "keyboard0" 63 | # Driver "evdev" 64 | # Option "Device" "/dev/input/event13" 65 | #EndSection 66 | -------------------------------------------------------------------------------- /schroot/radeon.xwrapper.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | allowed_users=anybody 7 | -------------------------------------------------------------------------------- /schroot/schroot.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [raring64] 7 | description=Ubuntu Raring 13.04 8 | directory=/srv/schroot/raring64 9 | root-users={% for user in salt['pillar.get']('users', {}) %}{% if salt['pillar.get']('users:%s:admin'|format(user), False) == True %}{{ user }},{% endif %}{% endfor %} 10 | type=directory 11 | users={{ salt['pillar.get']('users', {})|join(',') }} 12 | profile=buildd 13 | -------------------------------------------------------------------------------- /schroot/schroot.pam: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # 7 | # The PAM configuration file for the sbuild `schroot' service 8 | # 9 | 10 | # ADDED TO PREVENT DOUBLE pam_mount 11 | auth [success=1 default=ignore] pam_unix.so nullok_secure 12 | auth requisite pam_deny.so 13 | auth required pam_permit.so 14 | auth optional pam_cap.so 15 | #@include common-auth 16 | @include common-account 17 | session [default=1] pam_permit.so 18 | session requisite pam_deny.so 19 | session required pam_permit.so 20 | session optional pam_umask.so 21 | session required pam_unix.so 22 | session optional pam_xdg_support.so 23 | session optional pam_ck_connector.so nox11 24 | #@include common-session 25 | -------------------------------------------------------------------------------- /seafile/ccnet_ccnet.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [General] 7 | USER_NAME = FileServer 8 | ID = 5d5c3b589e64322ce6c93af11d814e12e3283523 9 | NAME = FileServer 10 | SERVICE_URL = https://{{ domain }} 11 | 12 | [Network] 13 | PORT = 10001 14 | 15 | [Client] 16 | PORT = 13418 17 | 18 | {% if db_type != 'sqlite3' -%} 19 | [Database] 20 | ENGINE={{ db_type }} 21 | HOST={{ db_path }} 22 | USER={{ db_user }} 23 | PASSWD={{ db_password }} 24 | DB={{ db_name }} 25 | {%- endif %} 26 | 27 | {% if ldap_url != '' -%} 28 | [LDAP] 29 | HOST = {{ ldap_url }} 30 | BASE = {{ ldap_userbase }} 31 | LOGIN_ATTR = {{ ldap_attr }} 32 | FILTER = {{ ldap_filter }} 33 | {%- endif %} 34 | -------------------------------------------------------------------------------- /seafile/conf_seafdav.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [WEBDAV] 7 | 
enabled = true 8 | port = 8080 9 | fastcgi = true 10 | share_name = /webdav 11 | -------------------------------------------------------------------------------- /seafile/logrotate.conf.jinja: -------------------------------------------------------------------------------- 1 | {{ home_dir }}/logs/seaf-server.log 2 | { 3 | daily 4 | missingok 5 | rotate 52 6 | compress 7 | delaycompress 8 | notifempty 9 | sharedscripts 10 | postrotate 11 | [ ! -f {{ home_dir }}/pids/seaf-server.pid ] || kill -USR1 `cat {{ home_dir }}/pids/seaf-server.pid` 12 | endscript 13 | } 14 | 15 | {{ home_dir }}/logs/ccnet.log 16 | { 17 | daily 18 | missingok 19 | rotate 52 20 | compress 21 | delaycompress 22 | notifempty 23 | sharedscripts 24 | postrotate 25 | [ ! -f {{ home_dir }}/pids/ccnet.pid ] || kill -USR1 `cat {{ home_dir }}/pids/ccnet.pid` 26 | endscript 27 | } 28 | -------------------------------------------------------------------------------- /seafile/nginx-site.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | server { 7 | listen 80; 8 | 9 | server_name {{ nginx_server_name }}; 10 | 11 | return 302 https://{{ nginx_server_name }}$request_uri; 12 | } 13 | 14 | server { 15 | listen 443 ssl spdy; 16 | 17 | server_name {{ nginx_server_name }}; 18 | 19 | ssl_certificate {{ ssl_cert }}; 20 | ssl_certificate_key {{ ssl_key }}; 21 | 22 | include /etc/nginx/ssl.conf; 23 | 24 | charset utf8; 25 | limit_conn perip 10; 26 | limit_conn perserver 150; 27 | 28 | client_max_body_size 500m; 29 | client_body_buffer_size 128k; 30 | 31 | access_log /var/log/nginx/access.{{ nginx_conf_name }}.log; 32 | error_log /var/log/nginx/error.{{ nginx_conf_name }}.log; 33 | 34 | location / { 35 | fastcgi_intercept_errors on; 36 | fastcgi_pass 127.0.0.1:8000; 37 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 38 | fastcgi_param PATH_INFO $fastcgi_script_name; 39 | 40 | fastcgi_param SERVER_PROTOCOL $server_protocol; 41 | fastcgi_param QUERY_STRING $query_string; 42 | fastcgi_param REQUEST_METHOD $request_method; 43 | fastcgi_param CONTENT_TYPE $content_type; 44 | fastcgi_param CONTENT_LENGTH $content_length; 45 | fastcgi_param SERVER_ADDR $server_addr; 46 | fastcgi_param SERVER_PORT $server_port; 47 | fastcgi_param SERVER_NAME $server_name; 48 | fastcgi_param REMOTE_ADDR $remote_addr; 49 | 50 | fastcgi_param HTTPS on; 51 | fastcgi_param HTTP_SCHEME https; 52 | } 53 | location /seafhttp { 54 | rewrite ^/seafhttp(.*)$ $1 break; 55 | proxy_pass http://127.0.0.1:10003; 56 | client_max_body_size 0; 57 | 58 | proxy_connect_timeout 36000s; 59 | proxy_read_timeout 36000s; 60 | } 61 | location /media { 62 | root {{ home_dir }}/seafile-server-latest/seahub; 63 | } 64 | location /webdav { 65 | fastcgi_pass 127.0.0.1:8080; 66 | fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name; 67 | fastcgi_param PATH_INFO $fastcgi_script_name; 68 | 69 | fastcgi_param SERVER_PROTOCOL $server_protocol; 70 | fastcgi_param QUERY_STRING $query_string; 71 | fastcgi_param REQUEST_METHOD $request_method; 72 | fastcgi_param CONTENT_TYPE $content_type; 73 | fastcgi_param CONTENT_LENGTH $content_length; 74 | fastcgi_param SERVER_ADDR $server_addr; 75 | fastcgi_param SERVER_PORT $server_port; 76 | fastcgi_param SERVER_NAME $server_name; 77 | fastcgi_param REMOTE_ADDR $remote_addr; 78 | 79 | fastcgi_param HTTPS on; 80 | } 81 | } 82 | 
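# The upstream ports above must stay in sync with the other seafile templates in
# this repo: seahub is started in FastCGI mode on port 8000 by upstart.conf.jinja,
# the seafile file server listens on 10003 ([fileserver] in seafile.conf.jinja),
# and /webdav is served by the seafdav FastCGI listener on port 8080
# (conf_seafdav.conf). Changing a port in one of those files requires the matching
# change here.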
-------------------------------------------------------------------------------- /seafile/seafile.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | [quota] 7 | default = 20 8 | 9 | [network] 10 | port=10002 11 | 12 | [fileserver] 13 | port=10003 14 | 15 | {% if db_type != 'sqlite3' -%} 16 | [Database] 17 | type={{ db_type }} 18 | host={{ db_path }} 19 | user={{ db_user }} 20 | password={{ db_password }} 21 | db_name={{ db_name }} 22 | {%- endif %} 23 | -------------------------------------------------------------------------------- /seafile/seahub_settings.py.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | SECRET_KEY = "{{ secret_key }}" 7 | FILE_SERVER_ROOT = 'https://{{ domain }}/seafhttp' 8 | 9 | {% if db_type == 'pgsql' -%} 10 | DATABASES = { 11 | 'default': { 12 | 'ENGINE' : 'django.db.backends.postgresql_psycopg2', 13 | 'NAME' : '{{ db_name }}', 14 | 'USER' : '{{ db_user }}', 15 | 'PASSWORD' : '{{ db_password }}', 16 | 'HOST' : '{{ db_path }}', 17 | } 18 | } 19 | {%- endif %} 20 | 21 | SITE_BASE = 'https://{{ domain }}/' 22 | SITE_NAME = '{{ domain }}' 23 | -------------------------------------------------------------------------------- /seafile/upstart.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | description "SeaFile server startup script" 7 | author "Rabit " 8 | 9 | start on net-device-up and local-filesystems 10 | stop on runlevel [!2345] 11 | 12 | kill signal INT 13 | 14 | setuid seafile 15 | setgid seafile 16 | 17 | pre-start script 18 | {{ home_dir }}/seafile-server-latest/seafile.sh start 19 | {{ home_dir }}/seafile-server-latest/seahub.sh start-fastcgi 8000 20 | end script 21 | 22 | post-stop script 23 | {{ home_dir }}/seafile-server-latest/seafile.sh stop 24 | {{ home_dir }}/seafile-server-latest/seahub.sh stop 25 | end script 26 | -------------------------------------------------------------------------------- /sensors/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Sensors temperature etc 3 | # 4 | 5 | lm-sensors: 6 | pkg.installed 7 | 8 | libsensors4: 9 | pkg.installed 10 | 11 | yes "yes" | sensors-detect: 12 | cmd.wait: 13 | - watch: 14 | - pkg: lm-sensors 15 | -------------------------------------------------------------------------------- /simulator/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Simulator USB interface 3 | # 4 | 5 | /etc/udev/rules.d/60-simulator-usb.rules: 6 | file.managed: 7 | - source: salt://simulator/udev.rules 8 | - user: root 9 | - group: root 10 | - mode: 644 11 | -------------------------------------------------------------------------------- /simulator/udev.rules: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # AFPD Dongle 7 | ATTRS{idVendor}=="127f", ATTRS{idProduct}=="e007", MODE="0666" 8 | ATTRS{idVendor}=="127f", ATTRS{idProduct}=="e008", MODE="0666" 9 | # ACE RC Generic Dongle 10 | ATTRS{idVendor}=="0c45", ATTRS{idProduct}=="70bd", 
MODE="0666" 11 | -------------------------------------------------------------------------------- /skype/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Skype - ip videocall 3 | # 4 | 5 | include: 6 | - x86 7 | 8 | canonical-partner-repo: 9 | pkgrepo.managed: 10 | - name: deb http://archive.canonical.com/ubuntu {{ grains['oscodename'] }} partner 11 | - require: 12 | - cmd: arch 13 | - require_in: 14 | - pkg: skype 15 | 16 | skype: 17 | pkg.installed 18 | -------------------------------------------------------------------------------- /smart/default: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | start_smartd=yes 7 | smartd_opts="--interval=1800" 8 | -------------------------------------------------------------------------------- /smart/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SMART Disk monitoring 3 | # 4 | 5 | {% if salt['additional.sd_list']() %} 6 | smartmontools: 7 | pkg: 8 | - installed 9 | service.running: 10 | - require: 11 | - pkg: smartmontools 12 | - watch: 13 | - file: /etc/default/smartmontools 14 | - file: /etc/smartd.conf 15 | 16 | {% for device in salt['additional.sd_list']() %} 17 | smartctl -s on /dev/{{ device }}: 18 | cmd.run: 19 | - onlyif: "smartctl -i /dev/{{ device }} | grep -q 'SMART support is: Disabled'" 20 | - require: 21 | - pkg: smartmontools 22 | {% endfor %} 23 | 24 | /etc/default/smartmontools: 25 | file.managed: 26 | - source: salt://smart/default 27 | - user: root 28 | - group: root 29 | - mode: 644 30 | - require: 31 | - pkg: smartmontools 32 | 33 | /etc/smartd.conf: 34 | file.managed: 35 | - source: salt://smart/smartd.conf.jinja 36 | - template: jinja 37 | - user: root 38 | - group: root 39 | - mode: 644 40 | - require: 41 | - pkg: smartmontools 42 | {% endif %} 43 | -------------------------------------------------------------------------------- /smart/smartd.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | {% for device in salt['additional.sd_list']() %} 7 | /dev/{{ device }} -n standby -H -l error -l selftest -f -s (S/../.././04|L/../../6/05) -m root -M daily 8 | {% endfor %} 9 | -------------------------------------------------------------------------------- /smplayer/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SMPlayer best frontend for mplayer 3 | # 4 | 5 | include: 6 | - libva 7 | 8 | smplayer: 9 | pkg.installed 10 | -------------------------------------------------------------------------------- /squid/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SQUID proxy 3 | # 4 | 5 | squid: 6 | pkg.installed 7 | 8 | squid3: 9 | service.running: 10 | - require: 11 | - pkg: squid 12 | 13 | /etc/squid3/squid.conf: 14 | file.managed: 15 | - source: salt://squid/squid.conf.jinja 16 | - template: jinja 17 | - user: root 18 | - group: root 19 | - mode: 644 20 | - require: 21 | - pkg: squid 22 | - watch_in: 23 | - service: squid3 24 | -------------------------------------------------------------------------------- /squid/squid.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # 
This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | cache deny all 7 | #acl manager proto cache_object 8 | #acl localhost src 127.0.0.1/32 ::1 9 | #acl to_localhost dst 127.0.0.0/24 0.0.0.0/32 ::1 10 | 11 | acl localnet src 192.168.0.0/20 # RFC1918 possible internal network 12 | 13 | acl SSL_ports port 443 14 | acl Safe_ports port 80 # http 15 | acl Safe_ports port 21 # ftp 16 | acl Safe_ports port 443 # https 17 | acl Safe_ports port 70 # gopher 18 | acl Safe_ports port 210 # wais 19 | acl Safe_ports port 1025-65535 # unregistered ports 20 | acl Safe_ports port 280 # http-mgmt 21 | acl Safe_ports port 488 # gss-http 22 | acl Safe_ports port 591 # filemaker 23 | acl Safe_ports port 777 # multiling http 24 | 25 | acl CONNECT method CONNECT 26 | 27 | http_access allow manager localhost 28 | http_access deny manager 29 | 30 | http_access deny !Safe_ports 31 | 32 | http_access deny CONNECT !SSL_ports 33 | http_access allow localnet 34 | http_access deny all 35 | 36 | http_port 3128 37 | 38 | coredump_dir /var/spool/squid3 39 | 40 | refresh_pattern ^ftp: 1440 20% 10080 41 | refresh_pattern ^gopher: 1440 0% 1440 42 | refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 43 | refresh_pattern (Release|Packages(.gz)*)$ 0 20% 2880 44 | refresh_pattern . 0 20% 4320 45 | 46 | deny_info TCP_RESET acl 47 | -------------------------------------------------------------------------------- /ssh/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SSH client & server 3 | # 4 | 5 | {% from 'monit/macros.sls' import monit with context %} 6 | 7 | ssh: 8 | pkg: 9 | - installed 10 | service.running: 11 | - require: 12 | - pkg: ssh 13 | 14 | sshfs: 15 | pkg.installed 16 | 17 | /etc/ssh/sshd_config: 18 | file.managed: 19 | - source: salt://ssh/sshd_config 20 | - user: root 21 | - group: root 22 | - mode: 644 23 | - template: jinja 24 | - makedirs: True 25 | - require: 26 | - pkg: ssh 27 | - watch_in: 28 | - service: ssh 29 | 30 | sftpusers: 31 | group.present 32 | 33 | sftp: 34 | group: 35 | - present 36 | user.present: 37 | - gid_from_name: True 38 | - system: True 39 | - createhome: False 40 | - groups: 41 | - sftpusers 42 | - require: 43 | - group: sftp 44 | - group: sftpusers 45 | 46 | /srv/sftp: 47 | file.directory: 48 | - user: root 49 | - group: root 50 | - mode: 755 51 | - makedirs: True 52 | - require: 53 | - pkg: ssh 54 | 55 | /srv/sftp/sftp: 56 | file.directory: 57 | - user: root 58 | - group: root 59 | - mode: 755 60 | 61 | /home/sftp: 62 | file.directory: 63 | - user: sftp 64 | - group: sftp 65 | - mode: 750 66 | - require: 67 | - user: sftp 68 | 69 | {{ monit('ssh') }} 70 | -------------------------------------------------------------------------------- /ssh/monit.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | check process sshd with pidfile /var/run/sshd.pid 7 | group system 8 | group sshd 9 | start program = "/usr/sbin/service ssh start" 10 | stop program = "/usr/sbin/service ssh stop" 11 | if failed host 127.0.0.1 port {{ salt['pillar.get']('net:hosts:%s:ssh_port'|format(grains['id']), '22') }} with proto ssh then restart 12 | if 5 restarts with 5 cycles then timeout 13 | depend on sshd_bin 14 | depend on sftp_bin 15 | depend on sshd_rc 16 | depend on sshd_rsa_key 17 | depend on sshd_dsa_key 18 | 19 | check file sshd_bin with path /usr/sbin/sshd 20 | group sshd 21 | include 
/etc/monit/templates/rootbin 22 | 23 | check file sftp_bin with path /usr/lib/openssh/sftp-server 24 | group sshd 25 | include /etc/monit/templates/rootbin 26 | 27 | check file sshd_rsa_key with path /etc/ssh/ssh_host_rsa_key 28 | group sshd 29 | include /etc/monit/templates/rootstrict 30 | 31 | check file sshd_dsa_key with path /etc/ssh/ssh_host_dsa_key 32 | group sshd 33 | include /etc/monit/templates/rootstrict 34 | 35 | check file sshd_rc with path /etc/ssh/sshd_config 36 | group sshd 37 | include /etc/monit/templates/rootrc 38 | -------------------------------------------------------------------------------- /ssh/sshd_config: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | Port {{ salt['pillar.get']('net:hosts:%s:ssh_port'|format(grains['id']), '22') }} 7 | Protocol 2 8 | 9 | HostKey /etc/ssh/ssh_host_rsa_key 10 | HostKey /etc/ssh/ssh_host_dsa_key 11 | HostKey /etc/ssh/ssh_host_ecdsa_key 12 | 13 | UsePrivilegeSeparation yes 14 | 15 | KeyRegenerationInterval 3600 16 | ServerKeyBits 768 17 | 18 | SyslogFacility AUTH 19 | LogLevel INFO 20 | 21 | LoginGraceTime 120 22 | PermitRootLogin no 23 | StrictModes yes 24 | 25 | RSAAuthentication yes 26 | PubkeyAuthentication yes 27 | 28 | IgnoreRhosts yes 29 | RhostsRSAAuthentication no 30 | HostbasedAuthentication no 31 | 32 | PermitEmptyPasswords no 33 | 34 | ChallengeResponseAuthentication no 35 | 36 | PasswordAuthentication {{ 'no' if salt['pillar.get']('net:hosts:%s:access_from_outside'|format(grains['id']), False) else 'yes' }} 37 | 38 | X11Forwarding no 39 | X11DisplayOffset 10 40 | PrintMotd no 41 | PrintLastLog yes 42 | TCPKeepAlive yes 43 | 44 | #AcceptEnv LANG LC_* 45 | 46 | UsePAM yes 47 | 48 | Subsystem sftp internal-sftp 49 | 50 | Match Group sftpusers 51 | ChrootDirectory /srv/sftp/%u 52 | ForceCommand internal-sftp 53 | -------------------------------------------------------------------------------- /ssmtp/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # SSMTP local mail sender 3 | # 4 | 5 | ssmtp: 6 | pkg.installed 7 | 8 | /etc/ssmtp/ssmtp.conf: 9 | file.managed: 10 | - source: salt://ssmtp/ssmtp.conf.jinja 11 | - template: jinja 12 | - user: root 13 | - group: mail 14 | - mode: 640 15 | - require: 16 | - pkg: ssmtp 17 | 18 | /etc/ssmtp/revaliases: 19 | file.managed: 20 | - source: salt://ssmtp/revaliases.jinja 21 | - template: jinja 22 | - user: root 23 | - group: mail 24 | - mode: 644 25 | - require: 26 | - pkg: ssmtp 27 | -------------------------------------------------------------------------------- /ssmtp/revaliases.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | root:{{ salt['pillar.get']('mail:admin', 'root') }}:{{ salt['pillar.get']('mail:server', 'localhost') }}:{{ salt['pillar.get']('mail:port', '25') }} 7 | -------------------------------------------------------------------------------- /ssmtp/ssmtp.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | root={{ salt['pillar.get']('mail:admin', 'root') }} 7 | mailhub={{ salt['pillar.get']('mail:server', 'localhost') }}:{{ salt['pillar.get']('mail:port', '25') }} 8 | UseTLS=YES 9 | 
UseSTARTTLS=YES 10 | AuthMethod=LOGIN 11 | AuthUser={{ salt['pillar.get']('mail:account', 'none') }} 12 | AuthPass={{ salt['pillar.get']('mail:password', 'none') }} 13 | FromLineOverride=YES 14 | -------------------------------------------------------------------------------- /steam/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Steam client 3 | # 4 | 5 | include: 6 | - x86 7 | 8 | steam-repo: 9 | pkgrepo.managed: 10 | - name: deb http://repo.steampowered.com/steam precise steam 11 | - keyid: B05498B7 12 | - keyserver: keyserver.ubuntu.com 13 | - require: 14 | - cmd: arch 15 | 16 | steam: 17 | pkg.installed: 18 | - require: 19 | - pkgrepo: steam-repo 20 | 21 | steam-launcher: 22 | pkg.installed: 23 | - require: 24 | - pkgrepo: steam-repo 25 | -------------------------------------------------------------------------------- /sysstat/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Sysstat - system statistic tools (like iostat, pidstat, mpstat...) 3 | # 4 | 5 | sysstat: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /thunderbird/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Thunderbird - mail client 3 | # 4 | 5 | thunderbird: 6 | pkg.installed 7 | -------------------------------------------------------------------------------- /tinyproxy/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Tinyproxy - used to access hidden networks 3 | # 4 | 5 | tinyproxy: 6 | pkg: 7 | - installed 8 | service.running: 9 | - watch: 10 | - file: /etc/tinyproxy.conf 11 | - require: 12 | - pkg: tinyproxy 13 | 14 | /etc/tinyproxy.conf: 15 | file.managed: 16 | - source: salt://tinyproxy/tinyproxy.conf.jinja 17 | - template: jinja 18 | - user: root 19 | - group: root 20 | - mode: 644 21 | - require: 22 | - pkg: tinyproxy 23 | -------------------------------------------------------------------------------- /tinyproxy/tinyproxy.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | User nobody 7 | Group nogroup 8 | 9 | Listen {% salt['pillar.get']('net:hosts:%s:ip'|format(grains['id']), 'localhost') %} 10 | Port 80 11 | 12 | Timeout 600 13 | DefaultErrorFile "/usr/share/tinyproxy/default.html" 14 | StatFile "/usr/share/tinyproxy/stats.html" 15 | Logfile "/var/log/tinyproxy/tinyproxy.log" 16 | LogLevel Info 17 | PidFile "/var/run/tinyproxy/tinyproxy.pid" 18 | 19 | MaxClients 100 20 | MinSpareServers 5 21 | MaxSpareServers 20 22 | StartServers 10 23 | MaxRequestsPerChild 0 24 | 25 | Allow 127.0.0.1 26 | Allow 192.168.0.0/16 27 | 28 | ViaProxyName "tinyproxy" 29 | ConnectPort 443 30 | ConnectPort 563 31 | 32 | upstream 127.0.0.1:4444 ".i2p" 33 | upstream 127.0.0.1:8124 ".onion" 34 | -------------------------------------------------------------------------------- /tmux/disk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | io_line_count=$((`iostat -d -x -m | wc -l`+1)); 7 | iostat -d -x -k 1 2 -z | tail -n +$io_line_count | grep -v "Device" | awk 'BEGIN{rsum=0; wsum=0}{ rsum+=$6; wsum+=$7} END {printf "r%3dK w%3dK", rsum, wsum}' 8 | 
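# Called from the tmux status line (see status-right in tmux.conf). It sums
# iostat's per-device read/write KB/s columns over a 1-second sample (the first,
# since-boot report is skipped via the line count) and prints a compact figure.
# Run by hand, e.g.
#   sh /etc/tmux/disk.sh
# it should print something like "r 12K w  3K", depending on current disk load.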
-------------------------------------------------------------------------------- /tmux/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # TMux - advanced terminal multiplexer 3 | # 4 | 5 | include: 6 | - sysstat 7 | 8 | tmux: 9 | pkg.installed 10 | 11 | /etc/tmux.conf: 12 | file.managed: 13 | - source: salt://tmux/tmux.conf 14 | - template: jinja 15 | - user: root 16 | - group: root 17 | - mode: 644 18 | - require: 19 | - pkg: tmux 20 | 21 | /etc/tmux: 22 | file.directory: 23 | - user: root 24 | - group: root 25 | - mode: 755 26 | - require: 27 | - file: /etc/tmux.conf 28 | 29 | /etc/tmux/disk.sh: 30 | file.managed: 31 | - source: salt://tmux/disk.sh 32 | - user: root 33 | - group: root 34 | - mode: 755 35 | - require: 36 | - file: /etc/tmux 37 | - pkg: sysstat 38 | 39 | /etc/tmux/mem.sh: 40 | file.managed: 41 | - source: salt://tmux/mem.sh 42 | - user: root 43 | - group: root 44 | - mode: 755 45 | - require: 46 | - file: /etc/tmux 47 | 48 | /etc/tmux/net.sh: 49 | file.managed: 50 | - source: salt://tmux/net.sh.jinja 51 | - template: jinja 52 | - user: root 53 | - group: root 54 | - mode: 755 55 | - require: 56 | - file: /etc/tmux 57 | -------------------------------------------------------------------------------- /tmux/mem.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | FREE_MEM=`LANG=C free | awk '/buffers\/cache/{print (100 - ($4/($3+$4) * 100.0));}'` 7 | printf "M %.f%%" $FREE_MEM 8 | -------------------------------------------------------------------------------- /tmux/net.sh.jinja: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | IF=$1 7 | if [ -z "$IF" ] 8 | then 9 | {%- set default_ip = salt['grains.get']('fqdn_ip4', ['127.0.0.1'])|first() %} 10 | {%- for int, addresses in salt['grains.get']('ip_interfaces', {'lo':'127.0.0.1'}).items() if default_ip in addresses %} 11 | IF="{{ int }}" 12 | {%- endfor %} 13 | fi 14 | 15 | R1=`cat /sys/class/net/$IF/statistics/rx_bytes` 16 | T1=`cat /sys/class/net/$IF/statistics/tx_bytes` 17 | sleep 1 18 | R2=`cat /sys/class/net/$IF/statistics/rx_bytes` 19 | T2=`cat /sys/class/net/$IF/statistics/tx_bytes` 20 | 21 | R=$(( $R2 - $R1 )) 22 | T=$(( $T2 - $T1 )) 23 | MODR="B" 24 | MODT="B" 25 | 26 | if [ $R -gt 1024 ] 27 | then 28 | MODR="K" 29 | R=$(( $R / 1024 )) 30 | 31 | if [ $R -gt 1024 ] 32 | then 33 | MODR="M" 34 | R=$(( $R / 1024 )) 35 | fi 36 | fi 37 | 38 | if [ $T -gt 1024 ] 39 | then 40 | MODT="K" 41 | T=$(( $T / 1024 )) 42 | 43 | if [ $T -gt 1024 ] 44 | then 45 | MODT="M" 46 | T=$(( $T / 1024 )) 47 | fi 48 | fi 49 | 50 | printf "%s:%4d%s↓%4d%s↑" $IF $R $MODR $T $MODT 51 | -------------------------------------------------------------------------------- /tmux/tmux.conf: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | # Start windows and panes at 1, not 0, 7 | set -g base-index 1 8 | set -g pane-base-index 1 9 | 10 | # Set the prefix to ^A. 
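# (C-b is unbound first, then C-a becomes the prefix; "bind a send-prefix" lets
# prefix + a pass a literal C-a through to the program running inside the pane.)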
11 | unbind C-b 12 | set -g prefix ^A 13 | bind a send-prefix 14 | 15 | # History 16 | set -g history-limit 10000 17 | 18 | # Terminal emulator window title 19 | set -g set-titles on 20 | set -g set-titles-string '#S:#I.#P #W' 21 | 22 | # Status Bar 23 | set -g status-interval 10 24 | set -g status-utf8 on 25 | set -g status-bg '#333333' 26 | set -g status-fg '#ffffff' 27 | set -g status-left '#[fg=green]#H#[default]' 28 | set -g status-right-length 100 29 | set -g status-right '#[bg=#666666]#[fg=#000000]#(cut -d " " -f 1 /proc/loadavg)#[default] #[bg=#226622]#[fg=#111111]#(/etc/tmux/mem.sh)#[default] #[bg=#662222]#[fg=#111111]#(/etc/tmux/disk.sh)#[default] #[bg=#222266]#[fg=#aaaaaa]#(/etc/tmux/net.sh)#[default] #[fg=white,bold]%Y-%m-%d %H:%M ' 30 | set -g window-status-format '#I #W' 31 | set -g window-status-current-format ' #I #W ' 32 | setw -g window-status-current-bg '#44aa44' 33 | setw -g window-status-current-fg '#000000' 34 | 35 | # Enable utf-8 36 | setw -g utf8 on 37 | set -g status-utf8 on 38 | 39 | # Notifying if other windows has activities 40 | setw -g monitor-activity on 41 | set -g visual-activity on 42 | 43 | # Clock 44 | setw -g clock-mode-colour green 45 | setw -g clock-mode-style 24 46 | 47 | # Toggle mouse on with ^A m 48 | bind m \ 49 | set -g mode-mouse on \;\ 50 | set -g mouse-resize-pane on \;\ 51 | set -g mouse-select-pane on \;\ 52 | set -g mouse-select-window on \;\ 53 | display 'Mouse: ON' 54 | 55 | # Toggle mouse off with ^A M 56 | bind M \ 57 | set -g mode-mouse off \;\ 58 | set -g mouse-resize-pane off \;\ 59 | set -g mouse-select-pane off \;\ 60 | set -g mouse-select-window off \;\ 61 | display 'Mouse: OFF' 62 | 63 | # Copy-paste buffers: 64 | setw -g mode-keys vi # vim-style movement 65 | 66 | # in normal tmux mode 67 | bind Escape copy-mode # `tmux prefix + Escape` starts copy mode. 68 | bind p paste-buffer # `prefix + p` pastes the latest buffer 69 | 70 | # in copy mode… 71 | bind -t vi-copy v begin-selection # `v` begins a selection. 
(movement keys to select the desired bits) 72 | bind -t vi-copy y copy-selection # `y` copies the current selection to one of tmux's "paste buffers" 73 | bind -t vi-copy V rectangle-toggle # `V` changes between line- and columnwise selection 74 | 75 | bind -t vi-copy Y copy-end-of-line # ^1 76 | bind + delete-buffer 77 | 78 | # Window controlling 79 | bind [ previous-window 80 | bind ] next-window 81 | 82 | # 83 | # Create new session 84 | # 85 | new-session 86 | -------------------------------------------------------------------------------- /tor/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Tor - onion network access 3 | # 4 | 5 | include: 6 | - polipo 7 | - tinyproxy 8 | 9 | tor: 10 | pkg: 11 | - installed 12 | service.running: 13 | - require: 14 | - pkg: tor 15 | -------------------------------------------------------------------------------- /transmission/daemon/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Transmission - torrent daemon 3 | # 4 | 5 | {% set kodi_present = salt['additional.state_in'](['kodi']) %} 6 | 7 | transmission-daemon: 8 | pkg: 9 | - installed 10 | service.running: 11 | - require: 12 | - pkg: transmission-daemon 13 | 14 | {% if kodi_present -%} 15 | debian-transmission: 16 | user.present: 17 | - groups: 18 | - debian-transmission 19 | - kodi 20 | {%- endif %} 21 | 22 | /srv/torrent/incomplete: 23 | file.directory: 24 | - user: debian-transmission 25 | - group: debian-transmission 26 | - mode: 750 27 | - makedirs: True 28 | - require: 29 | - pkg: transmission-daemon 30 | 31 | /srv/torrent/completed: 32 | file.directory: 33 | - user: debian-transmission 34 | - group: debian-transmission 35 | - mode: 755 36 | - makedirs: True 37 | - require: 38 | - pkg: transmission-daemon 39 | 40 | /srv/torrent/watch: 41 | file.directory: 42 | - user: debian-transmission 43 | - group: debian-transmission 44 | - mode: 775 45 | - makedirs: True 46 | - require: 47 | - pkg: transmission-daemon 48 | 49 | /srv/media/torrents: 50 | file.directory: 51 | - user: debian-transmission 52 | - group: debian-transmission 53 | - mode: 777 54 | - makedirs: True 55 | - require: 56 | - pkg: transmission-daemon 57 | 58 | /etc/transmission-daemon/script-done.sh: 59 | file.managed: 60 | - source: salt://transmission/daemon/script-done.sh.jinja 61 | - template: jinja 62 | - user: root 63 | - group: debian-transmission 64 | - mode: 750 65 | - context: 66 | kodi_present: {{ kodi_present }} 67 | - require: 68 | - pkg: transmission-daemon 69 | 70 | /etc/transmission-daemon/settings.json: 71 | file.managed: 72 | - source: salt://transmission/daemon/settings.json.jinja 73 | - template: jinja 74 | - user: debian-transmission 75 | - group: debian-transmission 76 | - mode: 600 77 | - require: 78 | - pkg: transmission-daemon 79 | - file: /srv/media/torrents 80 | - file: /srv/torrent/watch 81 | - file: /srv/torrent/incomplete 82 | - file: /srv/torrent/completed 83 | - file: /etc/transmission-daemon/script-done.sh 84 | - watch_in: 85 | - service: transmission-daemon 86 | -------------------------------------------------------------------------------- /transmission/daemon/script-done.sh.jinja: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | HOST="127.0.0.1" 7 | PORT=8091 8 | USER="transmission" 9 | PASSWORD="transmission" 10 | 11 | echo "Starting torrent 
complete script: id#$TR_TORRENT_ID" | tee /tmp/transmission-script.log 12 | 13 | transmission-remote "$HOST:$PORT" -n "$USER:$PASSWORD" -t$TR_TORRENT_ID --stop 14 | {%- if kodi_present %} 15 | transmission-remote "$HOST:$PORT" -n "$USER:$PASSWORD" -t$TR_TORRENT_ID --move /srv/media/torrents 16 | chgrp -R kodi /srv/media/torrents 17 | chmod -R og+w /srv/media/torrents 18 | {%- endif %} 19 | transmission-remote "$HOST:$PORT" -n "$USER:$PASSWORD" -t$TR_TORRENT_ID --remove 20 | 21 | echo "Ending torrent complete script: id#$TR_TORRENT_ID" | tee /tmp/transmission-script.log 22 | -------------------------------------------------------------------------------- /transmission/daemon/settings.json.jinja: -------------------------------------------------------------------------------- 1 | { 2 | "alt-speed-down": 50, 3 | "alt-speed-enabled": true, 4 | "alt-speed-time-begin": 420, 5 | "alt-speed-time-day": 127, 6 | "alt-speed-time-enabled": true, 7 | "alt-speed-time-end": 1320, 8 | "alt-speed-up": 1, 9 | "bind-address-ipv4": "0.0.0.0", 10 | "bind-address-ipv6": "::", 11 | "blocklist-enabled": false, 12 | "blocklist-url": "http://www.example.com/blocklist", 13 | "cache-size-mb": 4, 14 | "dht-enabled": true, 15 | "download-dir": "/srv/torrent/completed", 16 | "download-limit": 100, 17 | "download-limit-enabled": 0, 18 | "download-queue-enabled": true, 19 | "download-queue-size": 5, 20 | "encryption": 1, 21 | "idle-seeding-limit": 1, 22 | "idle-seeding-limit-enabled": false, 23 | "incomplete-dir": "/srv/torrent/incomplete", 24 | "incomplete-dir-enabled": true, 25 | "lpd-enabled": false, 26 | "max-peers-global": 200, 27 | "message-level": 2, 28 | "peer-congestion-algorithm": "", 29 | "peer-id-ttl-hours": 6, 30 | "peer-limit-global": 240, 31 | "peer-limit-per-torrent": 60, 32 | "peer-port": 51413, 33 | "peer-port-random-high": 65535, 34 | "peer-port-random-low": 49152, 35 | "peer-port-random-on-start": false, 36 | "peer-socket-tos": "default", 37 | "pex-enabled": true, 38 | "port-forwarding-enabled": false, 39 | "preallocation": 1, 40 | "prefetch-enabled": 1, 41 | "queue-stalled-enabled": true, 42 | "queue-stalled-minutes": 30, 43 | "ratio-limit": 2, 44 | "ratio-limit-enabled": true, 45 | "rename-partial-files": true, 46 | "rpc-authentication-required": true, 47 | "rpc-bind-address": "0.0.0.0", 48 | "rpc-enabled": true, 49 | "rpc-password": "{f7ea9584ff17cd0772b84d57178bf7334bac9fb82NK9EntK", 50 | "rpc-port": 8091, 51 | "rpc-url": "/transmission/", 52 | "rpc-username": "transmission", 53 | "rpc-whitelist": "127.0.0.1,192.168.*", 54 | "rpc-whitelist-enabled": true, 55 | "scrape-paused-torrents-enabled": false, 56 | "script-torrent-done-enabled": true, 57 | "script-torrent-done-filename": "/etc/transmission-daemon/script-done.sh", 58 | "seed-queue-enabled": false, 59 | "seed-queue-size": 10, 60 | "speed-limit-down": 100, 61 | "speed-limit-down-enabled": false, 62 | "speed-limit-up": 10, 63 | "speed-limit-up-enabled": true, 64 | "start-added-torrents": true, 65 | "trash-original-torrent-files": false, 66 | "umask": 18, 67 | "upload-limit": 1, 68 | "upload-limit-enabled": 1, 69 | "upload-slots-per-torrent": 14, 70 | "utp-enabled": true, 71 | "watch-dir": "/srv/torrent/watch", 72 | "watch-dir-enabled": true 73 | } 74 | -------------------------------------------------------------------------------- /transmission/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Transmission - small torrent client 3 | # 4 | 5 | transmission: 6 | pkg.installed 7 | 
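# This state only installs the desktop client; the headless setup (the daemon
# service, settings.json, the watch/incomplete/completed directories and the
# torrent-done script) lives in transmission/daemon/init.sls.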
-------------------------------------------------------------------------------- /udev/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # UDEV device configuration 3 | # 4 | 5 | udev_trigger: 6 | cmd.wait: 7 | - name: udevadm trigger 8 | -------------------------------------------------------------------------------- /users/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Users creation 3 | # 4 | 5 | {% for user, args in salt['pillar.get']('users', {}).items() %} 6 | {{ user }}: 7 | group: 8 | - present 9 | user.present: 10 | - remove_groups: False 11 | - gid_from_name: True 12 | {% if 'fullname' in args %} 13 | - fullname: {{ args['fullname'] }} 14 | {% endif %} 15 | {% if 'home' in args %} 16 | - home: {{ args['home'] }} 17 | {% endif %} 18 | {% if 'shell' in args %} 19 | - shell: {{ args['shell'] }} 20 | {% endif %} 21 | {% if 'password' in args %} 22 | - password: {{ args['password'] }} 23 | {% if 'enforce_password' in args %} 24 | - enforce_password: {{ args['enforce_password'] }} 25 | {% endif %} 26 | {% endif %} 27 | {% if 'groups' in args %} 28 | - groups: 29 | {% if 'admin' in args and args['admin'] == True %} 30 | - sudo 31 | - adm 32 | {% endif %} 33 | {% for group in args['groups'] %} 34 | - {{ group }} 35 | {% endfor %} 36 | {% endif %} 37 | - require: 38 | - group: {{ user }} 39 | 40 | {% if 'keys_pub' in args %} 41 | {%- for pubkey in args['keys_pub'] %} 42 | {{ pubkey }}: 43 | ssh_auth.present: 44 | - user: {{ user }} 45 | {% endfor %} 46 | {% endif %} 47 | {% endfor %} 48 | -------------------------------------------------------------------------------- /vim/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # VIM - console editor 3 | # 4 | 5 | vim: 6 | pkg.installed 7 | 8 | /etc/vim/vimrc: 9 | file.managed: 10 | - source: salt://vim/vimrc 11 | - user: root 12 | - group: root 13 | - mode: 644 14 | - require: 15 | - pkg: vim 16 | -------------------------------------------------------------------------------- /virtualbox/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # VirtualBox vitual machine 3 | # 4 | 5 | virtualbox: 6 | pkg.installed 7 | 8 | /etc/modules: 9 | file.append: 10 | - text: 11 | - pci-stub 12 | 13 | /etc/default/grub: 14 | file.sed: 15 | - before: '""' 16 | - after: '"intel_iommu=on"' 17 | - limit: '^GRUB_CMDLINE_LINUX=' 18 | 19 | update-grub: 20 | cmd.wait: 21 | - watch: 22 | - file: /etc/default/grub 23 | -------------------------------------------------------------------------------- /vsftpd/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # vsftpd installer 3 | # 4 | 5 | vsftpd: 6 | pkg: 7 | - installed 8 | service.running: 9 | - require: 10 | - pkg: vsftpd 11 | - watch: 12 | - user: ftp 13 | 14 | /etc/vsftpd.conf: 15 | file.managed: 16 | - source: salt://vsftpd/vsftpd.conf.jinja 17 | - template: jinja 18 | - context: 19 | users: {{ salt['pillar.get']('ftp:'+grains['id']+':users', 'False') }} 20 | anon: {{ salt['pillar.get']('ftp:'+grains['id']+':anon', 'False') }} 21 | write: {{ salt['pillar.get']('ftp:'+grains['id']+':write', 'False') }} 22 | - user: root 23 | - group: root 24 | - mode: 644 25 | - require: 26 | - pkg: vsftpd 27 | - watch_in: 28 | - service: vsftpd 29 | 30 | ftp: 31 | user.present: 32 | - system: True 33 | - createhome: False 34 | - require: 35 | - pkg: vsftpd 36 | - groups: 37 | 
{%- for group in salt['pillar.get']('ftp:'+grains['id']+':groups', []) %} 38 | - {{ group }} 39 | {% endfor %} 40 | 41 | {% for name, dir in salt['pillar.get']('ftp:'+grains['id']+':anon_dirs', {}).items() %} 42 | /srv/ftp/{{ name }}: 43 | mount.mounted: 44 | - device: {{ dir }} 45 | - mkmnt: True 46 | - fstype: none 47 | - opts: 48 | - defaults 49 | - bind 50 | - remount: False 51 | - require_in: 52 | - service: vsftpd 53 | {% endfor %} 54 | -------------------------------------------------------------------------------- /vsftpd/vsftpd.conf.jinja: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | listen=YES 7 | listen_ipv6=NO 8 | 9 | anonymous_enable={% if anon %}YES{% else %}NO{% endif %} 10 | local_enable={% if users %}YES{% else %}NO{% endif %} 11 | 12 | write_enable={% if write %}YES{% else %}NO{% endif %} 13 | anon_umask=000 14 | anon_upload_enable=YES 15 | anon_mkdir_write_enable=YES 16 | anon_root=/srv/ftp 17 | 18 | use_localtime=YES 19 | 20 | xferlog_enable=YES 21 | 22 | dirmessage_enable=YES 23 | ftpd_banner=FTP server 24 | pam_service_name=vsftpd 25 | rsa_cert_file=/etc/ssl/certs/ssl-cert-snakeoil.pem 26 | rsa_private_key_file=/etc/ssl/private/ssl-cert-snakeoil.key 27 | -------------------------------------------------------------------------------- /x86/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # x386 libraries 3 | # 4 | 5 | arch: 6 | cmd.wait: 7 | - name: dpkg --add-architecture i386 8 | - watch: 9 | - pkg: ia32-libs 10 | 11 | ia32-libs: 12 | pkg.installed 13 | -------------------------------------------------------------------------------- /xen/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # XEN hypervisor 3 | # 4 | 5 | include: 6 | - libvirt 7 | 8 | xen-pkgs: 9 | pkg.installed: 10 | - pkgs: 11 | - xen-hypervisor-4.2-amd64 12 | - xen-utils-4.2 13 | - xenwatch 14 | - xen-tools 15 | 16 | network-manager: 17 | pkg.removed 18 | 19 | /etc/grub.d/09_linux_xen: 20 | file.rename: 21 | - source: /etc/grub.d/20_linux_xen 22 | - require: 23 | - pkg: xen-pkgs 24 | 25 | /usr/lib/xen-default: 26 | file.symlink: 27 | - target: /usr/lib/xen-4.2 28 | - require: 29 | - pkg: xen-pkgs 30 | 31 | /etc/default/grub: 32 | file.sed: 33 | - before: 0 34 | - after: 5 35 | - limit: '^GRUB_HIDDEN_TIMEOUT=' 36 | file.sed: 37 | - before: '""' 38 | - after: '"max_loop=64 xen-pciback.permissive xen-pciback.hide=(01:00.0)(01:00.1) dom0_mem=8192M dom0_max_vcpus=4 intel_iommu=on"' 39 | - limit: '^GRUB_CMDLINE_LINUX=' 40 | 41 | update-grub: 42 | cmd.wait: 43 | - watch: 44 | - file: /etc/default/grub 45 | - file: /etc/grub.d/09_linux_xen 46 | 47 | /srv/xen: 48 | file.directory: 49 | - user: root 50 | - group: libvirtd 51 | - mode: 750 52 | - require: 53 | - pkg: xen-pkgs 54 | 55 | /etc/modules: 56 | file.append: 57 | - text: 58 | - xen-pciback passthrough=1 59 | 60 | /etc/network/interfaces: 61 | file.managed: 62 | - source: salt://xen/interfaces 63 | - user: root 64 | - group: root 65 | - mode: 644 66 | - require: 67 | - pkg: xen-pkgs 68 | -------------------------------------------------------------------------------- /xen/interfaces: -------------------------------------------------------------------------------- 1 | # 2 | # WARNING: 3 | # This file is under CM control - all manual changes will be removed 4 | # 5 | 6 | auto lo 7 | iface lo inet loopback 8 | 9 | 
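# br0 takes the host's address via DHCP and enslaves eth0 (bridge_ports, a
# bridge-utils option) so Xen/libvirt guests can attach to the LAN; eth0 itself
# stays unconfigured ("manual") because the bridge owns it.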
auto br0 10 | iface br0 inet dhcp 11 | bridge_ports eth0 12 | 13 | auto eth0 14 | iface eth0 inet manual 15 | -------------------------------------------------------------------------------- /xonotic/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Xonotic - good 3d shooter 3 | # 4 | 5 | getdeb-games-repo: 6 | pkgrepo.managed: 7 | - name: deb http://archive.getdeb.net/ubuntu {{ grains['oscodename'] }}-getdeb games 8 | - key_url: http://archive.getdeb.net/getdeb-archive.key 9 | - require: 10 | - cmd: arch 11 | - require_in: 12 | - pkg: xonotic 13 | 14 | xonotic: 15 | pkg.installed 16 | -------------------------------------------------------------------------------- /zip/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # Zip packer & unpacker 3 | # 4 | 5 | zip-packages: 6 | pkg.installed: 7 | - pkgs: 8 | - zip 9 | - unzip 10 | -------------------------------------------------------------------------------- /zsh/init.sls: -------------------------------------------------------------------------------- 1 | # 2 | # ZSH shell 3 | # 4 | 5 | zsh: 6 | pkg.installed 7 | 8 | /etc/zsh/zshrc: 9 | file.managed: 10 | - source: salt://zsh/zshrc 11 | - user: root 12 | - group: root 13 | - mode: 644 14 | - require: 15 | - pkg: zsh 16 | --------------------------------------------------------------------------------