├── LICENSE ├── README.md ├── ROADMAP.md ├── banquise.png ├── doc ├── docs │ ├── FAQ.md │ ├── about.md │ ├── debugging_banquise.md │ ├── hpc_cluster_installation │ │ └── prepare_installation.md │ ├── img │ │ ├── Saltstack_logo.png │ │ ├── banquise.png │ │ ├── banquise_logo.svg │ │ ├── banquise_logo_large.svg │ │ ├── banquise_logo_small.svg │ │ ├── banquise_logo_smallnoalpha.svg │ │ └── splitted_icebergs.svg │ ├── index.md │ ├── standard_installation │ │ ├── before_installation.md │ │ ├── img │ │ │ ├── 1.svg │ │ │ └── 2.svg │ │ └── prepare_installation.md │ ├── useful_links.md │ └── what_is_banquise.md ├── mkdocs.yml └── mkdocs_windmill │ ├── 404.html │ ├── __init__.py │ ├── article-nav.html │ ├── base.html │ ├── css │ ├── base.css │ ├── bootstrap-3.3.7.css │ ├── bootstrap-3.3.7.min.css │ ├── font-awesome-4.7.0.css │ ├── font-awesome-4.7.0.min.css │ └── highlight.css │ ├── fonts │ ├── fontawesome-webfont.eot │ ├── fontawesome-webfont.svg │ ├── fontawesome-webfont.ttf │ ├── fontawesome-webfont.woff │ ├── fontawesome-webfont.woff2 │ ├── glyphicons-halflings-regular.eot │ ├── glyphicons-halflings-regular.svg │ ├── glyphicons-halflings-regular.ttf │ ├── glyphicons-halflings-regular.woff │ └── glyphicons-halflings-regular.woff2 │ ├── img │ ├── banquise_logo_smallnoalpha.svg │ └── favicon.ico │ ├── js │ ├── base.js │ ├── bootstrap-3.3.7.js │ ├── bootstrap-3.3.7.min.js │ ├── elasticlunr.js │ ├── elasticlunr.min.js │ ├── highlight.pack.js │ ├── jquery-3.2.1.js │ └── jquery-3.2.1.min.js │ ├── main.html │ ├── nav-item.html │ ├── nav-pane.html │ ├── search.html │ └── topbar.html ├── pillar ├── cluster │ ├── authentication │ │ ├── ldap_private.sls │ │ ├── ldap_public.sls │ │ ├── passwords_public.sls │ │ ├── ssh_private.sls │ │ └── ssh_public.sls │ ├── connect.sls │ ├── core.sls │ ├── equipment │ │ └── vm_management_1 │ │ │ └── system.sls │ ├── io │ │ └── nfs.sls │ ├── masters │ │ ├── masters.sls │ │ ├── masters_states.sls │ │ └── masters_system.sls │ ├── monitoring.sls │ ├── network.sls │ ├── nodes │ │ ├── computes.sls │ │ ├── computes_states.sls │ │ ├── computes_system.sls │ │ ├── ios.sls │ │ ├── ios_states.sls │ │ ├── ios_system.sls │ │ ├── logins.sls │ │ ├── logins_states.sls │ │ ├── logins_system.sls │ │ ├── managements.sls │ │ ├── switchs.sls │ │ ├── switchs_states.sls │ │ └── switchs_system.sls │ └── settings │ │ ├── general_settings.sls │ │ └── network.sls ├── engine │ ├── engine.sls │ ├── engine_connect.sls │ ├── engine_monitoring.sls │ ├── engine_network.sls │ ├── engine_reverse.sls │ ├── equipment.sls │ ├── out │ ├── system.sls │ ├── system.sls.bkp │ ├── system.sls.bkp3 │ ├── system.sls.bkp4 │ ├── system.sls.bkp5 │ ├── system.sls.bkp6 │ ├── system.sls.ckp2 │ ├── toto.sls.bkp │ ├── toto.sls.bkp2 │ └── yaml_macros.sls ├── general │ ├── pkgs.sls │ ├── services.sls │ └── templating.sls ├── top.sls └── top.sls.bkp ├── salt ├── .monitoring.sls.swp ├── bootstrap │ ├── init.sls │ ├── os_dvd.local.repo.jinja │ └── salt.local.repo.jinja ├── debug │ ├── debug.jinja │ ├── dhcpd.conf.jinja │ ├── dhcpd0.conf.jinja │ ├── server.sls │ ├── server_monitoring.sls │ └── server_monitoring_v2.sls ├── dhcp │ ├── dhcpd.conf.jinja │ ├── dhcpd0.conf.jinja │ ├── server.sls │ ├── server_monitoring.sls │ └── server_monitoring_v2.sls ├── dns │ ├── client.sls │ ├── forward.jinja │ ├── forward.jinja.bkp │ ├── forward.jinja.bkp2 │ ├── named.conf.jinja │ ├── named.conf.jinja.bkp │ ├── resolv.conf.jinja │ ├── reverse.jinja │ ├── reverse.jinja.bkp │ ├── server.sls │ └── server_monitoring.sls ├── dumb.jinja ├── dumb.sls ├── 
include │ └── myself.sls ├── ldap │ ├── basedomain.ldif.jinja │ ├── chdomain.ldif.jinja │ ├── client.sls │ ├── client_monitoring.sls │ ├── phpldapadmin.sls │ ├── server.sls │ ├── server_monitoring.sls │ └── sssd.conf.jinja ├── network │ ├── firewall.sls │ ├── masquerading.sls │ ├── nmanager.sls │ └── static.sls ├── nfs │ ├── client.sls │ ├── exports.jinja │ ├── server.sls │ └── server_monitoring.sls ├── ntp │ ├── client.sls │ ├── client_monitoring.sls │ ├── ntp.conf.client.jinja │ ├── ntp.conf.jinja │ ├── ntp.conf.jinja.bkp │ ├── server.sls │ └── server_monitoring.sls ├── nyancat │ ├── init.sls │ └── nyancat_login.sh ├── pxe │ ├── default.jinja │ ├── default_ks.jinja │ ├── default_preseed.jinja │ ├── ks.cfg.jinja │ ├── preseed.jinja │ ├── server.sls │ └── server_monitoring.sls ├── repository │ ├── banquise.local.repo.jinja │ ├── client.sls │ ├── client.sls.bkp │ ├── genericwithupdate.local.repo.jinja │ ├── os_dvd.local.repo.jinja │ ├── salt.local.repo.jinja │ ├── server.sls │ └── server_monitoring.sls ├── shinken │ ├── client.sls │ ├── client.sls.bkp │ ├── cluster-groups.cfg.jinja │ ├── computes.cfg.jinja │ ├── dumb.sls │ ├── ios.cfg.jinja │ ├── logins.cfg.jinja │ ├── masters.cfg.jinja │ ├── nodes.cfg.jinja │ ├── server.sls │ ├── server.sls.bkp │ ├── servicegroup.cfg.jinja │ ├── services.cfg.jinja │ └── services.cfg.jinja.old ├── slurm │ ├── client.sls │ ├── client_monitoring.sls │ ├── login.sls │ ├── munge.key │ ├── server.sls │ ├── server_monitoring.sls │ ├── slurm.conf.jinja │ └── slurm.epilog.clean ├── ssh │ ├── client.sls │ ├── id_rsa.jinja │ ├── id_rsa.pub.jinja │ └── master.sls ├── top.sls └── workstation │ ├── Centos_7_gnome_desktop.sls │ ├── Centos_7_office_desktop.sls │ └── Fedora_27_xfce_desktop.sls └── tools └── auth_helper.sh /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Ox 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | ██████ █████ ███ ██ ██████ ██ ██ ██ ███████ ███████ 3 | ██ ██ ██ ██ ████ ██ ██ ██ ██ ██ ██ ██ ██ 4 | ██████ ███████ ██ ██ ██ ██ ██ ██ ██ ██ ███████ █████ 5 | ██ ██ ██ ██ ██ ██ ██ ██ ▄▄ ██ ██ ██ ██ ██ ██ 6 | ██████ ██ ██ ██ ████ ██████ ██████ ██ ███████ ███████ 7 | ▀▀ 8 | 9 | Current release: v 1.0 10 | 11 | Mail: oxedions@gmail.com 12 | 13 | **Banquise** is a Salt based stack, designed to deploy enterprise workstations or HPC clusters. The aim is to simplify basic operations so that administrators can focus on what really matters. 14 | 15 | Please have a look at the wiki for more information (What is Banquise, how to install, etc.): https://github.com/oxedions/banquise/wiki 16 | 17 | Special thanks: 18 | - remyd1: https://github.com/remyd1 19 | -------------------------------------------------------------------------------- /ROADMAP.md: -------------------------------------------------------------------------------- 1 | Current version: 1.0 2 | 3 | # 1.1 current roadmap: 4 | 5 | - Management tools: 6 | - Deployment: ◕ in progress 7 | - Node control: Not done 8 | - NFS improvement: Not done 9 | - Multiple system compatibility: Y 10 | - Updates: ◕ in progress 11 | - Basic stack RPM: ◕ in progress 12 | - Improve Monitoring: Not done 13 | - Workstation state: ◕ in progress 14 | - EFI boot support: Not done 15 | - Good documentation: ◕ in progress 16 | -------------------------------------------------------------------------------- /banquise.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/banquise.png -------------------------------------------------------------------------------- /doc/docs/FAQ.md: -------------------------------------------------------------------------------- 1 | # FAQ 2 | 3 | ## Why is Banquise using Salt? 4 | 5 | A few open source deployment solutions were tested before Salt. Salt was chosen because it was powerful and flexible enough to handle something like Banquise. Also, the pure Jinja2 way of Salt should help in the future. 6 | Last point: debugging Salt is easy compared to other solutions. 7 | 8 | ## There is no High Availability or Failover, do you plan to add it? 9 | 10 | Yes and no. HA/Failover and Multi-Islands systems are scheduled. However, the choice was made to first consolidate the Standalone version and wait for a specific demand before developing them. 11 | 12 | ## Salt is compatible with many Linux distributions, but Banquise is not. Is that planned as well? 13 | 14 | Yes. Banquise is already compatible with RHEL/Centos for management nodes, and RHEL/Centos/Fedora/Ubuntu/Debian for client nodes. Other distributions could be added if there is a demand. 15 | 16 | ## Is there a way to use OpenHPC RPMs in Banquise? 17 | 18 | Yes, you just need to develop a simple module that adds this repository. If enough people need it, it will be added into the main part of Banquise. 19 | 20 | ## Do you plan to support operating systems other than Linux, as Salt can handle that? 21 | 22 | If there is a demand, yes, as long as the files are open source. Banquise is open in code but also in philosophy: it is made for everyone.
23 | -------------------------------------------------------------------------------- /doc/docs/about.md: -------------------------------------------------------------------------------- 1 | # About 2 | 3 | ## Author 4 | 5 | Oxedions 6 | 7 | ## Contributors 8 | 9 | * oxedions 10 | * remyd1 11 | 12 | ## License 13 | 14 | Banquise is distributed under the [MIT license](https://github.com/oxedions/banquise/blob/master/LICENSE). 15 | 16 | The Banquise logo uses the following content and is under the CC BY-NC 4.0 Licence: 17 | * Iceberg drawing by Berniek Wittendorp on IJsberg metaforen Pinterest Tattoo, CC BY-NC 4.0 Licence 18 | * Uroob font 19 | 20 | Other drawings are under ??? 21 | 22 | The documentation web site is generated using MkDocs http://www.mkdocs.org/ . The theme is a modified version of the Windmill theme by Grist Labs https://github.com/gristlabs/mkdocs-windmill . 23 | 24 | ## Release notes 25 | 26 | ### Version 1.1 27 | 28 | Development in progress... 29 | 30 | * Add tools 31 | * Improve modularity 32 | * Add multi OS support 33 | * Add system configuration per subtype 34 | 35 | ### Version 1.0 36 | 37 | * First stable version 38 | * Include all core features 39 | -------------------------------------------------------------------------------- /doc/docs/debugging_banquise.md: -------------------------------------------------------------------------------- 1 | # Debugging Banquise 2 | 3 | Banquise is fully based on Salt Stack. 4 | 5 | To debug Banquise, first understand the rendering order of Salt, and then use the debugging information provided below to investigate the faulty part. The global strategy is to identify which step is failing, and then use logs and test samples to solve the issue. 6 | 7 | Remember: the saltmaster is essentially a file provider; the rendering process is done on the minions. 8 | 9 | ## Salt rendering order 10 | 11 | pillars -> top.sls pillars (who can access which pillars) -> top.sls states (who applies which states) -> states -> finalize with grains -> apply 12 | 13 | ## Gather information and check data 14 | 15 | When facing an issue whose cause is not clearly reported, the best way is to check all elements one by one. 16 | 17 | ### Check pillars 18 | 19 | Pillars contain data, but also some logic that could fail. They must be checked. 20 | 21 | Pillars can be rendered on a specific target to display their content. The best target is a master node, as these nodes commonly have access to all pillars. 22 | 23 | For example, to ask Salt to render the management1 pillars and display them, use: 24 | 25 | ```bash 26 | salt 'management1*' pillar.items 27 | ``` 28 | 29 | The first thing is then to scroll to the top of the output and check that there are no errors, like: 30 | 31 | ```bash 32 | _errors: 33 | - Rendering Primary Top file failed, render error: 34 | ``` 35 | 36 | or 37 | 38 | ``` 39 | _errors: 40 | - Rendering SLS 'engine/engine_connect' failed. Please see master log for details. 41 | ``` 42 | 43 | Then, if the error is not displayed here, check the master logs at /var/log/salt/master.
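A quick way to surface recent rendering errors in that log is to filter it. This is a minimal sketch, assuming the default log location and that the relevant messages contain the word "Rendering", as in the examples above:

```bash
# Show the most recent rendering errors reported in the salt master log
grep -i 'rendering' /var/log/salt/master | tail -n 20
```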
For example, the content can be: 44 | 45 | ``` 46 | ParserError: while parsing a block mapping 47 | in "", line 15, column 2: 48 | dhcp_server: 49 | ^ 50 | expected , but found '' 51 | in "", line 17, column 4: 52 | management: auto 53 | ^ 54 | 55 | ; line 5 56 | ``` 57 | 58 | To get more information, use: 59 | 60 | ```bash 61 | salt-call -l trace 62 | ``` 63 | 64 | The recommended level is debug (because trace provides too much irrelevant information): 65 | 66 | ``` bash 67 | salt-call -l debug 68 | ``` 69 | 70 | ### Check grains 71 | 72 | While grains are nearly never the source of an issue in Banquise, they can be checked the same way as pillars, using: 73 | 74 | ``` bash 75 | salt 'management1*' grains.items 76 | ``` 77 | 78 | Note that this command is useful to get the data of a specific minion. 79 | 80 | ## Check states 81 | 82 | If data (pillars and grains) is not the source of the issue, the state generating the error must be investigated. 83 | 84 | States can fail during their task executions, or during the rendering of a Jinja template. 85 | 86 | ### Debugging tasks 87 | 88 | A task can fail during its own rendering, or during its execution. 89 | 90 | The first part to check is the rendering, and the resulting task that is then executed. There are two ways to do this. 91 | 92 | First, when using salt-call -l debug, at the beginning of the execution Salt will display the rendered content of each task executed. 93 | While this is the simplest way to get the rendered task, the second way is more accurate. 94 | 95 | The second way is to use the provided dumb.sls state, and copy the content of your faulty state into the dumb.jinja file. 96 | Then, execute the dumb state, running from the minion: 97 | 98 | ``` bash 99 | salt-call state.apply dumb 100 | ``` 101 | 102 | The content of the /root/dumb file is then the rendered state. It is then possible to check it for issues. 103 | 104 | ### Debugging templates 105 | 106 | If an issue occurs while rendering a template file (a Jinja2 template), a standard code debugging strategy can be used. 107 | 108 | Investigate the error, and if the Salt report is not enough, use brute force (remove part of the template, check whether the error still occurs, and if so remove another part, and so on until you find what is generating the issue). 109 | -------------------------------------------------------------------------------- /doc/docs/hpc_cluster_installation/prepare_installation.md: -------------------------------------------------------------------------------- 1 | # Prepare installation 2 | 3 | ## Download needed elements 4 | 5 | The management nodes need a Centos Everything DVD iso. It is possible to download one from http://distrib-coffee.ipsl.jussieu.fr/pub/linux/centos/7/isos/x86_64/ . 6 | 7 | Client nodes need a Centos Everything DVD iso or a Fedora iso (depending on the desktop you wish: gnome, xfce, kde, etc.). 8 | For Centos, see the link above. For Fedora, see https://getfedora.org/ or https://spins.fedoraproject.org/ . Note that for a very large network, you may also need a Fedora Everything iso to lower web bandwidth usage.
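Before using a downloaded iso, it is worth checking its integrity. A minimal sketch, assuming the CentOS-7-x86_64-Everything-1708.iso listed in the standard installation page (the expected checksum is the one documented there):

```bash
# Verify the downloaded iso against its published sha256 checksum
echo "8593f5a1631ebfb7581193a7b4ef96d44f500d3ceb49cc4cfbfd71d5698e4173  CentOS-7-x86_64-Everything-1708.iso" | sha256sum -c -
```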
9 | 10 | Also get the Banquise repositories: 11 | 12 | wget --reject="index.html*" -nH -r --no-parent https://repo.saltstack.com/yum/redhat/7/x86_64/2016.11/ 13 | -------------------------------------------------------------------------------- /doc/docs/img/Saltstack_logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/docs/img/Saltstack_logo.png -------------------------------------------------------------------------------- /doc/docs/img/banquise.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/docs/img/banquise.png -------------------------------------------------------------------------------- /doc/docs/img/banquise_logo_smallnoalpha.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | 21 | 43 | 45 | 46 | 48 | image/svg+xml 49 | 51 | 52 | 53 | 54 | 55 | 60 | 66 | 71 | 76 | 81 | 86 | 92 | 98 | 103 | 109 | 115 | 116 | 117 | -------------------------------------------------------------------------------- /doc/docs/index.md: -------------------------------------------------------------------------------- 1 | # Welcome 2 | 3 | ![Banquise_logo](img/banquise_logo_large.svg) 4 | 5 | Welcome to the Banquise documentation. 6 | 7 | This documentation provides: 8 | 9 | * Information on Banquise (What is Banquise, About, FAQ) 10 | * A step by step Banquise installation procedure 11 | * How to manage the installed Banquise 12 | * How to develop new Banquise modules 13 | * How to debug Banquise 14 | 15 | Please use the menu on the left to navigate between topics. 16 | 17 | Note that this documentation can be loaded locally on an installed Banquise system, using MkDocs. See "Enable documentation" in the Saltmaster part of the installation procedure. 18 | -------------------------------------------------------------------------------- /doc/docs/standard_installation/before_installation.md: -------------------------------------------------------------------------------- 1 | # Before installation 2 | 3 | This guide will help you set up and deploy a standard Banquise, to manage an IT infrastructure. 4 | 5 | Please note that during this process, some terminology will be used: 6 | 7 | * **A node** is an entity hosting software resources. It can be anything: a hardware server, a workstation, a laptop, a container, etc. 8 | * **A server** will always refer to the software server terminology: a piece of software that listens to and manages clients. 9 | * **An OS** is an Operating System. 10 | * **The Saltmaster** is the node hosting the saltmaster server. 11 | 12 | This guide will help you install Banquise step by step.
13 | 14 | We will be using the reference infrastructure example provided: 15 | 16 | - 1 x management node also acting as saltmaster 17 | - 3 x nodes (generic workstations for example) 18 | 19 | ![Reference infrastructure](img/1.svg) 20 | 21 | We will then modify the files step by step to move to the following infrastructure: 22 | 23 | - 1 x external saltmaster 24 | - 1 x management with an external access acting as gateway 25 | - 1 x storage (exporting /home) 26 | - 1 x switch 27 | - 6 x workstations with 28 | - Manufacturer A 29 | - 2 x workstations of model AA (Ubuntu) 30 | - 1 x workstation of model AB (Fedora) 31 | - Manufacturer B 32 | - 1 x workstation of model BA (Centos) 33 | - 2 x workstations of model BB (Centos) 34 | 35 | ![Updated infrastructure](img/2.svg) 36 | 37 | Note that infrastructure nodes like the saltmaster, the management or the storage will all be using Centos Linux. 38 | 39 | If you encounter issues, do not hesitate to check the debugging part of the documentation. 40 | -------------------------------------------------------------------------------- /doc/docs/standard_installation/prepare_installation.md: -------------------------------------------------------------------------------- 1 | # Prepare installation 2 | 3 | ## Download needed elements 4 | 5 | The management nodes, called "masters", need a Centos Everything DVD iso (or an RHEL equivalent). It is possible to download one from http://distrib-coffee.ipsl.jussieu.fr/pub/linux/centos/7/isos/x86_64/ . 6 | Other "key" nodes will also use Centos. 7 | 8 | Client nodes need an iso of the targeted Linux distribution. 9 | 10 | Here is the list of needed files per client OS: 11 | 12 | * Centos: 13 | * 7: 14 | * Everything iso, >= 7.4. Can be found here: http://distrib-coffee.ipsl.jussieu.fr/pub/linux/centos/7/isos/x86_64/ 15 | Tested isos are: 16 | * CentOS-7-x86_64-Everything-1708.iso, sha256sum 8593f5a1631ebfb7581193a7b4ef96d44f500d3ceb49cc4cfbfd71d5698e4173 17 | 18 | * Fedora: 19 | * 27: 20 | * Server iso. Can be found here: 21 | Tested isos are: 22 | * Fedora-Server-dvd-x86_64-27-1.6.iso, sha256sum e383dd414bb57231b20cbed11c4953cac71785f7d4f5990b0df5ad534a0ba95c 23 | 24 | * Debian: 25 | * 9: 26 | 27 | * Ubuntu: 28 | * 16.04: 29 | * Server iso. Can be found here: 30 | Tested isos are: 31 | * ubuntu-16.04.4-server-amd64.iso, sha256sum 0a03608988cfd2e50567990dc8be96fb3c501e198e2e6efcb846d89efc7b89f2 32 | * 18.04: 33 | * __Standard server iso__ (not the live server one). Can be found here: 34 | Tested isos are: 35 | * ubuntu-18.04-server-amd64.iso, sha256sum a7f5c7b0cdd0e9560d78f1e47660e066353bb8a79eb78d1fc3f4ea62a07e6cbc 36 | 37 | 38 | Also get the Banquise repositories: 39 | ```bash 40 | wget --reject="index.html*" -nH -r --no-parent https://repo.saltstack.com/yum/redhat/7/x86_64/2016.11/ 41 | ``` 42 | -------------------------------------------------------------------------------- /doc/docs/useful_links.md: -------------------------------------------------------------------------------- 1 | # Useful links 2 | 3 | * [Banquise github page](https://github.com/oxedions) 4 | * Salt Stack 5 | * Sphenisc.com 6 | * Slurm 7 | * RedHat documentation 8 | * Banquise repository 9 | -------------------------------------------------------------------------------- /doc/docs/what_is_banquise.md: -------------------------------------------------------------------------------- 1 | # What is Banquise? 2 | 3 | ## The story... 4 | 5 | Once upon a time, in a very cold sea...
6 | 7 | Penguins were living on small icebergs, each colony separated from the others by dangerous and cold water... Each attempt to join the others was a disaster. 8 | 9 | ![Banquise_logo](img/splitted_icebergs.svg) 10 | 11 | One day, penguins discovered that sea Salt could be used to build strong structures. 12 | 13 | From this day, penguins started using Salt to gather icebergs and created a unified land: the Banquise. They started living in harmony and effectiveness, gathering icebergs and communicating with other species... 14 | 15 | ## The tool 16 | 17 | Banquise is a tool made using [Salt](https://saltstack.com/) to allow simple design and deployment of groups of computers (clusters). It is a fully open source tool, using other open source resources. 18 | ![Salt_logo](img/Saltstack_logo.png) 19 | 20 | Note: Salt is the open source tool; Salt Stack is the company that develops and provides support for Salt. 21 | 22 | Banquise is composed of two things: 23 | 24 | * A custom Salt pillar organization, designed to use the full power of Salt while keeping a simple, cluster-oriented structure. 25 | * A group of Salt states, called modules, ready to deploy a standard cluster of nodes (HPC cluster, small company office/workstations, etc.). 26 | 27 | Banquise has been made to abstract away from the host: it can be deployed on bare metal, VMs or containers. 28 | 29 | Each module has been made to be as autonomous as possible. 30 | 31 | Engines allow plugging other Salt Formulas into the Banquise pillar structure. 32 | 33 | ### Enterprise size networks 34 | 35 | Banquise can be used to deploy, monitor and maintain many kinds of IT infrastructure: enterprise, university, laboratory, ... 36 | 37 | All systems can then share the same user database, shared spaces, unified computational resources, etc. 38 | 39 | Banquise provides modules for servers but also for workstations, based on standard Linux operating systems. 40 | 41 | ### HPC cluster 42 | 43 | Banquise can be used to easily configure, deploy, update and maintain an HPC cluster, in the pets and cattle cloud philosophy. 44 | 45 | The following (but not mandatory) solution is proposed: 46 | * a base core (repository, dhcp, dns, ntp, pxe) 47 | * an authentication service (ldap/phpldapadmin/sssd) 48 | * a network file system (nfs) 49 | * a job scheduler (slurm) 50 | * a monitoring stack (shinken/webui2) 51 | 52 | Assuming the system administrator already knows the MAC addresses of the servers, a standard cluster can be deployed and put in production in less than a day.
53 | -------------------------------------------------------------------------------- /doc/mkdocs.yml: -------------------------------------------------------------------------------- 1 | site_name: Banquise Documentation 2 | site_description: 'Banquise stack main documentation' 3 | site_author: 'Oxedions' 4 | 5 | theme_dir: mkdocs_windmill 6 | pages: 7 | - Home: 8 | - Welcome: index.md 9 | - What is Banquise: what_is_banquise.md 10 | - Useful links: useful_links.md 11 | - Standard install: 12 | - Before installation: standard_installation/before_installation.md 13 | - Prepare installation: standard_installation/prepare_installation.md 14 | - HPC cluster install: 15 | - Prepare installation: hpc_cluster_installation/prepare_installation.md 16 | - Debugging Banquise: debugging_banquise.md 17 | - FAQ: FAQ.md 18 | - About: about.md 19 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/404.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block content %} 4 | 5 |

404

6 |

Page not found

7 | 8 | {% endblock %} 9 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/__init__.py -------------------------------------------------------------------------------- /doc/mkdocs_windmill/article-nav.html: -------------------------------------------------------------------------------- 1 | {% block article_nav %} 2 | {%- if page and (page.next_page or page.previous_page) %} 3 | 4 | 27 | 28 | {%- endif %} 29 | {% endblock %} 30 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/css/highlight.css: -------------------------------------------------------------------------------- 1 | /* 2 | This is the GitHub theme for highlight.js 3 | 4 | github.com style (c) Vasily Polovnyov 5 | 6 | */ 7 | 8 | .hljs { 9 | display: block; 10 | overflow-x: auto; 11 | color: #333; 12 | -webkit-text-size-adjust: none; 13 | } 14 | 15 | .hljs-comment, 16 | .diff .hljs-header, 17 | .hljs-javadoc { 18 | color: #998; 19 | font-style: italic; 20 | } 21 | 22 | .hljs-keyword, 23 | .css .rule .hljs-keyword, 24 | .hljs-winutils, 25 | .nginx .hljs-title, 26 | .hljs-subst, 27 | .hljs-request, 28 | .hljs-status { 29 | color: #333; 30 | font-weight: bold; 31 | } 32 | 33 | .hljs-number, 34 | .hljs-hexcolor, 35 | .ruby .hljs-constant { 36 | color: #008080; 37 | } 38 | 39 | .hljs-string, 40 | .hljs-tag .hljs-value, 41 | .hljs-phpdoc, 42 | .hljs-dartdoc, 43 | .tex .hljs-formula { 44 | color: #d14; 45 | } 46 | 47 | .hljs-title, 48 | .hljs-id, 49 | .scss .hljs-preprocessor { 50 | color: #900; 51 | font-weight: bold; 52 | } 53 | 54 | .hljs-list .hljs-keyword, 55 | .hljs-subst { 56 | font-weight: normal; 57 | } 58 | 59 | .hljs-class .hljs-title, 60 | .hljs-type, 61 | .vhdl .hljs-literal, 62 | .tex .hljs-command { 63 | color: #458; 64 | font-weight: bold; 65 | } 66 | 67 | .hljs-tag, 68 | .hljs-tag .hljs-title, 69 | .hljs-rule .hljs-property, 70 | .django .hljs-tag .hljs-keyword { 71 | color: #000080; 72 | font-weight: normal; 73 | } 74 | 75 | .hljs-attribute, 76 | .hljs-variable, 77 | .lisp .hljs-body, 78 | .hljs-name { 79 | color: #008080; 80 | } 81 | 82 | .hljs-regexp { 83 | color: #009926; 84 | } 85 | 86 | .hljs-symbol, 87 | .ruby .hljs-symbol .hljs-string, 88 | .lisp .hljs-keyword, 89 | .clojure .hljs-keyword, 90 | .scheme .hljs-keyword, 91 | .tex .hljs-special, 92 | .hljs-prompt { 93 | color: #990073; 94 | } 95 | 96 | .hljs-built_in { 97 | color: #0086b3; 98 | } 99 | 100 | .hljs-preprocessor, 101 | .hljs-pragma, 102 | .hljs-pi, 103 | .hljs-doctype, 104 | .hljs-shebang, 105 | .hljs-cdata { 106 | color: #999; 107 | font-weight: bold; 108 | } 109 | 110 | .hljs-deletion { 111 | background: #fdd; 112 | } 113 | 114 | .hljs-addition { 115 | background: #dfd; 116 | } 117 | 118 | .diff .hljs-change { 119 | background: #0086b3; 120 | } 121 | 122 | .hljs-chunk { 123 | color: #aaa; 124 | } 125 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- 
/doc/mkdocs_windmill/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.eot -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.ttf -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.woff -------------------------------------------------------------------------------- /doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/fonts/glyphicons-halflings-regular.woff2 -------------------------------------------------------------------------------- /doc/mkdocs_windmill/img/banquise_logo_smallnoalpha.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 19 | 21 | 43 | 45 | 46 | 48 | image/svg+xml 49 | 51 | 52 | 53 | 54 | 55 | 60 | 66 | 71 | 76 | 81 | 86 | 92 | 98 | 103 | 109 | 115 | 116 | 117 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/img/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/doc/mkdocs_windmill/img/favicon.ico -------------------------------------------------------------------------------- /doc/mkdocs_windmill/main.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {# 4 | The entry point for the Windmill Theme. 
5 | 6 | Any theme customisations should override this file to redefine blocks defined in 7 | the various templates. The custom theme should only need to define a main.html 8 | which `{% extends "base.html" %}` and defines various blocks which will replace 9 | the blocks defined in base.html and its included child templates. 10 | #} 11 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/nav-item.html: -------------------------------------------------------------------------------- 1 | 8 | 9 | {%- if nav_item.children %} 10 |
  • 11 |
      12 | {%- set navlevel = navlevel + 1%} 13 | {%- for nav_item in nav_item.children %} 14 | {% include 'nav-item.html' %} 15 | {%- endfor %} 16 | {%- set navlevel = navlevel - 1%} 17 |
    18 |
  • 19 | {% endif %} 20 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/nav-pane.html: -------------------------------------------------------------------------------- 1 | {# Side-pane with table of contents #} 2 | 30 | 31 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/search.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | {% block content %} 4 |

    Search Results

    5 | 6 |
      7 | Searching... 8 |
    9 | 10 | 18 | {% endblock %} 19 | -------------------------------------------------------------------------------- /doc/mkdocs_windmill/topbar.html: -------------------------------------------------------------------------------- 1 | 52 | -------------------------------------------------------------------------------- /pillar/cluster/authentication/ldap_private.sls: -------------------------------------------------------------------------------- 1 | ldap_private: 2 | ldap_admin_pass: toto # this administration password will only be used during ldap server installation, and is protected by pillar/top.sls 3 | -------------------------------------------------------------------------------- /pillar/cluster/authentication/ldap_public.sls: -------------------------------------------------------------------------------- 1 | ldap_public: 2 | ldap_admin_pass_ssha: '{SSHA}vZKT3iyuo+Pu0ybmX+MazbNq8vXPHVu9' 3 | dc: 4 | - sphen 5 | - local 6 | -------------------------------------------------------------------------------- /pillar/cluster/authentication/passwords_public.sls: -------------------------------------------------------------------------------- 1 | passwords_public: 2 | root_password_hash: $6$KgHD7z1VDpWOB7Sy$v7sb3pjwk7ofvMhe1MRP.BkgaSjVMMJy5Q2nn6gG1Ms3g7XxhOTPsuatF1LE9iWnojSb6y8eWGa2/8VRNUuJW. 3 | -------------------------------------------------------------------------------- /pillar/cluster/authentication/ssh_private.sls: -------------------------------------------------------------------------------- 1 | ssh_private: 2 | ssh_master_private_key: | 3 | -----BEGIN RSA PRIVATE KEY----- 4 | MIIEpgIBAAKCAQEA1j++tltSt5aQq0JVFJ0ZcDjhEop/QMpFfGX/REhGpYP6KaZN 5 | Rj5vn94DzXvifZPJibxQi5XACCmkD++SILBq0tOu1ZzqdEECGcOucn2ZBlCHotb8 6 | 5cXGEuIyoMxAy6+UIeLYcHIrQo4Xnz2FY9VpKysDZxND3z3KR0cIW7qdzGaNZcqA 7 | Ra4QRYpVVCprJR8zmrAFvnPzbxXbwoWWI6nzrh6/kS6P1FLypHob+y/NJAONtlC1 8 | 69mqWFbXKyfAmn6MrG8q4LuhYhSvb1l+kREm4nLwRAHb02GkTfuUHnOQm0CAghsE 9 | P9We2JWtWqA1dMfPGjASCahegAuiJJa33C0xEQIDAQABAoIBAQC/Y1JKNGo/9hBl 10 | 7Gt0bqI6zZTBq1bh2c8YwUS3CpmRYcQnUJfXGA8/EGT1OqWo6Eww7jhFeRCCOjCE 11 | y84NxYqnqtGToQkPsMLfoR5EyE3ahjWkEcj4uSOn2si/O7bmPYJmgu42BMcAPU58 12 | wchW1l2MYnA3s839roT0B1BAIQS1gYRpLxnA1KHJ0MqTfJT9DdBPUzpi0jlBuyDv 13 | naVQRfzHS5CW1vdN8SxM4RJ+mvWkTZrwJZ6/7nfEv/s1dm0YmBFmgtZnjl9CiMlz 14 | sJ9h53buovyQaSzd/OrljRgxWIf3C4VTkMIQcEj6dgVXzlMZqFmc8HR7CoPIuEh1 15 | 8NcKHSABAoGBAO9kYWKa3Zl3ZZRg3ljRhA3yjaM992POM/uq+fabsDdKaHL7wB1e 16 | FOWtTBjr9gcVG0yr6iiBNAMBulxfl5XSezuC0p8LYcycT8OID7N2HLpH5ffPi8ev 17 | dautyMGgjBlLGdo4wUW0aGqISStwJwSDLoq+jIfWVjAxRZQAKMCA33ahAoGBAOUc 18 | 0kJ4BvR33bh3Se+DGYOzl9AKMQQ97vJyFevfGhKHE+CMTZJqCBAxQwEIK59NF2nW 19 | +Wmj9cydhXYnWXGW8f37IvYiSnqngmcgVdBXcbEkaEKaN4rWtUpDde2TK5IcgH/z 20 | MpwMAtEybavncuqqHWplP/lkylClaChjAdB6fVRxAoGBALw+9DGE1EMM140BS3cO 21 | ckhBds1qOfJ6wPfi8CLTNJOV6GKawuyVAdvGFXP3gQT6vPfirARj7VF7918SMHI5 22 | DHy0mAlG+wXZ2QNKKAaYF2kIhrcCtsk0mVSCGL7iD6kIHHE0dg7pGb7EIr2MuLHc 23 | simaQ4nBqw7EgBEsut79uDthAoGBAKFRztZQbw05TRuympOTaqaqEK3CT0seVJ3P 24 | KJJBL7l19M/52jDKiczk8rnKR/TNsjuqy0jP0qV1EGDmSftJ/jAH4VEPSgPc6Fu2 25 | j9W3jGzfJq4mqg8PKO0WXPWf12D0wqHU2oC3kFA+Qqx0thx3FWkxy37NsgpgvNsb 26 | A08IwDBxAoGBALj6qyeu/6N1MGpSqbb+6ZBJRUq7LcrfnGhIqM6Fcwmj/Qwg6kdA 27 | dbi5CwjlDyysDTpxtf2depxtLzA1roAFqEz907vi8ujhyvhJ0dIAzkpzEtTPh2+l 28 | Gl4nqCmhsnM94ecbpamr8nnCcFz/zXFLR/S/mqish2rJoX37BnKhhAK+ 29 | -----END RSA PRIVATE KEY----- 30 | -------------------------------------------------------------------------------- /pillar/cluster/authentication/ssh_public.sls: 
-------------------------------------------------------------------------------- 1 | ssh_public: 2 | ssh_master_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDWP762W1K3lpCrQlUUnRlwOOESin9AykV8Zf9ESEalg/oppk1GPm+f3gPNe+J9k8mJvFCLlcAIKaQP75IgsGrS067VnOp0QQIZw65yfZkGUIei1vzlxcYS4jKgzEDLr5Qh4thwcitCjhefPYVj1WkrKwNnE0PfPcpHRwhbup3MZo1lyoBFrhBFilVUKmslHzOasAW+c/NvFdvChZYjqfOuHr+RLo/UUvKkehv7L80kA422ULXr2apYVtcrJ8Cafoysbyrgu6FiFK9vWX6RESbicvBEAdvTYaRN+5Qec5CbQICCGwQ/1Z7Yla1aoDV0x88aMBIJqF6AC6IklrfcLTER root@saltmaster.sphen.local 3 | -------------------------------------------------------------------------------- /pillar/cluster/connect.sls: -------------------------------------------------------------------------------- 1 | connect: 2 | 3 | # The services listed here are the Banquise default ones 4 | # However, you can add a new one if you desire to use Banquise to handle one of your ip 5 | 6 | # state_to_watch: state to watch, the ip will be the one of the host installing this state 7 | # management: 8 | # "auto" let Banquise choose (will use state_to_watch to figure ip and hostname) 9 | # "external" set an external ip, to be used by clients. Banquise will not manage the server but user whish to use client side. 10 | # "link" mix between auto and external. Banquise will choose depending of state_to_watch, but will also configure the service (if compatible) to be connected to an external same service (recursive dns for example) 11 | # "none" do not use this service, ignored by Banquise and if client installed ip will be empty 12 | # Banquise will create two values each time: ipname_ip and ipname_hostname 13 | # for example, for dhcp_server, it will create dhcp_server_ip and dhcp_server_host 14 | 15 | dhcp_server: 16 | state_to_watch: dhcp.server 17 | management: auto 18 | ip_value: 19 | host_value: 20 | 21 | # dns_server: 22 | # state_to_watch: dns.server 23 | # management: external 24 | # ip_value: 10.0.0.2 25 | # host_value: hostnametest 26 | 27 | dns_server: 28 | state_to_watch: dns.server 29 | management: link 30 | ip_value: 8.8.8.8 31 | host_value: 32 | 33 | repository_server: 34 | state_to_watch: repository.server 35 | management: auto 36 | ip_value: 37 | host_value: 38 | 39 | pxe_server: 40 | state_to_watch: pxe.server 41 | management: auto 42 | ip_value: 43 | host_value: 44 | 45 | ntp_server: 46 | state_to_watch: ntp.server 47 | management: auto 48 | ip_value: 49 | host_value: 50 | 51 | jobscheduler_server: 52 | state_to_watch: slurm.server 53 | management: auto 54 | ip_value: 55 | host_value: 56 | 57 | authentication_server: 58 | state_to_watch: ldap.server 59 | management: auto 60 | ip_value: 61 | host_value: 62 | 63 | monitoring_server: 64 | state_to_watch: shinken.server 65 | management: auto 66 | ip_value: 67 | host_value: 68 | -------------------------------------------------------------------------------- /pillar/cluster/core.sls: -------------------------------------------------------------------------------- 1 | core: 2 | 3 | cluster_name: banquise 4 | # master_mode: standalone # only standalone is available for now, using something else will result in crash 5 | # domain_name: sphen.local 6 | salt_master_ip: 10.1.0.77 # ip of the saltmaster server 7 | pillar_path: /srv/pillar 8 | states_path: /srv/salt 9 | types: # These are the types taken into account by Banquise 10 | - computes 11 | - logins 12 | - ios 13 | - switchs 14 | # admin_network: net0 15 | # external_dns: 8.8.8.8 16 | time_zone: America/New_York 17 | language: us # us, fr, etc 18 | keyboard: us 19 | 
-------------------------------------------------------------------------------- /pillar/cluster/equipment/vm_management_1/system.sls: -------------------------------------------------------------------------------- 1 | system: 2 | 3 | operating_system: 4 | os: centos 5 | os_release: 7.6 6 | kernel_parameters: 7 | console: 8 | boot_mode: bios # bios, uefi 9 | partitioning: | # Multiple lines variables, start with a | and indented. This is plain kickstart syntax here. 10 | clearpart --all --initlabel 11 | part /boot --fstype=ext4 --size=2048 12 | part / --fstype=ext4 --size=1 --grow 13 | updates: kickstart, none # when to update, separate by comma 14 | hardware: 15 | sockets: 16 | cores_per_socket: 17 | threads_per_core: 18 | memory: 19 | 20 | bmc: 21 | user: 22 | password: 23 | console: 24 | 25 | 26 | # os: Centos 27 | # os_release: 7.4.1708 28 | # partitioning: | 29 | # clearpart --all --initlabel 30 | # part /boot --fstype=ext4 --size=2048 31 | # part / --fstype=ext4 --size=1 --grow 32 | # boot_mode: bios # bios, uefi 33 | # kernel_parameters: 34 | # bmc_console: 35 | # update: none # when to update 36 | 37 | -------------------------------------------------------------------------------- /pillar/cluster/io/nfs.sls: -------------------------------------------------------------------------------- 1 | # this file contains nfs servers description (and not nodes that host it !!!, these are IOs or Masters nodes) 2 | 3 | nfs: 4 | 5 | management1: # Put here the id of the node hosting these exports. You can define multiple nodes with multiple exports. 6 | /home: # Put here the mount point seen by the client 7 | servermountpoint: /home # Put here the export from the server 8 | network: net0 # Put here the network used (only eth for current release) 9 | # rights: rw # Put here the rights (read = ro, read/write = rw) 10 | mountpool: # Set here the subtypes that should mount this FS 11 | - computes:standard 12 | - logins:standard 13 | - computes:gpu 14 | export_parameters: rw,no_root_squash,sync 15 | mount_parameters: rw,rsize=32768,wsize=32768,intr,nfsvers=3,bg 16 | /opt: 17 | servermountpoint: /opt 18 | network: net0 19 | # rights: ro 20 | mountpool: 21 | - computes:standard 22 | - logins:standard 23 | export_parameters: ro,no_root_squash,sync 24 | mount_parameters: ro,intr,nfsvers=3,bg 25 | 26 | # nfs1: 27 | # /scratch: 28 | # servermountpoint: /scratch_mount 29 | # network: ic0 30 | # rights: rw 31 | # mountpool: 32 | # - 'computes:standard' 33 | # - 'logins:standard' 34 | -------------------------------------------------------------------------------- /pillar/cluster/masters/masters.sls: -------------------------------------------------------------------------------- 1 | # This file contains masters description 2 | masters: 3 | 4 | management1: 5 | network: 6 | net0: 7 | ip: 10.10.0.1 8 | interface: enp3s0 9 | -------------------------------------------------------------------------------- /pillar/cluster/masters/masters_states.sls: -------------------------------------------------------------------------------- 1 | masters_states: 2 | 3 | management1: 4 | # - repository.client 5 | - repository.server 6 | # - dhcp.server 7 | # - dns.server 8 | # - dns.client 9 | # - network.firewall 10 | # - network.nmanager 11 | # - network.static 12 | # - network.masquerading 13 | # - ntp.server 14 | # - slurm.server 15 | # - ldap.server 16 | # - ldap.phpldapadmin 17 | # - ldap.client 18 | # - pxe.server 19 | # - nfs.server 20 | # - ssh.master 21 | # - shinken.server 22 | # - shinken.client 23 | # - nyancat 
24 | -------------------------------------------------------------------------------- /pillar/cluster/masters/masters_system.sls: -------------------------------------------------------------------------------- 1 | masters_system: 2 | 3 | management1: 4 | operating_system: 5 | os: Centos 6 | os_release: 7.4.1708 7 | kernel_parameters: 8 | boot_mode: bios # bios, uefi 9 | partitioning: | # Multiple lines variables, start with a | and indented. This is plain kickstart syntax here. 10 | clearpart --all --initlabel 11 | part /boot --fstype=ext4 --size=2048 12 | part / --fstype=ext4 --size=1 --grow 13 | updates: kickstart, none # when to update, separate by comma 14 | hardware: 15 | sockets: 16 | cores_per_socket: 17 | threads_per_core: 18 | memory: 19 | bmc: 20 | user: 21 | password: 22 | console: 23 | 24 | 25 | # os: Centos 26 | # os_release: 7.4.1708 27 | # partitioning: | 28 | # clearpart --all --initlabel 29 | # part /boot --fstype=ext4 --size=2048 30 | # part / --fstype=ext4 --size=1 --grow 31 | # boot_mode: bios # bios, uefi 32 | # kernel_parameters: 33 | # bmc_console: 34 | # update: none # when to update 35 | 36 | -------------------------------------------------------------------------------- /pillar/cluster/monitoring.sls: -------------------------------------------------------------------------------- 1 | # Should migrate to system files... 2 | monitoring: 3 | 4 | parameters: 5 | 6 | enable_states_probs: true 7 | 8 | default_probs: 9 | 10 | masters: 11 | disk: true 12 | zombie: false 13 | 14 | computes: 15 | disk: true 16 | zombie: true 17 | 18 | logins: 19 | disk: true 20 | zombie: true 21 | 22 | ios: 23 | disk: true 24 | zombie: false 25 | 26 | -------------------------------------------------------------------------------- /pillar/cluster/network.sls: -------------------------------------------------------------------------------- 1 | # This file describe the network parameters of the cluster 2 | # warning, Banquise does not accept other netmask than 255.255.0.0 for the time being 3 | 4 | network: 5 | 6 | global_parameters: 7 | admin_network: net0 8 | external_dns: 9 | - 8.8.8.8 10 | domain_name: sphen.local 11 | 12 | net0: 13 | subnet: 10.1.0.0 14 | netmask: 255.255.0.0 # See warning above 15 | dhcp: 16 | dhcp_unknown_range: 10.1.254.1 10.1.254.254 # nodes whose mac are not knowed will be put into this range 17 | gateway: 10.1.0.1 18 | 19 | net1: 20 | subnet: 10.10.0.0 21 | netmask: 255.255.0.0 # See warning above 22 | dhcp: 23 | dhcp_unknown_range: 10.10.254.1 10.10.254.254 # nodes whose mac are not knowed will be put into this range 24 | gateway: 10.10.0.1 25 | 26 | ic0: 27 | subnet: 10.2.0.0 28 | netmask: 255.255.0.0 29 | 30 | net2: 31 | subnet: 172.16.0.0 32 | netmask: 255.255.0.0 33 | 34 | net3: 35 | subnet: 192.168.1.0 36 | netmask: 255.255.255.0 37 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/computes.sls: -------------------------------------------------------------------------------- 1 | # This file contains the list of cluster nodes, with their parameters. Note that you can add parameters (in this example position), Banquise will not use them as long as you respect YAML structure. 
2 | computes: 3 | 4 | standard: 5 | 6 | compute1: # Name of the compute 7 | bmc: 8 | name: bmccompute1 # Name of the BMC 9 | network: net0 10 | ip: 10.1.103.1 # Ip of the bmc 11 | hwaddr: 08:00:28:18:67:BC # Mac of the Eth NIC of the BMC 12 | position: # Additional informations not used by Banquise 13 | rack: rack1 14 | z: A 15 | network: # Network information, specify here configuration for each network and associeted interface 16 | net0: 17 | ip: 10.1.3.1 18 | hwaddr: 08:00:27:84:F5:FA 19 | interface: auto # On net0, you can use auto 20 | ic0: 21 | ip: 10.2.3.1 22 | interface: enp0s8 23 | 24 | compute2: 25 | bmc: 26 | name: bmccompute2 27 | network: net0 28 | ip: 10.1.103.2 29 | hwaddr: 08:00:28:18:67:EE 30 | network: 31 | net0: 32 | ip: 10.1.3.2 33 | hwaddr: 08:00:27:9E:2A:97 34 | interface: auto 35 | ic0: 36 | ip: 10.2.3.2 37 | interface: enp0s8 38 | 39 | 40 | 41 | gpu: # another group in type computes, here gpu 42 | 43 | compute3: 44 | network: 45 | net0: 46 | ip: 10.1.3.3 47 | hwaddr: 08:00:27:8A:EA:57 48 | interface: auto 49 | ic0: 50 | ip: 10.2.3.3 51 | interface: enp0s8 52 | 53 | smp: 54 | 55 | compute4: 56 | bmc: 57 | name: bmccompute4 58 | network: net0 59 | ip: 10.1.103.4 60 | hwaddr: 08:00:28:18:67:EA 61 | network: 62 | net0: 63 | ip: 10.1.3.4 64 | hwaddr: 08:00:27:8A:EA:58 65 | interface: auto 66 | ic0: 67 | ip: 10.2.3.4 68 | interface: enp0s8 69 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/computes_states.sls: -------------------------------------------------------------------------------- 1 | computes_states: 2 | 3 | standard: 4 | - repository.client 5 | - dns.client 6 | - ntp.client 7 | - network.static 8 | - network.firewall 9 | - network.nmanager 10 | - slurm.client 11 | - nfs.client 12 | - ldap.client 13 | - ssh.client 14 | - shinken.client 15 | 16 | gpu: 17 | - repository.client 18 | - dns.client 19 | - ntp.client 20 | # - network.static 21 | - network.firewall 22 | - network.nmanager 23 | - shinken.client 24 | - ldap.client 25 | # - slurm.client 26 | - nfs.client 27 | - ssh.client 28 | 29 | smp: 30 | - repository.client 31 | - dns.client 32 | - ntp.client 33 | - network.static 34 | - network.firewall 35 | - network.nmanager 36 | - slurm.client 37 | - ldap.client 38 | - ssh.client 39 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/computes_system.sls: -------------------------------------------------------------------------------- 1 | computes_system: 2 | 3 | standard: 4 | operating_system: 5 | os: Ubuntu #Centos 6 | os_release: 16.04 #7.4.1708 7 | kernel_parameters: 8 | boot_mode: bios # bios, uefi 9 | partitioning: | # Multiple lines variables, start with a | and indented. This is plain kickstart syntax here. 
10 | clearpart --all --initlabel 11 | part /boot --fstype=ext4 --size=2048 12 | part / --fstype=ext4 --size=1 --grow 13 | updates: kickstart, none # when to update, separate by comma 14 | hardware: 15 | sockets: 1 16 | cores_per_socket: 1 17 | threads_per_core: 1 18 | memory: 1024 19 | bmc: 20 | user: 21 | password: 22 | console: 23 | 24 | # os: Centos 25 | # os_release: 7.4.1708 26 | # partitioning: | 27 | # clearpart --all --initlabel 28 | # part /boot --fstype=ext4 --size=2048 29 | # part / --fstype=ext4 --size=1 --grow 30 | # boot_mode: bios # bios, uefi 31 | # kernel_parameters: 32 | # bmc_console: 33 | # update: none # when to update 34 | 35 | gpu: 36 | operating_system: 37 | os: Fedora 38 | os_release: 27 39 | kernel_parameters: 40 | boot_mode: bios 41 | partitioning: | 42 | clearpart --all --initlabel 43 | part /boot --fstype=ext4 --size=2048 44 | part / --fstype=ext4 --size=1 --grow 45 | updates: kickstart, none 46 | hardware: 47 | sockets: 1 48 | cores_per_socket: 1 49 | threads_per_core: 1 50 | memory: 1024 51 | bmc: 52 | user: 53 | password: 54 | console: 55 | 56 | # os: Fedora 57 | # os_release: 27 58 | # partitioning: | 59 | # clearpart --all --initlabel 60 | # part /boot --fstype=ext4 --size=2048 61 | # part / --fstype=ext4 --size=1 --grow 62 | # boot_mode: bios # bios, uefi 63 | # kernel_parameters: 64 | # bmc_console: 65 | # update: none # when to update 66 | 67 | smp: 68 | operating_system: 69 | os: Fedora 70 | os_release: 27 71 | kernel_parameters: 72 | boot_mode: bios 73 | partitioning: | 74 | clearpart --all --initlabel 75 | part /boot --fstype=ext4 --size=2048 76 | part / --fstype=ext4 --size=1 --grow 77 | updates: kickstart, none 78 | hardware: 79 | sockets: 1 80 | cores_per_socket: 1 81 | threads_per_core: 1 82 | memory: 1024 83 | bmc: 84 | user: 85 | password: 86 | console: 87 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/ios.sls: -------------------------------------------------------------------------------- 1 | ios: 2 | 3 | nfs: 4 | 5 | nfs1: 6 | network: 7 | net0: 8 | ip: 10.1.1.1 9 | hwaddr: 08:00:27:26:EC:68 10 | interface: auto 11 | ic0: 12 | ip: 10.2.1.1 13 | interface: enp0s8 14 | 15 | lustre: 16 | 17 | lustre1: 18 | network: 19 | net0: 20 | ip: 10.1.1.2 21 | hwaddr: 08:00:27:0D:37:D7 22 | interface: auto 23 | ic0: 24 | ip: 10.2.1.2 25 | interface: enp0s8 26 | 27 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/ios_states.sls: -------------------------------------------------------------------------------- 1 | ios_states: 2 | 3 | nfs: 4 | - repository.client 5 | - dns.client 6 | - ntp.client 7 | - network.static 8 | - network.firewall 9 | - network.nmanager 10 | - nfs.server 11 | - ldap.client 12 | - shinken.client 13 | - ssh.client 14 | 15 | lustre: 16 | - repository.client 17 | - dns.client 18 | - ntp.client 19 | - network.static 20 | - network.firewall 21 | - network.nmanager 22 | - ldap.client 23 | - ssh.client 24 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/ios_system.sls: -------------------------------------------------------------------------------- 1 | ios_system: 2 | 3 | nfs: 4 | operating_system: 5 | os: Centos 6 | os_release: 7.4.1708 7 | kernel_parameters: 8 | boot_mode: bios 9 | partitioning: | 10 | clearpart --all --initlabel 11 | part /boot --fstype=ext4 --size=2048 12 | part / --fstype=ext4 --size=1 --grow 13 | updates: kickstart, none 14 | hardware: 15 | sockets: 1 
16 | cores_per_socket: 1 17 | threads_per_core: 1 18 | memory: 1024 19 | bmc: 20 | user: 21 | password: 22 | console: 23 | 24 | lustre: 25 | operating_system: 26 | os: Centos 27 | os_release: 7.4.1708 28 | kernel_parameters: 29 | boot_mode: bios 30 | partitioning: | 31 | clearpart --all --initlabel 32 | part /boot --fstype=ext4 --size=2048 33 | part / --fstype=ext4 --size=1 --grow 34 | updates: kickstart, none 35 | hardware: 36 | sockets: 1 37 | cores_per_socket: 1 38 | threads_per_core: 1 39 | memory: 1024 40 | bmc: 41 | user: 42 | password: 43 | console: 44 | 45 | 46 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/logins.sls: -------------------------------------------------------------------------------- 1 | logins: 2 | 3 | standard: 4 | 5 | login1: 6 | network: 7 | net0: 8 | ip: 10.1.2.1 9 | hwaddr: 08:00:27:4C:86:3F 10 | interface: auto 11 | ic0: 12 | ip: 10.2.2.1 13 | interface: enp0s8 14 | net2: 15 | ip: 192.168.1.77 16 | interface: na 17 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/logins_states.sls: -------------------------------------------------------------------------------- 1 | logins_states: 2 | 3 | standard: 4 | - repository.client 5 | - dns.client 6 | - ntp.client 7 | - network.static 8 | - network.firewall 9 | - network.nmanager 10 | - slurm.login 11 | - nfs.client 12 | - ldap.client 13 | - ssh.client 14 | - shinken.client 15 | - nyancat 16 | 17 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/logins_system.sls: -------------------------------------------------------------------------------- 1 | logins_system: 2 | 3 | standard: 4 | operating_system: 5 | os: Centos 6 | os_release: 7.4.1708 7 | kernel_parameters: 8 | boot_mode: bios 9 | partitioning: | 10 | clearpart --all --initlabel 11 | part /boot --fstype=ext4 --size=2048 12 | part / --fstype=ext4 --size=1 --grow 13 | updates: kickstart, none 14 | hardware: 15 | sockets: 1 16 | cores_per_socket: 1 17 | threads_per_core: 1 18 | memory: 1024 19 | bmc: 20 | user: 21 | password: 22 | console: 23 | 24 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/managements.sls: -------------------------------------------------------------------------------- 1 | # This file contains masters description 2 | managements: 3 | 4 | vm_management_1: 5 | 6 | management1: 7 | network: 8 | net0: 9 | ip: 10.10.0.1 10 | interface: enp3s0 11 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/switchs.sls: -------------------------------------------------------------------------------- 1 | switchs: 2 | 3 | cisco: 4 | 5 | switch1: 6 | network: 7 | net0: 8 | ip: 10.1.200.1 9 | hwaddr: 08:00:27:77:77:77 10 | # no interface because Banquise will not be in charge of configuring network on this equipment 11 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/switchs_states.sls: -------------------------------------------------------------------------------- 1 | switchs_states: 2 | 3 | cisco: 4 | -------------------------------------------------------------------------------- /pillar/cluster/nodes/switchs_system.sls: -------------------------------------------------------------------------------- 1 | switchs_system: 2 | 3 | cisco: 4 | 5 | -------------------------------------------------------------------------------- 
/pillar/cluster/settings/general_settings.sls: -------------------------------------------------------------------------------- 1 | general_settings: 2 | 3 | ########################## 4 | # General settings 5 | ### 6 | 7 | cluster_name: banquise 8 | domain_name: sphen.local 9 | time_zone: America/New_York 10 | # language: us # us, fr, etc 11 | # keyboard: us 12 | 13 | ########################## 14 | # Salt settings 15 | ### 16 | 17 | salt_master_ip: 10.1.0.77 # ip of the saltmaster server 18 | pillar_path: /srv/pillar 19 | states_path: /srv/salt 20 | 21 | nodes_groups: 22 | - managements 23 | # - computes 24 | -------------------------------------------------------------------------------- /pillar/cluster/settings/network.sls: -------------------------------------------------------------------------------- 1 | # This file describe the network parameters of the cluster 2 | 3 | networks: 4 | 5 | net0: 6 | subnet: 10.10.0.0 7 | netmask: 255.255.0.0 # See warning above 8 | dhcp: 9 | 10 | dhcp_unknown_range: 10.10.254.1 10.10.254.254 # nodes whose mac are not knowed will be put into this range 11 | gateway: 10.10.0.1 12 | services: 13 | pxe_server_ip: 10.10.0.1 14 | repositories_server_ip: 10.10.0.1 15 | -------------------------------------------------------------------------------- /pillar/engine/engine.sls: -------------------------------------------------------------------------------- 1 | # THIS FILE SHOULD BE REMOVED SOON 2 | 3 | {% import_yaml 'cluster/network.sls' as net %} 4 | {% import_yaml 'cluster/masters/masters.sls' as mas %} 5 | {% import_yaml 'cluster/core.sls' as cor %} 6 | 7 | helloworld: "Hey !!" 8 | 9 | engine: 10 | 11 | network: 12 | domaine_name: {{ net.network.net0.domaine_name }} 13 | subnet: {{ net.network.net0.subnet }} 14 | netmask: {{ net.network.net0.netmask }} 15 | dhcp_unknown_range: {{ net.network.net0.dhcp_unknown_range }} 16 | {% set list1 = net.network.net0.subnet.split('.') %} 17 | {% if net.network.net0.netmask == '255.255.255.0' %} 18 | reverse: 1 19 | shortnetmask: 24 20 | matchpatern: "{{list1[0]}}.{{list1[1]}}.{{list1[2]}}" 21 | broadcast_address: {{list1[0]}}.{{list1[1]}}.{{list1[2]}}.255 22 | {% elif net.network.net0.netmask == '255.255.0.0' %} 23 | reverse: 2 24 | shortnetmask: 16 25 | matchpatern: "{{list1[0]}}.{{list1[1]}}" 26 | broadcast_address: {{list1[0]}}.{{list1[1]}}.225.255 27 | {% elif net.network.net0.netmask == '255.0.0.0' %} 28 | reverse: 3 29 | shortnetmask: 8 30 | matchpatern: "{{list1[0]}}" 31 | broadcast_address: {{list1[0]}}.255.225.255 32 | {% else %} 33 | reverse: unknown (banquise engine) 34 | shortnetmask: unknown (banquise engine) 35 | broadcast_address: unknown (banquise engine) 36 | {% endif %} 37 | 38 | master: 39 | # render master master ip and id 40 | {% set count = 1 %}{% if cor.core.master_mode == "standalone" %} 41 | {% for ma, args in mas.masters.items() %} 42 | {% if count == 1%} 43 | masterip: {{args.network.net0.ip}} 44 | masterid: {{ma}} 45 | {% endif %} 46 | {% endfor %} 47 | {% set count = 2 %}{% endif %} 48 | 49 | servers: 50 | {% set count = 1 %}{% if cor.core.master_mode == "standalone" %} 51 | {% for ma, args in mas.masters.items() %} 52 | {% if count == 1%} 53 | dhcp_server_ip: {{args.network.net0.ip}} 54 | dns_server_ip: {{args.network.net0.ip}} 55 | pxe_server_ip: {{args.network.net0.ip}} 56 | repository_server_ip: {{args.network.net0.ip}} 57 | ntp_server_ip: {{args.network.net0.ip}} 58 | ldap_server_ip: {{args.network.net0.ip}} # change this to authentication server 59 | slurm_server_ip: 
{{args.network.net0.ip}} # change this to jobscheduler server 60 | shinken_server_ip: {{args.network.net0.ip}} # change this to monitoring server 61 | {% set count = 2 %} 62 | {% endif %} 63 | {% endfor %} 64 | {% endif %} 65 | -------------------------------------------------------------------------------- /pillar/engine/engine_connect.sls: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/network.sls' as net %} 2 | {% import_yaml 'cluster/masters/masters.sls' as mas %} 3 | {% import_yaml 'cluster/masters/masters_states.sls' as mas_states %} 4 | {% import_yaml 'cluster/core.sls' as cor %} 5 | {% import_yaml 'cluster/connect.sls' as con %} 6 | 7 | engine_connect: 8 | 9 | {% for server, server_args in con.connect.items() %} 10 | 11 | # External ip, use provided values for ip(s) and host(s) 12 | {% if server_args.management == "external" %} 13 | {{server~"_ip"}}: {{server_args.ip_value}} 14 | # {% for ip in server_args.ip_value %} 15 | # - {{ip}} 16 | # {% endfor %} 17 | {{server~"_host"}}: {{server_args.host_value}} 18 | # {% for host in server_args.host_value %} 19 | # - {{host}} 20 | # {% endfor %} 21 | {{server~"_link"}}: false 22 | {% endif %} 23 | 24 | # Auto ip, get who install the to check state 25 | {% if server_args.management == "auto" or server_args.management == "link" %} 26 | # {{server~"_ip"}}: 27 | {% for masst, masst_args in mas_states.masters_states.items() %} 28 | {% for states in masst_args %} 29 | {% if states == server_args.state_to_watch %} 30 | {% for masters, masters_args in mas.masters.items() %} 31 | {% if masters == masst %} 32 | {{server~"_ip"}}: {{masters_args.network.net0.ip}} 33 | # - {{masters_args.network.net0.ip}} 34 | {% endif %} 35 | {% endfor %} 36 | {% endif %} 37 | {% endfor %} 38 | {% endfor %} 39 | # {{server~"_host"}}: 40 | {% for masst, masst_args in mas_states.masters_states.items() %} 41 | {% for states in masst_args %} 42 | {% if states == server_args.state_to_watch %} 43 | # - {{masst}} 44 | {{server~"_host"}}: {{masst}} 45 | {% endif %} 46 | {% endfor %} 47 | {% endfor %} 48 | {% if server_args.management == "link" %} 49 | {{server~"_link"}}: true 50 | {{server~"_link_ip"}}: {{server_args.ip_value}} 51 | {{server~"_link_host"}}: {{server_args.host_value}} 52 | {% else %} 53 | {{server~"_link"}}: false 54 | {% endif %} 55 | 56 | {% endif %} 57 | 58 | {% endfor %} 59 | 60 | 61 | 62 | 63 | 64 | -------------------------------------------------------------------------------- /pillar/engine/engine_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Experimental monitoring pillar 2 | # Aim is to allow "packaged" states, so that states bring with them their monitoring instructions 3 | # This pillar will try to read all monitoring files related to states used, and provide them to the monitoring tool (default shinken) 4 | # This pillar break the Salt standard organisation, be carefull. 
5 | 6 | {% import_yaml 'cluster/core.sls' as cor %} 7 | {% import_yaml 'cluster/masters/masters_states.sls' as mas %} 8 | 9 | 10 | engine_monitoring: 11 | 12 | masters: 13 | masters: 14 | {% for master, sta in mas.masters_states.items() %} 15 | {% for st in sta %} 16 | {{st}}: 17 | {% include cor.core.states_path~"/"~(st|replace(".","/"))~"_monitoring.sls" ignore missing %} 18 | {% endfor %} 19 | {% endfor %} 20 | 21 | {% for types in cor.core.types %} 22 | {% import_yaml 'cluster/nodes/'~types~'_states.sls' as typestates %} 23 | {{types}}: 24 | {% for ttypestates, stb in typestates.items() %} 25 | {% for group, sta in stb.items() %} 26 | {{group}}: 27 | {% if sta is not none %} 28 | {% for st in sta %} 29 | {{st}}: 30 | {% include cor.core.states_path~"/"~(st|replace(".","/"))~"_monitoring.sls" ignore missing %} 31 | {% endfor %} 32 | {% endif %} 33 | {% endfor %} 34 | {% endfor %} 35 | {% endfor %} 36 | -------------------------------------------------------------------------------- /pillar/engine/engine_network.sls: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/network.sls' as net %} 2 | {% import_yaml 'cluster/masters/masters.sls' as mas %} 3 | {% import_yaml 'cluster/core.sls' as cor %} 4 | 5 | engine_network: 6 | {% for network, network_args in net.network.items() %} 7 | {% if network != "global_parameters" %} 8 | 9 | {{network}}: 10 | subnet: {{ network_args.subnet }} 11 | netmask: {{ network_args.netmask }} 12 | {% if network_args.dhcp is defined and network_args.dhcp is not none %} 13 | dhcp_on: true 14 | dhcp_unknown_range: {{ network_args.dhcp.dhcp_unknown_range }} 15 | gateway: {{ network_args.dhcp.gateway }} 16 | {% else %} 17 | dhcp_on: false 18 | {% endif %} 19 | {% set list1 = network_args.subnet.split('.') %} # very basic intelligence, need to enhance this 20 | {% if network_args.netmask == '255.255.255.0' %} 21 | reverse: 1 22 | shortnetmask: 24 23 | matchpatern: "{{list1[0]}}.{{list1[1]}}.{{list1[2]}}" 24 | broadcast_address: {{list1[0]}}.{{list1[1]}}.{{list1[2]}}.255 25 | {% elif network_args.netmask == '255.255.0.0' %} 26 | reverse: 2 27 | shortnetmask: 16 28 | matchpatern: "{{list1[0]}}.{{list1[1]}}" 29 | broadcast_address: {{list1[0]}}.{{list1[1]}}.225.255 30 | {% elif network_args.netmask == '255.0.0.0' %} 31 | reverse: 3 32 | shortnetmask: 8 33 | matchpatern: "{{list1[0]}}" 34 | broadcast_address: {{list1[0]}}.255.225.255 35 | {% endif %} 36 | 37 | {% endif %} 38 | {% endfor %} 39 | -------------------------------------------------------------------------------- /pillar/engine/engine_reverse.sls: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/core.sls' as cor %} 2 | {% import_yaml 'cluster/masters/masters.sls' as mas %} 3 | {% import_yaml 'cluster/network.sls' as net %} 4 | 5 | engine_reverse: 6 | 7 | {% for master in mas.masters %} 8 | {{master}}.{{net.network.global_parameters.domain_name}}: 9 | subtype: {{master}} 10 | type: masters 11 | {% endfor %} 12 | 13 | {% for types in cor.core.types %} 14 | {% import_yaml 'cluster/nodes/'~types~'.sls' as type %} 15 | {% for ttype, argy in type.items() %} 16 | {% for group, args in argy.items() %} 17 | {% for node in args %} 18 | {{node}}.{{net.network.global_parameters.domain_name}}: 19 | subtype: {{group}} 20 | type: {{types}} 21 | {% endfor %} 22 | {% endfor %} 23 | {% endfor %} 24 | 25 | 26 | {% endfor %} 27 | -------------------------------------------------------------------------------- 
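The engine_reverse pillar above derives, for every host declared in the cluster pillars, the node type and group it belongs to, keyed by its fully qualified name. As an illustration only (this is not a repository file; it assumes core:types covers the node types declared above and that the cluster domain resolves to sphen.local, as in general_settings.sls), the compiled pillar for the hosts shown earlier would render roughly to:

    # Rendered engine_reverse pillar, illustrative sketch only
    engine_reverse:
      nfs1.sphen.local:
        subtype: nfs
        type: ios
      lustre1.sphen.local:
        subtype: lustre
        type: ios
      login1.sphen.local:
        subtype: standard
        type: logins

Masters get the same treatment, with type set to masters and their own name used as the subtype.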
/pillar/engine/equipment.sls: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/settings/general_settings.sls' as general_settings %} 2 | 3 | equipment_list: 4 | # Allowed to all hosts 5 | {% set buffer_list = [] %} 6 | 7 | {% for nodes_group in general_settings.general_settings.nodes_groups %} 8 | {% import_yaml ("cluster/nodes/"+nodes_group+".sls") as current_group %} 9 | {% for group, group_args in current_group.items() %} 10 | {% for equipment in group_args %} 11 | {% if equipment not in buffer_list %} 12 | {% do buffer_list.append(equipment|string) %} 13 | {% endif %} 14 | {% endfor %} 15 | {% endfor %} 16 | {% endfor %} 17 | 18 | {% for equipment in buffer_list %} 19 | - {{equipment}} 20 | {% endfor %} 21 | -------------------------------------------------------------------------------- /pillar/engine/system.sls: -------------------------------------------------------------------------------- 1 | {% import 'engine/yaml_macros.sls' as yaml_macros %} 2 | 3 | {% import_yaml 'engine/equipment.sls' as equipment %} 4 | 5 | all_system: 6 | 7 | {% for current_equipment in equipment.equipment_list %} 8 | 9 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 10 | 11 | {{yaml_macros.write_yaml(current_system.system)}} 12 | 13 | {% endfor %} 14 | 15 | 16 | -------------------------------------------------------------------------------- /pillar/engine/system.sls.bkp: -------------------------------------------------------------------------------- 1 | {% import_yaml 'engine/equipment.sls' as equipment %} 2 | 3 | all_system: 4 | 5 | {% for current_equipment in equipment.equipment_list %} 6 | 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | {% for lvl1, lvl1_args in current_system.items() %} 9 | {% if lvl1_args is defined and not none %} 10 | {{lvl1}}: 11 | {% for lvl2, lvl2_args in lvl1_args.items() %} 12 | {% if lvl2_args is defined and not none %} 13 | {{lvl2}}: 14 | {% for lvl3, lvl3_args in lvl2_args.items() %} 15 | {% if lvl3_args is defined and not none %} 16 | {% if lvl3_args is mapping %} 17 | {{lvl3}}: mapp 18 | {% else %} 19 | {% if lvl3_args is string %} 20 | {{lvl3}}: {{lvl3_args}} 21 | {% else %} 22 | {{lvl3}}: notiter 23 | 24 | {% endif %} 25 | 26 | {% endif %} 27 | 28 | {% endif %} 29 | {% endfor %} 30 | {% endif %} 31 | {% endfor %} 32 | {% endif %} 33 | {% endfor %} 34 | 35 | {% endfor %} 36 | 37 | -------------------------------------------------------------------------------- /pillar/engine/system.sls.bkp3: -------------------------------------------------------------------------------- 1 | {% import_yaml 'engine/equipment.sls' as equipment %} 2 | 3 | all_system: 4 | 5 | {% for current_equipment in equipment.equipment_list %} 6 | 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | 9 | {% for lvl1, lvl1_args in current_system.system.items() %} 10 | {% if lvl1_args is defined and not none %} 11 | {% if lvl1_args is not mapping %} 12 | {{lvl1}}: {{lvl1_args}} 13 | {% else %} 14 | {{lvl1}}: 15 | {% for lvl2, lvl2_args in lvl1_args.items() %} 16 | {% if lvl2_args is defined and not none %} 17 | {% if (lvl2|string) != ("partitioning"|string) %} 18 | {% if lvl2_args is not mapping %} 19 | {% if lvl2_args is not iterable %} 20 | {{lvl2}}: {{lvl2_args}} 21 | {% else %} 22 | {% if lvl2_args is string %} 23 | {{lvl2}}: {{lvl2_args}} 24 | {% endif %} 25 | {% endif %} 26 | {% else %} 27 | {{lvl2}}: 
28 | {% endif %} 29 | {% endif %} 30 | {% endif %} 31 | {% endfor %} 32 | {% endif %} 33 | {% endif %} 34 | {% endfor %} 35 | 36 | 37 | 38 | {% endfor %} 39 | 40 | 41 | -------------------------------------------------------------------------------- /pillar/engine/system.sls.bkp4: -------------------------------------------------------------------------------- 1 | {% import_yaml 'engine/equipment.sls' as equipment %} 2 | 3 | all_system: 4 | 5 | {% for current_equipment in equipment.equipment_list %} 6 | 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | 9 | {% for lvl1, lvl1_args in current_system.system.items() %} 10 | {% if lvl1_args is defined and not none %} 11 | {% if lvl1_args is not mapping %} 12 | {{lvl1}}: {{lvl1_args}} 13 | {% else %} 14 | {{lvl1}}: 15 | {% for lvl2, lvl2_args in lvl1_args.items() %} 16 | {% if lvl2_args is defined and not none %} 17 | {% if (lvl2|string) != ("partitioningu"|string) %} 18 | {% if lvl2_args is not mapping %} 19 | {% if lvl2_args is not iterable %} 20 | {{lvl2}}: {{lvl2_args}} 21 | {% else %} 22 | {% if lvl2_args is string %} 23 | {% if '\n' in lvl2_args %} {# This is a multi line string #} 24 | {{lvl2}}: | 25 | {% for current_line in lvl2_args.split('\n') %} 26 | {{current_line}} 27 | {% endfor %} 28 | {% else %} 29 | {{lvl2}}: {{lvl2_args}} 30 | {% endif %} 31 | {% endif %} 32 | {% endif %} 33 | {% else %} 34 | {{lvl2}}: 35 | {% endif %} 36 | {% endif %} 37 | {% endif %} 38 | {% endfor %} 39 | {% endif %} 40 | {% endif %} 41 | {% endfor %} 42 | 43 | 44 | 45 | {% endfor %} 46 | 47 | 48 | -------------------------------------------------------------------------------- /pillar/engine/system.sls.bkp5: -------------------------------------------------------------------------------- 1 | {% import_yaml 'engine/equipment.sls' as equipment %} 2 | 3 | all_system: 4 | 5 | {% for current_equipment in equipment.equipment_list %} 6 | 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | 9 | {% for lvl1, lvl1_args in current_system.system.items() %} 10 | {% if lvl1_args is defined and not none %} 11 | 12 | 13 | 14 | {% if lvl1_args is not mapping %} 15 | {{lvl1}}: {{lvl1_args}} 16 | 17 | {% else %} 18 | {{lvl1}}: 19 | 20 | {% for lvl2, lvl2_args in lvl1_args.items() %} 21 | {% if lvl2_args is defined and not none %} 22 | {% if (lvl2|string) != ("partitioningu"|string) %} 23 | {% if lvl2_args is not mapping %} {# This is not a dict, so we reached last level #} 24 | {% if lvl2_args is not iterable %} {# This is not a list or a string #} 25 | {{lvl2}}: {{lvl2_args}} 26 | {% else %} {# This is a list or a string #} 27 | {% if lvl2_args is string %} {# This is a string #} 28 | {% if '\n' in lvl2_args %} {# This is a multi line string #} 29 | {{lvl2}}: "{% for current_line in lvl2_args.split('\n') %}{{current_line}}\n{% endfor %}" 30 | {% else %} {# This is a single line string #} 31 | {{lvl2}}: {{lvl2_args}} 32 | {% endif %} 33 | {% else %} {# This is a list #} 34 | {{lvl2}}: 35 | {% for current_item in lvl2_args %} 36 | - {{current_item}} 37 | {% endfor %} 38 | {% endif %} 39 | {% endif %} 40 | {% else %} {# This is a dict, entering next level #} 41 | {{lvl2}}: 42 | {% endif %} 43 | {% endif %} 44 | {% endif %} 45 | {% endfor %} 46 | 47 | {% endif %} 48 | {% endif %} 49 | {% endfor %} 50 | 51 | 52 | 53 | {% endfor %} 54 | 55 | 56 | -------------------------------------------------------------------------------- /pillar/engine/system.sls.bkp6: 
-------------------------------------------------------------------------------- 1 | {% import_yaml 'engine/equipment.sls' as equipment %} 2 | 3 | all_system: 4 | 5 | {% for current_equipment in equipment.equipment_list %} 6 | 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | 9 | {% for lvl1, lvl1_args in current_system.system.items() %} 10 | {# LVL 1 #} 11 | {% if lvl1_args is defined and not none %} 12 | {% if (lvl1|string) != ("partitioningu"|string) %} 13 | {% if lvl1_args is not mapping %} {# This is not a dict, so we reached last level #} 14 | {% if lvl1_args is not iterable %} {# This is not a list or a string #} 15 | {{lvl1}}: {{lvl1_args}} 16 | {% else %} {# This is a list or a string #} 17 | {% if lvl1_args is string %} {# This is a string #} 18 | {% if '\n' in lvl1_args %} {# This is a multi line string #} 19 | {{lvl1}}: "{% for current_line in lvl1_args.split('\n') %}{{current_line}}\n{% endfor %}" 20 | {% else %} {# This is a single line string #} 21 | {{lvl1}}: {{lvl1_args}} 22 | {% endif %} 23 | {% else %} {# This is a list #} 24 | {{lvl1}}: 25 | {% for current_item in lvl1_args %} 26 | - {{current_item}} 27 | {% endfor %} 28 | {% endif %} 29 | {% endif %} 30 | {% else %} {# This is a dict, entering next level #} 31 | {{lvl1}}: 32 | 33 | {# LVL 2 #} 34 | 35 | {% for lvl2, lvl2_args in lvl1_args.items() %} 36 | {% if lvl2_args is defined and not none %} 37 | {% if (lvl2|string) != ("partitioningu"|string) %} 38 | {% if lvl2_args is not mapping %} {# This is not a dict, so we reached last level #} 39 | {% if lvl2_args is not iterable %} {# This is not a list or a string #} 40 | {{lvl2}}: {{lvl2_args}} 41 | {% else %} {# This is a list or a string #} 42 | {% if lvl2_args is string %} {# This is a string #} 43 | {% if '\n' in lvl2_args %} {# This is a multi line string #} 44 | {{lvl2}}: "{% for current_line in lvl2_args.split('\n') %}{{current_line}}\n{% endfor %}" 45 | {% else %} {# This is a single line string #} 46 | {{lvl2}}: {{lvl2_args}} 47 | {% endif %} 48 | {% else %} {# This is a list #} 49 | {{lvl2}}: 50 | {% for current_item in lvl2_args %} 51 | - {{current_item}} 52 | {% endfor %} 53 | {% endif %} 54 | {% endif %} 55 | {% else %} {# This is a dict, entering next level #} 56 | {{lvl2}}: 57 | {% endif %} 58 | {% endif %} 59 | {% endif %} 60 | {% endfor %} 61 | 62 | {% endif %} 63 | 64 | {% endif %} 65 | {% endif %} 66 | {% endfor %} 67 | 68 | 69 | 70 | {% endfor %} 71 | 72 | 73 | -------------------------------------------------------------------------------- /pillar/engine/system.sls.ckp2: -------------------------------------------------------------------------------- 1 | {% import_yaml 'engine/equipment.sls' as equipment %} 2 | 3 | all_system: 4 | 5 | {% for current_equipment in equipment.equipment_list %} 6 | 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | 9 | {% for lvl1, lvl1_args in current_system.items() %} 10 | {% if lvl1_args is defined and not none %} 11 | {% if lvl1_args is not mapping %} 12 | {{lvl1}}: {{lvl1_args}} 13 | {% else %} 14 | {{lvl1}}: 15 | {% for lvl2, lvl2_args in lvl1_args.items() %} 16 | {% if lvl2_args is defined and not none %} 17 | {% if lvl2_args is not mapping %} 18 | {{lvl2}}: {{lvl2_args}} 19 | {% else %} 20 | {{lvl2}}: 21 | {% for lvl3, lvl3_args in lvl2_args.items() %} 22 | {% if lvl3_args is defined and not none %} 23 | {% if lvl3_args is not mapping %} 24 | {{lvl3}}: {{lvl3_args}} 25 | {% else %} 26 | {{lvl3}}: 27 | {% endif %} 28 | 
{% endif %} 29 | {% endfor %} 30 | {% endif %} 31 | {% endif %} 32 | {% endfor %} 33 | {% endif %} 34 | {% endif %} 35 | {% endfor %} 36 | 37 | 38 | 39 | {% endfor %} 40 | 41 | 42 | -------------------------------------------------------------------------------- /pillar/engine/toto.sls.bkp: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/settings/general_settings.sls' as general_settings %} 2 | 3 | test: 4 | # Allowed to all hosts 5 | {% set equipments = [] %} 6 | 7 | {% for nodes_group in general_settings.general_settings.nodes_groups %} 8 | {% import_yaml "cluster/nodes/{{nodes_group}}.sls" as current_group %} 9 | {% for group, group_args in current_group.items() %} 10 | {% for equipment in group_args %} 11 | - {{equipment}} 12 | {% endfor %} 13 | {% endfor %} 14 | {% endfor %} 15 | 16 | -------------------------------------------------------------------------------- /pillar/engine/toto.sls.bkp2: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/settings/general_settings.sls' as general_settings %} 2 | 3 | test: 4 | # Allowed to all hosts 5 | {% set equipments_list = [] %} 6 | 7 | {% for nodes_group in general_settings.general_settings.nodes_groups %} 8 | {% import_yaml ("cluster/nodes/"+nodes_group+".sls") as current_group %} 9 | {% for group, group_args in current_group.items() %} 10 | {% for equipment in group_args %} 11 | {% if equipment not in equipments_list %} 12 | {{ equipments_list.append(equipment|string) }} 13 | {% endif %} 14 | {% endfor %} 15 | {% endfor %} 16 | {% endfor %} 17 | 18 | {% for equipment in equipments_list %} 19 | - {{equipment}} 20 | {% endfor %} 21 | -------------------------------------------------------------------------------- /pillar/engine/yaml_macros.sls: -------------------------------------------------------------------------------- 1 | {% macro write_yaml(my_dict) %} 2 | 3 | {% for lvl1, lvl1_args in my_dict.items() %} 4 | {# LVL 1 #} 5 | {% if lvl1_args is defined and not none %} 6 | {% if lvl1_args is not mapping %} {# This is not a dict, so we reached last level #} 7 | {% if lvl1_args is not iterable %} {# This is not a list or a string #} 8 | {{lvl1}}: {{lvl1_args}} 9 | {% else %} {# This is a list or a string #} 10 | {% if lvl1_args is string %} {# This is a string #} 11 | {% if '\n' in lvl1_args %} {# This is a multi line string #} 12 | {{lvl1}}: "{% for current_line in lvl1_args.split('\n') %}{{current_line}}\n{% endfor %}" 13 | {% else %} {# This is a single line string #} 14 | {{lvl1}}: {{lvl1_args}} 15 | {% endif %} 16 | {% else %} {# This is a list #} 17 | {{lvl1}}: 18 | {% for current_item in lvl1_args %} 19 | - {{current_item}} 20 | {% endfor %} 21 | {% endif %} 22 | {% endif %} 23 | {% else %} {# This is a dict, entering next level #} 24 | {{lvl1}}: 25 | 26 | {# LVL 2 #} 27 | 28 | {% for lvl2, lvl2_args in lvl1_args.items() %} 29 | {% if lvl2_args is defined and not none %} 30 | {% if lvl2_args is not mapping %} {# This is not a dict, so we reached last level #} 31 | {% if lvl2_args is not iterable %} {# This is not a list or a string #} 32 | {{lvl2}}: {{lvl2_args}} 33 | {% else %} {# This is a list or a string #} 34 | {% if lvl2_args is string %} {# This is a string #} 35 | {% if '\n' in lvl2_args %} {# This is a multi line string #} 36 | {{lvl2}}: "{% for current_line in lvl2_args.split('\n') %}{{current_line}}\n{% endfor %}" 37 | {% else %} {# This is a single line string #} 38 | {{lvl2}}: {{lvl2_args}} 39 | {% 
endif %} 40 | {% else %} {# This is a list #} 41 | {{lvl2}}: 42 | {% for current_item in lvl2_args %} 43 | - {{current_item}} 44 | {% endfor %} 45 | {% endif %} 46 | {% endif %} 47 | {% else %} {# This is a dict, entering next level #} 48 | {{lvl2}}: 49 | {% endif %} 50 | {% endif %} 51 | {% endfor %} 52 | 53 | {% endif %} 54 | 55 | {% endif %} 56 | {% endfor %} 57 | 58 | {% endmacro %} 59 | 60 | 61 | -------------------------------------------------------------------------------- /pillar/general/pkgs.sls: -------------------------------------------------------------------------------- 1 | # This file regroups packages names. It's purpose is to help allowing multiple Linux distros in the future (like debian, etc) 2 | {% if grains.os_family == 'RedHat' %} 3 | pkgs: 4 | apache: httpd 5 | vim: vim 6 | dns: bind 7 | dhcp: dhcp 8 | sftp: vsftpd 9 | tftp: tftp 10 | tftpserver: tftp-server 11 | xinetd: xinetd 12 | syslinux: syslinux 13 | wget: wget 14 | ntp: ntp 15 | ldap_server: openldap-servers 16 | ldap_client: openldap-clients 17 | munge: munge 18 | slurm: slurm 19 | slurm_munge: slurm-munge 20 | webserver: httpd 21 | salt_minion: salt-minion 22 | firewall: firewalld 23 | networkmanager: NetworkManager 24 | nfs_utils: nfs-utils 25 | nsspamldap: nss-pam-ldapd 26 | sssd: sssd 27 | phpldapadmin: phpldapadmin 28 | shinken: shinken 29 | shinken_arbiter: shinken-arbiter 30 | shinken_broker: shinken-broker 31 | shinken_poller: shinken-poller 32 | shinken_reactionner: shinken-reactionner 33 | shinken_receiver: shinken-receiver 34 | shinken_scheduler: shinken-scheduler 35 | shinken_webui2: shinken-webui2 36 | nrpe: nrpe 37 | plugin_nrpe: nagios-plugins-nrpe 38 | monitoring_proc: nagios-plugins-procs 39 | monitoring_disk: nagios-plugins-disk 40 | {% elif grains.os_family == 'Debian' %} 41 | 42 | 43 | {% endif %} 44 | -------------------------------------------------------------------------------- /pillar/general/services.sls: -------------------------------------------------------------------------------- 1 | # This file regroups services names. It's purpose is to help allowing multiple Linux distros in the future (like debian, etc) 2 | services: 3 | apache: httpd 4 | dns: named 5 | dhcp: dhcpd 6 | sftp: vsftpd 7 | firewalld: firewalld 8 | xinetd: xinetd 9 | ntp: ntpd 10 | ldapserver: slapd 11 | webserver: httpd 12 | salt_minion: salt-minion 13 | tftpserver: tftp 14 | munge: munge 15 | slurmserver: slurmctld 16 | slurmclient: slurmd 17 | rpcbind: rpcbind 18 | nfs_server: nfs-server 19 | sssd: sssd 20 | nscd: nscd 21 | networkmanager: NetworkManager 22 | shinken_arbiter: shinken-arbiter 23 | shinken_broker: shinken-broker 24 | shinken_poller: shinken-poller 25 | shinken_reactionner: shinken-reactionner 26 | shinken_receiver: shinken-receiver 27 | shinken_scheduler: shinken-scheduler 28 | mongodb_server: mongod 29 | nrpe: nrpe 30 | -------------------------------------------------------------------------------- /pillar/general/templating.sls: -------------------------------------------------------------------------------- 1 | templating: 2 | hash_header: | 3 | # +--------------------------------------------------+ 4 | # | This file is managed by Banquise. Do not edit. | 5 | # +--------------------------------------------------+ 6 | semicolon_header: | 7 | ; +--------------------------------------------------+ 8 | ; | This file is managed by Banquise. Do not edit. 
| 9 | ; +--------------------------------------------------+ 10 | -------------------------------------------------------------------------------- /pillar/top.sls: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/masters/masters.sls' as mas %} 2 | {% import_yaml 'cluster/network.sls' as net %} 3 | {% import_yaml 'cluster/core.sls' as cor %} 4 | 5 | base: 6 | # Allowed to all hosts 7 | '*': 8 | - general/* 9 | {%- for types in cor.core.types %} 10 | - cluster/nodes/{{types}} 11 | - cluster/nodes/{{types}}_states 12 | - cluster/nodes/{{types}}_system 13 | {%- endfor %} 14 | - cluster/io/nfs 15 | - cluster/network 16 | - cluster/masters/masters 17 | - cluster/masters/masters_states 18 | - cluster/masters/masters_system 19 | # - engine/engine # Should be removed soon 20 | - cluster/core 21 | - cluster/authentication/ssh_public 22 | - cluster/authentication/passwords_public 23 | - cluster/authentication/ldap_public 24 | - engine/engine_connect 25 | - engine/engine_monitoring 26 | - engine/engine_reverse 27 | - engine/engine_network 28 | - engine/equipment 29 | - engine/system 30 | - cluster/monitoring 31 | # Allowed to masters only, secure passwords and ssh private key 32 | {% for master in mas.masters %} 33 | '{{master}}.{{net.network.global_parameters.domain_name}}': 34 | - cluster/authentication/ssh_private 35 | - cluster/authentication/ldap_private 36 | {% endfor %} 37 | -------------------------------------------------------------------------------- /pillar/top.sls.bkp: -------------------------------------------------------------------------------- 1 | {% import_yaml 'cluster/masters/masters.sls' as mas %} 2 | {% import_yaml 'cluster/network.sls' as net %} 3 | {% import_yaml 'cluster/core.sls' as cor %} 4 | 5 | base: 6 | # Allowed to all hosts 7 | '*': 8 | - general/pkgs 9 | - general/services 10 | - general/templating 11 | #{% for path in salt['cp.list_master']() %} 12 | 13 | #{% endfor %} 14 | {%- for types in cor.core.types %} 15 | - cluster/nodes/{{types}} 16 | - cluster/nodes/{{types}}_states 17 | - cluster/nodes/{{types}}_system 18 | {%- endfor %} 19 | - cluster/io/nfs 20 | - cluster/network 21 | - cluster/masters/masters 22 | - cluster/masters/masters_states 23 | - cluster/masters/masters_system 24 | # - engine/engine # Should be removed soon 25 | - cluster/core 26 | - cluster/authentication/ssh_public 27 | - cluster/authentication/passwords_public 28 | - cluster/authentication/ldap_public 29 | - engine/engine_connect 30 | - engine/engine_monitoring 31 | - engine/engine_reverse 32 | - engine/engine_network 33 | - cluster/monitoring 34 | # Allowed to masters only, secure passwords and ssh private key 35 | {% for master in mas.masters %} 36 | '{{master}}.{{net.network.global_parameters.domain_name}}': 37 | - cluster/authentication/ssh_private 38 | - cluster/authentication/ldap_private 39 | {% endfor %} 40 | -------------------------------------------------------------------------------- /salt/.monitoring.sls.swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/salt/.monitoring.sls.swp -------------------------------------------------------------------------------- /salt/bootstrap/init.sls: -------------------------------------------------------------------------------- 1 | sethostname: 2 | cmd.run: 3 | - name: hostnamectl set-hostname {{salt['grains.get'] ('id')}} 4 | - unless: test "$(hostname)" = 
"{{salt['grains.get'] ('id')}}" 5 | 6 | /etc/yum.repos.d/CentOS-Base.repo: 7 | file: 8 | - absent 9 | 10 | /etc/yum.repos.d/CentOS-CR.repo: 11 | file: 12 | - absent 13 | 14 | /etc/yum.repos.d/CentOS-Debuginfo.repo: 15 | file: 16 | - absent 17 | 18 | /etc/yum.repos.d/CentOS-fasttrack.repo: 19 | file: 20 | - absent 21 | 22 | /etc/yum.repos.d/CentOS-Sources.repo: 23 | file: 24 | - absent 25 | 26 | /etc/yum.repos.d/CentOS-Vault.repo: 27 | file: 28 | - absent 29 | 30 | /etc/yum.repos.d/salt.local.repo: 31 | file: 32 | - managed 33 | - source: salt://bootstrap/salt.local.repo.jinja 34 | - template: jinja 35 | 36 | /etc/yum.repos.d/os_dvd.local.repo: 37 | file: 38 | - managed 39 | - source: salt://bootstrap/os_dvd.local.repo.jinja 40 | - template: jinja 41 | 42 | salt-master: 43 | host.present: 44 | - ip: {{salt['pillar.get']('core:salt_master_ip')}} 45 | - names: 46 | - salt 47 | - salt-master 48 | 49 | salt-minion: 50 | pkg.installed: 51 | - name: {{ pillar['pkgs']['salt_minion'] }} 52 | - require: 53 | - file: /etc/yum.repos.d/salt.local.repo 54 | - file: /etc/yum.repos.d/os_dvd.local.repo 55 | - file: /etc/yum.repos.d/CentOS-Base.repo 56 | - file: /etc/yum.repos.d/CentOS-CR.repo 57 | - file: /etc/yum.repos.d/CentOS-Debuginfo.repo 58 | - file: /etc/yum.repos.d/CentOS-fasttrack.repo 59 | - file: /etc/yum.repos.d/CentOS-Sources.repo 60 | - file: /etc/yum.repos.d/CentOS-Vault.repo 61 | 62 | salt-minion-service: 63 | service: 64 | - name: {{ pillar['services']['salt_minion'] }} 65 | - running 66 | - enable: True 67 | - require: 68 | - pkg: {{ pillar['pkgs']['salt_minion'] }} 69 | - host: salt-master 70 | - cmd: sethostname 71 | 72 | -------------------------------------------------------------------------------- /salt/bootstrap/os_dvd.local.repo.jinja: -------------------------------------------------------------------------------- 1 | [os_dvd.local] 2 | name=os_dvd.local.repo 3 | baseurl=file:///var/www/html/os_dvd.local.repo/ 4 | gpgcheck=0 5 | enabled=1 6 | 7 | -------------------------------------------------------------------------------- /salt/bootstrap/salt.local.repo.jinja: -------------------------------------------------------------------------------- 1 | [salt] 2 | name=local salt repository 3 | baseurl=http://{{salt['pillar.get']('core:salt_master_ip')}}/salt.local.repo/ 4 | gpgcheck=0 5 | enabled=1 6 | 7 | -------------------------------------------------------------------------------- /salt/debug/debug.jinja: -------------------------------------------------------------------------------- 1 | 2 | all_system: 3 | 4 | {% for current_equipment in equipment.equipment_list %} 5 | 6 | salt['pillar.get']('engine_network:'~network~':dhcp_on') 7 | {% import_yaml ("cluster/equipment/"+current_equipment+"/system.sls") as current_system %} 8 | {% for lvl1, lvl1_args in current_system.items() %} 9 | {% if lvl1_args is defined and not none %} 10 | {{lvl1}}: 11 | {% for lvl2, lvl2_args in lvl1_args.items() %} 12 | {% if lvl2_args is defined and not none %} 13 | {{lvl2}}: 14 | {% for lvl3, lvl3_args in lvl2_args.items() %} 15 | {% if lvl3_args is defined and not none %} 16 | {{lvl3}}: {{mapping(lvl3_args)}} 17 | {% endif %} 18 | {% endfor %} 19 | {% endif %} 20 | {% endfor %} 21 | {% endif %} 22 | {% endfor %} 23 | 24 | {% endfor %} 25 | 26 | -------------------------------------------------------------------------------- /salt/debug/dhcpd.conf.jinja: -------------------------------------------------------------------------------- 1 | {{ salt['pillar.get']('templating:hash_header') }} 2 | 3 | # 4 | 
# DHCP Server Configuration file. 5 | # see /usr/share/doc/dhcp*/dhcpd.conf.example 6 | # see dhcpd.conf(5) man page 7 | # 8 | 9 | # EFI 10 | option space pxelinux; 11 | option pxelinux.magic code 208 = string; 12 | option pxelinux.configfile code 209 = text; 13 | option pxelinux.pathprefix code 210 = text; 14 | option pxelinux.reboottime code 211 = unsigned integer 32; 15 | option architecture-type code 93 = unsigned integer 16; 16 | # END EFI 17 | 18 | authoritative; 19 | 20 | {%- for network in salt['pillar.get']('engine_network') %} 21 | {%- if salt['pillar.get']('engine_network:'~network~':dhcp_on') == true %} 22 | ############################################# 23 | ####### NETWORK {{network}} 24 | ### 25 | subnet {{ salt['pillar.get']('engine_network:'~network~':subnet') }} netmask {{ salt['pillar.get']('engine_network:'~network~':netmask') }} { 26 | range {{ salt['pillar.get']('engine_network:'~network~':dhcp_unknown_range') }}; 27 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 28 | {%- if salt['pillar.get']('engine_network:'~network~':gateway') is defined %} 29 | option routers {{salt['pillar.get']('engine_network:'~network~':gateway')}}; 30 | {%- endif %} 31 | 32 | {%- if network == salt['pillar.get']('network:global_parameters:admin_network') %} 33 | option domain-name-servers {{salt['pillar.get']('engine_connect:dns_server_ip')}}; 34 | next-server {{salt['pillar.get']('engine_connect:pxe_server_ip')}}; 35 | 36 | if option architecture-type = 00:07 { 37 | filename "shim.efi"; 38 | } else { 39 | filename "pxelinux.0"; 40 | } 41 | {%- endif %} 42 | 43 | default-lease-time 600; 44 | max-lease-time 7200; 45 | 46 | {%- for host, host_args in salt['pillar.get']('masters').items() %} 47 | {%- for host_network, host_network_args in host_args.network.items() %} 48 | {%- if host_network == network and host_network_args.hwaddr is defined %} 49 | 50 | host {{ host }} { 51 | hardware ethernet {{ host_network_args.hwaddr }}; 52 | fixed-address {{ host_network_args.ip }}; 53 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 54 | option host-name "{{host}}"; 55 | } 56 | {%- endif %} 57 | {%- endfor %} 58 | {%- if host_args.bmc is defined %} 59 | {%- if host_args.bmc.network == network and host_args.bmc.hwaddr is defined %} 60 | 61 | host {{ host_args.bmc.name }} { 62 | hardware ethernet {{ host_args.bmc.hwaddr }}; 63 | fixed-address {{ host_args.bmc.ip }}; 64 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 65 | option host-name "{{host_args.bmc.name}}"; 66 | } 67 | {%- endif %} 68 | {%- endif %} 69 | {% endfor %} 70 | 71 | {%- for type in salt['pillar.get']('core:types') %} 72 | {%- for sub_type, sub_type_args in salt['pillar.get'](type, {}).items() %} 73 | {%- for host, host_args in sub_type_args.items() %} 74 | {%- for host_network, host_network_args in host_args.network.items() %} 75 | {%- if host_network == network and host_network_args.hwaddr is defined %} 76 | 77 | host {{ host }} { 78 | hardware ethernet {{ host_network_args.hwaddr }}; 79 | fixed-address {{ host_network_args.ip }}; 80 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 81 | option host-name "{{host}}"; 82 | } 83 | {%- endif %} 84 | {%- endfor %} 85 | {%- if host_args.bmc is defined %} 86 | {%- if host_args.bmc.network == network and host_args.bmc.hwaddr is defined %} 87 | 88 | host {{ host_args.bmc.name }} { 89 | hardware ethernet {{ host_args.bmc.hwaddr }}; 90 | 
fixed-address {{ host_args.bmc.ip }}; 91 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 92 | option host-name "{{host_args.bmc.name}}"; 93 | } 94 | {%- endif %} 95 | {%- endif %} 96 | {%- endfor %} 97 | {%- endfor %} 98 | {%- endfor %} 99 | 100 | } 101 | {%- endif %} 102 | {%- endfor %} 103 | -------------------------------------------------------------------------------- /salt/debug/dhcpd0.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set admin_network = salt['pillar.get']('core:admin_network') %} 2 | 3 | {{ salt['pillar.get']('templating:hash_header') }} 4 | 5 | # 6 | # DHCP Server Configuration file. 7 | # see /usr/share/doc/dhcp*/dhcpd.conf.example 8 | # see dhcpd.conf(5) man page 9 | # 10 | 11 | # EFI 12 | option space pxelinux; 13 | option pxelinux.magic code 208 = string; 14 | option pxelinux.configfile code 209 = text; 15 | option pxelinux.pathprefix code 210 = text; 16 | option pxelinux.reboottime code 211 = unsigned integer 32; 17 | option architecture-type code 93 = unsigned integer 16; 18 | # END EFI 19 | 20 | authoritative; 21 | 22 | subnet {{ salt['pillar.get']('engine_network:'~admin_network~':subnet') }} netmask {{ salt['pillar.get']('engine_network:'~admin_network~':netmask') }} { 23 | range {{ salt['pillar.get']('engine_network:'~admin_network~':dhcp_unknown_range') }}; 24 | option domain-name "{{ salt['pillar.get']('core:domain_name') }}"; 25 | option domain-name-servers {{salt['pillar.get']('engine_connect:dns_server_ip')}}; 26 | 27 | default-lease-time 600; 28 | max-lease-time 7200; 29 | 30 | next-server {{salt['pillar.get']('engine_connect:pxe_server_ip')}}; 31 | 32 | if option architecture-type = 00:07 { 33 | filename "shim.efi"; 34 | } else { 35 | filename "pxelinux.0"; 36 | } 37 | 38 | {% for type in salt['pillar.get']('core:types') %} 39 | ############################################# 40 | ########## {{type}} 41 | ### 42 | {% for group, group_args in salt['pillar.get'](type, {}).items() %} 43 | ########## 44 | # {{group}} 45 | 46 | {%- for host, host_args in group_args.items() %} 47 | {%- for network, network_args in host_args.network.items() %} 48 | {%- if network == admin_network %} 49 | host {{ host }} { 50 | hardware ethernet {{ network_args.hwaddr }}; 51 | fixed-address {{ network_args.ip }}; 52 | option domain-name "{{ salt['pillar.get']('core:domain_name') }}"; 53 | option host-name "{{host}}"; 54 | } 55 | {%- endif %} 56 | {%- endfor %} 57 | {%- if host_args.bmc is defined %} 58 | host {{ host_args.bmc.name }} { 59 | hardware ethernet {{ host_args.bmc.hwaddr }}; 60 | fixed-address {{ host_args.bmc.ip }}; 61 | option domain-name "{{ salt['pillar.get']('core:domain_name') }}"; 62 | option host-name "{{host_args.bmc.name}}"; 63 | } 64 | {%- endif %} 65 | {%- endfor %} 66 | {% endfor %} 67 | {% endfor %} 68 | 69 | 70 | } 71 | -------------------------------------------------------------------------------- /salt/debug/server.sls: -------------------------------------------------------------------------------- 1 | /tmp/debug: 2 | file: 3 | - managed 4 | - source: salt://debug/debug.jinja 5 | - template: jinja 6 | 7 | -------------------------------------------------------------------------------- /salt/debug/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 
2 | service: 3 | 4 | dhcpd: 5 | service_description: check_proc_dhcpd 6 | check_command: check_nrpe!check_proc_dhcpd 7 | 8 | command: 9 | 10 | dhcpd: 11 | command_name: check_proc_dhcpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C dhcpd" 14 | -------------------------------------------------------------------------------- /salt/debug/server_monitoring_v2.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | 4 | dhcpd: 5 | service_description: check_proc_dhcpd 6 | check_command: check_nrpe!check_proc_dhcpd 7 | 8 | command: 9 | 10 | dhcpd: 11 | command_name: check_proc_dhcpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C dhcpd" 14 | -------------------------------------------------------------------------------- /salt/dhcp/dhcpd.conf.jinja: -------------------------------------------------------------------------------- 1 | {{ salt['pillar.get']('templating:hash_header') }} 2 | 3 | # 4 | # DHCP Server Configuration file. 5 | # see /usr/share/doc/dhcp*/dhcpd.conf.example 6 | # see dhcpd.conf(5) man page 7 | # 8 | 9 | # EFI 10 | option space pxelinux; 11 | option pxelinux.magic code 208 = string; 12 | option pxelinux.configfile code 209 = text; 13 | option pxelinux.pathprefix code 210 = text; 14 | option pxelinux.reboottime code 211 = unsigned integer 32; 15 | option architecture-type code 93 = unsigned integer 16; 16 | # END EFI 17 | 18 | authoritative; 19 | 20 | {%- for network in salt['pillar.get']('engine_network') %} 21 | {%- if salt['pillar.get']('engine_network:'~network~':dhcp_on') == true %} 22 | ############################################# 23 | ####### NETWORK {{network}} 24 | ### 25 | subnet {{ salt['pillar.get']('engine_network:'~network~':subnet') }} netmask {{ salt['pillar.get']('engine_network:'~network~':netmask') }} { 26 | range {{ salt['pillar.get']('engine_network:'~network~':dhcp_unknown_range') }}; 27 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 28 | {%- if salt['pillar.get']('engine_network:'~network~':gateway') is defined %} 29 | option routers {{salt['pillar.get']('engine_network:'~network~':gateway')}}; 30 | {%- endif %} 31 | 32 | {%- if network == salt['pillar.get']('network:global_parameters:admin_network') %} 33 | option domain-name-servers {{salt['pillar.get']('engine_connect:dns_server_ip')}}; 34 | next-server {{salt['pillar.get']('engine_connect:pxe_server_ip')}}; 35 | 36 | if option architecture-type = 00:07 { 37 | filename "shim.efi"; 38 | } else { 39 | filename "pxelinux.0"; 40 | } 41 | {%- endif %} 42 | 43 | default-lease-time 600; 44 | max-lease-time 7200; 45 | 46 | {%- for host, host_args in salt['pillar.get']('masters').items() %} 47 | {%- for host_network, host_network_args in host_args.network.items() %} 48 | {%- if host_network == network and host_network_args.hwaddr is defined %} 49 | 50 | host {{ host }} { 51 | hardware ethernet {{ host_network_args.hwaddr }}; 52 | fixed-address {{ host_network_args.ip }}; 53 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 54 | option host-name "{{host}}"; 55 | } 56 | {%- endif %} 57 | {%- endfor %} 58 | {%- if host_args.bmc is defined %} 59 | {%- if host_args.bmc.network == network and host_args.bmc.hwaddr is defined %} 60 | 61 | host {{ host_args.bmc.name }} { 62 | hardware ethernet {{ host_args.bmc.hwaddr }}; 63 | 
fixed-address {{ host_args.bmc.ip }}; 64 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 65 | option host-name "{{host_args.bmc.name}}"; 66 | } 67 | {%- endif %} 68 | {%- endif %} 69 | {% endfor %} 70 | 71 | {%- for type in salt['pillar.get']('core:types') %} 72 | {%- for sub_type, sub_type_args in salt['pillar.get'](type, {}).items() %} 73 | {%- for host, host_args in sub_type_args.items() %} 74 | {%- for host_network, host_network_args in host_args.network.items() %} 75 | {%- if host_network == network and host_network_args.hwaddr is defined %} 76 | 77 | host {{ host }} { 78 | hardware ethernet {{ host_network_args.hwaddr }}; 79 | fixed-address {{ host_network_args.ip }}; 80 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 81 | option host-name "{{host}}"; 82 | } 83 | {%- endif %} 84 | {%- endfor %} 85 | {%- if host_args.bmc is defined %} 86 | {%- if host_args.bmc.network == network and host_args.bmc.hwaddr is defined %} 87 | 88 | host {{ host_args.bmc.name }} { 89 | hardware ethernet {{ host_args.bmc.hwaddr }}; 90 | fixed-address {{ host_args.bmc.ip }}; 91 | option domain-name "{{ salt['pillar.get']('network:global_parameters:domain_name') }}"; 92 | option host-name "{{host_args.bmc.name}}"; 93 | } 94 | {%- endif %} 95 | {%- endif %} 96 | {%- endfor %} 97 | {%- endfor %} 98 | {%- endfor %} 99 | 100 | } 101 | {%- endif %} 102 | {%- endfor %} 103 | -------------------------------------------------------------------------------- /salt/dhcp/dhcpd0.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set admin_network = salt['pillar.get']('core:admin_network') %} 2 | 3 | {{ salt['pillar.get']('templating:hash_header') }} 4 | 5 | # 6 | # DHCP Server Configuration file. 
7 | # see /usr/share/doc/dhcp*/dhcpd.conf.example 8 | # see dhcpd.conf(5) man page 9 | # 10 | 11 | # EFI 12 | option space pxelinux; 13 | option pxelinux.magic code 208 = string; 14 | option pxelinux.configfile code 209 = text; 15 | option pxelinux.pathprefix code 210 = text; 16 | option pxelinux.reboottime code 211 = unsigned integer 32; 17 | option architecture-type code 93 = unsigned integer 16; 18 | # END EFI 19 | 20 | authoritative; 21 | 22 | subnet {{ salt['pillar.get']('engine_network:'~admin_network~':subnet') }} netmask {{ salt['pillar.get']('engine_network:'~admin_network~':netmask') }} { 23 | range {{ salt['pillar.get']('engine_network:'~admin_network~':dhcp_unknown_range') }}; 24 | option domain-name "{{ salt['pillar.get']('core:domain_name') }}"; 25 | option domain-name-servers {{salt['pillar.get']('engine_connect:dns_server_ip')}}; 26 | 27 | default-lease-time 600; 28 | max-lease-time 7200; 29 | 30 | next-server {{salt['pillar.get']('engine_connect:pxe_server_ip')}}; 31 | 32 | if option architecture-type = 00:07 { 33 | filename "shim.efi"; 34 | } else { 35 | filename "pxelinux.0"; 36 | } 37 | 38 | {% for type in salt['pillar.get']('core:types') %} 39 | ############################################# 40 | ########## {{type}} 41 | ### 42 | {% for group, group_args in salt['pillar.get'](type, {}).items() %} 43 | ########## 44 | # {{group}} 45 | 46 | {%- for host, host_args in group_args.items() %} 47 | {%- for network, network_args in host_args.network.items() %} 48 | {%- if network == admin_network %} 49 | host {{ host }} { 50 | hardware ethernet {{ network_args.hwaddr }}; 51 | fixed-address {{ network_args.ip }}; 52 | option domain-name "{{ salt['pillar.get']('core:domain_name') }}"; 53 | option host-name "{{host}}"; 54 | } 55 | {%- endif %} 56 | {%- endfor %} 57 | {%- if host_args.bmc is defined %} 58 | host {{ host_args.bmc.name }} { 59 | hardware ethernet {{ host_args.bmc.hwaddr }}; 60 | fixed-address {{ host_args.bmc.ip }}; 61 | option domain-name "{{ salt['pillar.get']('core:domain_name') }}"; 62 | option host-name "{{host_args.bmc.name}}"; 63 | } 64 | {%- endif %} 65 | {%- endfor %} 66 | {% endfor %} 67 | {% endfor %} 68 | 69 | 70 | } 71 | -------------------------------------------------------------------------------- /salt/dhcp/server.sls: -------------------------------------------------------------------------------- 1 | dhcp: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['dhcp'] }} 4 | # - require: 5 | # - sls: repository.client 6 | 7 | /etc/dhcp/dhcpd.conf: 8 | file: 9 | - managed 10 | - source: salt://dhcp/dhcpd.conf.jinja 11 | - template: jinja 12 | - require: 13 | - pkg: {{ pillar['pkgs']['dhcp'] }} 14 | 15 | dhcpd: 16 | service: 17 | - name: {{ pillar['services']['dhcp'] }} 18 | - running 19 | - enable: True 20 | - watch: 21 | - file: /etc/dhcp/dhcpd.conf 22 | - require: 23 | - pkg: {{ pillar['pkgs']['dhcp'] }} 24 | - file: /etc/dhcp/dhcpd.conf 25 | -------------------------------------------------------------------------------- /salt/dhcp/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 
2 | service: 3 | 4 | dhcpd: 5 | service_description: check_proc_dhcpd 6 | check_command: check_nrpe!check_proc_dhcpd 7 | 8 | command: 9 | 10 | dhcpd: 11 | command_name: check_proc_dhcpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C dhcpd" 14 | -------------------------------------------------------------------------------- /salt/dhcp/server_monitoring_v2.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | 4 | dhcpd: 5 | service_description: check_proc_dhcpd 6 | check_command: check_nrpe!check_proc_dhcpd 7 | 8 | command: 9 | 10 | dhcpd: 11 | command_name: check_proc_dhcpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C dhcpd" 14 | -------------------------------------------------------------------------------- /salt/dns/client.sls: -------------------------------------------------------------------------------- 1 | /etc/resolv.conf: 2 | file: 3 | - managed 4 | - source: salt://dns/resolv.conf.jinja 5 | - template: jinja 6 | - require: 7 | - sls: network.nmanager 8 | -------------------------------------------------------------------------------- /salt/dns/forward.jinja: -------------------------------------------------------------------------------- 1 | {% set admin_ip = salt['pillar.get']('masters:'~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),''))~':network:'~salt['pillar.get']('network:global_parameters:admin_network')~':ip') %} 2 | {% set admin_hostname = (salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),'')) %} 3 | {% set admin_network = salt['pillar.get']('network:global_parameters:admin_network') %} 4 | 5 | {{ salt['pillar.get']('templating:semicolon_header') }} 6 | 7 | $TTL 86400 8 | @ IN SOA {{admin_hostname}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. root.{{salt['pillar.get']('network:global_parameters:domain_name')}}. ( 9 | 2011071001 ;Serial 10 | 3600 ;Refresh 11 | 1800 ;Retry 12 | 604800 ;Expire 13 | 86400 ;Minimum TTL 14 | ) 15 | @ IN NS {{admin_hostname}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 
16 | @ IN A {{admin_ip}} 17 | 18 | {{admin_hostname}} IN A {{admin_ip}} 19 | 20 | {%- for host, host_args in salt['pillar.get']('masters').items() %} 21 | {%- for host_network, host_network_args in host_args.network.items() %} 22 | {%- if host_network == admin_network %} 23 | {{host}} IN A {{ host_network_args.ip }} 24 | {{host}}.{{host_network}} IN A {{ host_network_args.ip }} 25 | {%- else %} 26 | {{host}}.{{host_network}} IN A {{ host_network_args.ip }} 27 | {%- endif %} 28 | {%- endfor %} 29 | {%- if host_args.bmc is defined %} 30 | {{host_args.bmc.name}} IN A {{host_args.bmc.ip}} 31 | {{host_args.bmc.name}}.{{host_args.bmc.network}} IN A {{host_args.bmc.ip}} 32 | {%- endif %} 33 | {%- endfor %} 34 | 35 | {% for type in salt['pillar.get']('core:types') %} 36 | ;############################################# 37 | ;########## {{type}} 38 | ;### 39 | {% for subtype, subtype_args in salt['pillar.get'](type, {}).items() %} 40 | ;########## 41 | ;# {{subtype}} 42 | 43 | {%- for host, host_args in subtype_args.items() %} 44 | {%- for host_network, host_network_args in host_args.network.items() %} 45 | {%- if host_network == admin_network %} 46 | {{host}} IN A {{ host_network_args.ip }} 47 | {%- else %} 48 | {{host}}.{{host_network}} IN A {{ host_network_args.ip }} 49 | {%- endif %} 50 | {%- endfor %} 51 | {%- if host_args.bmc is defined %} 52 | {{host_args.bmc.name}} IN A {{host_args.bmc.ip}} 53 | {{host_args.bmc.name}}.{{host_args.bmc.network}} IN A {{host_args.bmc.ip}} 54 | {%- endif %} 55 | {%- endfor %} 56 | {% endfor %} 57 | {% endfor %} 58 | -------------------------------------------------------------------------------- /salt/dns/forward.jinja.bkp: -------------------------------------------------------------------------------- 1 | $TTL 86400 2 | @ IN SOA {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. root.{{salt['pillar.get']('engine:network:domaine_name')}}. ( 3 | 2011071001 ;Serial 4 | 3600 ;Refresh 5 | 1800 ;Retry 6 | 604800 ;Expire 7 | 86400 ;Minimum TTL 8 | ) 9 | @ IN NS {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 10 | @ IN A {{salt['pillar.get']('engine:master:masterip')}} 11 | 12 | {{salt['pillar.get']('engine:master:masterid')}} IN A {{salt['pillar.get']('engine:master:masterip')}} 13 | 14 | {% for type in salt['pillar.get']('core:types') %} 15 | ;############################################# 16 | ;########## {{type}} 17 | ;### 18 | {% for group, argu in salt['pillar.get'](type, {}).items() %} 19 | ;########## 20 | ;# {{group}} 21 | 22 | {%- for host, argo in argu.items() %} 23 | {%- for network, args in argo.network.items() %} 24 | {%- if network == "net0" %} 25 | {{host}} IN A {{ args.ip }} 26 | {%- else %} 27 | {{host}}.{{network}} IN A {{ args.ip }} 28 | {%- endif %} 29 | {%- endfor %} 30 | {%- if argo.bmc is defined %} 31 | {{argo.bmc.name}} IN A {{argo.bmc.ip}} 32 | {%- endif %} 33 | {%- endfor %} 34 | {% endfor %} 35 | {% endfor %} 36 | 37 | -------------------------------------------------------------------------------- /salt/dns/forward.jinja.bkp2: -------------------------------------------------------------------------------- 1 | $TTL 86400 2 | @ IN SOA {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. root.{{salt['pillar.get']('engine:network:domaine_name')}}. 
( 3 | 2011071001 ;Serial 4 | 3600 ;Refresh 5 | 1800 ;Retry 6 | 604800 ;Expire 7 | 86400 ;Minimum TTL 8 | ) 9 | {% if net == "net0" %} 10 | @ IN NS {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 11 | @ IN A {{salt['pillar.get']('engine:master:masterip')}} 12 | 13 | {{salt['pillar.get']('engine:master:masterid')}} IN A {{salt['pillar.get']('engine:master:masterip')}} 14 | {% endif %} 15 | 16 | {% if net == "net0" %} 17 | {% set nett = "" %} 18 | {% else %} 19 | {% set nett = "."~net %} 20 | {% endif %} 21 | 22 | {% for type in salt['pillar.get']('core:types') %} 23 | ;############################################# 24 | ;########## {{type}} 25 | ;### 26 | {% for group, argu in salt['pillar.get'](type, {}).items() %} 27 | ;########## 28 | ;# {{group}} 29 | 30 | {%- for host, argo in argu.items() %} 31 | {%- for network, args in argo.network.items() %} 32 | {{host}}{{nett}} IN A {{ args.ip }} 33 | {%- endfor %} 34 | {% if net == "net0" %} 35 | {%- if argo.bmc is defined %} 36 | {{argo.bmc.name}} IN A {{argo.bmc.ip}} 37 | {%- endif %} 38 | {%- endif %} 39 | {%- endfor %} 40 | {% endfor %} 41 | {% endfor %} 42 | 43 | -------------------------------------------------------------------------------- /salt/dns/named.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set admin_interface_ip = salt['pillar.get']('masters:'~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),''))~':network:'~salt['pillar.get']('network:global_parameters:admin_network')~':ip') %} 2 | {% set admin_network = salt['pillar.get']('network:global_parameters:admin_network') %} 3 | 4 | options { 5 | listen-on port 53 { 127.0.0.1; {{ admin_interface_ip }};}; 6 | listen-on-v6 port 53 { ::1; }; 7 | directory "/var/named"; 8 | dump-file "/var/named/data/cache_dump.db"; 9 | statistics-file "/var/named/data/named_stats.txt"; 10 | memstatistics-file "/var/named/data/named_mem_stats.txt"; 11 | allow-query { localhost; {{ salt['pillar.get']('engine_network:'~admin_network~':subnet') }}/{{ salt['pillar.get']('engine_network:'~admin_network~':shortnetmask') }};}; 12 | 13 | {% if salt['pillar.get']('engine_connect:dns_server_link') == true %} 14 | recursion yes; 15 | 16 | forwarders { 17 | {{salt['pillar.get']('engine_connect:dns_server_link_ip')}}; 18 | }; 19 | {% else %} 20 | recursion no; 21 | {% endif %} 22 | 23 | dnssec-enable yes; 24 | dnssec-validation yes; 25 | dnssec-lookaside auto; 26 | 27 | /* Path to ISC DLV key */ 28 | bindkeys-file "/etc/named.iscdlv.key"; 29 | 30 | managed-keys-directory "/var/named/dynamic"; 31 | 32 | pid-file "/run/named/named.pid"; 33 | session-keyfile "/run/named/session.key"; 34 | }; 35 | 36 | logging { 37 | channel default_debug { 38 | file "data/named.run"; 39 | severity dynamic; 40 | }; 41 | }; 42 | 43 | zone "." 
IN { 44 | type hint; 45 | file "named.ca"; 46 | }; 47 | 48 | zone"{{salt['pillar.get']('network:global_parameters:domain_name')}}" IN { 49 | type master; 50 | file "forward"; 51 | allow-update { none; }; 52 | }; 53 | 54 | {% for network, args in salt['pillar.get']('engine_network').items() %} 55 | {% set list1 = args.subnet.split('.') %} 56 | 57 | zone"{{list1[1]}}.{{list1[0]}}.in-addr.arpa" IN { 58 | type master; 59 | file "reverse.{{network}}"; 60 | allow-update { none; }; 61 | }; 62 | 63 | {% endfor %} 64 | 65 | include "/etc/named.rfc1912.zones"; 66 | include "/etc/named.root.key"; 67 | -------------------------------------------------------------------------------- /salt/dns/named.conf.jinja.bkp: -------------------------------------------------------------------------------- 1 | options { 2 | listen-on port 53 { 127.0.0.1; {{salt['pillar.get']('engine:servers:dns_server_ip')}};}; 3 | listen-on-v6 port 53 { ::1; }; 4 | directory "/var/named"; 5 | dump-file "/var/named/data/cache_dump.db"; 6 | statistics-file "/var/named/data/named_stats.txt"; 7 | memstatistics-file "/var/named/data/named_mem_stats.txt"; 8 | allow-query { localhost; {{salt['pillar.get']('engine:network:subnet')}}/{{salt['pillar.get']('engine:network:shortnetmask')}};}; 9 | 10 | recursion no; 11 | 12 | dnssec-enable yes; 13 | dnssec-validation yes; 14 | dnssec-lookaside auto; 15 | 16 | /* Path to ISC DLV key */ 17 | bindkeys-file "/etc/named.iscdlv.key"; 18 | 19 | managed-keys-directory "/var/named/dynamic"; 20 | 21 | pid-file "/run/named/named.pid"; 22 | session-keyfile "/run/named/session.key"; 23 | }; 24 | 25 | logging { 26 | channel default_debug { 27 | file "data/named.run"; 28 | severity dynamic; 29 | }; 30 | }; 31 | 32 | zone "." IN { 33 | type hint; 34 | file "named.ca"; 35 | }; 36 | 37 | zone"{{salt['pillar.get']('engine:network:domaine_name')}}" IN { 38 | type master; 39 | file "forward"; 40 | allow-update { none; }; 41 | }; 42 | {% set list1 = salt['pillar.get']('engine:network:subnet').split('.') %} 43 | {% if salt['pillar.get']('engine:network:reverse') == 1 %} 44 | zone"{{list1[0]}}.in-addr.arpa" IN { 45 | type master; 46 | file "reverse"; 47 | allow-update { none; }; 48 | }; 49 | {% elif salt['pillar.get']('engine:network:reverse') == 2 %} 50 | zone"{{list1[1]}}.{{list1[0]}}.in-addr.arpa" IN { 51 | type master; 52 | file "reverse"; 53 | allow-update { none; }; 54 | }; 55 | {% elif salt['pillar.get']('engine:network:reverse') == 3 %} 56 | zone"{{list1[2]}}.{{list1[1]}}.{{list1[0]}}.in-addr.arpa" IN { 57 | type master; 58 | file "reverse"; 59 | allow-update { none; }; 60 | }; 61 | {% endif %} 62 | 63 | include "/etc/named.rfc1912.zones"; 64 | include "/etc/named.root.key"; 65 | -------------------------------------------------------------------------------- /salt/dns/resolv.conf.jinja: -------------------------------------------------------------------------------- 1 | search {{salt['pillar.get']('network:global_parameters:domain_name')}} 2 | nameserver {{salt['pillar.get']('engine_connect:dns_server_ip')}} 3 | -------------------------------------------------------------------------------- /salt/dns/reverse.jinja: -------------------------------------------------------------------------------- 1 | {% set admin_ip = salt['pillar.get']('masters:'~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),''))~':network:'~salt['pillar.get']('network:global_parameters:admin_network')~':ip') %} 2 | {% set admin_hostname = 
(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),'')) %} 3 | {% set admin_network = salt['pillar.get']('network:global_parameters:admin_network') %} 4 | $TTL 86400 5 | @ IN SOA {{admin_hostname}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. root.{{salt['pillar.get']('network:global_parameters:domain_name')}}. ( 6 | 2011071001 ;Serial 7 | 3600 ;Refresh 8 | 1800 ;Retry 9 | 604800 ;Expire 10 | 86400 ;Minimum TTL 11 | ) 12 | @ IN NS {{admin_hostname}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 13 | @ IN PTR {{salt['pillar.get']('network:global_parameters:domain_name')}}. 14 | 15 | {% if net == admin_network %} 16 | {% set list1 = admin_ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{admin_hostname}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 17 | {% endif %} 18 | 19 | 20 | {%- for host, host_args in salt['pillar.get']('masters').items() %} 21 | {%- for host_network, host_network_args in host_args.network.items() %} 22 | {% if host_network == net %} 23 | {%- if host_network == admin_network %} 24 | {% set list1 = host_network_args.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 25 | {%- else %} 26 | {% set list1 = host_network_args.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host}}.{{host_network}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 27 | {%- endif %} 28 | {%- endif %} 29 | {%- endfor %} 30 | 31 | {%- if host_args.bmc is defined %} 32 | {%- if host_args.bmc.network == net %} 33 | {% set list1 = host_args.bmc.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host_args.bmc.name}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 34 | {%- endif %} 35 | {%- endif %} 36 | 37 | {%- endfor %} 38 | 39 | {% for type in salt['pillar.get']('core:types') %} 40 | ;############################################# 41 | ;########## {{type}} 42 | ;### 43 | {% for subtype, subtype_args in salt['pillar.get'](type, {}).items() %} 44 | ;########## 45 | ;# {{subtype}} 46 | 47 | {%- for host, host_args in subtype_args.items() %} 48 | {%- for host_network, host_network_args in host_args.network.items() %} 49 | {% if host_network == net %} 50 | {%- if host_network == admin_network %} 51 | {% set list1 = host_network_args.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 52 | {%- else %} 53 | {% set list1 = host_network_args.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host}}.{{host_network}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 54 | {%- endif %} 55 | {%- endif %} 56 | {%- endfor %} 57 | 58 | {%- if host_args.bmc is defined %} 59 | {%- if host_args.bmc.network == net %} 60 | {% set list1 = host_args.bmc.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host_args.bmc.name}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}. 61 | {%- endif %} 62 | {%- endif %} 63 | 64 | {%- endfor %} 65 | {% endfor %} 66 | {% endfor %} 67 | -------------------------------------------------------------------------------- /salt/dns/reverse.jinja.bkp: -------------------------------------------------------------------------------- 1 | $TTL 86400 2 | @ IN SOA {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. root.{{salt['pillar.get']('engine:network:domaine_name')}}. 
( 3 | 2011071001 ;Serial 4 | 3600 ;Refresh 5 | 1800 ;Retry 6 | 604800 ;Expire 7 | 86400 ;Minimum TTL 8 | ) 9 | @ IN NS {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 10 | @ IN PTR {{salt['pillar.get']('engine:network:domaine_name')}}. 11 | 12 | {% if salt['pillar.get']('engine:network:reverse') == 1 %} 13 | {% set list1 = salt['pillar.get']('engine:master:masterip').split('.') %}{{ list1[3] }} IN PTR {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 14 | {% elif salt['pillar.get']('engine:network:reverse') == 2 %} 15 | {% set list1 = salt['pillar.get']('engine:master:masterip').split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 16 | {% elif salt['pillar.get']('engine:network:reverse') == 3 %} 17 | {% set list1 = salt['pillar.get']('engine:master:masterip').split('.') %}{{ list1[3] }}.{{ list1[2] }}.{{ list1[1] }} IN PTR {{salt['pillar.get']('engine:master:masterid')}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 18 | {% endif %} 19 | 20 | {% for type in salt['pillar.get']('core:types') %} 21 | ;############################################# 22 | ;########## {{type}} 23 | ;### 24 | {% for group, argu in salt['pillar.get'](type, {}).items() %} 25 | ;########## 26 | ;# {{group}} 27 | 28 | {%- for host, argo in argu.items() %} 29 | {%- for network, args in argo.network.items() %} 30 | 31 | {%- if network == "net0" %} 32 | {% set list1 = args.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 33 | {%- else %} 34 | {% set list1 = args.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{host}}.{{network}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 35 | {%- endif %} 36 | {%- endfor %} 37 | {%- if argo.bmc is defined %} 38 | {% set list1 = argo.bmc.ip.split('.') %}{{ list1[3] }}.{{ list1[2] }} IN PTR {{argo.bmc.name}}.{{salt['pillar.get']('engine:network:domaine_name')}}. 
39 | {%- endif %} 40 | {%- endfor %} 41 | {% endfor %} 42 | {% endfor %} 43 | 44 | -------------------------------------------------------------------------------- /salt/dns/server.sls: -------------------------------------------------------------------------------- 1 | dns: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['dns'] }} 4 | # - require: 5 | # - sls: repository.client 6 | 7 | /etc/named.conf: 8 | file: 9 | - managed 10 | - source: salt://dns/named.conf.jinja 11 | - template: jinja 12 | - require: 13 | - pkg: {{ pillar['pkgs']['dns'] }} 14 | 15 | /var/named/forward: 16 | file: 17 | - managed 18 | - source: salt://dns/forward.jinja 19 | - template: jinja 20 | - require: 21 | - pkg: {{ pillar['pkgs']['dns'] }} 22 | 23 | {% for network, args in salt['pillar.get']('network').items() %} 24 | /var/named/reverse.{{network}}: 25 | file: 26 | - managed 27 | - source: salt://dns/reverse.jinja 28 | - template: jinja 29 | - require: 30 | - pkg: {{ pillar['pkgs']['dns'] }} 31 | - defaults: 32 | net: {{network}} 33 | {% endfor %} 34 | 35 | named: 36 | service: 37 | - name: {{ pillar['services']['dns'] }} 38 | - running 39 | - enable: True 40 | - watch: 41 | - file: /etc/named.conf 42 | - file: /var/named/forward 43 | - require: 44 | - pkg: {{ pillar['pkgs']['dns'] }} 45 | - file: /etc/named.conf 46 | - file: /var/named/forward 47 | -------------------------------------------------------------------------------- /salt/dns/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | 4 | named: 5 | service_description: check_proc_named 6 | check_command: check_nrpe!check_proc_named 7 | 8 | command: 9 | 10 | named: 11 | command_name: check_proc_named 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C named" 14 | -------------------------------------------------------------------------------- /salt/dumb.jinja: -------------------------------------------------------------------------------- 1 | {% import 'include/myself.sls' as ms with context %} 2 | {{ms.type+ms.subtype+ms.hostname+ms.netpath}} 3 | Hello World ! 
4 | {{salt['pillar.get']('helloworld')}} 5 | {{custom_var}} 6 | -------------------------------------------------------------------------------- /salt/dumb.sls: -------------------------------------------------------------------------------- 1 | {% import 'include/myself.sls' as ms with context %} 2 | /root/dumb.{{ms.os}}.{{ms.os_release}}: 3 | file: 4 | - managed 5 | - source: salt://dumb.jinja 6 | - template: jinja 7 | - defaults: 8 | custom_var: {{ms}} 9 | -------------------------------------------------------------------------------- /salt/include/myself.sls: -------------------------------------------------------------------------------- 1 | {% set subtype = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':subtype') %} 2 | {% set type = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type') %} 3 | 4 | {% set hostname = (salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),'')) %} 5 | 6 | {% if type == "masters" %} 7 | {% set netpath = type~":"~hostname~":network" %} 8 | {% else %} 9 | {% set netpath = type~":"~subtype~":"~hostname~":network" %} 10 | {% endif %} 11 | 12 | {% set os = salt['pillar.get'](type~'_system:'~subtype~':operating_system:os') %} 13 | {% set os_release = salt['pillar.get'](type~'_system:'~subtype~':operating_system:os_release') %} 14 | -------------------------------------------------------------------------------- /salt/ldap/basedomain.ldif.jinja: -------------------------------------------------------------------------------- 1 | dn: {% set count = 1 %}{% for dc in pillar['ldap_public']['dc'] %}{% if count == 1%}dc={{dc}}{% set count = 2 %}{% else %},dc={{dc}}{% endif %}{% endfor %} 2 | objectClass: top 3 | objectClass: dcObject 4 | objectclass: organization 5 | o: {% for dc in pillar['ldap_public']['dc'] %}{{dc}} {% endfor %} 6 | dc: {% set count = 1 %}{% for dc in pillar['ldap_public']['dc'] %}{% if count == 1%}{{dc}}{% set count = 2 %}{% else %}{% endif %}{% endfor %} 7 | 8 | dn: cn=Manager{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %} 9 | objectClass: organizationalRole 10 | cn: Manager 11 | description: Directory Manager 12 | 13 | dn: ou=People{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %} 14 | objectClass: organizationalUnit 15 | ou: People 16 | 17 | dn: ou=Group{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %} 18 | objectClass: organizationalUnit 19 | ou: Group 20 | 21 | -------------------------------------------------------------------------------- /salt/ldap/chdomain.ldif.jinja: -------------------------------------------------------------------------------- 1 | dn: olcDatabase={1}monitor,cn=config 2 | changetype: modify 3 | replace: olcAccess 4 | olcAccess: {0}to * by dn.base="gidNumber=0+uidNumber=0,cn=peercred,cn=external,cn=auth" 5 | read by dn.base="cn=Manager{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %}" read by * none 6 | 7 | dn: olcDatabase={2}hdb,cn=config 8 | changetype: modify 9 | replace: olcSuffix 10 | olcSuffix: {% set count = 1 %}{% for dc in pillar['ldap_public']['dc'] %}{% if count == 1%}dc={{dc}}{% set count = 2 %}{% else %},dc={{dc}}{% endif %}{% endfor %} 11 | 12 | dn: olcDatabase={2}hdb,cn=config 13 | changetype: modify 14 | replace: olcRootDN 15 | olcRootDN: cn=Manager{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %} 16 | 17 | dn: olcDatabase={2}hdb,cn=config 18 | changetype: modify 19 | add: olcRootPW 20 | olcRootPW: {{pillar['ldap_public']['ldap_admin_pass_ssha']}} 
21 | 22 | dn: olcDatabase={2}hdb,cn=config 23 | changetype: modify 24 | add: olcAccess 25 | olcAccess: {0}to attrs=userPassword,shadowLastChange by 26 | dn="cn=Manager{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %}" write by anonymous auth by self write by * none 27 | olcAccess: {1}to dn.base="" by * read 28 | olcAccess: {2}to * by dn="cn=Manager{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %}" write by * read 29 | 30 | -------------------------------------------------------------------------------- /salt/ldap/client.sls: -------------------------------------------------------------------------------- 1 | sssd_package: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['sssd'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | enable_sssd: 8 | cmd.run: 9 | - name: authconfig --enablesssd --enablesssdauth --enablelocauthorize --enablemkhomedir --update 10 | - unless: grep "USESSSDAUTH=yes" /etc/sysconfig/authconfig 11 | - require: 12 | - pkg: sssd_package 13 | 14 | /etc/sssd/sssd.conf: 15 | file.managed: 16 | - source: salt://ldap/sssd.conf.jinja 17 | - template: jinja 18 | - mode: 600 19 | 20 | nscd_service: 21 | service: 22 | - name: {{ pillar['services']['nscd'] }} 23 | - dead 24 | - enable: False 25 | - require: 26 | - cmd: enable_sssd 27 | 28 | sssd_service: 29 | service: 30 | - name: {{ pillar['services']['sssd'] }} 31 | - running 32 | - enable: True 33 | - require: 34 | - file: /etc/sssd/sssd.conf 35 | - cmd: enable_sssd 36 | 37 | -------------------------------------------------------------------------------- /salt/ldap/client_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | sssd: 4 | service_description: check_proc_sssd 5 | check_command: check_nrpe!check_proc_sssd 6 | 7 | command: 8 | sssd: 9 | command_name: check_proc_sssd 10 | command_path: "/usr/lib64/nagios/plugins/check_procs" 11 | command_arguments: "-w 1: -c 1:1 -C sssd" 12 | 13 | -------------------------------------------------------------------------------- /salt/ldap/phpldapadmin.sls: -------------------------------------------------------------------------------- 1 | webserver_pkg_ldap: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['webserver'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | phpldapadmin: 8 | pkg.installed: 9 | - name: {{ pillar['pkgs']['phpldapadmin'] }} 10 | - require: 11 | - sls: repository.client 12 | 13 | /etc/phpldapadmin/config.php_comment: 14 | file.comment: 15 | - name: /etc/phpldapadmin/config.php 16 | - char: // 17 | - regex: ^\$servers\-\>setValue\('login','attr','uid'\); 18 | - require: 19 | - pkg: phpldapadmin 20 | 21 | /etc/phpldapadmin/config.php_uncomment: 22 | file.uncomment: 23 | - name: /etc/phpldapadmin/config.php 24 | - char: // 25 | - regex: \$servers\-\>setValue\('login','attr','dn'\); 26 | - require: 27 | - file: /etc/phpldapadmin/config.php_comment 28 | 29 | /etc/httpd/conf.d/phpldapadmin.conf: 30 | file.line: 31 | - mode: replace 32 | - match: Allow from 127.0.0.1 33 | - content: Allow from 127.0.0.1 {{ pillar['core']['salt_master_ip'] }} 34 | - require: 35 | - pkg: phpldapadmin 36 | 37 | enableaccess: 38 | cmd.run: 39 | - name: setsebool httpd_can_connect_ldap 1 40 | - onlyif: getsebool -a | grep httpd_can_connect_ldap | grep off 41 | - require: 42 | - pkg: phpldapadmin 43 | 44 | webserver_service_ldap: 45 | service: 46 | - name: {{ pillar['services']['webserver'] }} 47 | - running 48 | - enable: True 49 |
- require: 50 | - pkg: webserver_pkg_ldap 51 | - file: /etc/phpldapadmin/config.php_comment 52 | - file: /etc/phpldapadmin/config.php_uncomment 53 | - file: /etc/httpd/conf.d/phpldapadmin.conf 54 | - cmd: enableaccess 55 | - watch: 56 | - file: /etc/phpldapadmin/config.php_comment 57 | - file: /etc/phpldapadmin/config.php_uncomment 58 | - file: /etc/httpd/conf.d/phpldapadmin.conf 59 | - cmd: enableaccess 60 | 61 | -------------------------------------------------------------------------------- /salt/ldap/server.sls: -------------------------------------------------------------------------------- 1 | ldap_server: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['ldap_server'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | ldap_client: 8 | pkg.installed: 9 | - name: {{ pillar['pkgs']['ldap_client'] }} 10 | - require: 11 | - sls: repository.client 12 | 13 | /var/lib/ldap/DB_CONFIG: 14 | file.copy: 15 | - source: /usr/share/openldap-servers/DB_CONFIG.example 16 | - unless: test -e /var/lib/ldap/DB_CONFIG 17 | - user: ldap 18 | - group: ldap 19 | - require: 20 | - pkg: ldap_server 21 | - pkg: ldap_client 22 | 23 | ldap_service: 24 | service: 25 | - name: {{ pillar['services']['ldapserver'] }} 26 | - running 27 | - enable: True 28 | - require: 29 | - file: /var/lib/ldap/DB_CONFIG 30 | 31 | /root/chdomain.ldif: 32 | file.managed: 33 | - source: salt://ldap/chdomain.ldif.jinja 34 | - template: jinja 35 | - mode: '0644' 36 | 37 | /root/basedomain.ldif: 38 | file.managed: 39 | - source: salt://ldap/basedomain.ldif.jinja 40 | - template: jinja 41 | - mode: '0644' 42 | 43 | cosine.ldif: 44 | cmd.run: 45 | - name: ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/cosine.ldif 46 | - unless: test -e /etc/openldap/slapd.d/cn=config/cn=schema/cn={1}cosine.ldif 47 | - require: 48 | - service: ldap_service 49 | 50 | nis.ldif: 51 | cmd.run: 52 | - name: ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/nis.ldif 53 | - unless: test -e /etc/openldap/slapd.d/cn=config/cn=schema/cn={2}nis.ldif 54 | - require: 55 | - cmd: cosine.ldif 56 | 57 | inetorgperson.ldif: 58 | cmd.run: 59 | - name: ldapadd -Y EXTERNAL -H ldapi:/// -f /etc/openldap/schema/inetorgperson.ldif 60 | - unless: test -e /etc/openldap/slapd.d/cn=config/cn=schema/cn={3}inetorgperson.ldif 61 | - require: 62 | - cmd: nis.ldif 63 | 64 | chdomain.ldif: 65 | cmd.run: 66 | - name: ldapmodify -Y EXTERNAL -H ldapi:/// -f chdomain.ldif 67 | - unless: grep RootPW /etc/openldap/slapd.d/cn=config/olcDatabase={2}hdb.ldif 68 | - require: 69 | - cmd: inetorgperson.ldif 70 | - file: /root/chdomain.ldif 71 | 72 | basedomain.ldif: 73 | cmd.run: 74 | - name: ldapadd -x -D cn=Manager{% for dc in pillar['ldap_public']['dc'] %},dc={{dc}}{% endfor %} -w {{ pillar['ldap_private']['ldap_admin_pass'] }} -f basedomain.ldif 75 | - unless: test $(($(ldapsearch -x -b "dc={{ pillar['ldap_public']['dc']|join(',dc=') }}" | grep People > /dev/null 2>&1; echo $?) + $(ldapsearch -x -b "dc={{ pillar['ldap_public']['dc']|join(',dc=') }}" | grep Group > /dev/null 2>&1; echo $?) )) = 0 76 | - onlyif: test $(systemctl status slapd > /dev/null 2>&1; echo $?) = 0 77 | - require: 78 | - cmd: chdomain.ldif 79 | - file: /root/basedomain.ldif 80 | 81 | -------------------------------------------------------------------------------- /salt/ldap/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !!
2 | service: 3 | 4 | slapd: 5 | service_description: check_proc_slapd 6 | check_command: check_nrpe!check_proc_slapd 7 | 8 | command: 9 | 10 | slapd: 11 | command_name: check_proc_slapd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C slapd" 14 | -------------------------------------------------------------------------------- /salt/ldap/sssd.conf.jinja: -------------------------------------------------------------------------------- 1 | [domain/LDAP] 2 | enumerate = False 3 | cache_credentials = true 4 | id_provider = ldap 5 | auth_provider = ldap 6 | #access_provider = ldap 7 | access_provider = permit 8 | chpass_provider = ldap 9 | ldap_uri = ldap://{{salt['pillar.get']('engine_connect:authentication_server_ip')}}/ 10 | ldap_search_base = {% set count = 1 %}{% for dc in pillar['ldap_public']['dc'] %}{% if count == 1%}dc={{dc}}{% set count = 2 %}{% else %},dc={{dc}}{% endif %}{% endfor %} 11 | ldap_tls_reqcert = never 12 | ldap_auth_disable_tls_never_use_in_production = true 13 | [sssd] 14 | services = nss, pam 15 | config_file_version = 2 16 | domains = LDAP 17 | sbus_timeout = 25 18 | reconnection_retries = 4 19 | [nss] 20 | filter_groups = root 21 | filter_users = root 22 | reconnection_retries = 3 23 | [pam] 24 | [sudo] 25 | [autofs] 26 | [ssh] 27 | [pac] 28 | [ifp] 29 | -------------------------------------------------------------------------------- /salt/network/firewall.sls: -------------------------------------------------------------------------------- 1 | firewall: 2 | pkg.removed: 3 | - name: {{ pillar['pkgs']['firewall'] }} 4 | - require: 5 | - sls: repository.client 6 | -------------------------------------------------------------------------------- /salt/network/masquerading.sls: -------------------------------------------------------------------------------- 1 | # On the fly masquerading, not persistent. 
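# A rough sketch of what the two cmd.run states below end up executing on a master
# whose pillar defines an external_interface (the interface name eth1 is only an
# illustrative value, not taken from this repository):
#   sysctl -w net.ipv4.ip_forward=1
#   iptables -t nat -A POSTROUTING -o eth1 -j MASQUERADE
# i.e. IP forwarding plus NAT masquerading on the external interface, applied at
# run time only and lost on reboot.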
2 | 3 | {% set group = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':subtype') %} 4 | {% set type = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type') %} 5 | 6 | 7 | {% if type == "masters" %} 8 | {% set netpath = type~":"~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),'')) %} 9 | {% else %} 10 | {% set netpath = type~":"~group~":"~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),'')) %} 11 | {% endif %} 12 | 13 | 14 | {% if salt['pillar.get'](netpath~':external_interface') is defined and salt['pillar.get'](netpath~':external_interface') is not none %} 15 | non_persistent_ipforward: 16 | cmd.run: 17 | - name: sysctl -w net.ipv4.ip_forward=1 18 | 19 | # {{salt['pillar.get'](netpath~':external_interface')}} 20 | non_persistent_masquerading: 21 | cmd.run: 22 | - name: iptables -t nat -A POSTROUTING -o {{salt['pillar.get'](netpath~':external_interface')}} -j MASQUERADE 23 | 24 | {% endif %} 25 | -------------------------------------------------------------------------------- /salt/network/nmanager.sls: -------------------------------------------------------------------------------- 1 | networkmanager_service: 2 | service: 3 | - name: {{ pillar['services']['networkmanager'] }} 4 | - dead 5 | - disabled: True 6 | 7 | networkmanager: 8 | pkg.removed: 9 | - name: {{ pillar['pkgs']['networkmanager'] }} 10 | - require: 11 | - sls: repository.client 12 | - service: networkmanager_service 13 | -------------------------------------------------------------------------------- /salt/network/static.sls: -------------------------------------------------------------------------------- 1 | {% if salt['grains.get']('localhost') != salt['grains.get']('id') %} 2 | sethostname: 3 | cmd.run: 4 | - name: hostnamectl set-hostname {{salt['grains.get']('id')}} 5 | {% endif %} 6 | 7 | {% set group = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':subtype') %} 8 | {% set type = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type') %} 9 | 10 | 11 | {% if type == "masters" %} 12 | {% set netpath = type~":"~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),''))~":network" %} 13 | {% else %} 14 | {% set netpath = type~":"~group~":"~(salt['grains.get']('id')|replace("."~salt['pillar.get']('network:global_parameters:domain_name'),''))~":network" %} 15 | {% endif %} 16 | 17 | {% for network, argo in salt['pillar.get'](netpath).items() %} 18 | 19 | {% if network == salt['pillar.get']('network:global_parameters:admin_network') and argo.interface == "auto" %} 20 | # auto interface enabled 21 | {% for interf, args in salt['grains.get']('ip4_interfaces').items() %} 22 | {% for ips in args %} 23 | {% if salt['pillar.get']("engine_network:"~network~":matchpatern") in ips %} 24 | {{interf}}: 25 | network.managed: 26 | - enabled: True 27 | - type: eth 28 | - proto: none 29 | - ipaddr: {{ips}} 30 | - netmask: {{salt['pillar.get']("engine_network:"~network~":netmask")}} 31 | {% if salt['pillar.get']("engine_network:"~network~":gateway") is defined %} 32 | - gateway: {{salt['pillar.get']("engine_network:"~network~":gateway")}} 33 | {% endif %} 34 | {% endif %} 35 | {% endfor %} 36 | {% endfor %} 37 | {% endif %} 38 | 39 | {% if argo.interface != "auto" and argo.interface != "na" %} 40 | {{argo.interface}}: 41 | network.managed: 42 | - enabled: True 43 | - type: eth 44 | - proto: none 45 | - ipaddr: {{argo.ip}} 46 | - 
netmask: {{salt['pillar.get']("engine_network:"~network~":netmask")}} {#"network:"~network~":netmask")}}#} 47 | {% if salt['pillar.get']("engine_network:"~network~":gateway") is defined and salt['pillar.get']("engine_network:"~network~":gateway") is not none %} 48 | - gateway: {{salt['pillar.get']("engine_network:"~network~":gateway")}} 49 | {% endif %} 50 | 51 | {% endif %} 52 | 53 | {% endfor %} 54 | -------------------------------------------------------------------------------- /salt/nfs/client.sls: -------------------------------------------------------------------------------- 1 | {% set host_type = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type') %} 2 | {% set host_subtype = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':subtype') %} 3 | nfs_utils: 4 | pkg.installed: 5 | - name: {{ pillar['pkgs']['nfs_utils'] }} 6 | - require: 7 | - sls: repository.client 8 | 9 | {% for nfsservid, nfsservid_args in salt['pillar.get']('nfs', {}).items() %} 10 | {% for mountpoint, mountpoint_args in nfsservid_args.items() %} 11 | {% if (host_type~':'~host_subtype) in mountpoint_args.mountpool %} 12 | {{mountpoint}}: 13 | mount.mounted: 14 | {%- if mountpoint_args.network == "net0" %} 15 | - device: {{nfsservid}}:{{mountpoint_args.servermountpoint}} 16 | {%- else %} 17 | - device: {{nfsservid}}.{{mountpoint_args.network}}:{{mountpoint_args.servermountpoint}} 18 | {%- endif %} 19 | - fstype: nfs 20 | - persist: True 21 | - mkmnt: True 22 | - opts: {{mountpoint_args.mount_parameters}} 23 | - require: 24 | - pkg: nfs_utils 25 | - sls: dns.client 26 | {% endif %} 27 | {% endfor %}{% endfor %} 28 | -------------------------------------------------------------------------------- /salt/nfs/exports.jinja: -------------------------------------------------------------------------------- 1 | {% for nfsservid, nfsservid_args in salt['pillar.get']('nfs', {}).items() %} 2 | {% if salt['grains.get']('host') == nfsservid %} 3 | {% for mount, mount_args in nfsservid_args.items() %}{{mount_args.servermountpoint}} {{salt['pillar.get']("network:"~mount_args.network~":subnet")}}/16({{mount_args.export_parameters}}) 4 | {% endfor %}{% endif %}{% endfor %} 5 | -------------------------------------------------------------------------------- /salt/nfs/server.sls: -------------------------------------------------------------------------------- 1 | {% for nfsservid, args in salt['pillar.get']('nfs', {}).items() %} 2 | {% if salt['grains.get']('host') == nfsservid %} 3 | 4 | nfs_utils: 5 | pkg.installed: 6 | - name: {{ pillar['pkgs']['nfs_utils'] }} 7 | - require: 8 | - sls: repository.client 9 | 10 | /etc/exports: 11 | file: 12 | - managed 13 | - source: salt://nfs/exports.jinja 14 | - template: jinja 15 | 16 | rpcbind: 17 | service: 18 | - name: {{ pillar['services']['rpcbind'] }} 19 | - running 20 | - enable: True 21 | - require: 22 | - pkg: {{ pillar['pkgs']['nfs_utils'] }} 23 | 24 | nfs_server: 25 | service: 26 | - name: {{ pillar['services']['nfs_server'] }} 27 | - running 28 | - enable: True 29 | - watch: 30 | - file: /etc/exports 31 | - require: 32 | - pkg: {{ pillar['pkgs']['nfs_utils'] }} 33 | - file: /etc/exports 34 | - service: rpcbind 35 | 36 | {% endif %} 37 | {% endfor %} 38 | 39 | -------------------------------------------------------------------------------- /salt/nfs/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 
2 | service: 3 | 4 | nfsd: 5 | service_description: check_proc_nfsd 6 | check_command: check_nrpe!check_proc_nfsd 7 | 8 | rpcbind: 9 | service_description: check_proc_rpcbind 10 | check_command: check_nrpe!check_proc_rpcbind 11 | 12 | command: 13 | 14 | nfsd: 15 | command_name: check_proc_nfsd 16 | command_path: "/usr/lib64/nagios/plugins/check_procs" 17 | command_arguments: "-w 8:8 -c 7:9 -C nfsd" 18 | 19 | rpcbind: 20 | command_name: check_proc_rpcbind 21 | command_path: "/usr/lib64/nagios/plugins/check_procs" 22 | command_arguments: "-w 1: -c 1:1 -C rpcbind" 23 | -------------------------------------------------------------------------------- /salt/ntp/client.sls: -------------------------------------------------------------------------------- 1 | ntp: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['ntp'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/ntp.conf: 8 | file: 9 | - managed 10 | - source: salt://ntp/ntp.conf.client.jinja 11 | - template: jinja 12 | - require: 13 | - pkg: {{ pillar['pkgs']['ntp'] }} 14 | 15 | ntpd: 16 | service: 17 | - name: {{ pillar['services']['ntp'] }} 18 | - running 19 | - enable: True 20 | - watch: 21 | - file: /etc/ntp.conf 22 | - require: 23 | - pkg: {{ pillar['pkgs']['ntp'] }} 24 | - file: /etc/ntp.conf 25 | 26 | -------------------------------------------------------------------------------- /salt/ntp/client_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | 4 | ntpd: 5 | service_description: check_proc_ntpd 6 | check_command: check_nrpe!check_proc_ntpd 7 | 8 | command: 9 | 10 | ntpd: 11 | command_name: check_proc_ntpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C ntpd" 14 | -------------------------------------------------------------------------------- /salt/ntp/ntp.conf.client.jinja: -------------------------------------------------------------------------------- 1 | # For more information about this file, see the man pages 2 | # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). 3 | 4 | driftfile /var/lib/ntp/drift 5 | 6 | # Permit time synchronization with our time source, but do not 7 | # permit the source to query or modify the service on this system. 8 | restrict default nomodify notrap nopeer noquery 9 | 10 | # Permit all access over the loopback interface. This could 11 | # be tightened as well, but to do so would effect some of 12 | # the administrative functions. 13 | restrict 127.0.0.1 14 | restrict ::1 15 | 16 | # Hosts on local network are less restricted. 17 | 18 | # Use public servers from the pool.ntp.org project. 19 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 20 | #server 0.centos.pool.ntp.org iburst 21 | #server 1.centos.pool.ntp.org iburst 22 | #server 2.centos.pool.ntp.org iburst 23 | #server 3.centos.pool.ntp.org iburst 24 | server {{salt['pillar.get']('engine_connect:ntp_server_ip')}} iburst 25 | 26 | #broadcast 192.168.1.255 autokey # broadcast server 27 | #broadcastclient # broadcast client 28 | #broadcast 224.0.1.1 autokey # multicast server 29 | #multicastclient 224.0.1.1 # multicast client 30 | #manycastserver 239.255.254.254 # manycast server 31 | #manycastclient 239.255.254.254 autokey # manycast client 32 | 33 | # Enable public key cryptography. 
34 | #crypto 35 | 36 | includefile /etc/ntp/crypto/pw 37 | 38 | # Key file containing the keys and key identifiers used when operating 39 | # with symmetric key cryptography. 40 | keys /etc/ntp/keys 41 | 42 | # Specify the key identifiers which are trusted. 43 | #trustedkey 4 8 42 44 | 45 | # Specify the key identifier to use with the ntpdc utility. 46 | #requestkey 8 47 | 48 | # Specify the key identifier to use with the ntpq utility. 49 | #controlkey 8 50 | 51 | # Enable writing of statistics records. 52 | #statistics clockstats cryptostats loopstats peerstats 53 | 54 | # Disable the monitoring facility to prevent amplification attacks using ntpdc 55 | # monlist command when default restrict does not include the noquery flag. See 56 | # CVE-2013-5211 for more details. 57 | # Note: Monitoring will not be disabled with the limited restriction flag. 58 | disable monitor 59 | -------------------------------------------------------------------------------- /salt/ntp/ntp.conf.jinja: -------------------------------------------------------------------------------- 1 | {% set admin_network = salt['pillar.get']('network:global_parameters:admin_network') %} 2 | # For more information about this file, see the man pages 3 | # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). 4 | 5 | driftfile /var/lib/ntp/drift 6 | 7 | # Permit time synchronization with our time source, but do not 8 | # permit the source to query or modify the service on this system. 9 | restrict default nomodify notrap nopeer noquery 10 | 11 | # Permit all access over the loopback interface. This could 12 | # be tightened as well, but to do so would effect some of 13 | # the administrative functions. 14 | restrict 127.0.0.1 15 | restrict ::1 16 | 17 | # Hosts on local network are less restricted. 18 | restrict {{salt['pillar.get']('engine_network:'~admin_network~':subnet')}} mask {{salt['pillar.get']('engine_network:'~admin_network~':netmask')}} nomodify notrap 19 | 20 | # Use public servers from the pool.ntp.org project. 21 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 22 | #server 0.centos.pool.ntp.org iburst 23 | #server 1.centos.pool.ntp.org iburst 24 | #server 2.centos.pool.ntp.org iburst 25 | #server 3.centos.pool.ntp.org iburst 26 | server 127.127.1.0 27 | fudge 127.127.1.0 stratum 14 28 | 29 | #broadcast 192.168.1.255 autokey # broadcast server 30 | #broadcastclient # broadcast client 31 | #broadcast 224.0.1.1 autokey # multicast server 32 | #multicastclient 224.0.1.1 # multicast client 33 | #manycastserver 239.255.254.254 # manycast server 34 | #manycastclient 239.255.254.254 autokey # manycast client 35 | 36 | # Enable public key cryptography. 37 | #crypto 38 | 39 | includefile /etc/ntp/crypto/pw 40 | 41 | # Key file containing the keys and key identifiers used when operating 42 | # with symmetric key cryptography. 43 | keys /etc/ntp/keys 44 | 45 | # Specify the key identifiers which are trusted. 46 | #trustedkey 4 8 42 47 | 48 | # Specify the key identifier to use with the ntpdc utility. 49 | #requestkey 8 50 | 51 | # Specify the key identifier to use with the ntpq utility. 52 | #controlkey 8 53 | 54 | # Enable writing of statistics records. 55 | #statistics clockstats cryptostats loopstats peerstats 56 | 57 | # Disable the monitoring facility to prevent amplification attacks using ntpdc 58 | # monlist command when default restrict does not include the noquery flag. See 59 | # CVE-2013-5211 for more details. 
60 | # Note: Monitoring will not be disabled with the limited restriction flag. 61 | disable monitor 62 | -------------------------------------------------------------------------------- /salt/ntp/ntp.conf.jinja.bkp: -------------------------------------------------------------------------------- 1 | {% set netadmin = 'engine_network:'~salt['pillar.get']('core:admin_network') %} 2 | # For more information about this file, see the man pages 3 | # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). 4 | 5 | driftfile /var/lib/ntp/drift 6 | 7 | # Permit time synchronization with our time source, but do not 8 | # permit the source to query or modify the service on this system. 9 | restrict default nomodify notrap nopeer noquery 10 | 11 | # Permit all access over the loopback interface. This could 12 | # be tightened as well, but to do so would effect some of 13 | # the administrative functions. 14 | restrict 127.0.0.1 15 | restrict ::1 16 | 17 | # Hosts on local network are less restricted. 18 | restrict {{salt['pillar.get'](netadmin~':subnet')}} mask {{salt['pillar.get'](netadmin~':netmask')}} nomodify notrap 19 | 20 | # Use public servers from the pool.ntp.org project. 21 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 22 | #server 0.centos.pool.ntp.org iburst 23 | #server 1.centos.pool.ntp.org iburst 24 | #server 2.centos.pool.ntp.org iburst 25 | #server 3.centos.pool.ntp.org iburst 26 | server 127.127.1.0 27 | fudge 127.127.1.0 stratum 14 28 | 29 | #broadcast 192.168.1.255 autokey # broadcast server 30 | #broadcastclient # broadcast client 31 | #broadcast 224.0.1.1 autokey # multicast server 32 | #multicastclient 224.0.1.1 # multicast client 33 | #manycastserver 239.255.254.254 # manycast server 34 | #manycastclient 239.255.254.254 autokey # manycast client 35 | 36 | # Enable public key cryptography. 37 | #crypto 38 | 39 | includefile /etc/ntp/crypto/pw 40 | 41 | # Key file containing the keys and key identifiers used when operating 42 | # with symmetric key cryptography. 43 | keys /etc/ntp/keys 44 | 45 | # Specify the key identifiers which are trusted. 46 | #trustedkey 4 8 42 47 | 48 | # Specify the key identifier to use with the ntpdc utility. 49 | #requestkey 8 50 | 51 | # Specify the key identifier to use with the ntpq utility. 52 | #controlkey 8 53 | 54 | # Enable writing of statistics records. 55 | #statistics clockstats cryptostats loopstats peerstats 56 | 57 | # Disable the monitoring facility to prevent amplification attacks using ntpdc 58 | # monlist command when default restrict does not include the noquery flag. See 59 | # CVE-2013-5211 for more details. 60 | # Note: Monitoring will not be disabled with the limited restriction flag. 
61 | disable monitor 62 | 63 | -------------------------------------------------------------------------------- /salt/ntp/server.sls: -------------------------------------------------------------------------------- 1 | ntp: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['ntp'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/ntp.conf: 8 | file: 9 | - managed 10 | - source: salt://ntp/ntp.conf.jinja 11 | - template: jinja 12 | - require: 13 | - pkg: {{ pillar['pkgs']['ntp'] }} 14 | 15 | ntpd: 16 | service: 17 | - name: {{ pillar['services']['ntp'] }} 18 | - running 19 | - enable: True 20 | - watch: 21 | - file: /etc/ntp.conf 22 | - require: 23 | - pkg: {{ pillar['pkgs']['ntp'] }} 24 | - file: /etc/ntp.conf 25 | 26 | -------------------------------------------------------------------------------- /salt/ntp/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | 4 | ntpd: 5 | service_description: check_proc_ntpd 6 | check_command: check_nrpe!check_proc_ntpd 7 | 8 | command: 9 | 10 | ntpd: 11 | command_name: check_proc_ntpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 1: -c 1:1 -C ntpd" 14 | -------------------------------------------------------------------------------- /salt/nyancat/init.sls: -------------------------------------------------------------------------------- 1 | nyancat: 2 | pkg.installed: 3 | - name: nyancat 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/profile.d/nyancat_login.sh: 8 | file: 9 | - managed 10 | - source: salt://nyancat/nyancat_login.sh 11 | - mode: 555 12 | - require: 13 | - pkg: nyancat 14 | 15 | -------------------------------------------------------------------------------- /salt/nyancat/nyancat_login.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | if [ "$PS1" ]; then 3 | nyancat -f 20 4 | fi; 5 | 6 | -------------------------------------------------------------------------------- /salt/pxe/default.jinja: -------------------------------------------------------------------------------- 1 | default menu.c32 2 | prompt 5 3 | timeout 30 4 | MENU TITLE Banquise PXE Menu for {{type}}:{{subtype}} 5 | 6 | default {{os}}_{{os_release}}_INSTALL 7 | # default localdisk 8 | 9 | LABEL {{os}}_{{os_release}}_INSTALL 10 | MENU LABEL {{type}} {{subtype}} 11 | KERNEL /netboot/{{os}}/{{os_release}}/vmlinuz 12 | APPEND initrd=/netboot/{{os}}/{{os_release}}/initrd.img inst.repo=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/{{os}}_{{os_release}}.local.repo ks=http://{{salt['pillar.get']('engine_connect:pxe_server_ip')}}/ks_{{type}}_{{subtype}}.cfg {% if salt['pillar.get'](type~'_system:'~subtype~':operating_system:kernel_parameters') is not none %}{{salt['pillar.get'](type~'_system:'~subtype~':operating_system:kernel_parameters')}}{% endif %} {% if salt['pillar.get'](type~'_system:'~subtype~':bmc:console') is not none %}{{salt['pillar.get'](type~'_system:'~subtype~':bmc:console')}}{% endif %} 13 | 14 | LABEL localdisk 15 | KERNEL chain.c32 16 | APPEND hd0 17 | 18 | -------------------------------------------------------------------------------- /salt/pxe/default_ks.jinja: -------------------------------------------------------------------------------- 1 | default menu.c32 2 | prompt 5 3 | timeout 30 4 | MENU TITLE Banquise PXE Menu for {{type}}:{{subtype}} 5 | 6 | default {{os}}_{{os_release}}_INSTALL 7 | # default 
localdisk 8 | 9 | LABEL {{os}}_{{os_release}}_INSTALL 10 | MENU LABEL {{type}} {{subtype}} 11 | KERNEL /netboot/{{os}}/{{os_release}}/vmlinuz 12 | APPEND initrd=/netboot/{{os}}/{{os_release}}/initrd.img inst.repo=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/{{os}}_{{os_release}}.local.repo ks=http://{{salt['pillar.get']('engine_connect:pxe_server_ip')}}/ks/ks_{{type}}_{{subtype}}.cfg {% if salt['pillar.get'](type~'_system:'~subtype~':operating_system:kernel_parameters') is not none %}{{salt['pillar.get'](type~'_system:'~subtype~':operating_system:kernel_parameters')}}{% endif %} {% if salt['pillar.get'](type~'_system:'~subtype~':bmc:console') is not none %}{{salt['pillar.get'](type~'_system:'~subtype~':bmc:console')}}{% endif %} 13 | 14 | LABEL localdisk 15 | KERNEL chain.c32 16 | APPEND hd0 17 | -------------------------------------------------------------------------------- /salt/pxe/default_preseed.jinja: -------------------------------------------------------------------------------- 1 | default menu.c32 2 | prompt 5 3 | timeout 30 4 | MENU TITLE Banquise PXE Menu for {{type}}:{{subtype}} 5 | 6 | default {{os}}_{{os_release}}_INSTALL 7 | # default localdisk 8 | 9 | LABEL {{os}}_{{os_release}}_INSTALL 10 | MENU LABEL {{type}} {{subtype}} 11 | KERNEL /netboot/{{os}}/{{os_release}}/linux 12 | APPEND auto=true locale={{salt['pillar.get']('core:language')|lower}}_{{salt['pillar.get']('core:language')|upper}}.UTF-8 console-setup/charmap=UTF-8 console-keymaps-at/keymaps=pc105 console-setup/layoutcode={{salt['pillar.get']('core:keyboard')|lower}} console-setup/ask_detect=false pkgsel/language-pack-patterns=pkgsel/install-language-support=false netcfg/choose_interface=auto hostname=unassigned-hostname domain=unassigned-domain vga=normal url=http://10.1.0.1/preseed/{{type}}_{{subtype}}.preseed initrd=/netboot/{{os}}/{{os_release}}/initrd.gz {% if salt['pillar.get'](type~'_system:'~subtype~':operating_system:kernel_parameters') is not none %}{{salt['pillar.get'](type~'_system:'~subtype~':operating_system:kernel_parameters')}}{% endif %} {% if salt['pillar.get'](type~'_system:'~subtype~':bmc:console') is not none %}{{salt['pillar.get'](type~'_system:'~subtype~':bmc:console')}}{% endif %} -- 13 | 14 | LABEL localdisk 15 | KERNEL chain.c32 16 | APPEND hd0 17 | -------------------------------------------------------------------------------- /salt/pxe/ks.cfg.jinja: -------------------------------------------------------------------------------- 1 | # Kickstart File 2 | # OS is {{os}} {{os_release}} 3 | # Target system are {{type}}:{{subtype}} 4 | 5 | # System authorization information 6 | auth --enableshadow --passalgo=sha512 7 | 8 | # Do not use graphical install 9 | text 10 | 11 | # Run the Setup Agent on first boot 12 | firstboot --enable 13 | 14 | # Salt Stack repository (to get salt-minion) 15 | {% if os == 'Centos' %} 16 | {% if os_release == '7.4.1708' %} 17 | {% set salt_reponame = 'RH_7' %} 18 | {% endif %} 19 | {% endif %} 20 | {% if os == 'Fedora' %} 21 | {% if os_release == '27' %} 22 | {% set salt_reponame = 'Fedora_27' %} 23 | {% endif %} 24 | {% endif %} 25 | 26 | repo --name=salt.local.repo --baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/salt_{{salt_reponame}}.local.repo 27 | 28 | # Keyboard layouts 29 | keyboard --vckeymap={{salt['pillar.get']('core:keyboard')|lower}} --xlayouts='{{salt['pillar.get']('core:keyboard')|lower}}' 30 | 31 | # System language 32 | lang 
{{salt['pillar.get']('core:language')|lower}}_{{salt['pillar.get']('core:language')|upper}}.UTF-8 33 | 34 | # Network information 35 | network --onboot yes --bootproto dhcp 36 | network --hostname=localhost.localdomain 37 | 38 | # Root password 39 | rootpw --iscrypted {{salt['pillar.get']('passwords_public:root_password_hash')}} 40 | 41 | # System timezone 42 | timezone {{salt['pillar.get']('core:time_zone')}} --isUtc 43 | 44 | # System bootloader configuration 45 | bootloader --append=" crashkernel=auto" --location=mbr 46 | 47 | # Partition clearing information 48 | {{salt['pillar.get'](type~'_system:'~subtype~':operating_system:partitioning')}} 49 | #clearpart --all --initlabel 50 | #part /boot --fstype=ext4 --size=2048 51 | #part / --fstype=ext4 --size=1 --grow 52 | 53 | # Reboot after installation 54 | reboot 55 | 56 | # Install minimal system and salt-minion 57 | %packages 58 | @core 59 | salt-minion 60 | %end 61 | 62 | # Add root public key in authorized_keys on minion, don't forget to restorecon 63 | # Add salt and salt-master hosts to hosts file 64 | %post --log=/root/ks-post.log 65 | mkdir /root/.ssh 66 | cat << xxEOFxx >> /root/.ssh/authorized_keys 67 | {{salt['pillar.get']('ssh_public:ssh_master_public_key')}} 68 | xxEOFxx 69 | restorecon -r /root/.ssh 70 | echo "{{salt['pillar.get']('core:salt_master_ip')}} salt salt-master" >> /etc/hosts 71 | systemctl disable NetworkManager 72 | systemctl enable salt-minion 73 | %end 74 | -------------------------------------------------------------------------------- /salt/pxe/preseed.jinja: -------------------------------------------------------------------------------- 1 | ## Options to set on the command line 2 | 3 | d-i debconf/language string {{salt['pillar.get']('core:language')|lower}} 4 | d-i debian-installer/language string {{salt['pillar.get']('core:language')|lower}} 5 | d-i debian-installer/country string {{salt['pillar.get']('core:language')|upper}} 6 | d-i debian-installer/locale string {{salt['pillar.get']('core:language')|lower}}_{{salt['pillar.get']('core:language')|upper}}.UTF-8 7 | 8 | d-i netcfg/get_hostname string unassigned-hostname 9 | d-i netcfg/get_domain string unassigned-domain 10 | d-i netcfg/choose_interface select auto 11 | 12 | d-i console-setup/ask_detect boolean false 13 | d-i console-setup/layoutcode string {{salt['pillar.get']('core:keyboard')|lower}} 14 | d-i keyboard-configuration/modelcode string pc105 15 | d-i keyboard-configuration/layoutcode string {{salt['pillar.get']('core:keyboard')|lower}} 16 | d-i keyboard-configuration/variantcode string latin9 17 | 18 | #d-i keymap select us 19 | 20 | #d-i netcfg/choose_interface select auto 21 | #d-i netcfg/get_gateway 10.1.0.1 22 | #d-i netcfg/get_hostname string unassigned 23 | #d-i netcfg/get_domain string unassigned 24 | d-i netcfg/no_default_route boolean 25 | 26 | d-i mirror/country string manual 27 | d-i mirror/http/hostname string {{salt['pillar.get']('engine_connect:repository_server_ip')}} 28 | d-i mirror/http/directory string /{{os}}_{{os_release}}.local.repo 29 | d-i mirror/http/proxy string 30 | 31 | d-i pkgsel/update-policy select none 32 | 33 | 34 | d-i base-installer/kernel/override-image string linux-server 35 | d-i clock-setup/utc-auto boolean true 36 | d-i clock-setup/utc boolean true 37 | d-i time/zone string US/Pacific 38 | d-i clock-setup/ntp boolean true 39 | 40 | d-i pkgsel/install-language-support boolean false 41 | tasksel tasksel/first multiselect standard, ubuntu-server 42 | 43 | ### Partitioning 44 | d-i partman-auto/disk string 
/dev/sda 45 | d-i partman-auto/method string regular 46 | d-i partman-lvm/device_remove_lvm boolean true 47 | d-i partman-md/device_remove_md boolean true 48 | d-i partman-auto/choose_recipe select atomic 49 | 50 | # This makes partman automatically partition without confirmation 51 | d-i partman-partitioning/confirm_write_new_label boolean true 52 | d-i partman/choose_partition select finish 53 | d-i partman/confirm boolean true 54 | d-i partman/confirm_nooverwrite boolean true 55 | 56 | #d-i partman-auto/disk string /dev/sda 57 | #d-i partman-auto/method string regular 58 | 59 | #d-i partman-auto/method string regular 60 | #d-i partman-auto/purge_lvm_from_device boolean true 61 | #d-i partman-lvm/confirm boolean true 62 | #d-i partman-auto/choose_recipe select atomic 63 | #d-i partman/confirm_write_new_label boolean true 64 | #d-i partman/choose_partition select finish 65 | #d-i partman/confirm boolean true 66 | d-i passwd/user-fullname string Ubuntu User 67 | d-i passwd/username string ubuntu 68 | d-i passwd/user-password password insecure 69 | d-i passwd/user-password-again password insecure 70 | 71 | d-i grub-installer/only_debian boolean true 72 | d-i grub-installer/with_other_os boolean true 73 | d-i finish-install/reboot_in_progress note 74 | -------------------------------------------------------------------------------- /salt/pxe/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 2 | service: 3 | 4 | httpd: 5 | service_description: check_proc_httpd 6 | check_command: check_nrpe!check_proc_httpd 7 | 8 | tftpd: 9 | service_description: check_proc_tftpd 10 | check_command: check_nrpe!check_proc_tftpd 11 | 12 | command: 13 | 14 | httpd: 15 | command_name: check_proc_httpd 16 | command_path: "/usr/lib64/nagios/plugins/check_procs" 17 | command_arguments: "-w 4:16 -c 1:20 -C httpd" 18 | 19 | tftpd: 20 | command_name: check_proc_tftpd 21 | command_path: "/usr/lib64/nagios/plugins/check_procs" 22 | command_arguments: "-w 1: -c 1:1 -C in.tftpd" 23 | -------------------------------------------------------------------------------- /salt/repository/banquise.local.repo.jinja: -------------------------------------------------------------------------------- 1 | {% if os == 'Centos' %} 2 | {% if os_release == '7.4.1708' %} 3 | [banquise_RH_7.local] 4 | name=banquise repository 5 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/banquise_RH_7.local.repo 6 | gpgcheck=0 7 | enabled=1 8 | {% endif %} 9 | {% endif %} 10 | 11 | {% if os == 'Fedora' %} 12 | {% if os_release == '27' %} 13 | [banquise_Fedora_27.local] 14 | name=banquise repository 15 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/banquise_Fedora_27.local.repo 16 | gpgcheck=0 17 | enabled=1 18 | {% endif %} 19 | {% endif %} 20 | 21 | -------------------------------------------------------------------------------- /salt/repository/client.sls: -------------------------------------------------------------------------------- 1 | {% import 'include/myself.sls' as ms with context %} 2 | 3 | 4 | /etc/yum.repos.d/os_base.local.repo: 5 | file: 6 | - managed 7 | - source: salt://repository/genericwithupdate.local.repo.jinja 8 | - template: jinja 9 | - defaults: 10 | reponame: {{ms.os}}_{{ms.os_release}} 11 | 12 | #/etc/yum.repos.d/{{ms.os}}_{{ms.os_release}}.local.repo: 13 | # file: 14 | # - managed 15 | # - source: salt://repository/os_dvd.local.repo.jinja 16 | # - template: 
jinja 17 | # - defaults: 18 | # os: {{ms.os}} 19 | # os_release: {{ms.os_release}} 20 | 21 | /etc/yum.repos.d/banquise.local.repo: 22 | file: 23 | - managed 24 | - source: salt://repository/banquise.local.repo.jinja 25 | - template: jinja 26 | - defaults: 27 | os: {{ms.os}} 28 | os_release: {{ms.os_release}} 29 | 30 | /etc/yum.repos.d/salt.local.repo: 31 | file: 32 | - managed 33 | - source: salt://repository/salt.local.repo.jinja 34 | - template: jinja 35 | - defaults: 36 | os: {{ms.os}} 37 | os_release: {{ms.os_release}} 38 | 39 | 40 | /etc/yum.repos.d/CentOS-Base.repo: 41 | file: 42 | - absent 43 | 44 | /etc/yum.repos.d/CentOS-CR.repo: 45 | file: 46 | - absent 47 | 48 | /etc/yum.repos.d/CentOS-Debuginfo.repo: 49 | file: 50 | - absent 51 | 52 | /etc/yum.repos.d/CentOS-fasttrack.repo: 53 | file: 54 | - absent 55 | 56 | /etc/yum.repos.d/CentOS-Sources.repo: 57 | file: 58 | - absent 59 | 60 | /etc/yum.repos.d/CentOS-Vault.repo: 61 | file: 62 | - absent 63 | 64 | /etc/yum.repos.d/os_dvd.local.reposerver.repo: 65 | file: 66 | - absent 67 | 68 | -------------------------------------------------------------------------------- /salt/repository/client.sls.bkp: -------------------------------------------------------------------------------- 1 | {% import 'include/myself.sls' as ms with context %} 2 | 3 | {% set type = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type') %} 4 | {% set subtype = salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':subtype') %} 5 | {% set os = salt['pillar.get'](type~'_system:'~subtype~':os') %} 6 | {% set os_release = salt['pillar.get'](type~'_system:'~subtype~':os_release') %} 7 | 8 | /etc/yum.repos.d/{{ms.os}}_{{ms.os_release}}.local.repo: 9 | file: 10 | - managed 11 | - source: salt://repository/os_dvd.local.repo.jinja 12 | - template: jinja 13 | - defaults: 14 | os: {{ms.os}} 15 | os_release: {{ms.os_release}} 16 | 17 | /etc/yum.repos.d/banquise.local.repo: 18 | file: 19 | - managed 20 | - source: salt://repository/banquise.local.repo.jinja 21 | - template: jinja 22 | - defaults: 23 | os: {{ms.os}} 24 | os_release: {{ms.os_release}} 25 | 26 | /etc/yum.repos.d/salt.{{ms.os}}_{{ms.os_release}}.local.repo: 27 | file: 28 | - managed 29 | - source: salt://repository/salt.local.repo.jinja 30 | - template: jinja 31 | - defaults: 32 | os: {{ms.os}} 33 | os_release: {{ms.os_release}} 34 | 35 | 36 | /etc/yum.repos.d/CentOS-Base.repo: 37 | file: 38 | - absent 39 | 40 | /etc/yum.repos.d/CentOS-CR.repo: 41 | file: 42 | - absent 43 | 44 | /etc/yum.repos.d/CentOS-Debuginfo.repo: 45 | file: 46 | - absent 47 | 48 | /etc/yum.repos.d/CentOS-fasttrack.repo: 49 | file: 50 | - absent 51 | 52 | /etc/yum.repos.d/CentOS-Sources.repo: 53 | file: 54 | - absent 55 | 56 | /etc/yum.repos.d/CentOS-Vault.repo: 57 | file: 58 | - absent 59 | 60 | /etc/yum.repos.d/os_dvd.local.reposerver.repo: 61 | file: 62 | - absent 63 | 64 | -------------------------------------------------------------------------------- /salt/repository/genericwithupdate.local.repo.jinja: -------------------------------------------------------------------------------- 1 | [{{reponame}}.local] 2 | name=OS base repository 3 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/{{reponame}}.local.repo 4 | gpgcheck=0 5 | enabled=1 6 | 7 | [{{reponame}}_update.local] 8 | name=OS base repository update 9 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/{{reponame}}_update.local.repo 10 | gpgcheck=0 11 | enabled=1 12 | 
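# For illustration only: rendered with reponame=Centos_7.4.1708 (the value passed in
# from repository/client.sls for a CentOS 7.4 node) and a repository server at
# 10.1.0.1 (an example address), the template above would produce roughly:
#   [Centos_7.4.1708.local]
#   name=OS base repository
#   baseurl=http://10.1.0.1/Centos_7.4.1708.local.repo
#   gpgcheck=0
#   enabled=1
#   [Centos_7.4.1708_update.local]
#   name=OS base repository update
#   baseurl=http://10.1.0.1/Centos_7.4.1708_update.local.repo
#   gpgcheck=0
#   enabled=1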
-------------------------------------------------------------------------------- /salt/repository/os_dvd.local.repo.jinja: -------------------------------------------------------------------------------- 1 | [{{os}}_{{os_release}}.local] 2 | name=OS base repository 3 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/{{os}}_{{os_release}}.local.repo 4 | gpgcheck=0 5 | enabled=1 6 | 7 | [{{os}}_{{os_release}}_update.local] 8 | name=OS base repository update 9 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/{{os}}_{{os_release}}_update.local.repo 10 | gpgcheck=0 11 | enabled=0 12 | -------------------------------------------------------------------------------- /salt/repository/salt.local.repo.jinja: -------------------------------------------------------------------------------- 1 | {% if os == 'Centos' %} 2 | {% if os_release == '7.4.1708' %} 3 | [salt_RH_7.local.repo] 4 | name=salt local repository 5 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/salt_RH_7.local.repo 6 | gpgcheck=0 7 | enabled=1 8 | {% endif %} 9 | {% endif %} 10 | 11 | {% if os == 'Fedora' %} 12 | {% if os_release == '27' %} 13 | [salt_Fedora_27.local.repo] 14 | name=salt local repository 15 | baseurl=http://{{salt['pillar.get']('engine_connect:repository_server_ip')}}/salt_Fedora_27.local.repo 16 | gpgcheck=0 17 | enabled=1 18 | {% endif %} 19 | {% endif %} 20 | -------------------------------------------------------------------------------- /salt/repository/server.sls: -------------------------------------------------------------------------------- 1 | restorecon_var_www: 2 | cmd.run: 3 | - name: restorecon -r /var/www 4 | - unless: test "$(restorecon -r -n /var/www/html/ -v)" = "" 5 | 6 | webserver_pkg: 7 | pkg.installed: 8 | - name: {{ pillar['pkgs']['webserver'] }} 9 | 10 | webserver_service: 11 | service: 12 | - name: {{ pillar['services']['webserver'] }} 13 | - running 14 | - enable: True 15 | - require: 16 | - pkg: webserver_pkg 17 | 18 | -------------------------------------------------------------------------------- /salt/repository/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 
2 | service: 3 | 4 | httpd: 5 | service_description: check_proc_httpd 6 | check_command: check_nrpe!check_proc_httpd 7 | 8 | command: 9 | 10 | httpd: 11 | command_name: check_proc_httpd 12 | command_path: "/usr/lib64/nagios/plugins/check_procs" 13 | command_arguments: "-w 4:16 -c 1:20 -C httpd" 14 | -------------------------------------------------------------------------------- /salt/shinken/client.sls: -------------------------------------------------------------------------------- 1 | nrpe_pkg: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['nrpe'] }} 4 | 5 | monitoring_proc_pkg: 6 | pkg.installed: 7 | - name: {{ pillar['pkgs']['monitoring_proc'] }} 8 | 9 | 10 | /etc/nagios/nrpe.cfg: 11 | file.line: 12 | - mode: replace 13 | - match: '.*allowed_hosts=127.0.0.1.*' 14 | - content: allowed_hosts=127.0.0.1,{{salt['pillar.get']('engine_connect:monitoring_server_ip')}} 15 | - require: 16 | - pkg: nrpe_pkg 17 | 18 | restorecon_nrpe.cfg: 19 | cmd.run: 20 | - name: restorecon /etc/nagios/nrpe.cfg 21 | - unless: test "$(restorecon -r -n /etc/nagios/nrpe.cfg -v)" = "" 22 | 23 | {% if salt['pillar.get']('monitoring:default_probs:'~salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type')~':disk') %} 24 | 25 | monitoring_disk_pkg: 26 | pkg.installed: 27 | - name: {{ pillar['pkgs']['monitoring_disk'] }} 28 | 29 | disk_1: 30 | file.append: 31 | - name: /etc/nrpe.d/commands.cfg 32 | - text: 'command[check_disk_root]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p / -m' 33 | - unless: grep "check_disk_root" /etc/nrpe.d/commands.cfg 34 | 35 | disk_2: 36 | file.line: 37 | - name: /etc/nrpe.d/commands.cfg 38 | - mode: replace 39 | - content: 'command[check_disk_root]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p / -m' 40 | - match: ".*check_disk_root.*" 41 | {% endif %} 42 | 43 | {% if salt['pillar.get']('monitoring:default_probs:'~salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type')~':zombie') %} 44 | 45 | zombie_1: 46 | file.append: 47 | - name: /etc/nrpe.d/commands.cfg 48 | - text: 'command[check_proc_zombie]=/usr/lib64/nagios/plugins/check_procs -w 5 -c 10 -s Z' 49 | - unless: grep "check_proc_zombie" /etc/nrpe.d/commands.cfg 50 | 51 | zombie_2: 52 | file.line: 53 | - name: /etc/nrpe.d/commands.cfg 54 | - mode: replace 55 | - content: 'command[check_proc_zombie]=/usr/lib64/nagios/plugins/check_procs -w 5 -c 10 -s Z' 56 | - match: ".*check_proc_zombie.*" 57 | {% endif %} 58 | 59 | {% if salt['pillar.get']('monitoring:parameters:enable_states_probs') %} 60 | 61 | {% for state, args in salt['pillar.get']('engine_monitoring:'~salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':type')~":"~salt['pillar.get']('engine_reverse:'~salt['grains.get']('id')~':subtype'), {}).items() %} 62 | {% if args is not none %} 63 | {% for element, argo in args.items() %} {# Node now check for each state in its type:group if there something to do. 
All of this is listed in engine_monitoring virtual pillar #} 64 | 65 | {% if element == "command" %} 66 | {% for command, argy in argo.items() %} 67 | {{argy.command_name}}_{{state}}1: 68 | file.append: 69 | - name: /etc/nrpe.d/commands.cfg 70 | - text: 'command[{{argy.command_name}}]={{argy.command_path}} {{argy.command_arguments}}' 71 | - unless: grep {{argy.command_name}} /etc/nrpe.d/commands.cfg 72 | 73 | {{argy.command_name}}_{{state}}2: 74 | file.line: 75 | - name: /etc/nrpe.d/commands.cfg 76 | - mode: replace 77 | - content: 'command[{{argy.command_name}}]={{argy.command_path}} {{argy.command_arguments}}' 78 | - match: ".*{{argy.command_name}}.*" 79 | 80 | {% endfor %} 81 | {% endif %} 82 | 83 | {% endfor %} 84 | 85 | {% endif %}{% endfor %} 86 | {% endif %} 87 | 88 | nrpeservice: 89 | service: 90 | - name: {{ pillar['services']['nrpe'] }} 91 | - running 92 | - enable: True 93 | - watch: 94 | - file: /etc/nrpe.d/commands.cfg 95 | - file: /etc/nagios/nrpe.cfg 96 | - require: 97 | - pkg: nrpe_pkg 98 | - file: /etc/nagios/nrpe.cfg 99 | - cmd: restorecon_nrpe.cfg 100 | -------------------------------------------------------------------------------- /salt/shinken/client.sls.bkp: -------------------------------------------------------------------------------- 1 | nrpe_pkg: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['nrpe'] }} 4 | 5 | monitoring_proc_pkg: 6 | pkg.installed: 7 | - name: {{ pillar['pkgs']['monitoring_proc'] }} 8 | 9 | {% for node, argu in salt['pillar.get']('engine_reverse', {}).items() %}{# Node check which type and which group it belongs #} 10 | {% if node~"."~salt['pillar.get']('engine:network:domaine_name') == salt['grains.get']('id') %}{# Node found herself #} 11 | 12 | {%if salt['pillar.get']('monitoring:default_probs:'~argu.type~':disk') %} 13 | 14 | monitoring_disk_pkg: 15 | pkg.installed: 16 | - name: {{ pillar['pkgs']['monitoring_disk'] }} 17 | 18 | disk_1: 19 | file.append: 20 | - name: /etc/nrpe.d/commands.cfg 21 | - text: 'command[check_disk_root]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p / -m' 22 | - unless: grep "check_disk_root" /etc/nrpe.d/commands.cfg 23 | 24 | disk_2: 25 | file.line: 26 | - name: /etc/nrpe.d/commands.cfg 27 | - mode: replace 28 | - content: 'command[check_disk_root]=/usr/lib64/nagios/plugins/check_disk -w 20% -c 10% -p / -m' 29 | - match: ".*check_disk_root.*" 30 | {% endif %} 31 | 32 | {%if salt['pillar.get']('monitoring:default_probs:'~argu.type~':zombie') %} 33 | zombie_1: 34 | file.append: 35 | - name: /etc/nrpe.d/commands.cfg 36 | - text: 'command[check_proc_zombie]=/usr/lib64/nagios/plugins/check_procs -w 5 -c 10 -s Z' 37 | - unless: grep "check_proc_zombie" /etc/nrpe.d/commands.cfg 38 | 39 | zombie_2: 40 | file.line: 41 | - name: /etc/nrpe.d/commands.cfg 42 | - mode: replace 43 | - content: 'command[check_proc_zombie]=/usr/lib64/nagios/plugins/check_procs -w 5 -c 10 -s Z' 44 | - match: ".*check_proc_zombie.*" 45 | {% endif %} 46 | 47 | {% if salt['pillar.get']('monitoring:parameters:enable_states_probs') %} 48 | {% for state, args in salt['pillar.get']('engine_monitoring:'~argu.type~":"~argu.group, {}).items() %}{% if args is not none %}{% for element, argo in args.items() %}{# Node now check for each state in its type:group if there something to do. 
All of this is listed in engine_monitoring virtual pillar #} 49 | 50 | {% if element == "command" %} 51 | {% for command, argy in argo.items() %} 52 | {{argy.command_name}}_{{state}}1: 53 | file.append: 54 | - name: /etc/nrpe.d/commands.cfg 55 | - text: 'command[{{argy.command_name}}]={{argy.command_path}} {{argy.command_arguments}}' 56 | - unless: grep {{argy.command_name}} /etc/nrpe.d/commands.cfg 57 | 58 | {{argy.command_name}}_{{state}}2: 59 | file.line: 60 | - name: /etc/nrpe.d/commands.cfg 61 | - mode: replace 62 | - content: 'command[{{argy.command_name}}]={{argy.command_path}} {{argy.command_arguments}}' 63 | - match: ".*{{argy.command_name}}.*" 64 | 65 | {% endfor %} 66 | {% endif %} 67 | 68 | {% endfor %} 69 | 70 | {% endif %}{% endfor %} 71 | {% endif %} 72 | 73 | 74 | {% endif %} 75 | {% endfor %} 76 | 77 | nrpeservice: 78 | service: 79 | - name: {{ pillar['services']['nrpe'] }} 80 | - running 81 | - enable: True 82 | - watch: 83 | - file: /etc/nrpe.d/commands.cfg 84 | - require: 85 | - pkg: {{ pillar['pkgs']['nrpe'] }} 86 | 87 | -------------------------------------------------------------------------------- /salt/shinken/cluster-groups.cfg.jinja: -------------------------------------------------------------------------------- 1 | define hostgroup{ 2 | hostgroup_name master 3 | alias master 4 | } 5 | 6 | define hostgroup{ 7 | hostgroup_name bmc 8 | alias bmc 9 | } 10 | 11 | {% for type in salt['pillar.get']('core:types') %} 12 | {% for group, argu in salt['pillar.get'](type, {}).items() %} 13 | define hostgroup{ 14 | hostgroup_name {{type}}_{{group}} 15 | alias {{type}}_{{group}} 16 | } 17 | {% endfor %} 18 | {% endfor %} 19 | 20 | -------------------------------------------------------------------------------- /salt/shinken/computes.cfg.jinja: -------------------------------------------------------------------------------- 1 | {% for group, argu in salt['pillar.get']('computes', {}).items() %} 2 | {% for host, args in argu.items() %} 3 | define host{ 4 | use generic-host 5 | contact_groups admins 6 | host_name {{ host }} 7 | address {{ args.ip }} 8 | hostgroups compute_{{group}} 9 | } 10 | {% endfor %}{% endfor %} 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /salt/shinken/dumb.sls: -------------------------------------------------------------------------------- 1 | /root/dumb: 2 | file: 3 | - managed 4 | - source: salt://shinken/services.cfg.jinja 5 | - template: jinja 6 | 7 | 8 | -------------------------------------------------------------------------------- /salt/shinken/ios.cfg.jinja: -------------------------------------------------------------------------------- 1 | {% for group, argu in salt['pillar.get']('ios', {}).items() %} 2 | {% for host, args in argu.items() %} 3 | define host{ 4 | use generic-host 5 | contact_groups admins 6 | host_name {{ host }} 7 | address {{ args.ip }} 8 | hostgroups io_{{group}} 9 | } 10 | {% endfor %}{% endfor %} 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /salt/shinken/logins.cfg.jinja: -------------------------------------------------------------------------------- 1 | {% for group, argu in salt['pillar.get']('logins', {}).items() %} 2 | {% for host, args in argu.items() %} 3 | define host{ 4 | use generic-host 5 | contact_groups admins 6 | host_name {{ host }} 7 | address {{ args.ip }} 8 | hostgroups login_{{group}} 9 | } 10 | {% endfor %}{% endfor %} 11 | 12 | 13 | 14 | 
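15 | {# Illustrative rendering only, stripped by Jinja at render time. Assuming a logins pillar entry such as standard: { login1: { ip: 10.1.0.10 } } (example group name, host name and address, not shipped values), the loops above emit one Shinken host definition per login node: 16 |    define host{ 17 |        use generic-host 18 |        contact_groups admins 19 |        host_name login1 20 |        address 10.1.0.10 21 |        hostgroups login_standard 22 |    } #}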
-------------------------------------------------------------------------------- /salt/shinken/masters.cfg.jinja: -------------------------------------------------------------------------------- 1 | {% set netadmin = salt['pillar.get']('network:global_parameters:admin_network') %} 2 | {% for host, args in salt['pillar.get']('masters',{}).items() %} 3 | define host{ 4 | use generic-host 5 | contact_groups admins 6 | host_name {{ host }} 7 | {%- for network, network_args in args.network.items() %} 8 | {%- if network == netadmin %} 9 | address {{ network_args.ip }} 10 | {%- endif %} 11 | {%- endfor %} 12 | hostgroups master 13 | } 14 | {% endfor %} 15 | -------------------------------------------------------------------------------- /salt/shinken/nodes.cfg.jinja: -------------------------------------------------------------------------------- 1 | {%- for type in salt['pillar.get']('core:types') %} 2 | {%- for group, argu in salt['pillar.get'](type, {}).items() %} 3 | {%- for host, args in argu.items() %} 4 | define host{ 5 | use generic-host 6 | contact_groups admins 7 | host_name {{ host }} 8 | address {{ args.network.net0.ip }} 9 | hostgroups {{type}}_{{group}} 10 | } 11 | 12 | {%- endfor %} 13 | {%- endfor %} 14 | {%- endfor %} 15 | 16 | 17 | -------------------------------------------------------------------------------- /salt/shinken/server.sls.bkp: -------------------------------------------------------------------------------- 1 | shinken_pkg: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['shinken'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/shinken/hostgroups/cluster-groups.cfg: 8 | file: 9 | - managed 10 | - source: salt://shinken/cluster-groups.cfg.jinja 11 | - template: jinja 12 | - require: 13 | - pkg: {{ pillar['pkgs']['shinken_global'] }} 14 | 15 | /etc/shinken/hosts/computes.cfg: 16 | file: 17 | - managed 18 | - source: salt://shinken/computes.cfg.jinja 19 | - template: jinja 20 | - require: 21 | - pkg: {{ pillar['pkgs']['shinken_global'] }} 22 | 23 | /etc/shinken/servicegroups/servicegroup.cfg: 24 | file: 25 | - managed 26 | - source: salt://shinken/servicegroup.cfg.jinja 27 | - template: jinja 28 | - require: 29 | - pkg: {{ pillar['pkgs']['shinken_global'] }} 30 | 31 | /etc/shinken/services/services.cfg: 32 | file: 33 | - managed 34 | - source: salt://shinken/services.cfg.jinja 35 | - template: jinja 36 | - require: 37 | - pkg: {{ pillar['pkgs']['shinken_global'] }} 38 | 39 | /etc/shinken/commands/check_ping.cfg: 40 | file: 41 | - managed 42 | - source: salt://shinken/check_ping.cfg.jinja 43 | - template: jinja 44 | - require: 45 | - pkg: {{ pillar['pkgs']['shinken_global'] }} 46 | 47 | /etc/shinken/resource.d/paths.cfg: 48 | file.line: 49 | - mode: replace 50 | - match: ^\$NAGIOSPLUGINSDIR\$ 51 | - content: '$NAGIOSPLUGINSDIR$=/usr/lib64/nagios/plugins/' 52 | - require: 53 | - pkg: {{ pillar['pkgs']['shinken_global'] }} 54 | 55 | shinken_webui2: 56 | pkg.installed: 57 | - name: {{ pillar['pkgs']['shinken_webui2'] }} 58 | - require: 59 | - sls: repository.client 60 | 61 | /etc/dhcp/dhcpd.conf: 62 | file: 63 | - managed 64 | - source: salt://dhcp/dhcpd.conf.jinja 65 | - template: jinja 66 | - require: 67 | - pkg: {{ pillar['pkgs']['dhcp'] }} 68 | 69 | shinken_all_services: 70 | service: 71 | - name: {{ pillar['services']['shinken_all'] }} 72 | - running 73 | - enable: True 74 | - watch: 75 | - file: /etc/dhcp/dhcpd.conf 76 | - require: 77 | - pkg: {{ pillar['pkgs']['shinken'] }} 78 | - file: /etc/dhcp/dhcpd.conf 79 | 80 | 
-------------------------------------------------------------------------------- /salt/shinken/servicegroup.cfg.jinja: -------------------------------------------------------------------------------- 1 | define servicegroup{ 2 | servicegroup_name cluster-services 3 | alias Group Services Check 4 | } 5 | 6 | -------------------------------------------------------------------------------- /salt/shinken/services.cfg.jinja: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | ############################################# 5 | ########## MASTERS 6 | ### 7 | 8 | {%if salt['pillar.get']('monitoring:default_probs:masters:disk') %} 9 | define service{ 10 | use generic-service 11 | hostgroup_name master 12 | service_description check_disk_root 13 | check_command check_nrpe!check_disk_root 14 | display_name check_disk_root 15 | } 16 | {% endif %} 17 | 18 | {%if salt['pillar.get']('monitoring:default_probs:masters:zombie') %} 19 | define service{ 20 | use generic-service 21 | hostgroup_name master 22 | service_description check_proc_zombie 23 | check_command check_nrpe!check_proc_zombie 24 | display_name check_proc_zombie 25 | } 26 | {% endif %} 27 | 28 | {% if salt['pillar.get']('monitoring:parameters:enable_states_probs') %} 29 | {% for state, args in salt['pillar.get']('engine_monitoring:masters:masters', {}).items() %}{% if args is not none %}{% for element, argo in args.items() %} 30 | 31 | {% if element == "service" %} 32 | {% for service, argy in argo.items() %} 33 | define service{ 34 | use generic-service 35 | hostgroup_name master 36 | service_description {{argy.service_description}} 37 | check_command {{argy.check_command}} 38 | display_name {{service}} 39 | } 40 | {% endfor %} 41 | {% endif %} 42 | 43 | {% endfor %}{% endif %}{% endfor %} 44 | {% endif %} 45 | 46 | 47 | {% for type in salt['pillar.get']('core:types') %} 48 | ############################################# 49 | ########## {{type}} 50 | ### 51 | 52 | {% for group, argu in salt['pillar.get']('engine_monitoring:'~type, {}).items() %} 53 | 54 | ########## 55 | # {{group}} 56 | 57 | {%- if salt['pillar.get']('monitoring:default_probs:'~type~':disk') %} 58 | define service{ 59 | use generic-service 60 | hostgroup_name {{type}}_{{group}} 61 | service_description check_disk_{{group}} 62 | check_command check_nrpe!check_disk_root 63 | display_name check_disk_root 64 | } 65 | {%- endif %} 66 | {%- if salt['pillar.get']('monitoring:default_probs:'~type~':zombie') %} 67 | define service{ 68 | use generic-service 69 | hostgroup_name {{type}}_{{group}} 70 | service_description check_proc_zombie_{{group}} 71 | check_command check_nrpe!check_proc_zombie 72 | display_name check_proc_zombie 73 | } 74 | {%- endif %} 75 | 76 | {%- if salt['pillar.get']('monitoring:parameters:enable_states_probs') %} 77 | {%- if argu is not none %} 78 | {%- for state, args in argu.items() %}{% if args is not none %}{% for element, argo in args.items() %} 79 | {%- if element == "service" %} 80 | {%- for service, argy in argo.items() %} 81 | define service{ 82 | use generic-service 83 | hostgroup_name {{type}}_{{group}} 84 | service_description {{argy.service_description}}_{{group}} 85 | check_command {{argy.check_command}} 86 | display_name {{service}} 87 | } 88 | {%- endfor %} 89 | {%- endif %} 90 | {%- endfor %} 91 | {%- endif %} 92 | {%- endfor %} 93 | {% endif %} 94 | {%- endif %} 95 | 96 | 97 | {% endfor %} 98 | {% endfor %} 99 | 100 | 101 | 
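102 | {# Illustrative rendering only, stripped by Jinja at render time. Assuming the engine_monitoring pillar exposes the slurmd entry from salt/slurm/client_monitoring.sls for a computes type with a group named standard (type and group names are examples, not shipped values), the per-group loop above emits: 103 |    define service{ 104 |        use generic-service 105 |        hostgroup_name computes_standard 106 |        service_description check_proc_slurmd_standard 107 |        check_command check_nrpe!check_proc_slurmd 108 |        display_name slurmd 109 |    } #}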
-------------------------------------------------------------------------------- /salt/shinken/services.cfg.jinja.old: -------------------------------------------------------------------------------- 1 | define service { 2 | use generic-service 3 | notifications_enabled 0 4 | service_description check_ping 5 | display_name ping 6 | hostgroup_name type-compute 7 | check_command check_ping 8 | servicegroups cluster-services 9 | } 10 | 11 | -------------------------------------------------------------------------------- /salt/slurm/client.sls: -------------------------------------------------------------------------------- 1 | munge: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['munge'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/munge/munge.key: 8 | file.managed: 9 | - source: salt://slurm/munge.key 10 | - mode: '0400' 11 | - user: munge 12 | - group: munge 13 | - require: 14 | - pkg: munge 15 | 16 | /var/run/munge: 17 | file.directory: 18 | - user: munge 19 | - group: munge 20 | - mode: 755 21 | - require: 22 | - pkg: munge 23 | 24 | mungeservice: 25 | service: 26 | - name: {{ pillar['services']['munge'] }} 27 | - running 28 | - enable: True 29 | - watch: 30 | - file: /etc/munge/munge.key 31 | - require: 32 | - pkg: {{ pillar['pkgs']['munge'] }} 33 | - file: /etc/munge/munge.key 34 | 35 | slurm: 36 | pkg.installed: 37 | - name: {{ pillar['pkgs']['slurm'] }} 38 | - require: 39 | - sls: repository.client 40 | 41 | slurm-munge: 42 | pkg.installed: 43 | - name: {{ pillar['pkgs']['slurm_munge'] }} 44 | - require: 45 | - sls: repository.client 46 | 47 | /etc/slurm/slurm.conf: 48 | file.managed: 49 | - source: salt://slurm/slurm.conf.jinja 50 | - template: jinja 51 | - require: 52 | - pkg: {{ pillar['pkgs']['slurm'] }} 53 | 54 | /etc/slurm/slurm.epilog.clean: 55 | file.managed: 56 | - source: salt://slurm/slurm.epilog.clean 57 | - mode: '0700' 58 | - user: slurm 59 | - group: slurm 60 | - require: 61 | - pkg: {{ pillar['pkgs']['slurm'] }} 62 | 63 | slurm-group: 64 | group.present: 65 | - gid: 567 66 | - name: slurm 67 | 68 | slurm-user: 69 | user.present: 70 | - name: slurm 71 | - fullname: slurm user 72 | - shell: /bin/false 73 | - home: /etc/slurm 74 | - uid: 567 75 | - gid: 567 76 | - require: 77 | - group: slurm-group 78 | 79 | /var/spool/slurm: 80 | file.directory: 81 | - user: slurm 82 | - group: slurm 83 | - mode: 755 84 | - require: 85 | - user: slurm-user 86 | 87 | /var/log/slurm: 88 | file.directory: 89 | - user: slurm 90 | - group: slurm 91 | - require: 92 | - user: slurm-user 93 | 94 | /var/spool/slurm/savestate: 95 | file.directory: 96 | - user: slurm 97 | - group: slurm 98 | - require: 99 | - user: slurm-user 100 | - file: /var/spool/slurm 101 | 102 | slurmservice: 103 | service: 104 | - name: {{ pillar['services']['slurmclient'] }} 105 | - running 106 | - enable: True 107 | - require: 108 | - pkg: {{ pillar['pkgs']['slurm'] }} 109 | - pkg: {{ pillar['pkgs']['slurm_munge'] }} 110 | - service: mungeservice 111 | - file: /var/spool/slurm/savestate 112 | - file: /var/log/slurm 113 | - file: /etc/slurm/slurm.epilog.clean 114 | - file: /var/spool/slurm 115 | - file: /etc/slurm/slurm.conf 116 | - sls: dns.client 117 | - sls: network.static 118 | -------------------------------------------------------------------------------- /salt/slurm/client_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 
2 | service: 3 | 4 | slurmd: 5 | service_description: check_proc_slurmd 6 | check_command: check_nrpe!check_proc_slurmd 7 | 8 | munged: 9 | service_description: check_proc_munged 10 | check_command: check_nrpe!check_proc_munged 11 | 12 | command: 13 | 14 | slurmd: 15 | command_name: check_proc_slurmd 16 | command_path: "/usr/lib64/nagios/plugins/check_procs" 17 | command_arguments: "-w 1: -c 1:1 -C slurmd" 18 | 19 | munged: 20 | command_name: check_proc_munged 21 | command_path: "/usr/lib64/nagios/plugins/check_procs" 22 | command_arguments: "-w 1: -c 1:1 -C munged" 23 | -------------------------------------------------------------------------------- /salt/slurm/login.sls: -------------------------------------------------------------------------------- 1 | munge: 2 | pkg.installed: 3 | - name: {{ pillar['pkgs']['munge'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/munge/munge.key: 8 | file.managed: 9 | - source: salt://slurm/munge.key 10 | - mode: '0400' 11 | - user: munge 12 | - group: munge 13 | - require: 14 | - pkg: munge 15 | 16 | /var/run/munge: 17 | file.directory: 18 | - user: munge 19 | - group: munge 20 | - mode: 755 21 | - require: 22 | - pkg: munge 23 | 24 | mungeservice: 25 | service: 26 | - name: {{ pillar['services']['munge'] }} 27 | - running 28 | - enable: True 29 | - watch: 30 | - file: /etc/munge/munge.key 31 | - require: 32 | - pkg: {{ pillar['pkgs']['munge'] }} 33 | - file: /etc/munge/munge.key 34 | 35 | slurm: 36 | pkg.installed: 37 | - name: {{ pillar['pkgs']['slurm'] }} 38 | - require: 39 | - sls: repository.client 40 | 41 | slurm-munge: 42 | pkg.installed: 43 | - name: {{ pillar['pkgs']['slurm_munge'] }} 44 | - require: 45 | - sls: repository.client 46 | 47 | /etc/slurm/slurm.conf: 48 | file.managed: 49 | - source: salt://slurm/slurm.conf.jinja 50 | - template: jinja 51 | - require: 52 | - pkg: {{ pillar['pkgs']['slurm'] }} 53 | 54 | slurm-group: 55 | group.present: 56 | - gid: 567 57 | - name: slurm 58 | 59 | slurm-user: 60 | user.present: 61 | - name: slurm 62 | - fullname: slurm user 63 | - shell: /bin/false 64 | - home: /etc/slurm 65 | - uid: 567 66 | - gid: 567 67 | - require: 68 | - group: slurm-group 69 | 70 | /var/spool/slurmd: 71 | file.directory: 72 | - user: slurm 73 | - group: slurm 74 | - mode: 755 75 | - require: 76 | - user: slurm-user 77 | 78 | /var/log/slurm: 79 | file.directory: 80 | - user: slurm 81 | - group: slurm 82 | - require: 83 | - user: slurm-user 84 | 85 | /etc/slurm/savestate: 86 | file.directory: 87 | - user: slurm 88 | - group: slurm 89 | - require: 90 | - user: slurm-user 91 | 92 | slurmservice: 93 | service: 94 | - name: {{ pillar['services']['slurmclient'] }} 95 | - dead 96 | - enable: False 97 | - require: 98 | - pkg: {{ pillar['pkgs']['slurm'] }} 99 | - pkg: {{ pillar['pkgs']['slurm_munge'] }} 100 | - service: mungeservice 101 | - file: /etc/slurm/savestate 102 | - file: /var/log/slurm 103 | - file: /var/spool/slurmd 104 | - file: /etc/slurm/slurm.conf 105 | - sls: dns.client 106 | - sls: network.static 107 | -------------------------------------------------------------------------------- /salt/slurm/munge.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oxedions/banquise/d610f53f2f8e141654280a8cf08191df747decce/salt/slurm/munge.key -------------------------------------------------------------------------------- /salt/slurm/server.sls: -------------------------------------------------------------------------------- 1 | munge: 2 | pkg.installed: 
3 | - name: {{ pillar['pkgs']['munge'] }} 4 | - require: 5 | - sls: repository.client 6 | 7 | /etc/munge/munge.key: 8 | file.managed: 9 | - source: salt://slurm/munge.key 10 | - mode: '0400' 11 | - user: munge 12 | - group: munge 13 | - require: 14 | - pkg: munge 15 | 16 | /var/run/munge: 17 | file.directory: 18 | - user: munge 19 | - group: munge 20 | - mode: 755 21 | - require: 22 | - pkg: munge 23 | 24 | mungeservice: 25 | service: 26 | - name: {{ pillar['services']['munge'] }} 27 | - running 28 | - enable: True 29 | - watch: 30 | - file: /etc/munge/munge.key 31 | - require: 32 | - pkg: {{ pillar['pkgs']['munge'] }} 33 | - file: /etc/munge/munge.key 34 | 35 | slurm: 36 | pkg.installed: 37 | - name: {{ pillar['pkgs']['slurm'] }} 38 | - require: 39 | - sls: repository.client 40 | 41 | slurm-munge: 42 | pkg.installed: 43 | - name: {{ pillar['pkgs']['slurm_munge'] }} 44 | - require: 45 | - sls: repository.client 46 | 47 | /etc/slurm/slurm.conf: 48 | file.managed: 49 | - source: salt://slurm/slurm.conf.jinja 50 | - template: jinja 51 | - require: 52 | - pkg: {{ pillar['pkgs']['slurm'] }} 53 | 54 | slurm-group: 55 | group.present: 56 | - gid: 567 57 | - name: slurm 58 | 59 | slurm-user: 60 | user.present: 61 | - name: slurm 62 | - fullname: slurm user 63 | - shell: /bin/false 64 | - home: /etc/slurm 65 | - uid: 567 66 | - gid: 567 67 | - require: 68 | - group: slurm-group 69 | 70 | /var/spool/slurm: 71 | file.directory: 72 | - user: slurm 73 | - group: slurm 74 | - mode: 755 75 | - require: 76 | - user: slurm-user 77 | 78 | /var/log/slurm: 79 | file.directory: 80 | - user: slurm 81 | - group: slurm 82 | - require: 83 | - user: slurm-user 84 | 85 | /var/spool/slurm/savestate: 86 | file.directory: 87 | - user: slurm 88 | - group: slurm 89 | - require: 90 | - user: slurm-user 91 | - file: /var/spool/slurm 92 | 93 | slurmservice: 94 | service: 95 | - name: {{ pillar['services']['slurmserver'] }} 96 | - running 97 | - enable: True 98 | - require: 99 | - pkg: {{ pillar['pkgs']['slurm'] }} 100 | - pkg: {{ pillar['pkgs']['slurm_munge'] }} 101 | - service: mungeservice 102 | - file: /var/log/slurm 103 | - file: /var/spool/slurm 104 | - file: /var/spool/slurm/savestate 105 | - file: /etc/slurm/slurm.conf 106 | # - sls: dns.client 107 | # - sls: network.firewall 108 | -------------------------------------------------------------------------------- /salt/slurm/server_monitoring.sls: -------------------------------------------------------------------------------- 1 | # Monitoring file. Always start with 4 space indent !! 
2 | service: 3 | 4 | slurmctld: 5 | service_description: check_proc_slurmctld 6 | check_command: check_nrpe!check_proc_slurmctld 7 | 8 | munged: 9 | service_description: check_proc_munged 10 | check_command: check_nrpe!check_proc_munged 11 | 12 | command: 13 | 14 | slurmctld: 15 | command_name: check_proc_slurmctld 16 | command_path: "/usr/lib64/nagios/plugins/check_procs" 17 | command_arguments: "-w 1: -c 1:1 -C slurmctld" 18 | 19 | munged: 20 | command_name: check_proc_munged 21 | command_path: "/usr/lib64/nagios/plugins/check_procs" 22 | command_arguments: "-w 1: -c 1:1 -C munged" 23 | -------------------------------------------------------------------------------- /salt/slurm/slurm.conf.jinja: -------------------------------------------------------------------------------- 1 | ClusterName={{salt['pillar.get']('core:cluster_name')}} 2 | ControlMachine={{salt['pillar.get']('engine_connect:jobscheduler_server_host')}} 3 | 4 | # Authentication 5 | AuthType=auth/munge 6 | CryptoType=crypto/munge 7 | 8 | # Cleaning 9 | Epilog=/etc/slurm/slurm.epilog.clean 10 | 11 | # Mpi 12 | MpiDefault=none 13 | 14 | # System 15 | ProctrackType=proctrack/cgroup 16 | ReturnToService=0 17 | 18 | # Slurm environment 19 | SlurmctldPidFile=/var/run/slurmctld.pid 20 | SlurmctldPort=6817 21 | SlurmdPidFile=/var/run/slurmd.pid 22 | SlurmdPort=6818 23 | SlurmdSpoolDir=/var/spool/slurm/slurmd 24 | SlurmUser=slurm 25 | StateSaveLocation=/var/spool/slurm/savestate 26 | SwitchType=switch/none 27 | 28 | # Task 29 | TaskPlugin=task/affinity 30 | TaskPluginParam=Sched 31 | 32 | # Timers 33 | InactiveLimit=0 34 | KillWait=30 35 | MinJobAge=300 36 | SlurmctldTimeout=120 37 | SlurmdTimeout=300 38 | Waittime=0 39 | 40 | # Scheduling 41 | FastSchedule=1 42 | SchedulerType=sched/backfill 43 | SelectType=select/cons_res 44 | SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK 45 | 46 | # Logging and accounting 47 | JobCompType=jobcomp/none 48 | SlurmctldDebug=5 49 | SlurmctldLogFile=/var/log/slurm/slurmctld.log 50 | SlurmdDebug=5 51 | SlurmdLogFile=/var/log/slurm/slurmd.log.%h 52 | 53 | {% for subtype, subtype_args in salt['pillar.get']('computes', {}).items() %} 54 | {% for host, host_args in subtype_args.items() %}NodeName={{ host }} Sockets={{salt['pillar.get']('computes_system:'~subtype~':hardware:sockets')}} CoresPerSocket={{salt['pillar.get']('computes_system:'~subtype~':hardware:cores_per_socket')}} ThreadsPerCore={{salt['pillar.get']('computes_system:'~subtype~':hardware:threads_per_core')}} State=UNKNOWN 55 | {% endfor %}{% endfor %} 56 | 57 | PartitionName=all Nodes={% for subtype, subtype_args in salt['pillar.get']('computes', {}).items() %}{% for host, host_args in subtype_args.items() %},{{ host }}{% endfor %}{% endfor %} Default=YES MaxTime=INFINITE State=UP 58 | 59 | {% for subtype, subtype_args in salt['pillar.get']('computes', {}).items() %} 60 | PartitionName={{subtype}} Nodes={% set count = 1 %}{% for host, host_args in subtype_args.items() %}{% if count == 1%}{{ host }}{% set count = 2 %}{% else %},{{ host }}{% endif %}{% endfor %} MaxTime=INFINITE State=UP 61 | {% endfor %} 62 | -------------------------------------------------------------------------------- /salt/slurm/slurm.epilog.clean: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # 3 | # This script will kill any user processes on a node when the last 4 | # SLURM job there ends. 
For example, if a user directly logs into 5 | # an allocated node SLURM will not kill that process without this 6 | # script being executed as an epilog. 7 | # 8 | # SLURM_BIN can be used for testing with private version of SLURM 9 | #SLURM_BIN="/usr/bin/" 10 | # 11 | if [ x$SLURM_UID = "x" ] ; then 12 | exit 0 13 | fi 14 | if [ x$SLURM_JOB_ID = "x" ] ; then 15 | exit 0 16 | fi 17 | 18 | # 19 | # Don't try to kill user root or system daemon jobs 20 | # 21 | if [ $SLURM_UID -lt 100 ] ; then 22 | exit 0 23 | fi 24 | 25 | job_list=`${SLURM_BIN}squeue --noheader --format=%A --user=$SLURM_UID --node=localhost` 26 | for job_id in $job_list 27 | do 28 | if [ $job_id -ne $SLURM_JOB_ID ] ; then 29 | exit 0 30 | fi 31 | done 32 | 33 | # 34 | # No other SLURM jobs, purge all remaining processes of this user 35 | # 36 | pkill -KILL -U $SLURM_UID 37 | exit 0 38 | -------------------------------------------------------------------------------- /salt/ssh/client.sls: -------------------------------------------------------------------------------- 1 | root_public_key: 2 | ssh_auth.present: 3 | - names: 4 | - {{salt['pillar.get']('ssh_public:ssh_master_public_key')}} 5 | - user: root 6 | - config: '/root/.ssh/authorized_keys' 7 | -------------------------------------------------------------------------------- /salt/ssh/id_rsa.jinja: -------------------------------------------------------------------------------- 1 | {{salt['pillar.get']('ssh_private:ssh_master_private_key')}} 2 | -------------------------------------------------------------------------------- /salt/ssh/id_rsa.pub.jinja: -------------------------------------------------------------------------------- 1 | {{salt['pillar.get']('ssh_public:ssh_master_public_key')}} 2 | -------------------------------------------------------------------------------- /salt/ssh/master.sls: -------------------------------------------------------------------------------- 1 | /root/.ssh/id_rsa: 2 | file: 3 | - managed 4 | - makedirs: True 5 | - source: salt://ssh/id_rsa.jinja 6 | - template: jinja 7 | - user: root 8 | - group: root 9 | - mode: 600 10 | 11 | /root/.ssh/id_rsa.pub: 12 | file: 13 | - managed 14 | - makedirs: True 15 | - source: salt://ssh/id_rsa.pub.jinja 16 | - template: jinja 17 | - user: root 18 | - group: root 19 | - mode: 644 20 | 21 | restorecon_sshkey: 22 | cmd.run: 23 | - name: restorecon -r /root/.ssh 24 | - unless: test "$(restorecon -r -n /root/.ssh/ -v)" = "" 25 | - require: 26 | - file: /root/.ssh/id_rsa 27 | - file: /root/.ssh/id_rsa.pub 28 | -------------------------------------------------------------------------------- /salt/top.sls: -------------------------------------------------------------------------------- 1 | base: 2 | 3 | # masters 4 | {% for master, sta in salt['pillar.get']('masters_states').items() %} 5 | '{{master}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}': 6 | {% for st in sta %} 7 | - {{ st }} 8 | {% endfor %} 9 | {% endfor %} 10 | 11 | {%- for type in salt['pillar.get']('core:types') %} 12 | # {{type}} nodes 13 | {% for group, args in salt['pillar.get'](type).items() %} 14 | {% for node, argo in args.items() %} 15 | {% if salt['pillar.get'](type~'_states:'~group) is not none %} 16 | '{{node}}.{{salt['pillar.get']('network:global_parameters:domain_name')}}': 17 | {% for st in salt['pillar.get'](type~'_states:'~group) %} 18 | - {{ st }} 19 | {% endfor %} 20 | {% endif %} 21 | {% endfor %} 22 | {% endfor %} 23 | {%- endfor %} 24 | 
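25 | {# Illustrative expansion only, stripped by Jinja at render time. Assuming domain_name is sphen.local, a computes group named standard containing a node compute1, and computes_states:standard listing repository.client, shinken.client and slurm.client (all example values, the real ones come from the cluster pillar), the node loop above expands to: 26 |    'compute1.sphen.local': 27 |      - repository.client 28 |      - shinken.client 29 |      - slurm.client #}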
-------------------------------------------------------------------------------- /salt/workstation/Centos_7_gnome_desktop.sls: -------------------------------------------------------------------------------- 1 | x_window_system_group: 2 | pkg.group_installed: 3 | - name: "X Window System" 4 | - require: 5 | - sls: repository.client 6 | 7 | gnome_shell_gdm: 8 | pkg.installed: 9 | - pkgs: 10 | - gnome-classic-session 11 | - gnome-terminal 12 | - control-center 13 | - liberation-mono-fonts 14 | - require: 15 | - sls: repository.client 16 | 17 | /etc/systemd/system/default.target: 18 | file.symlink: 19 | - target: /lib/systemd/system/graphical.target 20 | 21 | #reboot_minion: 22 | # salt.function: 23 | # - name: system.reboot 24 | # - require: 25 | # - pkg: x_window_system_group 26 | # - pkg: gnome_shell_gdm 27 | # - file: /etc/systemd/system/default.target 28 | # - tgt: 'compute1' 29 | 30 | #system.reboot: 31 | # module.run: 32 | # - require: 33 | # - pkg: x_window_system_group 34 | # - pkg: gnome_shell_gdm 35 | # - file: /etc/systemd/system/default.target 36 | 37 | 38 | #wait_for_reboots: 39 | # salt.wait_for_event: 40 | # - name: salt/minion/{{salt['grains.get']('id')}}/start 41 | # - require: 42 | # - module: system.reboot 43 | 44 | #system.reboot: 45 | # module.run: 46 | # - require: 47 | # - pkg: x_window_system_group 48 | # - pkg: gnome_shell_gdm 49 | # - file: /etc/systemd/system/default.target 50 | -------------------------------------------------------------------------------- /salt/workstation/Centos_7_office_desktop.sls: -------------------------------------------------------------------------------- 1 | gnome_shell_gdm: 2 | pkg.installed: 3 | - pkgs: 4 | - libreoffice-writer 5 | - libreoffice-calc 6 | - libreoffice-impress 7 | - firefox 8 | - gnome-calculator 9 | - gnome-screenshot 10 | - require: 11 | - sls: workstation.Centos_7_gnome_desktop 12 | -------------------------------------------------------------------------------- /salt/workstation/Fedora_27_xfce_desktop.sls: -------------------------------------------------------------------------------- 1 | xfce_desktop_pkg: 2 | pkg.group_installed: 3 | - name: "Xfce Desktop" 4 | - require: 5 | - sls: repository.client 6 | 7 | gnome_shell_gdm_pkg: 8 | pkg.installed: 9 | - pkgs: 10 | - gdm 11 | - require: 12 | - sls: repository.client 13 | 14 | /etc/systemd/system/default.target: 15 | file.symlink: 16 | - target: /lib/systemd/system/graphical.target 17 | 18 | gdm_service: 19 | service: 20 | - name: gdm 21 | - running 22 | - enable: True 23 | - require: 24 | - pkg: xfce_desktop_pkg 25 | - pkg: gnome_shell_gdm_pkg 26 | 27 | -------------------------------------------------------------------------------- /tools/auth_helper.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | echo "Please enter ldap Manager password:" 4 | read ldap_pass 5 | ldap_pass_hash="'"$(slappasswd -s "$ldap_pass")"'" 6 | 7 | echo 'ldap_private:' > ../pillar/cluster/authentication/ldap_private.sls 8 | echo " ldap_admin_pass: $ldap_pass # this administration password will only be used during ldap server installation, and is protected by pillar/top.sls" >> ../pillar/cluster/authentication/ldap_private.sls 9 | 10 | echo 'ldap_public:' > ../pillar/cluster/authentication/ldap_public.sls 11 | echo " ldap_admin_pass_ssha: $ldap_pass_hash # Hash obtained using slappasswd with the password defined in private part" >> ../pillar/cluster/authentication/ldap_public.sls 12 | echo ' dc: # Use your domain name here if you 
do not know what dc is for ldap (cut your domain name at ".", here sphen.local is cut in two parts)' >> ../pillar/cluster/authentication/ldap_public.sls 13 | echo ' - sphen' >> ../pillar/cluster/authentication/ldap_public.sls 14 | echo ' - local' >> ../pillar/cluster/authentication/ldap_public.sls 15 | 16 | rm -f /tmp/id_rsa* 17 | ssh-keygen -N "" -f /tmp/id_rsa 18 | 19 | echo "ssh_private:" > ../pillar/cluster/authentication/ssh_private.sls 20 | echo " # Put here your ssh private key. It will be installed on the master and used to login on nodes installed thanks to PXE. Note: beware, you need to indent it exactly like in this example." >> ../pillar/cluster/authentication/ssh_private.sls 21 | echo " ssh_master_private_key: |" >> ../pillar/cluster/authentication/ssh_private.sls 22 | cat /tmp/id_rsa | sed 's/^/ /' >> ../pillar/cluster/authentication/ssh_private.sls 23 | 24 | echo "ssh_public:" > ../pillar/cluster/authentication/ssh_public.sls 25 | echo " # This is the ssh public key related to the private key given in the private part of ssh." >> ../pillar/cluster/authentication/ssh_public.sls 26 | echo " ssh_master_public_key: $(cat /tmp/id_rsa.pub)" >> ../pillar/cluster/authentication/ssh_public.sls 27 | 28 | echo "Please enter root password for nodes (not master):" 29 | read root_pass 30 | root_pass="'"$root_pass"'" 31 | root_pass_hash=$(python -c "import crypt,random,string; print(crypt.crypt($root_pass, '\$6\$' + ''.join([random.choice(string.ascii_letters + string.digits) for _ in range(16)])))") 32 | echo "passwords_public:" > ../pillar/cluster/authentication/passwords_public.sls 33 | echo " root_password_hash: $root_pass_hash # This password hash will be the root password of all nodes installed" >> ../pillar/cluster/authentication/passwords_public.sls 34 | --------------------------------------------------------------------------------