├── .gitignore ├── FORMULA ├── LICENSE ├── README.rst ├── _modules ├── hubble.py └── oscap.py ├── hubblestack_nova ├── command.py ├── cve_scan.py ├── cve_scan_v2.py ├── firewall.py ├── grep.py ├── misc.py ├── netstat.py ├── openssl.py ├── pkg.py ├── pkgng_audit.py ├── service.py ├── stat.py ├── sysctl.py ├── win_auditpol.py ├── win_firewall.py ├── win_gp.py ├── win_pkg.py ├── win_reg.py └── win_secedit.py ├── hubblestack_nova_profiles ├── centos_6.json ├── centos_7.json ├── cis │ ├── amazon-201409-level-1-scored-v1-0-0.yaml │ ├── amazon-level-1-scored-v1-0-0.yaml │ ├── centos-6-level-1-scored-v1.yaml │ ├── centos-6-level-1-scored-v2-0-1.yaml │ ├── centos-7-level-1-scored-v1.yaml │ ├── centos-7-level-1-scored-v2-1-0.yaml │ ├── centos-7-level-1-scored-v2.yaml │ ├── coreos-level-1.yaml │ ├── debian-8-level-1-scored-v1-0-0.yaml │ ├── debian-8-level-1-scored-v1.yaml │ ├── rhels-5-level-1-scored-v2-2-0.yaml │ ├── rhels-6-level-1-scored-v1.yaml │ ├── rhels-6-level-1-scored-v2-0-1.yaml │ ├── rhels-7-level-1-scored-v1.yaml │ ├── rhels-7-level-1-scored-v2-1-0.yaml │ ├── rhelw-7-level-1-scored-v1.yaml │ ├── rhelw-7-level-1-scored-v2-1-0.yaml │ ├── ubuntu-1404-level-1-scored-v1-0-0.yaml │ ├── ubuntu-1404-level-1-scored-v1.yaml │ ├── ubuntu-1604-level-1-scored-v1-0-0.yaml │ ├── windows-2008r2-level-1-scored-v1.yaml │ ├── windows-2008r2-level-1-scored-v3-0-0.yaml │ ├── windows-2012r2-level-1-scored-v1.yaml │ └── windows-2012r2-level-1-scored-v2-0-0.yaml ├── cve │ ├── centos-6-salt.yaml │ ├── centos-7-salt.yaml │ ├── scan-v1.yaml │ ├── scan-v2-salt.yaml │ └── scan-v2.yaml ├── firewall │ └── ssh.yaml ├── misc.yaml ├── network │ ├── smtp.yaml │ └── ssh.yaml ├── samples │ ├── dont_blame_nrpe.yaml │ ├── sample_cis.yaml │ ├── sample_command.yaml │ ├── sample_control.yaml │ ├── sample_firewall.yaml │ └── sample_openssl.yaml ├── stig │ └── rhel-6-mac-1-classified.yaml └── top.nova └── utils ├── check_yaml.py ├── cve_store.py └── update_tags.py /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | #Ipython Notebook 62 | .ipynb_checkpoints 63 | 64 | # Pycharm 65 | .idea 66 | 67 | # Mac files 68 | .DS_STORE 69 | .DS_Store 70 | -------------------------------------------------------------------------------- /FORMULA: -------------------------------------------------------------------------------- 1 | name: hubblestack_nova 2 | os: RedHat, CentOS, Debian, Ubuntu 3 | os_family: RedHat, Debian 4 | version: 2016.10.2 5 | release: 1 6 | summary: HubbleStack Nova 7 | description: HubbleStack Nova 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 
8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. 
For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | THIS REPO IS DEPRECATED. PLEASE USE https://github.com/hubblestack/hubble-salt 2 | 3 | .. _nova_introduction: 4 | 5 | Introduction 6 | ============ 7 | 8 | Nova is designed to audit the compliance and security level of a system. It is 9 | composed of multiple modules, which ingest YAML configuration profiles to run a 10 | single or series of audits against a system. 11 | 12 | Two different installation methods are outlined below. The first method is more 13 | stable (and therefore recommended). This method uses Salt's package manager to 14 | track versioned, packaged updates to Hubble's components. 15 | 16 | The second method installs directly from git. It should be considered bleeding 17 | edge and possibly unstable. 18 | 19 | .. _nova_installation: 20 | 21 | Installation 22 | ============ 23 | 24 | Each of the four HubbleStack components have been packaged for use with Salt's 25 | Package Manager (SPM). 
Note that all SPM installation commands should be done 26 | on the *Salt Master*. 27 | 28 | .. _nova_installation_config: 29 | 30 | **Required Configuration** 31 | 32 | Salt's Package Manager (SPM) installs files into ``/srv/spm/{salt,pillar}``. 33 | Ensure that this path is defined in your Salt Master's ``file_roots``: 34 | 35 | .. code-block:: yaml 36 | 37 | file_roots: 38 | - /srv/salt 39 | - /srv/spm/salt 40 | 41 | .. note:: This should be the default value. To verify run: ``salt-call config.get file_roots`` 42 | 43 | .. tip:: Remember to restart the Salt Master after making this change to the configuration. 44 | 45 | .. _nova_installation_packages: 46 | 47 | Installation (Packages) 48 | ----------------------- 49 | 50 | Installation is as easy as downloading and installing packages. (Note: in 51 | future releases you'll be able to subscribe directly to our HubbleStack SPM 52 | repo for updates and bugfixes!) 53 | 54 | Nova packages have been divided into modules and profiles. This way we can 55 | iterate policy changes separate from the code. 56 | 57 | **Nova Modules** 58 | 59 | .. code-block:: shell 60 | 61 | wget https://spm.hubblestack.io/nova/hubblestack_nova-2016.10.2-1.spm 62 | spm local install hubblestack_nova-2016.10.2-1.spm 63 | 64 | **Nova Profiles** 65 | 66 | .. code-block:: shell 67 | 68 | wget https://spm.hubblestack.io/nova/hubblestack_nova_profiles-20161101-1.spm 69 | spm local install hubblestack_nova_profiles-20161101-1.spm 70 | 71 | You should now be able to sync the new modules to your minion(s) using the 72 | ``sync_modules`` Salt utility: 73 | 74 | .. code-block:: shell 75 | 76 | salt \* saltutil.sync_modules 77 | 78 | Once these modules are synced you are ready to run a HubbleStack Nova audit. 79 | 80 | Skip to :ref:`Usage `. 81 | 82 | .. _nova_installation_manual: 83 | 84 | Installation (Manual) 85 | --------------------- 86 | 87 | Place ``_modules/hubble.py`` into your ``salt/_modules/`` directory, and sync 88 | it to the minions. 
89 | 90 | .. code-block:: shell 91 | 92 | git clone https://github.com/hubblestack/nova.git hubblestack-nova.git 93 | cd hubblestack-nova.git 94 | mkdir -p /srv/salt/_modules/ 95 | cp _modules/hubble.py /srv/salt/_modules/ 96 | cp -a hubblestack_nova_profiles /srv/salt/ 97 | cp -a hubblestack_nova /srv/salt/ 98 | 99 | salt \* saltutil.sync_modules 100 | salt \* hubble.sync 101 | 102 | .. _nova_installation_gitfs: 103 | 104 | Installation (GitFS) 105 | -------------------- 106 | 107 | This installation method subscribes directly to our GitHub repository, pinning 108 | to a tag or branch. This method requires no package installation or manual 109 | checkouts. 110 | 111 | Requirements: GitFS support on your Salt Master. 112 | 113 | **/etc/salt/master.d/hubblestack-nova.conf** 114 | 115 | .. code-block:: diff 116 | 117 | gitfs_remotes: 118 | - https://github.com/hubblestack/nova: 119 | - base: v2017.1.0 120 | 121 | .. tip:: Remember to restart the Salt Master after applying this change. 122 | 123 | .. _nova_usage: 124 | 125 | Skip to :ref:`Usage `. 126 | 127 | Usage 128 | ===== 129 | 130 | There are four primary functions in the hubble.py module: 131 | 132 | 1. ``hubble.sync`` will sync the ``hubblestack_nova_profiles/`` and ``hubblestack_nova/`` directories to the minion(s). 133 | 2. ``hubble.load`` will load the synced audit modules and their yaml configuration files. 134 | 3. ``hubble.audit`` will audit the minion(s) using the YAML profile(s) you provide as comma-separated arguments 135 | 4. ``hubble.top`` will audit the minion(s) using the ``top.nova`` configuration. 136 | 137 | ``hubble.audit`` takes two optional arguments. The first is a comma-separated 138 | list of paths. These paths can be files or directories within the 139 | ``hubblestack_nova_profiles`` directory. The second argument allows for 140 | toggling Nova configuration, such as verbosity, level of detail, etc. 
141 | 142 | If ``hubble.audit`` is run without targeting any audit configs or directories, 143 | it will instead run ``hubble.top`` with no arguments. 144 | 145 | ``hubble.audit`` will return a list of audits which were successful, and a list 146 | of audits which failed. 147 | 148 | Here are some example calls: 149 | 150 | .. code-block:: bash 151 | 152 | # Run the cve scanner and the CIS profile: 153 | salt \* hubble.audit cve.scan-v2,cis.centos-7-level-1-scored-v1 154 | 155 | # Run hubble.top with the default topfile (top.nova) 156 | salt \* hubble.top 157 | 158 | # Run all yaml configs and tags under salt://hubblestack_nova_profiles/foo/ 159 | # and salt://hubblestack_nova_profiles/bar, but only run audits with tags 160 | # starting with "CIS" 161 | salt \* hubble.audit foo,bar tags='CIS*' 162 | 163 | .. _nova_usage_topfile: 164 | 165 | Nova Topfiles 166 | ------------- 167 | 168 | Nova topfiles look very similar to saltstack topfiles, except the top-level 169 | key is always ``nova``, as nova doesn't have environments. 170 | 171 | .. code-block:: yaml 172 | 173 | nova: 174 | '*': 175 | - cve.scan-v2 176 | - network.ssh 177 | - network.smtp 178 | 'web*': 179 | - cis.centos-7-level-1-scored-v1 180 | - cis.centos-7-level-2-scored-v1 181 | 'G@os_family:debian': 182 | - network.ssh 183 | - cis.debian-7-level-1-scored: 'CIS*' 184 | 185 | Additionally, all nova topfile matches are compound matches, so you never 186 | need to define a match type like you do in saltstack topfiles. 187 | 188 | Each list item is a string representing the dot-separated location of a 189 | yaml file which will be run with hubble.audit. You can also specify a 190 | tag glob to use as a filter for just that yaml file, using a colon 191 | after the yaml file (turning it into a dictionary). See the last two lines 192 | in the yaml above for examples. 193 | 194 | Examples: 195 | 196 | .. 
code-block:: bash 197 | 198 | salt '*' hubble.top 199 | salt '*' hubble.top foo/bar/top.nova 200 | salt '*' hubble.top foo/bar.nova verbose=True 201 | 202 | .. _nova_usage_control: 203 | 204 | Compensating Control Configuration 205 | ---------------------------------- 206 | 207 | In some cases, your organization may want to skip certain audit checks for 208 | certain hosts. This is supported via compensating control configuration. 209 | 210 | You can skip a check globally by adding a ``control: `` key to the check 211 | itself. This key should be added at the same level as ``description`` and 212 | ``trigger`` pieces of a check. In this case, the check will never run, and will 213 | output under the ``Controlled`` results key. 214 | 215 | Nova also supports separate control profiles, for more fine-grained control 216 | using topfiles. You can use a separate YAML top-level key called ``control``. 217 | Generally, you'll put this top-level key inside of a separate YAML file and 218 | only include it in the top-data for the hosts for which it is relevant. 219 | 220 | For these separate control configs, the audits will always run, whether they 221 | are controlled or not. However, controlled audits which fail will be converted 222 | from ``Failure`` to ``Controlled`` in a post-processing operation. 223 | 224 | The control config syntax is as follows: 225 | 226 | .. code-block:: yaml 227 | 228 | control: 229 | - CIS-2.1.4: This is the reason we control the check 230 | - some_other_tag: 231 | reason: This is the reason we control the check 232 | - a_third_tag_with_no_reason 233 | 234 | Note that providing a reason for the control is optional. Any of the three 235 | formats shown in the yaml list above will work. 236 | 237 | Once you have your compensating control config, just target the yaml to the 238 | hosts you want to control using your topfile. 
In this case, all the audits will 239 | still run, but if any of the controlled checks fail, they will be removed from 240 | ``Failure`` and added to ``Controlled``, and will be treated as a Success for 241 | the purposes of compliance percentage. 242 | 243 | .. _nova_usage_schedule: 244 | 245 | Schedule 246 | -------- 247 | 248 | In order to run the audits once daily, you can use the following cron job: 249 | 250 | **/etc/cron.d/hubble** 251 | 252 | .. code-block:: yaml 253 | 254 | MAILTO="" 255 | SHELL=/bin/bash 256 | @daily root /usr/bin/salt '*' hubble.top verbose=True,show_profile=True --return splunk_nova_return 257 | 258 | .. _nova_configuration: 259 | 260 | Configuration 261 | ============= 262 | 263 | .. _nova_under_the_hood: 264 | 265 | Under the Hood 266 | ============== 267 | 268 | 1. The directory/environment in which nova searches for audit modules are 269 | configurable via pillar. The defaults are shown below: 270 | 271 | .. code-block:: yaml 272 | 273 | hubblestack: 274 | nova: 275 | saltenv: base 276 | module_dir: salt://hubblestack_nova 277 | profile_dir: salt://hubblestack_nova_profiles 278 | 279 | 2. By default, ``hubble.audit`` will call ``hubble.load`` (which in turn calls 280 | ``hubble.sync``) in order to ensure that it is auditing with the most up-to-date 281 | information. These operations are fairly fast, but if you want to avoid the 282 | additional overhead, you can disable these behaviors via pillar (defaults are 283 | shown, change to False to disable behaviors): 284 | 285 | .. code-block:: yaml 286 | 287 | hubblestack: 288 | nova: 289 | autosync: True 290 | autoload: True 291 | 292 | .. _nova_development: 293 | 294 | Development 295 | =========== 296 | 297 | If you're interested in contributing to this project this section outlines the 298 | structure and requirements for Nova audit module development. 299 | 300 | .. 
_nova_development_anatomy: 301 | 302 | Anatomy of a Nova audit module 303 | ------------------------------ 304 | 305 | .. code-block:: python 306 | 307 | # -*- encoding: utf-8 -*- 308 | ''' 309 | Loader and primary interface for nova modules 310 | 311 | :maintainer: HubbleStack 312 | :maturity: 20160214 313 | :platform: Linux 314 | :requires: SaltStack 315 | 316 | ''' 317 | from __future__ import absolute_import 318 | import logging 319 | 320 | All Nova plugins should include the above header, expanding the docstring to 321 | include full documentation 322 | 323 | .. code-block:: python 324 | 325 | import fnmatch 326 | import salt.utils 327 | 328 | def __virtual__(): 329 | if salt.utils.is_windows(): 330 | return False, 'This audit module only runs on linux' 331 | return True 332 | 333 | 334 | def audit(data_list, tag, verbose=False, show_profile=False, debug=False): 335 | __tags__ = [] 336 | for profile, data in data_list: 337 | # This is where you process the dictionaries passed in by hubble.py, 338 | # searching for data pertaining to this audit module. Modules which 339 | # require no data should use yaml which is empty except for a 340 | # top-level key, and should only do work if the top-level key is 341 | # found in the data 342 | 343 | # if show_profile is True, then we need to also inject the profile 344 | # in the data for each check so that it appears in verbose output 345 | pass 346 | 347 | ret = {'Success': [], 'Failure': []} 348 | for tag in __tags__: 349 | if fnmatch.fnmatch(tag, tags): 350 | # We should run this tag 351 | # 352 | ret['Success'].append(tag) 353 | return ret 354 | 355 | 356 | All Nova plugins require a ``__virtual__()`` function to determine module 357 | compatibility, and an ``audit()`` function to perform the actual audit 358 | functionality 359 | 360 | The ``audit()`` function must take four arguments, ``data_list``, ``tag``, 361 | ``verbose``, ``show_profile``, and ``debug``. 
# -*- coding: utf-8 -*-
'''
OpenSCAP scanner execution module.

:maintainer: HubbleStack / cedwards
:maturity: 2016.7.0
:platform: RedHat
:requires: SaltStack
:upstream: http://open-scap.org

This execution module uses the openSCAP scanner utility and an argument of an
XML guide. The returned data should be a dictionary of the cmd output.

The packages are: openscap-scanner openscap

Configurable options would be:
    show_success: True/False

.. code-block:: yaml

    cve_scan: https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL7.xml

'''
from __future__ import absolute_import

# Import python libs
import logging

# Import salt libs
from salt.ext.six.moves.urllib.parse import urlparse  # pylint: disable=no-name-in-module
from salt import utils

__virtualname__ = 'oscap'

log = logging.getLogger(__name__)

# Absolute path to the oscap binary, or None if it is not installed.
_OSCAP = utils.which('oscap')


def __virtual__():
    '''
    Only load this module if the ``oscap`` binary is present on the minion.
    '''
    # Previously this returned True unconditionally, which let scan() build
    # a command containing the literal string 'None' when the binary was
    # missing.  Gate module loading on the binary instead, matching the
    # module docstring ("requires oscap binary").
    if not _OSCAP:
        return False, 'The oscap binary could not be found on this system'
    return True


def scan(filename):
    '''
    Evaluate an OVAL definition file with ``oscap oval eval`` and return the
    Red Hat security advisories (RHSAs) that evaluated to true.

    filename
        Path or URL of the OVAL XML file.  A bare path (no URL scheme) is
        prefixed with ``salt://`` so the file is fetched from the Salt
        fileserver via ``cp.cache_file``.

    Returns a dict of the form
    ``{'Vulnerabilities': ['RHSA-<id> : <errata url>', ...]}``.
    '''
    parsed = urlparse(filename)
    if not parsed.scheme:
        filename = 'salt://' + filename
    cached_source = __salt__['cp.cache_file'](filename)

    ret = {'Vulnerabilities': []}

    cmd = '{0} oval eval {1}'.format(_OSCAP, cached_source)
    salt_ret = __salt__['cmd.run_all'](cmd, python_shell=False)

    for item in salt_ret['stdout'].split('\n'):
        # Result lines look like 'oval:com.redhat.rhsa:def:20140675: true';
        # only lines flagged true for an RHSA definition are vulnerabilities.
        if 'true' in item and 'rhsa' in item:
            # Fourth colon-separated field is YYYYNNNN (year + advisory
            # number); split once instead of three times.
            rhsa = item.split(':')[3]
            year, num = rhsa[:4], rhsa[4:]
            url = 'https://rhn.redhat.com/errata/RHSA-' + year + '-' + num + '.html'
            ret['Vulnerabilities'].append('RHSA-' + rhsa + ' : ' + url)

    return ret
pillar/minion 7 | config via the hubblestack:nova:enable_command_module (should be set to True 8 | to enable this module). This allows nova to run arbitrary commands via yaml 9 | profiles. 10 | 11 | :maintainer: HubbleStack / basepi 12 | :maturity: 2016.7.0 13 | :platform: All 14 | :requires: SaltStack 15 | 16 | Sample YAML data, with inline comments: 17 | 18 | # Top level key lets the module know it should look at this data 19 | command: 20 | # Unique ID for this set of audits 21 | nodev: 22 | data: 23 | # 'osfinger' grain, for multiplatform support 24 | 'Red Hat Enterprise Linux Server-6': 25 | # tag is required 26 | tag: CIS-1.1.10 27 | # `commands` is a list of commands with individual flags 28 | commands: 29 | # Command to be run 30 | - 'grep "[[:space:]]/home[[:space:]]" /etc/fstab': 31 | # Check the output for this pattern 32 | # If match_output not provided, any output will be a match 33 | match_output: nodev 34 | # Use regex when matching the output (default False) 35 | match_output_regex: False 36 | # Invert the success criteria. If True, a match will cause failure (default False) 37 | fail_if_matched: False 38 | - 'mount | grep /home': 39 | match_output: nodev 40 | match_output_regex: False 41 | # Match each line of the output against our pattern 42 | # Any that don't match will make the audit fail (default False) 43 | match_output_by_line: True 44 | - ? 45 | | 46 | echo 'this is a multi-line' 47 | echo 'bash script' 48 | echo 'note the special ? syntax' 49 | : 50 | # Shell through which the script will be run, must be abs path 51 | shell: /bin/bash 52 | match_output: this 53 | # Aggregation strategy for multiple commands. 
def __virtual__():
    # Windows command auditing lives in the win_* modules; this one is
    # linux-only.
    if salt.utils.is_windows():
        return False, 'This audit module only runs on linux'
    return True


def audit(data_list, tags, debug=False):
    '''
    Run the command audits contained in the data_list

    data_list
        List of (profile, data) tuples from the processed yaml profiles.

    tags
        Glob pattern; only audits whose tag matches are executed.

    debug
        When True, log the merged data and tag map.

    Returns a dict of 'Success'/'Failure'/'Controlled' lists (plus 'Error'
    when the module has not been enabled in config).
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        log.debug('command audit __data__:')
        log.debug(__data__)
        log.debug('command audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}

    # This module runs arbitrary shell commands from yaml, so it must be
    # explicitly opted into via pillar/minion config.
    if __tags__ and not __salt__['config.get']('hubblestack:nova:enable_command_module',
                                               False):
        ret['Error'] = ['command module has not been explicitly enabled in '
                        'config. Please set hubblestack:nova:enable_command_module '
                        'to True in pillar or minion config to allow this module.']
        return ret

    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                if 'commands' not in tag_data:
                    continue
                # One boolean result per configured command; aggregated below.
                command_results = []
                for command_data in tag_data['commands']:
                    for command, command_args in command_data.iteritems():
                        if 'shell' in command_args:
                            # Run multi-line scripts through the configured shell.
                            cmd_ret = __salt__['cmd.run'](command,
                                                          python_shell=True,
                                                          shell=command_args['shell'])
                        else:
                            cmd_ret = __salt__['cmd.run'](command,
                                                          python_shell=True)

                        # With no match_output, any output at all is a match.
                        found = False
                        if cmd_ret:
                            found = True

                        if 'match_output' in command_args:

                            if command_args.get('match_output_by_line'):
                                # Every line must match, otherwise found is cleared.
                                cmd_ret_lines = cmd_ret.splitlines()
                            else:
                                cmd_ret_lines = [cmd_ret]

                            for line in cmd_ret_lines:
                                if command_args.get('match_output_regex'):
                                    # NOTE: re.match anchors at the start of the line.
                                    if not re.match(command_args['match_output'], line):
                                        found = False
                                else:  # match without regex
                                    if command_args['match_output'] not in line:
                                        found = False

                        # Invert the success criteria when requested.
                        if command_args.get('fail_if_matched'):
                            found = not found

                        command_results.append(found)

                aggregation = tag_data.get('aggregation', 'and')

                if aggregation.lower() == 'or':
                    if any(command_results):
                        ret['Success'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)
                else:  # assume 'and' if it's not 'or'
                    if all(command_results):
                        ret['Success'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)

    return ret


def _merge_yaml(ret, data, profile=None):
    '''
    Merge two yaml dicts together at the command level

    Each audit id becomes a one-entry dict appended to ret['command'];
    the originating profile is recorded under 'nova_profile'.
    '''
    if 'command' not in ret:
        ret['command'] = []
    if 'command' in data:
        for key, val in data['command'].iteritems():
            if profile and isinstance(val, dict):
                val['nova_profile'] = profile
            ret['command'].append({key: val})
    return ret


def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml

    Matches the minion's 'osfinger' grain against the (possibly
    comma-separated, glob-capable) osfinger keys under each audit's 'data',
    falling back to the '*' entry. Returns {tag: [formatted_data, ...]}.
    '''
    ret = {}
    distro = __grains__.get('osfinger')
    for audit_dict in data.get('command', []):
        # command:0
        for audit_id, audit_data in audit_dict.iteritems():
            # command:0:nodev
            tags_dict = audit_data.get('data', {})
            # command:0:nodev:data
            tags = None
            for osfinger in tags_dict:
                if osfinger == '*':
                    continue
                # An osfinger key may list several comma-separated globs.
                osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                for osfinger_glob in osfinger_list:
                    if fnmatch.fnmatch(distro, osfinger_glob):
                        tags = tags_dict.get(osfinger)
                        break
                if tags is not None:
                    break
            # If we didn't find a match, check for a '*'
            if tags is None:
                tags = tags_dict.get('*', {})
            # command:0:nodev:data:Debian-8
            if 'tag' not in tags:
                tags['tag'] = ''
            tag = tags['tag']
            if tag not in ret:
                ret[tag] = []
            formatted_data = {'tag': tag,
                              'module': 'command'}
            # Osfinger-specific tags override the audit-level data; the raw
            # 'data' mapping is dropped from the report afterwards.
            formatted_data.update(audit_data)
            formatted_data.update(tags)
            formatted_data.pop('data')
            ret[tag].append(formatted_data)
    return ret
def __virtual__():
    # Requires a linux host with the openscap scanner installed.
    if salt.utils.is_linux() and salt.utils.which('oscap'):
        return True
    return False, 'This module requires Linux and the oscap binary'


def audit(data_list, tags, debug=False):
    '''
    Collect every ``cve_scan`` feed from the profile data and run each one
    through the ``oscap.scan`` execution module; scan output is reported
    under 'Failure'.
    '''
    ret = {'Success': [], 'Failure': []}

    __tags__ = []
    __feed__ = []
    for data in data_list:
        # NOTE(review): other nova modules receive (profile, data) tuples
        # in data_list; confirm the caller passes bare data dicts here.
        if 'cve_scan' not in data:
            continue
        __tags__ = ['cve_scan']
        feed_source = data['cve_scan']
        if isinstance(feed_source, str):
            __feed__.append(feed_source)
        else:
            # Anything that isn't a single string is assumed to be a list.
            __feed__.extend(feed_source)

    if not __tags__:
        # No cve_scan yaml data found; nothing to do.
        return ret

    for feed in __feed__:
        ret['Failure'].append(__salt__['oscap.scan'](feed))
    return ret
18 | 19 | Sample YAML data with inline comments: 20 | 21 | cve_scan_v2: 22 | # Seconds until the local cache expires 23 | ttl: 86400 24 | # Source of cve data 25 | url: http://vulners.com/ 26 | # Optional control tag 27 | control: 28 | # minimum score, vulnerabilities with a smaller 29 | # score added to 'Controlled' output 30 | score: 3 31 | 32 | 33 | The source of the cve data can be http://vulners.com/, salt://path/to/json, and 34 | any other url that returns cve data in json format. If the url contains 35 | vulners.com, then this module will use the local system's os and os version to 36 | dynamically query vulner.com/api/v3 for cve data specifically related to your 37 | system. If the url doesn't contain vulners.com, it will query the exact url, so 38 | that endpoint must return cve data specific to the system you are scanning. 39 | 40 | The cve data json must be formatted as follows: 41 | 42 | [ 43 | 44 | {'_source': {'affectedPackage': [{'OS': 'CentOS', 45 | 'OSVersion': '7', 46 | 'operator': 'lt', 47 | 'packageFilename': 'krb5-server-1.13.2-12.el7_2.x86_64.rpm', 48 | 'packageName': 'krb5-server', 49 | 'packageVersion': '1.13.2-12.el7_2'}, 50 | {'OS': 'CentOS', 51 | 'OSVersion': '7', 52 | 'operator': 'lt', 53 | 'packageFilename': 'krb5-libs-1.13.2-12.el7_2.i686.rpm', 54 | 'packageName': 'krb5-libs', 55 | 'packageVersion': '1.13.2-12.el7_2'} 56 | ] 57 | 'cvelist': ['CVE-2015-8631', 58 | 'CVE-2015-8630', 59 | 'CVE-2015-8629'], 60 | 'cvss': {'score': 6.8} 61 | 'href': 'http://lists.centos.org/pipermail/centos-announce/2016-March/021788.html', 62 | 'reporter': 'CentOS Project', 63 | 'title': 'Moderate krb5 Security Update' 64 | } 65 | }, 66 | ... 
def __virtual__():
    # Package auditing relies on rpm/dpkg metadata; Windows is unsupported.
    return not salt.utils.is_windows()


def audit(data_list, tags, debug=False):
    '''
    Main audit function. See module docstring for more information on usage.

    data_list
        List of (profile, data) tuples from the processed yaml profiles.

    tags
        Glob pattern; non-matching failures are filtered from the output.

    debug
        Accepted for interface parity with the other nova modules.

    Returns {} when no cve_scan_v2 yaml was found, otherwise a dict with
    'Success', 'Failure' and (when non-empty) 'Controlled' report lists.
    '''
    # Prefer the major-release grain, falling back to the full release string.
    os_version = __grains__.get('osmajorrelease', None)
    if os_version is None:
        os_version = __grains__.get('osrelease', None)
    os_name = __grains__['os'].lower()

    log.debug("os_version: %s, os_name: %s", os_version, os_name)

    endpoints = []

    # Go through yaml to check for cve_scan_v2; if it's present, check for a
    # cached version of the scan.
    for profile, data in data_list:

        if 'cve_scan_v2' in data:

            ttl = data['cve_scan_v2']['ttl']
            url = data['cve_scan_v2']['url']
            control = data['cve_scan_v2'].get('control', {})
            # Ability to add more controls easily, in control dict
            min_score = float(control.get('score', 0))
            # Hash the url so every source gets its own cache file.
            urlhash = hashlib.md5(url).hexdigest()
            cached_json = os.path.join(__opts__['cachedir'],
                                       'cve_scan_cache',
                                       '%s.json' % urlhash)
            cached_zip = os.path.join(__opts__['cachedir'],
                                      'cve_scan_cache',
                                      '%s.zip' % urlhash)
            # Make cache directory and all parent directories if it doesn't
            # exist.
            if not os.path.exists(os.path.dirname(cached_json)):
                os.makedirs(os.path.dirname(cached_json))
            cache = _get_cache(ttl, cached_json)
            log.debug("valid cache: %s, for url: %s", cache != [], url)
            endpoints.append((url, cache, cached_json, cached_zip, min_score, profile))

    # If we don't find our module in the yaml
    if not endpoints:
        return {}

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    # Dictionary of {pkg_name: list(pkg_versions)}
    local_pkgs = __salt__['pkg.list_pkgs'](versions_as_list=True)

    for url, cache, cached_json, cached_zip, min_score, profile in endpoints:
        log.debug("url: %s, min_score: %s", url, min_score)
        if cache:  # Valid cached file
            master_json = cache
        else:  # Query the url for cve's
            if url.startswith('http://') or url.startswith('https://'):
                if 'vulners.com' in url:
                    # Vulners api can only handle http:// requests from
                    # requests.get
                    if url.startswith('https'):
                        # BUG FIX: str.replace returns a new string; the
                        # original discarded the result so the request still
                        # used https. Rebind the corrected url.
                        url = url.replace('https', 'http', 1)
                    # Format the url for the request based on operating system.
                    if url.endswith('/'):
                        url = url[:-1]
                    url_final = '%s/api/v3/archive/distributive/?os=%s&version=%s' \
                            % (url, os_name, os_version)
                    log.debug('requesting: %s', url_final)
                    cve_query = requests.get(url_final)
                    # Confirm that the request was valid.
                    if cve_query.status_code != 200:
                        raise Exception('Vulners requests was not successful. Check the url.')
                    # Save vulners zip attachment in cache location and
                    # extract json
                    try:
                        # 'wb': the attachment is a binary zip archive.
                        with open(cached_zip, 'wb') as zip_attachment:
                            zip_attachment.write(cve_query.content)
                        zip_file = ZipFile(cached_zip)
                        zip_file.extractall(os.path.dirname(cached_zip))
                        os.remove(cached_zip)
                        extracted_json = os.path.join(__opts__['cachedir'],
                                                      'cve_scan_cache',
                                                      '%s_%s.json' % (os_name, str(os_version).replace('.', '')))
                        log.debug('attempting to open %s', extracted_json)
                        with open(extracted_json, 'r') as json_file:
                            master_json = json.load(json_file)
                        os.remove(extracted_json)
                    except IOError as ioe:
                        log.error('The json zip attachment was not able to be extracted from vulners.')
                        raise ioe
                else:  # Not a vulners request, external source for cve's
                    log.debug('requesting: %s', url)
                    cve_query = requests.get(url)
                    if cve_query.status_code != 200:
                        log.error('URL request was not successful.')
                        raise Exception('The url given is invalid.')
                    master_json = json.loads(cve_query.text)
                # Cache results.
                try:
                    with open(cached_json, 'w') as cache_file:
                        json.dump(master_json, cache_file)
                except IOError:
                    log.error('The cve results weren\'t able to be cached')
            elif url.startswith('salt://'):
                # Cache the file
                log.debug('getting file from %s', url)
                cache_file = __salt__['cp.get_file'](url, cached_json)
                if cache_file:
                    # Context manager so the handle is closed (the original
                    # leaked an open file object here).
                    with open(cache_file) as json_file:
                        master_json = json.load(json_file)
                else:
                    raise IOError('The file was not able to be retrieved from the salt file server.')
            else:
                raise Exception('The url is invalid. It does not begin with http(s):// or salt://')

        affected_pkgs = _get_cve_vulnerabilities(master_json, os_version)

        # Check all local packages against cve vulnerablities in affected_pkgs
        for local_pkg in local_pkgs:
            vulnerable = None
            if local_pkg in affected_pkgs:
                # There can be multiple versions for a single local package,
                # check all
                for local_version in local_pkgs[local_pkg]:
                    # There can be multiple cve announcements for a single
                    # package, check against all
                    for affected_obj in affected_pkgs[local_pkg]:
                        affected_version = affected_obj.pkg_version
                        if _is_vulnerable(local_version, affected_version, affected_obj.operator):
                            # If the local pkg hasn't been found as vulnerable
                            # yet, vulnerable is None
                            if not vulnerable:
                                affected_obj.oudated_version = local_version
                                vulnerable = affected_obj
                            # Already marked: keep the cve with the highest
                            # severity
                            elif affected_obj.score > vulnerable.score:
                                affected_obj.oudated_version = local_version
                                vulnerable = affected_obj
                if vulnerable:
                    # Below the control threshold the finding is reported as
                    # 'Controlled' rather than 'Failure'.
                    if vulnerable.score < min_score:
                        ret['Controlled'].append(vulnerable.get_report(profile))
                    else:
                        ret['Failure'].append(vulnerable.get_report(profile))

    # Filter the results against the requested tag glob.
    if tags != '*':
        log.debug("tags: %s", tags)
        for bucket in ('Failure', 'Controlled'):
            # BUG FIX: the original matched the glob against
            # report.keys()[0] — an arbitrary key NAME of the flat report
            # dict ('href', 'score', ...) — instead of the audit tag value.
            ret[bucket] = [report for report in ret[bucket]
                           if fnmatch.fnmatch(report.get('tag', ''), tags)]

    if not ret['Controlled']:
        ret.pop('Controlled')

    return ret
def _get_cve_vulnerabilities(query_results, os_version):
    '''
    Returns dictionary of vulnerablities, mapped as pkg_name:list(pkgObj).

    query_results
        Parsed cve json (list of reports, each with a '_source' mapping).

    os_version
        Only affected packages whose 'OSVersion' equals this value (or
        'any') are included.

    Raises KeyError when a report is missing a required key.
    '''

    vulnerable_pkgs = {}

    for report in query_results:
        try:
            reporter = report['_source'].get('reporter', '')
            cve_list = report['_source'].get('cvelist', [])
            href = report['_source'].get('href', '')
            score = report['_source']['cvss'].get('score', 0)
            title = report['_source'].get('title', 'No Title Given')

            for pkg in report['_source']['affectedPackage']:
                # _source:affectedPackages
                # NOTE(review): assumes json 'OSVersion' and the grain-derived
                # os_version have the same type (str) — confirm for int grains.
                if pkg['OSVersion'] in ['any', os_version]:  # Only use matching os
                    pkg_obj = VulnerablePkg(title, pkg['packageName'], pkg['packageVersion'],
                                            score, pkg['operator'], reporter, href, cve_list)
                    vulnerable_pkgs.setdefault(pkg_obj.pkg, []).append(pkg_obj)
        except KeyError as key_err:
            log.error('Format error at: %s', report)
            # BUG FIX: the original compared the KeyError *object* to the
            # string '_source' (always unequal), so the generic branch was
            # unreachable and the taken branch formatted with 'pkg', which
            # is unbound when '_source' itself is missing (NameError).
            missing_key = key_err.args[0] if key_err.args else key_err
            if missing_key != '_source':
                raise KeyError('The cve data was not formatted correctly at: %s' % report)
            else:
                raise KeyError('The cve data was not formatted correctly')
    return vulnerable_pkgs
def _is_vulnerable(local_version, affected_version, operator):
    '''
    Given two version strings and an operator ('lt' or 'le'), return whether
    the locally installed version is vulnerable relative to the affected
    version.

    Uses salt's pkg.version_cmp when the distro provides it, otherwise falls
    back to a component-wise LooseVersion comparison.
    '''
    # Get rid of prefix (epoch) if version number has one, ex '1:3.4.52'
    if ':' in local_version:
        _, _, local_version = local_version.partition(':')
    if ':' in affected_version:
        _, _, affected_version = affected_version.partition(':')

    compare = None
    # Try to use salt's built in comparison module, if it exists for distro
    if 'pkg.version_cmp' in __salt__:
        compare = __salt__['pkg.version_cmp'](local_version, affected_version)

    # When salt can't compare, use LooseVersion
    if compare is None:
        # Compare from higher order to lower order based on '-' split.
        local_parts = local_version.split('-')
        affected_parts = affected_version.split('-')

        # BUG FIX: the original indexed affected_parts with the local
        # component index, raising IndexError when the affected version had
        # fewer '-'-separated components. zip() stops at the shorter list.
        for local_str, affected_str in zip(local_parts, affected_parts):

            # Equal components: defer to the lower-order components.
            if local_str == affected_str:
                continue

            local_obj = LooseVersion(local_str)
            affected_obj = LooseVersion(affected_str)

            # Return when the highest-order differing component decides it.
            if local_obj > affected_obj:
                compare = 1
                break
            elif local_obj < affected_obj:
                compare = -1
                break
        # If the loop exits without break, all shared components are equal;
        # the version with extra trailing components sorts as greater.
        else:
            compare = ((len(local_parts) > len(affected_parts))
                       - (len(local_parts) < len(affected_parts)))

    # Return whether local_version is vulnerable to affected_version
    if operator == 'le':
        return compare <= 0
    elif operator == 'lt':
        return compare < 0
def _get_cache(ttl, cache_path):
    '''
    Return the cached cve data at cache_path when it is younger than ttl
    seconds, otherwise an empty list.

    ttl
        Maximum cache age, in seconds.

    cache_path
        Path of the cached json file.
    '''
    # Check if we have a valid cached version.
    try:
        cached_time = os.path.getmtime(cache_path)
    except OSError:
        # Cache file missing/unreadable: no cache.
        return []
    if current_time() - cached_time < ttl:
        log.debug('%s is less than ttl', cache_path)
        try:
            with open(cache_path) as json_file:
                loaded_json = json.load(json_file)
                return loaded_json
        except IOError:
            return []
        except ValueError:
            # File exists but is not valid json; treat as no cache.
            log.error('%s was not json formatted', cache_path)
            return []
    else:
        log.debug('%s was older than ttl', cache_path)
        return []


class VulnerablePkg:
    '''
    Object representing a vulnerable pkg for the current operating system.
    '''
    def __init__(self, title, pkg, pkg_version, score, operator, reporter, href, cve_list):
        # Human-readable advisory title.
        self.title = title
        # Affected package name.
        self.pkg = pkg
        # Version the operator compares the installed version against.
        self.pkg_version = pkg_version
        # cvss score, normalized to float.
        self.score = float(score)
        if operator not in ['lt', 'le']:
            # BUG FIX: the original log call had a %s placeholder but passed
            # no argument, so it logged the literal '%s'.
            log.error("pkg:%s contains an operator that's not supported and was changed to 'lt'",
                      pkg)
            operator = 'lt'
        self.operator = operator
        self.href = href
        self.cve_list = cve_list
        self.reporter = reporter
        # NOTE: misspelled name ('oudated') kept for compatibility — audit()
        # assigns this attribute by its current name.
        self.oudated_version = None

    def get_report(self, profile):
        '''
        Return the dictionary of what should be reported in failures.
        '''
        return {
            'tag': self.pkg + '-' + self.pkg_version,
            'href': self.href,
            'affected_version': self.pkg_version,
            'reporter': self.reporter,
            'score': self.score,
            'cve_list': self.cve_list,
            'affected_pkg': self.pkg,
            'local_version': self.oudated_version,
            'description': self.title,
            'nova_profile': profile
        }
def __virtual__():
    # Requires linux plus the iptables binary for the iptables.* modules.
    if salt.utils.is_windows():
        return False, 'This audit module only runs on linux'
    if not salt.utils.which('iptables'):
        return (False, 'The iptables execution module cannot be loaded: iptables not installed.')
    return True


def audit(data_list, tags, debug=False):
    '''
    Run the firewall audits contained in the data_list.

    data_list
        List of (profile, data) tuples from the processed yaml profiles.

    tags
        Glob pattern; only audits whose tag matches are executed.

    debug
        When True, log the merged data and tag map.

    Returns a dict of 'Success'/'Failure'/'Controlled' lists.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        # NOTE(review): these labels say 'service' but this is the firewall
        # module — looks like a copy-paste from service.py.
        log.debug('service audit __data__:')
        log.debug(__data__)
        log.debug('service audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                table = tag_data['table']
                chain = tag_data['chain']
                family = tag_data['family']

                # creating the arguments for the iptables.build_rule salt execution module
                args = {'table': table,
                        'chain': chain,
                        'family': family}

                # since table, chain and family are already given for checking the existence of the rule,
                # they are not needed here
                if 'table' in tag_data['rule']:
                    tag_data['rule'].pop('table')
                if 'chain' in tag_data['rule']:
                    tag_data['rule'].pop('chain')
                if 'family' in tag_data['rule']:
                    tag_data['rule'].pop('family')

                args.update(tag_data['rule'])

                # building the rule using iptables.build_rule
                rule = __salt__['iptables.build_rule'](**args)

                # replacing all the elements of the rule with the actual rule (for verbose mode)
                tag_data['rule'] = rule

                # checking the existence of the rule
                salt_ret = __salt__['iptables.check'](table=table, chain=chain, rule=rule, family=family)

                # iptables.check may return an error string instead of a
                # boolean; treat anything non-boolean as a failed check.
                if salt_ret not in (True, False):
                    log.error(salt_ret)
                    passed = False
                else:
                    passed = salt_ret
                # For blacklists, the rule being present is the failure case.
                if tag_data['type'] == 'blacklist':
                    passed = not passed

                if passed:
                    ret['Success'].append(tag_data)
                else:
                    ret['Failure'].append(tag_data)

    return ret


def _merge_yaml(ret, data, profile=None):
    '''
    Merge two yaml dicts together at the firewall:blacklist and
    firewall:whitelist level
    '''
    if 'firewall' not in ret:
        ret['firewall'] = {}
    for topkey in ('blacklist', 'whitelist'):
        if topkey in data.get('firewall', {}):
            if topkey not in ret['firewall']:
                ret['firewall'][topkey] = []
            for key, val in data['firewall'][topkey].iteritems():
                if profile and isinstance(val, dict):
                    # Record which profile the audit came from.
                    val['nova_profile'] = profile
                ret['firewall'][topkey].append({key: val})
    return ret


def _get_tags(data):
    '''
    Build the {tag: [formatted_data, ...]} map from the merged yaml.
    '''
    ret = {}
    for toplist, toplevel in data.get('firewall', {}).iteritems():
        for audit_dict in toplevel:
            for audit_id, audit_data in audit_dict.iteritems():
                tags_dict = audit_data.get('data', {})
                # NOTE(review): raises KeyError if an audit omits 'tag';
                # other modules default to '' here.
                tag = tags_dict.pop('tag')
                if tag not in ret:
                    ret[tag] = []
                # Deep copy so later rule mutation in audit() doesn't leak
                # back into the merged yaml data.
                formatted_data = copy.deepcopy(tags_dict)
                formatted_data['type'] = toplist
                formatted_data['tag'] = tag
                formatted_data['module'] = 'firewall'
                formatted_data.update(audit_data)
                formatted_data.pop('data')
                ret[tag].append(formatted_data)
    return ret
17 | 18 | Sample YAML data, with inline comments: 19 | 20 | 21 | grep: 22 | whitelist: # or blacklist 23 | fstab_tmp_partition: # unique ID 24 | data: 25 | CentOS Linux-6: # osfinger grain 26 | - '/etc/fstab': # filename 27 | tag: 'CIS-1.1.1' # audit tag 28 | pattern: '/tmp' # grep pattern 29 | match_output: 'nodev' # string to check for in output of grep command (optional) 30 | match_output_regex: True # whether to use regex when matching output (default: False) 31 | grep_args: # extra args to grep 32 | - '-E' 33 | - '-i' 34 | - '-B2' 35 | match_on_file_missing: True # See (1) below 36 | '*': # wildcard, will be run if no direct osfinger match 37 | - '/etc/fstab': 38 | tag: 'CIS-1.1.1' 39 | pattern: '/tmp' 40 | # The rest of these attributes are optional, and currently not used 41 | description: | 42 | The /tmp directory is intended to be world-writable, which presents a risk 43 | of resource exhaustion if it is not bound to a separate partition. 44 | alert: email 45 | trigger: state 46 | 47 | 48 | (1) If `match_on_file_missing` is ommitted, success/failure will be determined 49 | entirely based on the grep command and other arguments. If it's set to True and 50 | the file is missing, then it will be considered a match (success for whitelist, 51 | failure for blacklist). If it's set to False and the file is missing, then it 52 | will be considered a non-match (success for blacklist, failure for whitelist). 53 | If the file exists, this setting is ignored. 
def __virtual__():
    # grep-based auditing is linux-only.
    if salt.utils.is_windows():
        return False, 'This audit module only runs on linux'
    return True


def audit(data_list, tags, debug=False):
    '''
    Run the grep audits contained in the YAML files processed by __virtual__

    data_list
        List of (profile, data) tuples from the processed yaml profiles.

    tags
        Glob pattern; only audits whose tag matches are executed.

    debug
        When True, log the merged data and tag map.

    Returns a dict of 'Success'/'Failure'/'Controlled' lists.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        log.debug('grep audit __data__:')
        log.debug(__data__)
        log.debug('grep audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audittype = tag_data['type']

                if 'pattern' not in tag_data:
                    # BUG FIX: the message said 'No version found' (copy-paste
                    # from another module) and the original assignment was
                    # 'No pattern found'.format(mod) where 'mod' is undefined,
                    # raising NameError instead of recording the failure.
                    log.error('No pattern found for grep audit {0}, file {1}'
                              .format(tag, name))
                    tag_data = copy.deepcopy(tag_data)
                    tag_data['error'] = 'No pattern found'
                    ret['Failure'].append(tag_data)
                    continue

                grep_args = tag_data.get('grep_args', [])
                if isinstance(grep_args, str):
                    grep_args = [grep_args]

                grep_ret = _grep(name,
                                 tag_data['pattern'],
                                 *grep_args).get('stdout')

                # Any grep output is a match unless match_output narrows it.
                found = False
                if grep_ret:
                    found = True
                if 'match_output' in tag_data:
                    if not tag_data.get('match_output_regex'):
                        if tag_data['match_output'] not in grep_ret:
                            found = False
                    else:  # match with regex (anchored at start of output)
                        if not re.match(tag_data['match_output'], grep_ret):
                            found = False

                # A missing target file overrides the grep result when
                # match_on_file_missing is configured.
                if not os.path.exists(name) and 'match_on_file_missing' in tag_data:
                    if tag_data['match_on_file_missing']:
                        found = True
                    else:
                        found = False

                # Blacklisted pattern (must not be found)
                if audittype == 'blacklist':
                    if found:
                        ret['Failure'].append(tag_data)
                    else:
                        ret['Success'].append(tag_data)

                # Whitelisted pattern (must be found)
                elif audittype == 'whitelist':
                    if found:
                        ret['Success'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)

    return ret


def _merge_yaml(ret, data, profile=None):
    '''
    Merge two yaml dicts together at the grep:blacklist and grep:whitelist level
    '''
    if 'grep' not in ret:
        ret['grep'] = {}
    for topkey in ('blacklist', 'whitelist'):
        if topkey in data.get('grep', {}):
            if topkey not in ret['grep']:
                ret['grep'][topkey] = []
            for key, val in data['grep'][topkey].iteritems():
                if profile and isinstance(val, dict):
                    # Record which profile the audit came from.
                    val['nova_profile'] = profile
                ret['grep'][topkey].append({key: val})
    return ret


def _get_tags(data):
    '''
    Retrieve all the tags for this distro from the yaml

    Matches the minion's 'osfinger' grain against the (comma-separated,
    glob-capable) osfinger keys under each audit's 'data', falling back to
    the '*' entry. Returns {tag: [formatted_data, ...]}.
    '''
    ret = {}
    distro = __grains__.get('osfinger')
    for toplist, toplevel in data.get('grep', {}).iteritems():
        # grep:blacklist
        for audit_dict in toplevel:
            # grep:blacklist:0
            for audit_id, audit_data in audit_dict.iteritems():
                # grep:blacklist:0:telnet
                tags_dict = audit_data.get('data', {})
                # grep:blacklist:0:telnet:data
                tags = None
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    osfinger_list = [finger.strip() for finger in osfinger.split(',')]
                    for osfinger_glob in osfinger_list:
                        if fnmatch.fnmatch(distro, osfinger_glob):
                            tags = tags_dict.get(osfinger)
                            break
                    if tags is not None:
                        break
                # If we didn't find a match, check for a '*'
                if tags is None:
                    tags = tags_dict.get('*', [])
                # grep:blacklist:0:telnet:data:Debian-8
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tmp = []
                    for name, tag in tags.iteritems():
                        tmp.append({name: tag})
                    tags = tmp
                for item in tags:
                    for name, tag in item.iteritems():
                        tag_data = {}
                        # Whitelist could have a dictionary, not a string
                        if isinstance(tag, dict):
                            tag_data = copy.deepcopy(tag)
                            tag = tag_data.pop('tag')
                        if tag not in ret:
                            ret[tag] = []
                        formatted_data = {'name': name,
                                          'tag': tag,
                                          'module': 'grep',
                                          'type': toplist}
                        formatted_data.update(tag_data)
                        formatted_data.update(audit_data)
                        formatted_data.pop('data')
                        ret[tag].append(formatted_data)
    return ret


def _grep(path,
          pattern,
          *args):
    '''
    Grep for a string in the specified file

    path
        Path to the file to be searched (globbing supported; quote the
        path so the shell does not expand it).

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    args
        Additional command-line flags to pass to the grep command, e.g.
        ``-v`` or ``-i``.

    Returns the ``cmd.run_all`` result dict.
    '''
    path = os.path.expanduser(path)

    if args:
        options = ' '.join(args)
    else:
        options = ''
    cmd = (
        r'''grep {options} {pattern} {path}'''
        .format(
            options=options,
            pattern=pattern,
            path=path,
        )
    )

    try:
        ret = __salt__['cmd.run_all'](cmd, python_shell=False, ignore_retcode=True)
    except (IOError, OSError) as exc:
        # BUG FIX: CommandExecutionError was never imported at module level,
        # so this raise previously failed with NameError. Import locally
        # from the salt package the file already depends on.
        from salt.exceptions import CommandExecutionError
        raise CommandExecutionError(exc.strerror)

    return ret
def audit(data_list, tags, debug=False):
    '''
    Run the misc audits contained in the data_list

    data_list
        List of (profile, yaml_data) tuples to merge and audit
    tags
        Glob pattern restricting which audit tags are executed
    debug
        If True, log the merged data and tag map at debug level

    Returns a dict with 'Success', 'Failure' and 'Controlled' lists, plus
    an 'Error' list when a profile references an unknown function.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        log.debug('misc audit __data__:')
        log.debug(__data__)
        log.debug('misc audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}

    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                if 'function' not in tag_data:
                    continue

                function = FUNCTION_MAP.get(tag_data['function'])
                if not function:
                    if 'Error' not in ret:
                        ret['Error'] = []
                    ret['Error'].append({tag: 'No function {0} found'
                                             .format(tag_data['function'])})
                    # BUGFIX: skip this check.  Previously execution fell
                    # through and called ``None(*args)``, raising TypeError.
                    continue
                args = tag_data.get('args', [])
                kwargs = tag_data.get('kwargs', {})

                # Call the function
                result = function(*args, **kwargs)

                if result is True:
                    ret['Success'].append(tag_data)
                elif isinstance(result, six.string_types):
                    # A string return value is a failure with a reason
                    tag_data['failure_reason'] = result
                    ret['Failure'].append(tag_data)
                else:
                    ret['Failure'].append(tag_data)

    return ret
ret[tag] = [] 157 | formatted_data = {'tag': tag, 158 | 'module': 'misc'} 159 | formatted_data.update(audit_data) 160 | formatted_data.update(tags) 161 | formatted_data.pop('data') 162 | ret[tag].append(formatted_data) 163 | return ret 164 | 165 | 166 | ############################ 167 | # Begin function definitions 168 | ############################ 169 | 170 | 171 | def test_success(): 172 | ''' 173 | Automatically returns success 174 | ''' 175 | return True 176 | 177 | 178 | def test_failure(): 179 | ''' 180 | Automatically returns failure, no reason 181 | ''' 182 | return False 183 | 184 | 185 | def test_failure_reason(reason): 186 | ''' 187 | Automatically returns failure, with a reason (first arg) 188 | ''' 189 | return reason 190 | 191 | 192 | FUNCTION_MAP = { 193 | 'test_success': test_success, 194 | 'test_failure': test_failure, 195 | 'test_failure_reason': test_failure_reason, 196 | } 197 | -------------------------------------------------------------------------------- /hubblestack_nova/netstat.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | HubbleStack Nova module for auditing open ports. 4 | 5 | :maintainer: HubbleStack / basepi 6 | :maturity: 2016.7.0 7 | :platform: Unix 8 | :requires: SaltStack 9 | 10 | Sample data for the netstat whitelist: 11 | 12 | .. 
def audit(data_list, tags, debug=True):
    '''
    Run ``network.netstat`` and compare every listening socket against the
    whitelisted addresses collected from the yaml profiles.

    Sockets matching a whitelist glob go to 'Success'; everything else is
    reported as 'Failure' tagged with its local address and program name.
    '''
    ret = {'Success': [], 'Failure': []}

    __tags__ = {}
    for profile, data in data_list:
        if 'netstat' in data:
            for check, check_args in data['netstat'].iteritems():
                if 'address' in check_args:
                    tag_args = copy.deepcopy(check_args)
                    tag_args['id'] = check
                    tag_args['nova_profile'] = profile
                    if isinstance(check_args['address'], list):
                        for address in check_args['address']:
                            __tags__[address] = tag_args
                    else:
                        __tags__[check_args['address']] = tag_args

    if not __tags__:
        # No yaml data found, don't do any work
        return ret

    for address_data in __salt__['network.netstat']():

        success = False
        for whitelisted_address in __tags__:
            if fnmatch.fnmatch(address_data['local-address'], whitelisted_address):
                # BUGFIX: tag with the matched whitelist address.  The old
                # code used ['address'][0], which sliced off the first
                # *character* whenever the yaml address was a plain string
                # instead of a list.
                address_data.update({
                    'tag': whitelisted_address,
                    'description': __tags__[whitelisted_address]['id'],
                    'nova_profile': __tags__[whitelisted_address]['nova_profile']
                })
                ret['Success'].append(address_data)
                success = True
                break
        if success is False:
            address_data.update({
                'tag': address_data['local-address'],
                'description': address_data['program'],
                'nova_profile': 'netstat'
            })
            ret['Failure'].append(address_data)

    return ret
-------------------------------------------------------------------------------- /hubblestack_nova/openssl.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | HubbleStack Nova module for auditing SSL certificates. 4 | 5 | :maintainer: HubbleStack / avb76 6 | :maturity: 2016.7.0 7 | :platform: Linux 8 | :requires: SaltStack, python-OpenSSL 9 | 10 | This audit module requires YAML data to execute. It will search the yaml data 11 | received for the topkey 'openssl'. 12 | 13 | Sample YAML data, with in line comments: 14 | 15 | openssl: 16 | google: 17 | data: 18 | tag: 'CERT-001' # required 19 | endpoint: 'www.google.com' # required only if file is not defined 20 | file: null # required only if endpoint is not defined 21 | port: 443 # optional 22 | not_after: 15 # optional 23 | not_before: 2 # optional 24 | fail_if_not_before: False # optional 25 | description: 'google certificate' 26 | 27 | Some words about the elements in the data dictionary: 28 | - tag: this is the tag of the check 29 | - endpoint: 30 | - the ssl endpoint to check 31 | - the module will download the SSL certificate of the endpoint 32 | - endpoint is required only if file is not defined (read bellow) 33 | file: 34 | - the path to the pem file containing the SSL certificate to be checked 35 | - the path is relative to the minion 36 | - the module will try to read the certificate from this file 37 | - if no certificate can be loaded by the OpenSSL library, the check will be failed 38 | - file is required only if endpoint is not defined (read more about this bellow) 39 | port: 40 | - the port is required only if both: 41 | - the endpoint is defined 42 | - https is configured on another port the 443 on the endpoint 43 | - WARNING: if the port is not the on configured for https on the endpoint, downloading the certificate from 44 | the endpoint will timeout and the check will be failed 45 | - if endpoint is defined but the port is 
not, the module will try, by default, to use port 443 46 | not_after: 47 | - the minimum number of days left until the certificate should expire 48 | - if the certificate will expire in less then the value given here, the check will fail 49 | - if not_after is missing, the default value is 0; basically, the if the expiration date is in the future, the 50 | check will be passed 51 | not_before: 52 | - the expected number of days until the certificate becomes valid 53 | - this is useful only if you expect the certificate to be valid after a certain date 54 | - if missing, 0 is the default value (read more bellow) 55 | fail_if_not_before: 56 | - if True, the check will fail only if not_before is 0 (or missing): if the certificate is not valid yet, but 57 | it is expected to be 58 | - the default value is False - the check will fail only if the certificate expiration date is valid 59 | 60 | Some notes: 61 | - if BOTH file and endpoint are present / missing, the check will fail; only one certificate has to be present for 62 | each check 63 | - the YAML supports also the control key, just as the other modules do 64 | 65 | Known issues: for unknown reasons (yet), the module can fail downloading the certificate from certain endpoints. When 66 | this happens, the check will be failed. 
def audit(data_list, tags, debug=True):
    '''
    Audit SSL certificates (downloaded from an endpoint or read from a pem
    file) against the not_before / not_after constraints from the merged
    yaml profiles.  Exactly one of 'endpoint' / 'file' must be set per check.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        # BUGFIX: these debug labels previously said 'service audit'
        # (copy-pasted from service.py), which made debug output misleading.
        log.debug('openssl audit __data__:')
        log.debug(__data__)
        log.debug('openssl audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue

                endpoint = tag_data.get('endpoint', None)
                pem_file = tag_data.get('file', None)
                not_after = tag_data.get('not_after', 0)
                not_before = tag_data.get('not_before', 0)
                port = tag_data.get('port', 443)
                fail_if_not_before = tag_data.get('fail_if_not_before', False)

                # Exactly one certificate source must be configured
                if not endpoint and not pem_file:
                    failing_reason = 'No certificate to be checked'
                    tag_data['reason'] = failing_reason
                    ret['Failure'].append(tag_data)
                    continue

                if endpoint and pem_file:
                    failing_reason = 'Only one certificate per check is allowed'
                    tag_data['reason'] = failing_reason
                    ret['Failure'].append(tag_data)
                    continue

                cert = _get_cert(endpoint, port) if endpoint else _get_cert(pem_file, from_file=True)
                # _load_x509 returns None on a bad/missing cert; _check_x509
                # turns that into a failure with a reason.
                x509 = _load_x509(cert)
                (passed, failing_reason) = _check_x509(x509=x509,
                                                       not_before=not_before,
                                                       not_after=not_after,
                                                       fail_if_not_before=fail_if_not_before)

                if passed:
                    ret['Success'].append(tag_data)
                else:
                    tag_data['reason'] = failing_reason
                    ret['Failure'].append(tag_data)

    return ret
log.info( 202 | 'The certificate is not yet valid ({0} days left until it will be valid)'.format(stats['not_before'])) 203 | return (False, 204 | 'The certificate is not yet valid ({0} days left until it will be valid)'.format( 205 | stats['not_before']) 206 | ) 207 | log.info('The certificate will be valid in more then {0} days'.format(not_before)) 208 | return (False, 'The certificate will be valid in more then {0} days'.format(not_before)) 209 | 210 | return (True, '') 211 | 212 | 213 | def _load_x509(cert): 214 | if not cert: 215 | log.error('No certificate to be loaded into x509 object') 216 | return None 217 | try: 218 | x509 = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert) 219 | except OpenSSL.crypto.Error: 220 | log.error('Unable to load certificate into x509 object') 221 | x509 = None 222 | 223 | return x509 224 | 225 | 226 | def _get_cert(source, port=443, from_file=False): 227 | cert = _get_cert_from_file(source) if from_file else _get_cert_from_endpoint(source, port) 228 | return cert 229 | 230 | 231 | def _get_cert_from_endpoint(server, port=443): 232 | try: 233 | cert = ssl.get_server_certificate((server, port)) 234 | except Exception: 235 | log.error('Unable to retrieve certificate from {0}'.format(server)) 236 | cert = None 237 | if not cert: 238 | return None 239 | 240 | return cert 241 | 242 | 243 | def _get_cert_from_file(cert_file_path): 244 | try: 245 | with open(cert_file_path) as cert_file: 246 | cert = cert_file.read() 247 | except IOError: 248 | log.error('File not found: {0}'.format(cert_file_path)) 249 | return None 250 | 251 | return cert 252 | 253 | 254 | def _get_x509_days_left(x509): 255 | date_fmt = '%Y%m%d%H%M%SZ' 256 | current_datetime = datetime.datetime.utcnow() 257 | not_after = time.strptime(x509.get_notAfter(), date_fmt) 258 | not_before = time.strptime(x509.get_notBefore(), date_fmt) 259 | 260 | ret = {'not_after': (datetime.datetime(*not_after[:6]) - current_datetime).days, 261 | 'not_before': 
(datetime.datetime(*not_before[:6]) - current_datetime).days} 262 | 263 | return ret 264 | -------------------------------------------------------------------------------- /hubblestack_nova/pkg.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | HubbleStack Nova module for auditing installed packages. 4 | 5 | Supports both blacklisting and whitelisting pacakges. Blacklisted packages 6 | must not be installed. Whitelisted packages must be installed, with options for 7 | requiring a specific version or a minimum or maximum version. 8 | 9 | :maintainer: HubbleStack / basepi 10 | :maturity: 2016.7.0 11 | :platform: All 12 | :requires: SaltStack 13 | 14 | Sample YAML data, with inline comments: 15 | 16 | pkg: 17 | # Must not be installed 18 | blacklist: 19 | # Unique ID for this set of audits 20 | telnet: 21 | data: 22 | # 'osfinger' grain, for multiplatform support 23 | CentOS Linux-6: 24 | # pkg name : tag 25 | - 'telnet': 'CIS-2.1.1' 26 | # Catch-all, if no osfinger match was found 27 | '*': 28 | # pkg name : tag 29 | - 'telnet': 'telnet-bad' 30 | # description/alert/trigger are currently ignored, but may be used in the future 31 | description: 'Telnet is evil' 32 | alert: email 33 | trigger: state 34 | # Must be installed, no version checking (yet) 35 | whitelist: 36 | rsh: 37 | data: 38 | CentOS Linux-6: 39 | # Use dict format to define specific version 40 | - 'rsh': 41 | tag: 'CIS-2.1.3' 42 | version: '4.3.2' 43 | # Dict format can also define ranges (only >= and <= supported) 44 | - 'rsh-client': 45 | tag: 'CIS-2.1.3' 46 | version: '>=4.3.2' 47 | # String format says "package must be installed, at any version" 48 | - 'rsh-server': 'CIS-2.1.4' 49 | CentOS Linux-7: 50 | - 'rsh': 'CIS-2.1.3' 51 | - 'rsh-server': 'CIS-2.1.4' 52 | '*': 53 | - 'rsh-client': 'CIS-5.1.2' 54 | - 'rsh-redone-client': 'CIS-5.1.2' 55 | - 'rsh-server': 'CIS-5.1.3' 56 | - 'rsh-redone-server': 'CIS-5.1.3' 57 | 
def audit(data_list, tags, debug=False):
    '''
    Run the pkg audits contained in the YAML files processed by __virtual__

    Blacklisted packages must not be installed; whitelisted packages must be
    installed, optionally at an exact version or within a >=/<= bound.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        log.debug('pkg audit __data__:')
        log.debug(__data__)
        log.debug('pkg audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audittype = tag_data['type']
                # Look up the installed version once; pkg.version returns an
                # empty string when the package is not installed.
                installed = __salt__['pkg.version'](name)

                # Blacklisted packages (must not be installed)
                if audittype == 'blacklist':
                    if installed:
                        ret['Failure'].append(tag_data)
                    else:
                        ret['Success'].append(tag_data)

                # Whitelisted packages (must be installed)
                elif audittype == 'whitelist':
                    if 'version' in tag_data:
                        # '>=1.2' -> ('>', '=', '1.2'); bare '1.2' has no '='
                        mod, _, version = tag_data['version'].partition('=')
                        if not version:
                            version = mod
                            mod = ''

                        if not installed:
                            # BUGFIX: an uninstalled package used to compare
                            # LooseVersion('') against the bound, which made
                            # '<=' checks pass for missing packages.
                            ret['Failure'].append(tag_data)

                        elif mod == '<':
                            if (LooseVersion(installed) <=
                                    LooseVersion(version)):
                                ret['Success'].append(tag_data)
                            else:
                                ret['Failure'].append(tag_data)

                        elif mod == '>':
                            if (LooseVersion(installed) >=
                                    LooseVersion(version)):
                                ret['Success'].append(tag_data)
                            else:
                                ret['Failure'].append(tag_data)

                        elif not mod:
                            # Just peg to the version, no > or <
                            if installed == version:
                                ret['Success'].append(tag_data)
                            else:
                                ret['Failure'].append(tag_data)

                        else:
                            # Invalid modifier
                            log.error('Invalid modifier in version {0} for pkg {1} audit {2}'
                                      .format(tag_data['version'], name, tag))
                            tag_data = copy.deepcopy(tag_data)
                            # Include an error in the failure
                            tag_data['error'] = 'Invalid modifier {0}'.format(mod)
                            ret['Failure'].append(tag_data)

                    else:  # No version checking
                        if installed:
                            ret['Success'].append(tag_data)
                        else:
                            ret['Failure'].append(tag_data)

    return ret
def audit(data_list, tags, debug=False):
    '''
    Run the pkg.audit command

    Activates when any profile in data_list contains a top-level
    'pkgng_audit' key.  The raw ``pkg.audit`` output is reported as a
    Failure unless it contains '0 problem(s)'.
    '''
    ret = {'Success': [], 'Failure': []}

    __tags__ = []
    profile = None
    for profile, data in data_list:
        if 'pkgng_audit' in data:
            __tags__ = ['pkgng_audit']
            break

    if debug:
        log.debug('pkgng audit __tags__:')
        log.debug(__tags__)

    if not __tags__:
        # No yaml data found, don't do any work
        return ret

    salt_ret = __salt__['pkg.audit']()
    # BUGFIX: the result key was misspelled 'pkng_audit' (KeyError) and the
    # old code referenced an undefined name 'verbose' (NameError), so this
    # function crashed on every real run.
    results = {'pkgng_audit': {'result': salt_ret,
                               'nova_profile': profile}}
    if '0 problem(s)' not in salt_ret:
        ret['Failure'].append(results)
    else:
        ret['Success'].append(results)

    return ret
def audit(data_list, tags, debug=False):
    '''
    Run the service audits contained in the YAML files processed by __virtual__

    Blacklisted services must not be running; whitelisted services must be
    running.  Returns Success/Failure/Controlled buckets of tag data.
    '''
    __data__ = {}
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)

    if debug:
        log.debug('service audit __data__:')
        log.debug(__data__)
        log.debug('service audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if not fnmatch.fnmatch(tag, tags):
            continue
        for tag_data in __tags__[tag]:
            if 'control' in tag_data:
                ret['Controlled'].append(tag_data)
                continue
            # service.status is truthy when the service is running
            running = __salt__['service.status'](tag_data['name'])
            check_type = tag_data['type']

            if check_type == 'blacklist':
                # Blacklisted services must not be running
                bucket = 'Failure' if running else 'Success'
                ret[bucket].append(tag_data)
            elif check_type == 'whitelist':
                # Whitelisted services must be running
                bucket = 'Success' if running else 'Failure'
                ret[bucket].append(tag_data)

    return ret
tags = tags_dict.get(osfinger) 156 | break 157 | if tags is not None: 158 | break 159 | # If we didn't find a match, check for a '*' 160 | if tags is None: 161 | tags = tags_dict.get('*', []) 162 | # service:blacklist:0:telnet:data:Debian-8 163 | if isinstance(tags, dict): 164 | # malformed yaml, convert to list of dicts 165 | tmp = [] 166 | for name, tag in tags.iteritems(): 167 | tmp.append({name: tag}) 168 | tags = tmp 169 | for item in tags: 170 | for name, tag in item.iteritems(): 171 | if tag not in ret: 172 | ret[tag] = [] 173 | formatted_data = {'name': name, 174 | 'tag': tag, 175 | 'module': 'service', 176 | 'type': toplist} 177 | formatted_data.update(audit_data) 178 | formatted_data.pop('data') 179 | ret[tag].append(formatted_data) 180 | return ret 181 | -------------------------------------------------------------------------------- /hubblestack_nova/stat.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | HubbleStack Nova module for using stat to verify ownership & permissions. 4 | 5 | :maintainer: HubbleStack / avb76 6 | :maturity: 2016.7.0 7 | :platform: Linux 8 | :requires: SaltStack 9 | 10 | This audit module requires yaml data to execute. It will search the local 11 | directory for any .yaml files, and if it finds a top-level 'stat' key, it will 12 | use that data. 
13 | 14 | Sample YAML data, with inline comments: 15 | 16 | 17 | stat: 18 | grub_conf_own: # unique ID 19 | data: 20 | 'CentOS-6': # osfinger grain 21 | - '/etc/grub.conf': # filename 22 | tag: 'CIS-1.5.1' #audit tag 23 | user: 'root' #expected owner 24 | uid: 0 #expected uid owner 25 | group: 'root' #expected group owner 26 | gid: 0 #expected gid owner 27 | 'CentOS Linux-7': 28 | - '/etc/grub2/grub.cfg': 29 | tag: 'CIS-1.5.1' 30 | user: 'root' 31 | uid: 0 32 | group: 'root' 33 | gid: 0 34 | # The rest of these attributes are optional, and currently not used 35 | description: 'Grub must be owned by root' 36 | alert: email 37 | trigger: state 38 | ''' 39 | 40 | from __future__ import absolute_import 41 | import logging 42 | 43 | import fnmatch 44 | import yaml 45 | import os 46 | import copy 47 | import salt.utils 48 | 49 | from distutils.version import LooseVersion 50 | 51 | log = logging.getLogger(__name__) 52 | 53 | 54 | def __virtual__(): 55 | if salt.utils.is_windows(): 56 | return False, 'This audit module only runs on linux' 57 | return True 58 | 59 | 60 | def audit(data_list, tags, debug=False): 61 | ''' 62 | Run the stat audits contained in the YAML files processed by __virtual__ 63 | ''' 64 | __data__ = {} 65 | for profile, data in data_list: 66 | _merge_yaml(__data__, data, profile) 67 | __tags__ = _get_tags(__data__) 68 | 69 | if debug: 70 | log.debug('service audit __data__:') 71 | log.debug(__data__) 72 | log.debug('service audit __tags__:') 73 | log.debug(__tags__) 74 | 75 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 76 | 77 | for tag in __tags__: 78 | if fnmatch.fnmatch(tag, tags): 79 | for tag_data in __tags__[tag]: 80 | if 'control' in tag_data: 81 | ret['Controlled'].append(tag_data) 82 | continue 83 | name = tag_data['name'] 84 | expected = {} 85 | for e in ['mode', 'user', 'uid', 'group', 'gid']: 86 | if e in tag_data: 87 | expected[e] = tag_data[e] 88 | 89 | #getting the stats using salt 90 | salt_ret = __salt__['file.stats'](name) 
91 | if not salt_ret: 92 | if None in expected.values(): 93 | ret['Success'].append(tag_data) 94 | else: 95 | ret['Failure'].append(tag_data) 96 | continue 97 | 98 | passed = True 99 | reason_dict = {} 100 | for e in expected.keys(): 101 | r = salt_ret[e] 102 | if e == 'mode' and r != '0': 103 | r = r[1:] 104 | if str(expected[e]) != str(r): 105 | passed = False 106 | reason = { 'expected': str(expected[e]), 107 | 'current': str(r) } 108 | reason_dict[e] = reason 109 | 110 | if reason_dict: 111 | tag_data['reason'] = reason_dict 112 | 113 | if passed: 114 | ret['Success'].append(tag_data) 115 | else: 116 | ret['Failure'].append(tag_data) 117 | 118 | return ret 119 | 120 | 121 | 122 | def _merge_yaml(ret, data, profile=None): 123 | ''' 124 | Merge two yaml dicts together 125 | ''' 126 | if 'stat' not in ret: 127 | ret['stat'] = [] 128 | for key, val in data.get('stat', {}).iteritems(): 129 | if profile and isinstance(val, dict): 130 | val['nova_profile'] = profile 131 | ret['stat'].append({key: val}) 132 | return ret 133 | 134 | 135 | def _get_tags(data): 136 | ''' 137 | Retrieve all the tags for this distro from the yaml 138 | ''' 139 | ret = {} 140 | distro = __grains__.get('osfinger') 141 | for audit_dict in data.get('stat', []): 142 | for audit_id, audit_data in audit_dict.iteritems(): 143 | tags_dict = audit_data.get('data', {}) 144 | tags = None 145 | for osfinger in tags_dict: 146 | if osfinger == '*': 147 | continue 148 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 149 | for osfinger_glob in osfinger_list: 150 | if fnmatch.fnmatch(distro, osfinger_glob): 151 | tags = tags_dict.get(osfinger) 152 | break 153 | if tags is not None: 154 | break 155 | # If we didn't find a match, check for a '*' 156 | if tags is None: 157 | tags = tags_dict.get('*', []) 158 | if isinstance(tags, dict): 159 | # malformed yaml, convert to list of dicts 160 | tmp = [] 161 | for name, tag in tags.iteritems(): 162 | tmp.append({name: tag}) 163 | tags = tmp 164 | 
for item in tags: 165 | for name, tag in item.iteritems(): 166 | if isinstance(tag, dict): 167 | tag_data = copy.deepcopy(tag) 168 | tag = tag_data.pop('tag') 169 | if tag not in ret: 170 | ret[tag] = [] 171 | formatted_data = {'name': name, 172 | 'tag': tag, 173 | 'module': 'stat'} 174 | formatted_data.update(tag_data) 175 | formatted_data.update(audit_data) 176 | formatted_data.pop('data') 177 | ret[tag].append(formatted_data) 178 | return ret 179 | -------------------------------------------------------------------------------- /hubblestack_nova/sysctl.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | HubbleStack Nova module for using sysctl to verify sysctl parameter. 4 | 5 | :maintainer: HubbleStack / avb76 6 | :maturity: 2016.7.0 7 | :platform: Linux 8 | :requires: SaltStack 9 | 10 | This audit module requires yaml data to execute. It will search the local 11 | directory for any .yaml files, and if it finds a top-level 'sysctl' key, it will 12 | use that data. 
13 | 14 | Sample YAML data, with inline comments: 15 | 16 | sysctl: 17 | randomize_va_space: # unique ID 18 | data: 19 | 'CentOS-6': #osfinger grain 20 | - 'kernel.randomize_va_space': #sysctl param to check 21 | tag: 'CIS-1.6.3' #audit tag 22 | match_output: '2' #expected value of the checked parameter 23 | 'CentOS-7': 24 | - 'kernel.randomize_va_space': 25 | tag: 'CIS-1.6.2' 26 | match_output: '2' 27 | description: 'Enable Randomized Virtual Memory Region Placement (Scored)' 28 | alert: email 29 | trigger: state 30 | ''' 31 | 32 | from __future__ import absolute_import 33 | import logging 34 | 35 | import fnmatch 36 | import yaml 37 | import os 38 | import copy 39 | import salt.utils 40 | 41 | from distutils.version import LooseVersion 42 | 43 | log = logging.getLogger(__name__) 44 | 45 | 46 | def __virtual__(): 47 | if salt.utils.is_windows(): 48 | return False, 'This audit module only runs on linux' 49 | return True 50 | 51 | 52 | def audit(data_list, tags, debug=False): 53 | ''' 54 | Run the sysctl audits contained in the YAML files processed by __virtual__ 55 | ''' 56 | __data__ = {} 57 | for profile, data in data_list: 58 | _merge_yaml(__data__, data, profile) 59 | __tags__ = _get_tags(__data__) 60 | 61 | if debug: 62 | log.debug('service audit __data__:') 63 | log.debug(__data__) 64 | log.debug('service audit __tags__:') 65 | log.debug(__tags__) 66 | 67 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 68 | 69 | for tag in __tags__: 70 | if fnmatch.fnmatch(tag, tags): 71 | passed = True 72 | for tag_data in __tags__[tag]: 73 | if 'control' in tag_data: 74 | ret['Controlled'].append(tag_data) 75 | continue 76 | name = tag_data['name'] 77 | match_output = tag_data['match_output'] 78 | 79 | salt_ret = __salt__['sysctl.get'](name) 80 | if not salt_ret: 81 | passed = False 82 | if str(salt_ret).startswith('error'): 83 | passed = False 84 | if str(salt_ret) != str(match_output): 85 | passed = False 86 | if passed: 87 | ret['Success'].append(tag_data) 88 | 
else: 89 | ret['Failure'].append(tag_data) 90 | 91 | return ret 92 | 93 | 94 | def _merge_yaml(ret, data, profile=None): 95 | ''' 96 | Merge two yaml dicts together 97 | ''' 98 | if 'sysctl' not in ret: 99 | ret['sysctl'] = [] 100 | for key, val in data.get('sysctl', {}).iteritems(): 101 | if profile and isinstance(val, dict): 102 | val['nova_profile'] = profile 103 | ret['sysctl'].append({key: val}) 104 | return ret 105 | 106 | 107 | def _get_tags(data): 108 | ''' 109 | Retrieve all the tags for this distro from the yaml 110 | ''' 111 | ret = {} 112 | distro = __grains__.get('osfinger') 113 | for audit_dict in data.get('sysctl', []): 114 | for audit_id, audit_data in audit_dict.iteritems(): 115 | tags_dict = audit_data.get('data', {}) 116 | tags = None 117 | for osfinger in tags_dict: 118 | if osfinger == '*': 119 | continue 120 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 121 | for osfinger_glob in osfinger_list: 122 | if fnmatch.fnmatch(distro, osfinger_glob): 123 | tags = tags_dict.get(osfinger) 124 | break 125 | if tags is not None: 126 | break 127 | # If we didn't find a match, check for a '*' 128 | if tags is None: 129 | tags = tags_dict.get('*', []) 130 | if isinstance(tags, dict): 131 | # malformed yaml, convert to list of dicts 132 | tmp = [] 133 | for name, tag in tags.iteritems(): 134 | tmp.append({name: tag}) 135 | tags = tmp 136 | for item in tags: 137 | for name, tag in item.iteritems(): 138 | if isinstance(tag, dict): 139 | tag_data = copy.deepcopy(tag) 140 | tag = tag_data.pop('tag') 141 | if tag not in ret: 142 | ret[tag] = [] 143 | formatted_data = {'name': name, 144 | 'tag': tag, 145 | 'module': 'sysctl'} 146 | formatted_data.update(tag_data) 147 | formatted_data.update(audit_data) 148 | formatted_data.pop('data') 149 | ret[tag].append(formatted_data) 150 | return ret 151 | -------------------------------------------------------------------------------- /hubblestack_nova/win_auditpol.py: 
-------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | 4 | :maintainer: HubbleStack / madchills 5 | :maturity: 2016.7.0 6 | :platform: Windows 7 | :requires: SaltStack 8 | 9 | ''' 10 | 11 | from __future__ import absolute_import 12 | import copy 13 | import csv 14 | import fnmatch 15 | import logging 16 | import salt.utils 17 | 18 | 19 | log = logging.getLogger(__name__) 20 | __virtualname__ = 'win_auditpol' 21 | 22 | def __virtual__(): 23 | if not salt.utils.is_windows(): 24 | return False, 'This audit module only runs on windows' 25 | return True 26 | 27 | 28 | def audit(data_list, tags, debug=False): 29 | ''' 30 | Runs auditpol on the local machine and audits the return data 31 | with the CIS yaml processed by __virtual__ 32 | ''' 33 | __data__ = {} 34 | __auditdata__ = _auditpol_import() 35 | for profile, data in data_list: 36 | _merge_yaml(__data__, data, profile) 37 | __tags__ = _get_tags(__data__) 38 | if debug: 39 | log.debug('auditpol audit __data__:') 40 | log.debug(__data__) 41 | log.debug('auditpol audit __tags__:') 42 | log.debug(__tags__) 43 | 44 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 45 | for tag in __tags__: 46 | if fnmatch.fnmatch(tag, tags): 47 | for tag_data in __tags__[tag]: 48 | if 'control' in tag_data: 49 | ret['Controlled'].append(tag_data) 50 | continue 51 | name = tag_data['name'] 52 | audit_type = tag_data['type'] 53 | match_output = tag_data['match_output'].lower() 54 | 55 | # Blacklisted audit (do not include) 56 | if 'blacklist' in audit_type: 57 | if name not in __auditdata__: 58 | ret['Success'].append(tag_data) 59 | else: 60 | ret['Failure'].append(tag_data) 61 | 62 | # Whitelisted audit (must include) 63 | if 'whitelist' in audit_type: 64 | if name in __auditdata__: 65 | audit_value = __auditdata__[name].lower() 66 | secret = _translate_value_type(audit_value, tag_data['value_type'], match_output) 67 | if secret: 68 | 
ret['Success'].append(tag_data) 69 | else: 70 | ret['Failure'].append(tag_data) 71 | else: 72 | log.debug('When trying to audit the advanced auditpol section,' 73 | ' the yaml contained incorrect data for the key') 74 | 75 | return ret 76 | 77 | 78 | def _merge_yaml(ret, data, profile=None): 79 | ''' 80 | Merge two yaml dicts together at the secedit:blacklist and 81 | secedit:whitelist level 82 | ''' 83 | if __virtualname__ not in ret: 84 | ret[__virtualname__] = {} 85 | for topkey in ('blacklist', 'whitelist'): 86 | if topkey in data.get(__virtualname__, {}): 87 | if topkey not in ret[__virtualname__]: 88 | ret[__virtualname__][topkey] = [] 89 | for key, val in data[__virtualname__][topkey].iteritems(): 90 | if profile and isinstance(val, dict): 91 | val['nova_profile'] = profile 92 | ret[__virtualname__][topkey].append({key: val}) 93 | return ret 94 | 95 | 96 | def _get_tags(data): 97 | ''' 98 | Retrieve all the tags for this distro from the yaml 99 | ''' 100 | ret = {} 101 | distro = __grains__.get('osfullname') 102 | for toplist, toplevel in data.get(__virtualname__, {}).iteritems(): 103 | # secedit:whitelist 104 | for audit_dict in toplevel: 105 | for audit_id, audit_data in audit_dict.iteritems(): 106 | # secedit:whitelist:PasswordComplexity 107 | tags_dict = audit_data.get('data', {}) 108 | # secedit:whitelist:PasswordComplexity:data 109 | tags = None 110 | for osfinger in tags_dict: 111 | if osfinger == '*': 112 | continue 113 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 114 | for osfinger_glob in osfinger_list: 115 | if fnmatch.fnmatch(distro, osfinger_glob): 116 | tags = tags_dict.get(osfinger) 117 | break 118 | if tags is not None: 119 | break 120 | # If we didn't find a match, check for a '*' 121 | if tags is None: 122 | tags = tags_dict.get('*', []) 123 | # secedit:whitelist:PasswordComplexity:data:Windows 2012 124 | if isinstance(tags, dict): 125 | # malformed yaml, convert to list of dicts 126 | tmp = [] 127 | for name, tag in 
tags.iteritems(): 128 | tmp.append({name: tag}) 129 | tags = tmp 130 | for item in tags: 131 | for name, tag in item.iteritems(): 132 | tag_data = {} 133 | # Whitelist could have a dictionary, not a string 134 | if isinstance(tag, dict): 135 | tag_data = copy.deepcopy(tag) 136 | tag = tag_data.pop('tag') 137 | if tag not in ret: 138 | ret[tag] = [] 139 | formatted_data = {'name': name, 140 | 'tag': tag, 141 | 'module': 'win_auditpol', 142 | 'type': toplist} 143 | formatted_data.update(tag_data) 144 | formatted_data.update(audit_data) 145 | formatted_data.pop('data') 146 | ret[tag].append(formatted_data) 147 | return ret 148 | 149 | 150 | def _auditpol_export(): 151 | try: 152 | dump = __salt__['cmd.run']('auditpol /get /category:* /r') 153 | if dump: 154 | dump = dump.split('\n') 155 | return dump 156 | else: 157 | log.error('Nothing was returned from the auditpol command.') 158 | except StandardError: 159 | log.error('An error occurred running the auditpol command.') 160 | 161 | 162 | def _auditpol_import(): 163 | dict_return = {} 164 | export = _auditpol_export() 165 | auditpol_csv = csv.DictReader(export) 166 | for row in auditpol_csv: 167 | if row: 168 | dict_return[row['Subcategory']] = row['Inclusion Setting'] 169 | return dict_return 170 | 171 | 172 | def _translate_value_type(current, value, evaluator): 173 | if 'equal' in value: 174 | if current == evaluator: 175 | return True 176 | else: 177 | return False 178 | -------------------------------------------------------------------------------- /hubblestack_nova/win_firewall.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | HubbleStack Nova Windows Firewall module 4 | 5 | :maintainer: HubbleStack / madchills 6 | :maturity: 2016.7.0 7 | :platform: Windows 8 | :requires: SaltStack 9 | 10 | ''' 11 | 12 | from __future__ import absolute_import 13 | import copy 14 | import fnmatch 15 | import logging 16 | import salt.utils 17 | 18 | 19 | 
log = logging.getLogger(__name__) 20 | __virtualname__ = 'win_firewall' 21 | 22 | def __virtual__(): 23 | if not salt.utils.is_windows(): 24 | return False, 'This audit module only runs on windows' 25 | return True 26 | 27 | 28 | def audit(data_list, tags, debug=False): 29 | ''' 30 | Runs auditpol on the local machine and audits the return data 31 | with the CIS yaml processed by __virtual__ 32 | ''' 33 | __data__ = {} 34 | __firewalldata__ = _import_firewall() 35 | for profile, data in data_list: 36 | _merge_yaml(__data__, data, profile) 37 | __tags__ = _get_tags(__data__) 38 | if debug: 39 | log.debug('firewall audit __data__:') 40 | log.debug(__data__) 41 | log.debug('firewall audit __tags__:') 42 | log.debug(__tags__) 43 | 44 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 45 | for tag in __tags__: 46 | if fnmatch.fnmatch(tag, tags): 47 | for tag_data in __tags__[tag]: 48 | if 'control' in tag_data: 49 | ret['Controlled'].append(tag_data) 50 | continue 51 | name = tag_data['name'] 52 | audit_type = tag_data['type'] 53 | match_output = tag_data['match_output'].lower() 54 | 55 | # Blacklisted audit (do not include) 56 | if 'blacklist' in audit_type: 57 | if name not in __firewalldata__[tag_data['value_type'].title()]: 58 | ret['Success'].append(tag_data) 59 | else: 60 | ret['Failure'].append(tag_data) 61 | 62 | # Whitelisted audit (must include) 63 | if 'whitelist' in audit_type: 64 | if name in __firewalldata__[tag_data['value_type'].title()]: 65 | audit_value = __firewalldata__[tag_data['value_type'].title()] 66 | audit_value = audit_value[name].lower() 67 | secret = _translate_value_type(audit_value, tag_data['value_type'], match_output) 68 | if secret: 69 | ret['Success'].append(tag_data) 70 | else: 71 | ret['Failure'].append(tag_data) 72 | else: 73 | log.debug('When trying to audit the firewall section,' 74 | ' the yaml contained incorrect data for the key') 75 | 76 | return ret 77 | 78 | 79 | def _merge_yaml(ret, data, profile=None): 80 | ''' 81 | 
Merge two yaml dicts together at the secedit:blacklist and 82 | secedit:whitelist level 83 | ''' 84 | if __virtualname__ not in ret: 85 | ret[__virtualname__] = {} 86 | for topkey in ('blacklist', 'whitelist'): 87 | if topkey in data.get(__virtualname__, {}): 88 | if topkey not in ret[__virtualname__]: 89 | ret[__virtualname__][topkey] = [] 90 | for key, val in data[__virtualname__][topkey].iteritems(): 91 | if profile and isinstance(val, dict): 92 | val['nova_profile'] = profile 93 | ret[__virtualname__][topkey].append({key: val}) 94 | return ret 95 | 96 | 97 | def _get_tags(data): 98 | ''' 99 | Retrieve all the tags for this distro from the yaml 100 | ''' 101 | ret = {} 102 | distro = __grains__.get('osfullname') 103 | for toplist, toplevel in data.get(__virtualname__, {}).iteritems(): 104 | # secedit:whitelist 105 | for audit_dict in toplevel: 106 | for audit_id, audit_data in audit_dict.iteritems(): 107 | # secedit:whitelist:PasswordComplexity 108 | tags_dict = audit_data.get('data', {}) 109 | # secedit:whitelist:PasswordComplexity:data 110 | tags = None 111 | for osfinger in tags_dict: 112 | if osfinger == '*': 113 | continue 114 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 115 | for osfinger_glob in osfinger_list: 116 | if fnmatch.fnmatch(distro, osfinger_glob): 117 | tags = tags_dict.get(osfinger) 118 | break 119 | if tags is not None: 120 | break 121 | # If we didn't find a match, check for a '*' 122 | if tags is None: 123 | tags = tags_dict.get('*', []) 124 | # secedit:whitelist:PasswordComplexity:data:Windows 2012 125 | if isinstance(tags, dict): 126 | # malformed yaml, convert to list of dicts 127 | tmp = [] 128 | for name, tag in tags.iteritems(): 129 | tmp.append({name: tag}) 130 | tags = tmp 131 | for item in tags: 132 | for name, tag in item.iteritems(): 133 | tag_data = {} 134 | # Whitelist could have a dictionary, not a string 135 | if isinstance(tag, dict): 136 | tag_data = copy.deepcopy(tag) 137 | tag = tag_data.pop('tag') 
138 | if tag not in ret: 139 | ret[tag] = [] 140 | formatted_data = {'name': name, 141 | 'tag': tag, 142 | 'module': 'win_auditpol', 143 | 'type': toplist} 144 | formatted_data.update(tag_data) 145 | formatted_data.update(audit_data) 146 | formatted_data.pop('data') 147 | ret[tag].append(formatted_data) 148 | return ret 149 | 150 | def _export_firewall(): 151 | dump = [] 152 | try: 153 | temp = __salt__['cmd.run']('Get-NetFirewallProfile -PolicyStore ActiveStore', shell='powershell', python_shell=True) 154 | temp = temp.split('\r\n\r\n') 155 | if temp: 156 | for item in temp: 157 | if item != '': 158 | dump.append(item) 159 | return dump 160 | else: 161 | log.error('Nothing was returned from the auditpol command.') 162 | except StandardError: 163 | log.error('An error occurred running the auditpol command.') 164 | 165 | 166 | def _import_firewall(): 167 | dict_return = {} 168 | temp_vals = {} 169 | export = _export_firewall() 170 | for line in export: 171 | vals = line.split('\n') 172 | for val in vals: 173 | if val: 174 | v = val.split(':') 175 | if len(v) < 2: continue 176 | temp_vals[v[0].strip()] = v[1].strip() 177 | dict_return[temp_vals['Name']] = temp_vals 178 | return dict_return 179 | 180 | 181 | def _translate_value_type(current, value, evaluator): 182 | if value in ('public', 'private', 'domain'): 183 | if current == evaluator: 184 | return True 185 | else: 186 | return False 187 | -------------------------------------------------------------------------------- /hubblestack_nova/win_gp.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | 4 | :maintainer: HubbleStack 5 | :maturity: 2016.7.0 6 | :platform: Windows 7 | :requires: SaltStack 8 | 9 | ''' 10 | 11 | from __future__ import absolute_import 12 | import copy 13 | import fnmatch 14 | import logging 15 | import salt.utils 16 | 17 | 18 | log = logging.getLogger(__name__) 19 | __virtualname__ = 'win_gp' 20 | 21 | 22 | def 
__virtual__(): 23 | if not salt.utils.is_windows(): 24 | return False, 'This audit module only runs on windows' 25 | return True 26 | 27 | 28 | def audit(data_list, tags, debug=False): 29 | ''' 30 | Runs auditpol on the local machine and audits the return data 31 | with the CIS yaml processed by __virtual__ 32 | ''' 33 | __data__ = {} 34 | __gpdata__ = _get_gp_templates() 35 | for profile, data in data_list: 36 | _merge_yaml(__data__, data, profile) 37 | __tags__ = _get_tags(__data__) 38 | if debug: 39 | log.debug('firewall audit __data__:') 40 | log.debug(__data__) 41 | log.debug('firewall audit __tags__:') 42 | log.debug(__tags__) 43 | 44 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 45 | for tag in __tags__: 46 | if fnmatch.fnmatch(tag, tags): 47 | for tag_data in __tags__[tag]: 48 | if 'control' in tag_data: 49 | ret['Controlled'].append(tag_data) 50 | continue 51 | name = tag_data['name'] 52 | audit_type = tag_data['type'] 53 | match_output = tag_data['match_output'].lower() 54 | 55 | # Blacklisted audit (do not include) 56 | if 'blacklist' in audit_type: 57 | if name not in __gpdata__: 58 | ret['Success'].append(tag_data) 59 | else: 60 | ret['Failure'].append(tag_data) 61 | 62 | # Whitelisted audit (must include) 63 | if 'whitelist' in audit_type: 64 | if name in __gpdata__: 65 | audit_value = True 66 | secret = _translate_value_type(audit_value, tag_data['value_type'], match_output) 67 | if secret: 68 | ret['Success'].append(tag_data) 69 | else: 70 | ret['Failure'].append(tag_data) 71 | else: 72 | log.debug('When trying to audit the firewall section,' 73 | ' the yaml contained incorrect data for the key') 74 | 75 | return ret 76 | 77 | 78 | def _merge_yaml(ret, data, profile=None): 79 | ''' 80 | Merge two yaml dicts together at the secedit:blacklist and 81 | secedit:whitelist level 82 | ''' 83 | if __virtualname__ not in ret: 84 | ret[__virtualname__] = {} 85 | for topkey in ('blacklist', 'whitelist'): 86 | if topkey in data.get(__virtualname__, 
{}): 87 | if topkey not in ret[__virtualname__]: 88 | ret[__virtualname__][topkey] = [] 89 | for key, val in data[__virtualname__][topkey].iteritems(): 90 | if profile and isinstance(val, dict): 91 | val['nova_profile'] = profile 92 | ret[__virtualname__][topkey].append({key: val}) 93 | return ret 94 | 95 | 96 | def _get_tags(data): 97 | ''' 98 | Retrieve all the tags for this distro from the yaml 99 | ''' 100 | ret = {} 101 | distro = __grains__.get('osfullname') 102 | for toplist, toplevel in data.get(__virtualname__, {}).iteritems(): 103 | # secedit:whitelist 104 | for audit_dict in toplevel: 105 | for audit_id, audit_data in audit_dict.iteritems(): 106 | # secedit:whitelist:PasswordComplexity 107 | tags_dict = audit_data.get('data', {}) 108 | # secedit:whitelist:PasswordComplexity:data 109 | tags = None 110 | for osfinger in tags_dict: 111 | if osfinger == '*': 112 | continue 113 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 114 | for osfinger_glob in osfinger_list: 115 | if fnmatch.fnmatch(distro, osfinger_glob): 116 | tags = tags_dict.get(osfinger) 117 | break 118 | if tags is not None: 119 | break 120 | # If we didn't find a match, check for a '*' 121 | if tags is None: 122 | tags = tags_dict.get('*', []) 123 | # secedit:whitelist:PasswordComplexity:data:Windows 2012 124 | if isinstance(tags, dict): 125 | # malformed yaml, convert to list of dicts 126 | tmp = [] 127 | for name, tag in tags.iteritems(): 128 | tmp.append({name: tag}) 129 | tags = tmp 130 | for item in tags: 131 | for name, tag in item.iteritems(): 132 | tag_data = {} 133 | # Whitelist could have a dictionary, not a string 134 | if isinstance(tag, dict): 135 | tag_data = copy.deepcopy(tag) 136 | tag = tag_data.pop('tag') 137 | if tag not in ret: 138 | ret[tag] = [] 139 | formatted_data = {'name': name, 140 | 'tag': tag, 141 | 'module': 'win_auditpol', 142 | 'type': toplist} 143 | formatted_data.update(tag_data) 144 | formatted_data.update(audit_data) 145 | 
formatted_data.pop('data') 146 | ret[tag].append(formatted_data) 147 | return ret 148 | 149 | 150 | def _get_gp_templates(): 151 | domain_check = __salt__['system.get_domain_workgroup']() 152 | if 'Workgroup' in domain_check: 153 | return False 154 | else: 155 | domain_check = domain_check['Domain'] 156 | list = __salt__['cmd.run']('Get-ChildItem //{0}/SYSVOL/{0}/Policies/PolicyDefinitions | Format-List ' 157 | '-Property Name, SID'.format(domain_check), shell='powershell', python_shell=True) 158 | return list 159 | 160 | def _translate_value_type(current, value, evaluator): 161 | if 'equal' in value: 162 | if current == evaluator: 163 | return True 164 | else: 165 | return False 166 | -------------------------------------------------------------------------------- /hubblestack_nova/win_pkg.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | 4 | :maintainer: HubbleStack 5 | :maturity: 2016.7.0 6 | :platform: Windows 7 | :requires: SaltStack 8 | 9 | ''' 10 | from __future__ import absolute_import 11 | 12 | import copy 13 | import fnmatch 14 | import logging 15 | import salt.utils 16 | from salt.exceptions import CommandExecutionError 17 | 18 | 19 | log = logging.getLogger(__name__) 20 | __virtualname__ = 'win_pkg' 21 | 22 | def __virtual__(): 23 | if not salt.utils.is_windows(): 24 | return False, 'This audit module only runs on windows' 25 | return True 26 | 27 | 28 | def audit(data_list, tags, debug=False): 29 | ''' 30 | Runs auditpol on the local machine and audits the return data 31 | with the CIS yaml processed by __virtual__ 32 | ''' 33 | __data__ = {} 34 | try: 35 | __pkgdata__ = __salt__['pkg.list_pkgs']() 36 | except CommandExecutionError: 37 | __salt__['pkg.refresh_db']() 38 | __pkgdata__ = __salt__['pkg.list_pkgs']() 39 | for profile, data in data_list: 40 | _merge_yaml(__data__, data, profile) 41 | __tags__ = _get_tags(__data__) 42 | if debug: 43 | log.debug('package audit 
__data__:') 44 | log.debug(__data__) 45 | log.debug('package audit __tags__:') 46 | log.debug(__tags__) 47 | 48 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 49 | for tag in __tags__: 50 | if fnmatch.fnmatch(tag, tags): 51 | for tag_data in __tags__[tag]: 52 | if 'control' in tag_data: 53 | ret['Controlled'].append(tag_data) 54 | continue 55 | name = tag_data['name'] 56 | audit_type = tag_data['type'] 57 | match_output = tag_data['match_output'].lower() 58 | 59 | # Blacklisted audit (do not include) 60 | if 'blacklist' in audit_type: 61 | if name not in __pkgdata__: 62 | ret['Success'].append(tag_data) 63 | else: 64 | ret['Failure'].append(tag_data) 65 | 66 | # Whitelisted audit (must include) 67 | if 'whitelist' in audit_type: 68 | if name in __pkgdata__: 69 | audit_value = __pkgdata__['name'] 70 | secret = _translate_value_type(audit_value, tag_data['value_type'], match_output) 71 | if secret: 72 | ret['Success'].append(tag_data) 73 | else: 74 | ret['Failure'].append(tag_data) 75 | else: 76 | ret['Failure'].append(tag_data) 77 | 78 | return ret 79 | 80 | 81 | def _merge_yaml(ret, data, profile=None): 82 | ''' 83 | Merge two yaml dicts together at the secedit:blacklist and 84 | secedit:whitelist level 85 | ''' 86 | if __virtualname__ not in ret: 87 | ret[__virtualname__] = {} 88 | for topkey in ('blacklist', 'whitelist'): 89 | if topkey in data.get(__virtualname__, {}): 90 | if topkey not in ret[__virtualname__]: 91 | ret[__virtualname__][topkey] = [] 92 | for key, val in data[__virtualname__][topkey].iteritems(): 93 | if profile and isinstance(val, dict): 94 | val['nova_profile'] = profile 95 | ret[__virtualname__][topkey].append({key: val}) 96 | return ret 97 | 98 | 99 | def _get_tags(data): 100 | ''' 101 | Retrieve all the tags for this distro from the yaml 102 | ''' 103 | ret = {} 104 | distro = __grains__.get('osfullname') 105 | for toplist, toplevel in data.get(__virtualname__, {}).iteritems(): 106 | # secedit:whitelist 107 | for audit_dict in 
toplevel: 108 | for audit_id, audit_data in audit_dict.iteritems(): 109 | # secedit:whitelist:PasswordComplexity 110 | tags_dict = audit_data.get('data', {}) 111 | # secedit:whitelist:PasswordComplexity:data 112 | tags = None 113 | for osfinger in tags_dict: 114 | if osfinger == '*': 115 | continue 116 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 117 | for osfinger_glob in osfinger_list: 118 | if fnmatch.fnmatch(distro, osfinger_glob): 119 | tags = tags_dict.get(osfinger) 120 | break 121 | if tags is not None: 122 | break 123 | # If we didn't find a match, check for a '*' 124 | if tags is None: 125 | tags = tags_dict.get('*', []) 126 | # secedit:whitelist:PasswordComplexity:data:Windows 2012 127 | if isinstance(tags, dict): 128 | # malformed yaml, convert to list of dicts 129 | tmp = [] 130 | for name, tag in tags.iteritems(): 131 | tmp.append({name: tag}) 132 | tags = tmp 133 | for item in tags: 134 | for name, tag in item.iteritems(): 135 | tag_data = {} 136 | # Whitelist could have a dictionary, not a string 137 | if isinstance(tag, dict): 138 | tag_data = copy.deepcopy(tag) 139 | tag = tag_data.pop('tag') 140 | if tag not in ret: 141 | ret[tag] = [] 142 | formatted_data = {'name': name, 143 | 'tag': tag, 144 | 'module': 'win_auditpol', 145 | 'type': toplist} 146 | formatted_data.update(tag_data) 147 | formatted_data.update(audit_data) 148 | formatted_data.pop('data') 149 | ret[tag].append(formatted_data) 150 | return ret 151 | 152 | 153 | def _translate_value_type(current, value, evaluator): 154 | if int(current) >= int(evaluator): 155 | return True 156 | else: 157 | return False 158 | -------------------------------------------------------------------------------- /hubblestack_nova/win_reg.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | 4 | :maintainer: HubbleStack / madchills 5 | :maturity: 2016.7.0 6 | :platform: Windows 7 | :requires: SaltStack 8 | 9 | ''' 
10 | 11 | from __future__ import absolute_import 12 | import copy 13 | import fnmatch 14 | import logging 15 | import salt.utils 16 | 17 | 18 | log = logging.getLogger(__name__) 19 | __virtualname__ = 'win_reg' 20 | 21 | def __virtual__(): 22 | if not salt.utils.is_windows(): 23 | return False, 'This audit module only runs on windows' 24 | return True 25 | 26 | 27 | def audit(data_list, tags, debug=False): 28 | ''' 29 | Runs auditpol on the local machine and audits the return data 30 | with the CIS yaml processed by __virtual__ 31 | ''' 32 | __data__ = {} 33 | for profile, data in data_list: 34 | _merge_yaml(__data__, data, profile) 35 | __tags__ = _get_tags(__data__) 36 | if debug: 37 | log.debug('registry audit __data__:') 38 | log.debug(__data__) 39 | log.debug('registry audit __tags__:') 40 | log.debug(__tags__) 41 | 42 | ret = {'Success': [], 'Failure': [], 'Controlled': []} 43 | for tag in __tags__: 44 | if fnmatch.fnmatch(tag, tags): 45 | for tag_data in __tags__[tag]: 46 | if 'control' in tag_data: 47 | ret['Controlled'].append(tag_data) 48 | continue 49 | name = tag_data['name'] 50 | audit_type = tag_data['type'] 51 | match_output = tag_data['match_output'].lower() 52 | reg_dict = _reg_path_splitter(name) 53 | 54 | # Blacklisted audit (do not include) 55 | if 'blacklist' in audit_type: 56 | secret = _find_option_value_in_reg(reg_dict['hive'], reg_dict['key'], reg_dict['value']) 57 | if secret: 58 | ret['Failure'].append(tag_data) 59 | else: 60 | ret['Success'].append(tag_data) 61 | 62 | # Whitelisted audit (must include) 63 | if 'whitelist' in audit_type: 64 | current = _find_option_value_in_reg(reg_dict['hive'], reg_dict['key'], reg_dict['value']) 65 | if isinstance(current, list): 66 | if False in current: 67 | ret['Failure'].append(tag_data) 68 | else: 69 | for key in current: 70 | secret = _translate_value_type(key, tag_data['value_type'], match_output) 71 | if not secret: 72 | break 73 | if secret: 74 | ret['Success'].append(tag_data) 75 | else: 76 | 
ret['Failure'].append(tag_data) 77 | if current: 78 | secret = _translate_value_type(current, tag_data['value_type'], match_output) 79 | if secret: 80 | ret['Success'].append(tag_data) 81 | else: 82 | tag_data['value_found'] = current 83 | ret['Failure'].append(tag_data) 84 | 85 | else: 86 | tag_data['value_found'] = None 87 | ret['Failure'].append(tag_data) 88 | 89 | return ret 90 | 91 | 92 | def _merge_yaml(ret, data, profile=None): 93 | ''' 94 | Merge two yaml dicts together at the secedit:blacklist and 95 | secedit:whitelist level 96 | ''' 97 | if __virtualname__ not in ret: 98 | ret[__virtualname__] = {} 99 | for topkey in ('blacklist', 'whitelist'): 100 | if topkey in data.get(__virtualname__, {}): 101 | if topkey not in ret[__virtualname__]: 102 | ret[__virtualname__][topkey] = [] 103 | for key, val in data[__virtualname__][topkey].iteritems(): 104 | if profile and isinstance(val, dict): 105 | val['nova_profile'] = profile 106 | ret[__virtualname__][topkey].append({key: val}) 107 | return ret 108 | 109 | 110 | def _get_tags(data): 111 | ''' 112 | Retrieve all the tags for this distro from the yaml 113 | ''' 114 | ret = {} 115 | distro = __grains__.get('osfullname') 116 | for toplist, toplevel in data.get(__virtualname__, {}).iteritems(): 117 | # secedit:whitelist 118 | for audit_dict in toplevel: 119 | for audit_id, audit_data in audit_dict.iteritems(): 120 | # secedit:whitelist:PasswordComplexity 121 | tags_dict = audit_data.get('data', {}) 122 | # secedit:whitelist:PasswordComplexity:data 123 | tags = None 124 | for osfinger in tags_dict: 125 | if osfinger == '*': 126 | continue 127 | osfinger_list = [finger.strip() for finger in osfinger.split(',')] 128 | for osfinger_glob in osfinger_list: 129 | if fnmatch.fnmatch(distro, osfinger_glob): 130 | tags = tags_dict.get(osfinger) 131 | break 132 | if tags is not None: 133 | break 134 | # If we didn't find a match, check for a '*' 135 | if tags is None: 136 | tags = tags_dict.get('*', []) 137 | # 
secedit:whitelist:PasswordComplexity:data:Windows 2012 138 | if isinstance(tags, dict): 139 | # malformed yaml, convert to list of dicts 140 | tmp = [] 141 | for name, tag in tags.iteritems(): 142 | tmp.append({name: tag}) 143 | tags = tmp 144 | for item in tags: 145 | for name, tag in item.iteritems(): 146 | tag_data = {} 147 | # Whitelist could have a dictionary, not a string 148 | if isinstance(tag, dict): 149 | tag_data = copy.deepcopy(tag) 150 | tag = tag_data.pop('tag') 151 | if tag not in ret: 152 | ret[tag] = [] 153 | formatted_data = {'name': name, 154 | 'tag': tag, 155 | 'module': 'win_reg', 156 | 'type': toplist} 157 | formatted_data.update(tag_data) 158 | formatted_data.update(audit_data) 159 | formatted_data.pop('data') 160 | ret[tag].append(formatted_data) 161 | return ret 162 | 163 | 164 | def _reg_path_splitter(reg_path): 165 | dict_return = {} 166 | dict_return['hive'], temp = reg_path.split('\\', 1) 167 | dict_return['key'], dict_return['value'] = temp.rsplit('\\', 1) 168 | 169 | return dict_return 170 | 171 | 172 | 173 | def _find_option_value_in_reg(reg_hive, reg_key, reg_value): 174 | ''' 175 | helper function to retrieve Windows registry settings for a particular 176 | option 177 | ''' 178 | if reg_hive.lower() in ('hku', 'hkey_users'): 179 | key_list = [] 180 | ret_list = [] 181 | sid_return = __salt__['cmd.run']('reg query hku').split('\n') 182 | for line in sid_return: 183 | if '\\' in line: 184 | key_list.append(line.split('\\')[1].strip()) 185 | for sid in key_list: 186 | reg_key.replace('', sid) 187 | reg_result = __salt__['reg.read_value'](reg_hive, reg_key, reg_value) 188 | if reg_result['success']: 189 | ret_list.append(reg_result['vdata']) 190 | else: 191 | ret_list.append(False) 192 | if False in ret_list: 193 | return False 194 | else: 195 | return ret_list 196 | 197 | 198 | else: 199 | reg_result = __salt__['reg.read_value'](reg_hive, reg_key, reg_value) 200 | if reg_result['success']: 201 | return reg_result['vdata'] 202 | else: 
203 | return False 204 | 205 | 206 | def _translate_evaluator(output): 207 | '''Helper function to return valid output you would find in the registry''' 208 | if 'enabled' in output: 209 | return '1' 210 | if 'disabled' in output: 211 | return '0' 212 | 213 | 214 | def _translate_value_type(current, value, evaluator): 215 | evaluator = _translate_evaluator(evaluator) 216 | if 'all' in value: 217 | if current == evaluator: 218 | return True 219 | else: 220 | return False 221 | if 'domain' in value: 222 | pass 223 | 224 | -------------------------------------------------------------------------------- /hubblestack_nova/win_secedit.py: -------------------------------------------------------------------------------- 1 | # -*- encoding: utf-8 -*- 2 | ''' 3 | 4 | :maintainer: HubbleStack / madchills 5 | :maturity: 2016.7.0 6 | :platform: Windows 7 | :requires: SaltStack 8 | 9 | ''' 10 | 11 | from __future__ import absolute_import 12 | import copy 13 | import fnmatch 14 | import logging 15 | import salt.utils 16 | 17 | try: 18 | import codecs 19 | import uuid 20 | HAS_WINDOWS_MODULES = True 21 | except ImportError: 22 | HAS_WINDOWS_MODULES = False 23 | 24 | log = logging.getLogger(__name__) 25 | __virtualname__ = 'win_secedit' 26 | 27 | def __virtual__(): 28 | if not salt.utils.is_windows() or not HAS_WINDOWS_MODULES: 29 | return False, 'This audit module only runs on windows' 30 | return True 31 | 32 | 33 | def audit(data_list, tags, debug=False): 34 | ''' 35 | Runs secedit on the local machine and audits the return data 36 | with the CIS yaml processed by __virtual__ 37 | ''' 38 | __data__ = {} 39 | __secdata__ = _secedit_export() 40 | __sidaccounts__ = _get_account_sid() 41 | for profile, data in data_list: 42 | _merge_yaml(__data__, data, profile) 43 | __tags__ = _get_tags(__data__) 44 | if debug: 45 | log.debug('secedit audit __data__:') 46 | log.debug(__data__) 47 | log.debug('secedit audit __tags__:') 48 | log.debug(__tags__) 49 | 50 | ret = {'Success': [], 
def audit(data_list, tags, debug=False):
    '''
    Export the local security policy with secedit and audit it against the
    merged Nova profile data.

    data_list -- list of (profile, yaml_data) tuples
    tags      -- glob used to filter which tags are audited
    debug     -- log the merged data/tags when True

    Returns a dict with 'Success', 'Failure' and 'Controlled' lists.
    '''
    __data__ = {}
    __secdata__ = _secedit_export()
    __sidaccounts__ = _get_account_sid()
    for profile, data in data_list:
        _merge_yaml(__data__, data, profile)
    __tags__ = _get_tags(__data__)
    if debug:
        log.debug('secedit audit __data__:')
        log.debug(__data__)
        log.debug('secedit audit __tags__:')
        log.debug(__tags__)

    ret = {'Success': [], 'Failure': [], 'Controlled': []}
    for tag in __tags__:
        if fnmatch.fnmatch(tag, tags):
            for tag_data in __tags__[tag]:
                if 'control' in tag_data:
                    ret['Controlled'].append(tag_data)
                    continue
                name = tag_data['name']
                audit_type = tag_data['type']
                output = tag_data['match_output'].lower()

                # Blacklisted audit (setting must NOT match)
                if audit_type == 'blacklist':
                    if 'no one' in output:
                        if name not in __secdata__:
                            ret['Success'].append(tag_data)
                        else:
                            ret['Failure'].append(tag_data)
                    elif name in __secdata__:
                        # BUG FIX: the original referenced the undefined
                        # names sec_value / match_output here (guaranteed
                        # NameError); use the exported setting and the
                        # lowered match_output instead.
                        secret = _translate_value_type(__secdata__[name],
                                                       tag_data['value_type'],
                                                       output)
                        if secret:
                            ret['Failure'].append(tag_data)
                        else:
                            ret['Success'].append(tag_data)

                # Whitelisted audit (setting must match)
                if audit_type == 'whitelist':
                    if name in __secdata__:
                        sec_value = __secdata__[name]
                        if 'machine\\' in output:
                            # registry-backed policy: translate the phrase
                            # to the raw 'type,value' form
                            match_output = _reg_value_translator(tag_data['match_output'])
                        else:
                            match_output = tag_data['match_output']
                        if 'account' in tag_data['value_type']:
                            secret = _translate_value_type(sec_value, tag_data['value_type'],
                                                           match_output, __sidaccounts__)
                        else:
                            secret = _translate_value_type(sec_value, tag_data['value_type'],
                                                           match_output)
                        if secret:
                            ret['Success'].append(tag_data)
                        else:
                            ret['Failure'].append(tag_data)
                    else:
                        ret['Failure'].append(tag_data)

    return ret
def _get_tags(data):
    '''
    Build a mapping of tag -> list of formatted audit entries for the
    current OS, matched against the 'osfullname' grain via the osfinger
    globs in each check's 'data' section ('*' is the fallback).
    '''
    ret = {}
    distro = __grains__.get('osfullname')
    for toplist, toplevel in data.get(__virtualname__, {}).items():
        # toplevel is a list of {audit_id: audit_data} one-entry dicts
        for audit_dict in toplevel:
            for audit_id, audit_data in audit_dict.items():
                tags_dict = audit_data.get('data', {})
                tags = None
                for osfinger in tags_dict:
                    if osfinger == '*':
                        continue
                    globs = [finger.strip() for finger in osfinger.split(',')]
                    if any(fnmatch.fnmatch(distro, glob) for glob in globs):
                        tags = tags_dict.get(osfinger)
                        if tags is not None:
                            break
                # no osfinger matched: fall back to the wildcard entry
                if tags is None:
                    tags = tags_dict.get('*', [])
                if isinstance(tags, dict):
                    # malformed yaml, convert to list of dicts
                    tags = [{name: tag} for name, tag in tags.items()]
                for item in tags:
                    for name, tag in item.items():
                        extra = {}
                        # the tag entry may itself be a dict of extra fields
                        if isinstance(tag, dict):
                            extra = copy.deepcopy(tag)
                            tag = extra.pop('tag')
                        entry = {'name': name,
                                 'tag': tag,
                                 'module': 'win_secedit',
                                 'type': toplist}
                        entry.update(extra)
                        entry.update(audit_data)
                        entry.pop('data')
                        ret.setdefault(tag, []).append(entry)
    return ret
secedit inf file. You can 172 | specify the location of the file and the file will persist, or let the 173 | function create it and the file will be deleted on completion. Should 174 | only be called once.''' 175 | dump = "C:\ProgramData\{}.inf".format(uuid.uuid4()) 176 | try: 177 | ret = __salt__['cmd.run']('secedit /export /cfg {0}'.format(dump)) 178 | if ret: 179 | secedit_ret = _secedit_import(dump) 180 | ret = __salt__['file.remove'](dump) 181 | return secedit_ret 182 | except StandardError: 183 | log.debug('Error occurred while trying to get / export secedit data') 184 | return False, None 185 | 186 | 187 | def _secedit_import(inf_file): 188 | '''This function takes the inf file that SecEdit dumps 189 | and returns a dictionary''' 190 | sec_return = {} 191 | with codecs.open(inf_file, 'r', encoding='utf-16') as f: 192 | for line in f: 193 | line = str(line).replace('\r\n', '') 194 | if not line.startswith('[') and not line.startswith('Unicode'): 195 | if line.find(' = ') != -1: 196 | k, v = line.split(' = ') 197 | sec_return[k] = v 198 | else: 199 | k, v = line.split('=') 200 | sec_return[k] = v 201 | return sec_return 202 | 203 | 204 | def _get_account_sid(): 205 | '''This helper function will get all the users and groups on the computer 206 | and return a dictionary''' 207 | win32 = __salt__['cmd.run']('Get-WmiObject win32_useraccount -Filter "localaccount=\'True\'"' 208 | ' | Format-List -Property Name, SID', shell='powershell', 209 | python_shell=True) 210 | win32 += '\n' 211 | win32 += __salt__['cmd.run']('Get-WmiObject win32_group -Filter "localaccount=\'True\'" | ' 212 | 'Format-List -Property Name, SID', shell='powershell', 213 | python_shell=True) 214 | if win32: 215 | 216 | dict_return = {} 217 | lines = win32.split('\n') 218 | lines = filter(None, lines) 219 | if 'local:' in lines: 220 | lines.remove('local:') 221 | for line in lines: 222 | line = line.strip() 223 | if line != '' and ' : ' in line: 224 | k, v = line.split(' : ') 225 | if k.lower() 
def _get_account_sid():
    '''Build a dict mapping local account and group names to their SIDs,
    parsed from powershell Get-WmiObject output ('Name : x' / 'SID : y'
    line pairs).  Well-known service SIDs are filled in when missing.
    Returns False if the queries or parsing fail.'''
    win32 = __salt__['cmd.run']('Get-WmiObject win32_useraccount -Filter "localaccount=\'True\'"'
                                ' | Format-List -Property Name, SID', shell='powershell',
                                python_shell=True)
    win32 += '\n'
    win32 += __salt__['cmd.run']('Get-WmiObject win32_group -Filter "localaccount=\'True\'" | '
                                 'Format-List -Property Name, SID', shell='powershell',
                                 python_shell=True)
    if not win32:
        log.debug('error occurred while trying to run powershell '
                  'get-wmiobject command')
        return False

    dict_return = {}
    key = None
    # BUG FIX: the original did `lines = filter(None, lines)` then
    # `lines.remove('local:')` -- filter() returns an iterator on Python 3
    # and has no .remove(); iterate and skip instead.
    for line in win32.split('\n'):
        line = line.strip()
        if not line or line == 'local:' or ' : ' not in line:
            continue
        k, v = line.split(' : ', 1)  # maxsplit: value may contain ' : '
        if k.lower() == 'name':
            key = v
        elif key is not None:
            # BUG FIX: guard against a SID line arriving before any Name
            # line (original raised UnboundLocalError on `key`)
            dict_return[key] = v

    if not dict_return:
        log.debug('Error parsing the data returned from powershell')
        return False
    # Ensure the well-known service accounts are always present
    dict_return.setdefault('LOCAL SERVICE', 'S-1-5-19')
    dict_return.setdefault('NETWORK SERVICE', 'S-1-5-20')
    dict_return.setdefault('SERVICE', 'S-1-5-6')
    return dict_return
__sidaccounts__) 289 | evaluator_list = evaluator.split(',') 290 | current_list = current.split(',') 291 | list_match = False 292 | for list_item in evaluator_list: 293 | if list_item in current_list: 294 | list_match = True 295 | else: 296 | list_match = False 297 | break 298 | if list_match: 299 | for list_item in current_list: 300 | if list_item in evaluator_list: 301 | list_match = True 302 | else: 303 | list_match = False 304 | break 305 | else: 306 | return False 307 | if list_match: 308 | return True 309 | else: 310 | return False 311 | elif 'configured' in value: 312 | if current == '': 313 | return False 314 | elif current == value: 315 | return True 316 | else: 317 | return False 318 | else: 319 | return 'Undefined' 320 | 321 | 322 | def _evaluator_translator(input_string): 323 | '''This helper function takes words from the CIS yaml and replaces 324 | them with what you actually find in the secedit dump''' 325 | input_string = input_string.replace(' ','').lower() 326 | if 'enabled' in input_string: 327 | return '1' 328 | elif 'disabled' in input_string: 329 | return '0' 330 | elif 'success' in input_string: 331 | return '1' 332 | elif 'failure' in input_string: 333 | return '2' 334 | elif input_string == 'success,failure' or input_string == 'failure,success': 335 | return '3' 336 | else: 337 | log.debug('error translating evaluator from enabled/disabled or success/failure.' 
338 | ' Could have received incorrect string') 339 | return 'undefined' 340 | 341 | 342 | def _account_audit(current, __sidaccounts__): 343 | '''This helper function takes the account names from the cis yaml and 344 | replaces them with the account SID that you find in the secedit dump''' 345 | user_list = current.split(', ') 346 | ret_string = '' 347 | if __sidaccounts__: 348 | for usr in user_list: 349 | if usr == 'Guest': 350 | if not ret_string: 351 | ret_string = usr 352 | else: 353 | ret_string += ',' + usr 354 | if usr in __sidaccounts__: 355 | if not ret_string: 356 | ret_string = '*' + __sidaccounts__[usr] 357 | else: 358 | ret_string += ',*' + __sidaccounts__[usr] 359 | return ret_string 360 | else: 361 | log.debug('getting the SIDs for each account failed') 362 | return False 363 | 364 | 365 | def _reg_value_translator(input_string): 366 | input_string.lower() 367 | if input_string == 'enabled': 368 | return '4,1' 369 | elif input_string == 'disabled': 370 | return '4,0' 371 | elif input_string == 'users cant add or log on with microsoft accounts': 372 | return '4,3' 373 | elif input_string == 'administrators': 374 | return '1,"0"' 375 | elif input_string == 'lock workstation': 376 | return '1,"1"' 377 | elif input_string == 'accept if provided by client': 378 | return '4,1' 379 | elif input_string == 'classic - local users authenticate as themselves': 380 | return '4,1' 381 | elif input_string == 'rc4_hmac_md5, aes128_hmac_SHA1, aes256_hmac_sha1, future encryption types': 382 | return '4,2147483644' 383 | elif input_string == 'send ntlmv2 response only. 
Refuse lm & ntlm': 384 | return '4,5' 385 | elif input_string == 'negotiate signing': 386 | return '4,1' 387 | elif input_string == 'Require ntlmv2 session security, require 128-bit encryption': 388 | return '4,537395200' 389 | elif input_string == 'prompt for consent on the secure desktop': 390 | return '4,2' 391 | elif input_string == 'automatically deny elevation requests': 392 | return '4,0' 393 | elif input_string == 'Defined (blank)': 394 | return '7,' 395 | else: 396 | return input_string 397 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/cve/centos-6-salt.yaml: -------------------------------------------------------------------------------- 1 | cve_scan_v2: 2 | ttl: 86400 3 | url: "salt://hubblestack_nova_profiles/centos_6.json" 4 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/cve/centos-7-salt.yaml: -------------------------------------------------------------------------------- 1 | cve_scan_v2: 2 | ttl: 86400 3 | url: "salt://hubblestack_nova_profiles/centos_7.json" 4 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/cve/scan-v1.yaml: -------------------------------------------------------------------------------- 1 | cve_scan: https://www.redhat.com/security/data/oval/com.redhat.rhsa-RHEL7.xml 2 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/cve/scan-v2-salt.yaml: -------------------------------------------------------------------------------- 1 | cve_scan_v2: 2 | ttl: 86400 3 | url: "salt://hubblestack_nova/centos_7.json" 4 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/cve/scan-v2.yaml: -------------------------------------------------------------------------------- 1 | cve_scan_v2: 2 | ttl: 86400 3 | url: "http://vulners.com" 4 | # control: 5 | 
# score: 4 6 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/firewall/ssh.yaml: -------------------------------------------------------------------------------- 1 | iptables: 2 | whitelist: 3 | 4 | ssh: 5 | data: 6 | tag: 'IPTABLES-TCP-22' 7 | table: 'filter' 8 | chain: INPUT 9 | family: 'ipv4' 10 | rule: 11 | proto: tcp 12 | dport: 22 13 | match: state 14 | connstate: ESTABLISHED,RELATED 15 | jump: ACCEPT 16 | description: 'iptables: filter ipv4 tcp 22 established,related accept' 17 | 18 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/misc.yaml: -------------------------------------------------------------------------------- 1 | # This is a general-use profile to fill with your implementation-specific 2 | # checks 3 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/network/smtp.yaml: -------------------------------------------------------------------------------- 1 | netstat: 2 | smtp: 3 | address: 4 | - '127.0.0.1:25' 5 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/network/ssh.yaml: -------------------------------------------------------------------------------- 1 | netstat: 2 | ssh: 3 | address: 4 | - '*:22' 5 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/samples/dont_blame_nrpe.yaml: -------------------------------------------------------------------------------- 1 | grep: 2 | blacklist: 3 | 4 | dont_blame_nrpe: 5 | data: 6 | '*': 7 | - '/etc/nrpe.cfg': 8 | tag: 'CVE-2014-2913' 9 | pattern: 'dont_blame_nrpe=1' 10 | description: 'NRPE - Nagios Remote Plugin Executor' 11 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/samples/sample_command.yaml: 
-------------------------------------------------------------------------------- 1 | command: 2 | nodev: 3 | data: 4 | 'Red Hat Enterprise Linux Server-6': 5 | tag: CIS-1.1.10 6 | commands: 7 | - 'grep "[[:space:]]/home[[:space:]]" /etc/fstab': 8 | match_output: nodev 9 | match_output_regex: False 10 | fail_if_matched: False 11 | - 'mount | grep /home': 12 | match_output: nodev 13 | match_output_regex: False 14 | match_output_by_line: True 15 | aggregation: 'and' 16 | description: '/home should be nodev' 17 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/samples/sample_control.yaml: -------------------------------------------------------------------------------- 1 | stat: 2 | grub_conf_own: 3 | data: 4 | 'CentOS-6': 5 | - '/etc/grub.conf': 6 | tag: 'CIS-1.5.1' 7 | user: 'root' 8 | uid: 0 9 | group: 'root' 10 | gid: 0 11 | 'CentOS Linux-7': 12 | - '/etc/grub2/grub.cfg': 13 | tag: 'CIS-1.5.1' 14 | user: 'root' 15 | uid: 0 16 | group: 'root' 17 | gid: 0 18 | description: 'Grub must be owned by root (Scored)' 19 | control: 'We do not care about this' 20 | 21 | grub_conf_perm: 22 | data: 23 | 'CentOS-6': 24 | - '/etc/grub.conf': 25 | tag: 'CIS-1.5.2' 26 | mode: 600 27 | 'CentOS Linux-7': 28 | - '/etc/grub2/grub.cfg': 29 | tag: 'CIS-1.5.2' 30 | mode: 600 31 | description: 'Grub must have permissions 600 (Scored)' 32 | 33 | hosts_allow: 34 | data: 35 | 'CentOS-6': 36 | - '/etc/hosts.allow': 37 | tag: 'CIS-4.5.3' 38 | mode: 644 39 | 'CentOS Linux-7': 40 | - '/etc/hosts.allow': 41 | tag: 'CIS-4.5.3' 42 | mode: 644 43 | description: '/etc/hosts.allow must have permissions 644 (Scored)' 44 | control: 'We do not care about this' 45 | 46 | hosts_deny: 47 | data: 48 | 'CentOS-6': 49 | - '/etc/hosts.deny': 50 | tag: 'CIS-4.5.5' 51 | mode: 644 52 | 'CentOS Linux-7': 53 | - '/etc/hosts.deny': 54 | tag: 'CIS-4.5.5' 55 | mode: 644 56 | description: '/etc/hosts.deny must have persmissions 644 (Scored)' 57 | control: 'We do 
not care about this' 58 | 59 | anacrontab: 60 | data: 61 | 'CentOS-6': 62 | - '/etc/anacrontab': 63 | tag: 'CIS-6.1.3' 64 | mode: 600 65 | user: 'root' 66 | uid: 0 67 | group: 'root' 68 | gid: 0 69 | 'CentOS Linux-7': 70 | - '/etc/anacrontab': 71 | tag: 'CIS-6.1.3' 72 | mode: 600 73 | user: 'root' 74 | uid: 0 75 | group: 'root' 76 | gid: 0 77 | description: '/etc/anacrontab file be owned by root and must have permissions 600 (Scored)' 78 | 79 | 80 | 81 | pkg: 82 | blacklist: 83 | 84 | telnet: 85 | data: 86 | 'CentOS-6': 87 | - 'telnet-server': 'CIS-2.1.1' 88 | - 'telnet': 'CIS-2.1.2' 89 | 'CentOS Linux-7': 90 | - 'telnet-server': 'CIS-2.1.1' 91 | - 'telnet': 'CIS-2.1.2' 92 | description: 'Remove telnet and telnet-server (Scored)' 93 | 94 | rsh: 95 | data: 96 | 'CentOS-6': 97 | - 'rsh-server': 'CIS-2.1.3' 98 | - 'rsh': 'CIS-2.1.4' 99 | 'CentOS Linux-7': 100 | - 'rsh-server': 'CIS-2.1.3' 101 | - 'rsh': 'CIS-2.1.4' 102 | description: 'Remove rsh and rsh-server (Scored)' 103 | control: 'We do not care about this' 104 | 105 | nis: 106 | data: 107 | 'CentOS-6': 108 | - 'ypbind': 'CIS-2.1.5' 109 | - 'ypserv': 'CIS-2.1.6' 110 | 'CentOS Linux-7': 111 | - 'ypbind': 'CIS-2.1.5' 112 | - 'ypserv': 'CIS-2.1.6' 113 | description: 'Remove nis client and nis server (Scored)' 114 | 115 | tftp: 116 | data: 117 | 'CentOS-6': 118 | - 'tftp': 'CIS-2.1.7' 119 | - 'tftp-server': 'CIS-2.1.8' 120 | 'CentOS Linux-7': 121 | - 'tftp': 'CIS-2.1.7' 122 | - 'tftp-server': 'CIS-2.1.8' 123 | description: 'Remove tftp and tftp-server (Scored)' 124 | control: 'We do not care about this' 125 | 126 | 127 | sysctl: 128 | restrict_suid_core_dumps: 129 | data: 130 | 'CentOS-6': 131 | - 'fs.suid_dumpable': 132 | tag: 'CIS-1.6.1' 133 | match_output: '0' 134 | description: 'Restrict SUID Core Dumps (Scored)' 135 | 136 | exec_shield: 137 | data: 138 | 'CentOS-6': 139 | - 'kernel.exec-shield': 140 | tag: 'CIS-1.6.2' 141 | match_output: '1' 142 | description: 'Configure ExecShield (Scored)' 143 | control: 'We 
do not care about this' 144 | 145 | randomize_va_space: 146 | data: 147 | 'CentOS-6': 148 | - 'kernel.randomize_va_space': 149 | tag: 'CIS-1.6.3' 150 | match_output: '2' 151 | 'CentOS Linux-7': 152 | - 'kernel.randomize_va_space': 153 | tag: 'CIS-1.6.2' 154 | match_output: '2' 155 | description: 'Enable Randomized Virtual Memory Region Placement (Scored)' 156 | 157 | 158 | 159 | grep: 160 | whitelist: 161 | 162 | fstab_tmp_partition: 163 | data: 164 | CentOS-6: 165 | - '/etc/fstab': 166 | tag: 'CIS-1.1.1' 167 | pattern: '/tmp' 168 | CentOS Linux-7: 169 | - '/etc/fstab': 170 | tag: 'CIS-1.1.1' 171 | pattern: '/tmp' 172 | Ubuntu-14.04: 173 | - '/etc/fstab': 174 | tag: 'CIS-2.1' 175 | pattern: '/tmp' 176 | description: 'Create Separate Partition for /tmp (Scored)' 177 | control: 'We do not care about this' 178 | 179 | fstab_tmp_partition_nodev: 180 | data: 181 | CentOS-6: 182 | - '/etc/fstab': 183 | tag: 'CIS-1.1.2' 184 | pattern: '/tmp' 185 | match_output: 'nodev' 186 | CentOS Linux-7: 187 | - '/etc/fstab': 188 | tag: 'CIS-1.1.2' 189 | pattern: '/tmp' 190 | match_output: 'nodev' 191 | Ubuntu-14.04: 192 | - '/etc/fstab': 193 | tag: 'CIS-2.2' 194 | pattern: '/tmp' 195 | match_output: 'nodev' 196 | description: 'Set nodev option for /tmp Partition (Scored)' 197 | 198 | blacklist: 199 | legacy_passwd_entries_passwd: 200 | data: 201 | 'CentOS-6': 202 | - '/etc/passwd': 203 | tag: 'CIS-9.2.2' 204 | pattern: "^+:" 205 | CentOS Linux-7: 206 | - '/etc/passwd': 207 | tag: 'CIS-9.2.2' 208 | pattern: "^+:" 209 | description: 'Verify No Legacy "+" Entries Exist in /etc/passwd (Scored)' 210 | control: 'We do not care about this' 211 | 212 | legacy_passwd_entries_shadow: 213 | data: 214 | 'CentOS-6': 215 | - '/etc/shadow': 216 | tag: 'CIS-9.2.3' 217 | pattern: "^+:" 218 | CentOS Linux-7: 219 | - '/etc/shadow': 220 | tag: 'CIS-9.2.3' 221 | pattern: "^+:" 222 | description: 'Verify No Legacy "+" Entries Exist in /etc/shadow (Scored)' 223 | 224 | legacy_passwd_entries_group: 225 | 
data: 226 | 'CentOS-6': 227 | - '/etc/group': 228 | tag: 'CIS-9.2.4' 229 | pattern: "^+:" 230 | CentOS Linux-7: 231 | - '/etc/group': 232 | tag: 'CIS-9.2.4' 233 | pattern: "^+:" 234 | description: 'Verify No Legacy "+" Entries Exist in /etc/group (Scored)' 235 | control: 'We do not care about this' 236 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/samples/sample_firewall.yaml: -------------------------------------------------------------------------------- 1 | firewall: 2 | whitelist: 3 | 4 | ssh: 5 | data: 6 | tag: 'FIREWALL-TCP-22' 7 | table: 'filter' 8 | chain: INPUT 9 | family: 'ipv4' 10 | rule: 11 | proto: tcp 12 | dport: 22 13 | match: state 14 | connstate: RELATED,ESTABLISHED 15 | jump: ACCEPT 16 | description: 'ssh iptables rule check' 17 | 18 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/samples/sample_openssl.yaml: -------------------------------------------------------------------------------- 1 | openssl: 2 | google: 3 | data: 4 | tag: 'CERT-001' # tag (required) 5 | endpoint: 'www.google.com' # required if file is not defined 6 | file: null # /path/to/the/pem/file (required if endpoint is not defined) 7 | port: 443 # required only if both 8 | # - endpoint is defined 9 | # - https is not configured on port 443 10 | not_after: 30 # minimum number of days until expiration (default value: 0) 11 | # the check is failed if the certificate expires in less then 30 days 12 | not_before: 10 # number of days until the ceriticate becomes valid (default value: 0) 13 | # the check is failed if the certificate becomes valid in more then 10 days 14 | fail_if_not_before: True # fails the check if the certificate is not valid yet 15 | description: 'google certificate' 16 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/stig/rhel-6-mac-1-classified.yaml: 
-------------------------------------------------------------------------------- 1 | ####################################################################################### 2 | # This is the Hubblestack Nova Auditing profile for the DISA SIGS: 3 | # 4 | # Source: https://www.stigviewer.com/stig/red_hat_enterprise_linux_6/2015-05-26/MAC-1_Classified/ 5 | # OS Finger: Red Hat Enterprise Linux Server-6 6 | # Audit Level: MAC-I Classified 7 | # 8 | # Usage: 9 | # salt hubble.audit 10 | # salt hubble.audit 11 | # salt hubble.audit 12 | # 13 | # Tags use the Vulnerability ID from the STIGs 14 | # Example: You can check for a specific tag with this usage: 15 | # 16 | # salt hubble.audit stig-rhel6-mac1-classifed.yaml V-38677 17 | # 18 | # The Description field is structured following this scheme: (Severity) 19 | # 20 | # Coverage: 21 | # NOTE: At this time, hubblestack provides 000% coverage of stig inspections 22 | # due to the suite of available modules. As more modules become available, 23 | # coverage will increase. 
24 | # 25 | # Current coverage: XX / 264 = 000% 26 | # High Severity: 09 Done, 06 Partial, 2 Blocker = 15 / 17 = 88% 27 | # - V-38666: (Partial) 28 | # - Tailored: Checking for Clam AV packages installed and in cron.daily 29 | # - Cannot verify the output of the nails status command for 30 | # McAfee scans a this time 31 | # - Cannot verify the age of AV definitions as a result of the output 32 | # of a command at this time 33 | # - V-38476: Cannot verify the output of the rpm command at this time 34 | # - V-38491: Cannot verify presence of a file within an discovered list of directories 35 | # - V-38602: (Partial) checking for running service, but cannot verify the 36 | # output of the chkconfig command 37 | # - V-38594: (Partial) checking for running service, but cannot verify the 38 | # output of the chkconfig command 39 | # - V-38598: (Partial) checking for running service, but cannot verify the 40 | # output of the chkconfig command 41 | # - V-38589: (Partial) checking for running service, but cannot verify the 42 | # output of the chkconfig command 43 | # - V-38701: Potentially a false positive if the file does not exist. 44 | # Medium Severity: XX / 146 = 000% 45 | # Low Severity: XX / 101 = 000% 46 | # 47 | # Tailoring: 48 | # You may need to tailor some of these inspections to your system/site to account 49 | # for: 50 | # 1. your environmental configuration 51 | # ex: using McAfee AV Scan vs ClamAV 52 | # 2. compensating controls you may have 53 | # 3. tailoring you've done for your specific system 54 | # 55 | ####################################################################################### 56 | grep: 57 | blacklist: 58 | snmpd_not_use_default_passwd: 59 | data: 60 | Red Hat Enterprise Linux Server-6: 61 | - /etc/snmp/snmpd.conf: 62 | pattern: '^[^#]' 63 | match_output: public 64 | tag: V-38653 65 | description: (HIGH) The snmpd service must not use a default password. 
66 | rpm_cryptographically_verify_packages: 67 | data: 68 | Red Hat Enterprise Linux Server-6: 69 | - /etc/rpmrc: 70 | pattern: nosignature 71 | tag: V-38462 72 | - /usr/lib/rpm/rpmrc: 73 | pattern: nosignature 74 | tag: V-38462 75 | - /usr/lib/rpm/redhat/rpmrc: 76 | pattern: nosignature 77 | tag: V-38462 78 | - /root/.rpmrc: 79 | pattern: nosignature 80 | tag: V-38462 81 | description: | 82 | (HIGH) The RPM package management tool must cryptographically verify 83 | the authenticity of all software packages during installation. 84 | null_passwords_cannot_be_used: 85 | data: 86 | Red Hat Enterprise Linux Server-6: 87 | - /etc/pam.d/system-auth: 88 | pattern: nullok 89 | tag: V-38497 90 | - /etc/pam.d/system-auth-ac: 91 | pattern: nullok 92 | tag: V-38497 93 | - /etc/pam.d/password-auth: 94 | pattern: nullok 95 | tag: V-38497 96 | - /etc/pam.d/password-auth-ac: 97 | pattern: nullok 98 | tag: V-38497 99 | - /etc/pam.d/sshd: 100 | pattern: nullok 101 | tag: V-38497 102 | description: (HIGH) The system must not allow null passwords to be used. 103 | nfs_no_insecure_file_locking: 104 | data: 105 | Red Hat Enterprise Linux Server-6: 106 | - /etc/exports: 107 | pattern: insecure_locks 108 | tag: V-38677 109 | description: (HIGH) The NFS server must not have the insecure file locking option enabled. 110 | sshd_no_empty_passwords: 111 | data: 112 | Red Hat Enterprise Linux Server-6: 113 | - /etc/ssh/sshd_config: 114 | pattern: '^PermitEmptyPasswords' 115 | match_output: "yes" 116 | tag: V-38614 117 | - /etc/ssh/sshd_config: 118 | pattern: '^PermitEmptyPasswords' 119 | match_output: "Yes" 120 | tag: V-38614 121 | description: (HIGH) The SSH daemon must not allow authentication using an empty password. 
122 | 123 | whitelist: 124 | x86_ctrl_alt_del_disabled: 125 | data: 126 | Red Hat Enterprise Linux Server-6: 127 | - /etc/init/control-alt-delete.override: 128 | pattern: '^exec /usr/bin/logger' 129 | match_output: security.info "Control-Alt-Delete pressed" 130 | tag: V-38668 131 | description: (HIGH) The x86 Ctrl-Alt-Delete key sequence must be disabled. 132 | sshd_use_only_SSHv2_protocol: 133 | data: 134 | Red Hat Enterprise Linux Server-6: 135 | - /etc/ssh/sshd_config: 136 | pattern: '^Protocol' 137 | match_output: Protocol 2 138 | tag: V-38607 139 | description: (HIGH) The SSH daemon must be configured to use only the SSHv2 protocol. 140 | tftp_daemon_operate_in_secure_mode: 141 | # NOTE: potentially a false positive if the file does not exist 142 | data: 143 | Red Hat Enterprise Linux Server-6: 144 | - /etc/xinetd.d/tftp: 145 | pattern: '^server_args' 146 | match_output: -s 147 | tag: V-38701 148 | description: | 149 | (HIGH) The TFTP daemon must operate in secure mode which provides 150 | access only to a single directory on the host file system. Potentially 151 | a false positive if this file does not exist. 152 | 153 | pkg: 154 | blacklist: 155 | rsh-server_not_installed: 156 | data: 157 | Red Hat Enterprise Linux Server-6: 158 | - rsh-server: V-38591 159 | description: (HIGH) The rsh-server package must not be installed. 160 | telnet-server_not_installed: 161 | data: 162 | Red Hat Enterprise Linux Server-6: 163 | - telnet-server: V-38587 164 | - telnet: V-38587 165 | description: (HIGH) The telnet-server and telnet package must not be installed. 166 | 167 | whitelist: 168 | approved_virus_scan_program: 169 | # NOTE: This will need to be udated for your respective organization. 170 | # This particular check is validating that clamav package is installed. 171 | # This is a multi-part check to verify V-38666. Under the stat section, 172 | # there is a check to verify cron.daily script for clamav inspection. 
173 | data: 174 | Red Hat Enterprise Linux Server-6: 175 | - clamav: V-38666 176 | - clamd: V-38666 177 | description: (HIGH) The system must use and update a DoD-approved virus scan program. 178 | 179 | service: 180 | blacklist: 181 | rlogind_not_running: 182 | # This is partially implemented to ensure that the service is not running. 183 | # This inspection alone does not fully satisfy the STIG check as it does 184 | # not current check the output of the chkconfig command 185 | data: 186 | Red Hat Enterprise Linux Server-6: 187 | - rlogin: V-38602 188 | description: (High) The rlogind service must not be running. 189 | rshd_not_running: 190 | # This is partially implemented to ensure that the service is not running. 191 | # This inspection alone does not fully satisfy the STIG check as it does 192 | # not current check the output of the chkconfig command 193 | data: 194 | Red Hat Enterprise Linux Server-6: 195 | - rsh: V-38594 196 | description: (High) The rshd service must not be running. 197 | rexecd_not_running: 198 | # This is partially implemented to ensure that the service is not running. 199 | # This inspection alone does not fully satisfy the STIG check as it does 200 | # not current check the output of the chkconfig command 201 | data: 202 | Red Hat Enterprise Linux Server-6: 203 | - rexec: V-38598 204 | description: (High) The rexecd service must not be running. 205 | telnet_not_running: 206 | # This is partially implemented to ensure that the service is not running. 207 | # This inspection alone does not fully satisfy the STIG check as it does 208 | # not current check the output of the chkconfig command 209 | data: 210 | Red Hat Enterprise Linux Server-6: 211 | - telnet: V-38589 212 | description: (High) The telnet daemon must not be running. 213 | 214 | stat: 215 | cron_daily_clamscan_host: 216 | # NOTE: This will need to be udated for your respective organization. 217 | # This particular check is validating that clamav is run on a daily basis. 
218 | # This is a multi-part check to verify V-38666. Under the pkg section, 219 | # there is a check to verify clam is installed. 220 | data: 221 | Red Hat Enterprise Linux Server-6: 222 | - /etc/cron.daily/clamscan_host.sh: 223 | group: root 224 | user: root 225 | mode: 755 226 | tag: V-38666 227 | description: (HIGH) The system must use and update a DoD-approved virus scan program. 228 | -------------------------------------------------------------------------------- /hubblestack_nova_profiles/top.nova: -------------------------------------------------------------------------------- 1 | # Default top.nova 2 | # 3 | # Subscribes to CIS, cve_scan, and misc.yaml for miscellaneous checks 4 | 5 | nova: 6 | 'G@osfinger:*CoreOS*': 7 | - cis.coreos-level-1 8 | 'G@osfinger:CentOS-6': 9 | - cis.centos-6-level-1-scored-v2-0-1 10 | 'G@osfinger:CentOS*Linux-7': 11 | - cis.centos-7-level-1-scored-v2-1-0 12 | 'G@osfinger:Debian-8': 13 | - cis.debian-8-level-1-scored-v1-0-0 14 | 'G@osfinger:Red*Hat*Enterprise*Linux*Server-6': 15 | - cis.rhels-6-level-1-scored-v2-0-1 16 | 'G@osfinger:Red*Hat*Enterprise*Linux*Server-7': 17 | - cis.rhels-7-level-1-scored-v2-1-0 18 | 'G@osfinger:Red*Hat*Enterprise*Linux*Workstation-7': 19 | - cis.rhelw-7-level-1-scored-v2-1-0 20 | 'G@osfinger:Ubuntu-14.04': 21 | - cis.ubuntu-1404-level-1-scored-v1-0-0 22 | 'G@osfinger:Ubuntu-16.04': 23 | - cis.ubuntu-1604-level-1-scored-v1-0-0 24 | 'G@osfullname:Microsoft*Windows*Server*2008*': 25 | - cis.windows-2008r2-level-1-scored-v3-0-0 26 | 'G@osfullname:Microsoft*Windows*Server*2012*': 27 | - cis.windows-2012r2-level-1-scored-v2-0-0 28 | 'G@osfinger:Amazon*Linux*2014*': 29 | - cis.amazon-201409-level-1-scored-v1-0-0 30 | 'G@osfinger:Amazon*Linux*2015*': 31 | - cis.amazon-level-1-scored-v1-0-0 32 | 'G@osfinger:Amazon*Linux*2016*': 33 | - cis.amazon-level-1-scored-v1-0-0 34 | #'*': 35 | # - misc 36 | #'G@kernel:Linux and not G@osfinger:*CoreOS*': 37 | # - cve.scan-v2 38 | 
-------------------------------------------------------------------------------- /utils/check_yaml.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A simple script for testing the syntax of the files. 3 | It uses just the messages of the reading and parsing exceptions in the yaml library. 4 | Run the script multiple times until you solve all the syntax errors in the file. 5 | 6 | Usage: check_yaml.py <yaml_file_to_check> 7 | ''' 8 | 9 | 10 | import yaml 11 | import sys 12 | 13 | if len(sys.argv) != 2: 14 | print 'Usage: %s <yaml_file_to_check>' % (sys.argv[0]) 15 | exit(1) 16 | 17 | try: 18 | f = open(sys.argv[1]) 19 | except IOError as e: 20 | print "I/O error(%s): %s" % (e.errno, e.strerror) 21 | exit(1) 22 | 23 | try: 24 | yaml.safe_load(f) 25 | print 'YAML syntax is OK' 26 | exit(0) 27 | except yaml.reader.ReaderError as e: 28 | print "YAML reader error: %s" % (e) 29 | exit(1) 30 | except yaml.parser.ParserError as e: 31 | print "YAML parser error: %s" % (e) 32 | exit(1) 33 | -------------------------------------------------------------------------------- /utils/cve_store.py: -------------------------------------------------------------------------------- 1 | ''' 2 | A script that will query vulners.com/api for cve data related to given operating systems. 3 | The data is returned in a valid json format for use in the cve_scan_v2 module. The json file 4 | is stored at the local directory under <os_name>_<version>.json. Inputs must be in the form 5 | os-version, like 'centos-7' or 'ubuntu-16.04' etc. 6 | 7 | usage: # python cve_store.py (<os-version>) [<os-version> ...] 8 | ''' 9 | from zipfile import ZipFile 10 | import os 11 | import sys 12 | import requests 13 | 14 | def main(): 15 | ''' 16 | Tries to save cve scans for inputs. Specify type and version. 17 | Ex: # python cve_store.py centos-7 ubuntu-16.04 18 | ''' 19 | if len(sys.argv) == 1: 20 | print "No inputs were given." 
21 | for distro in sys.argv[1:]: 22 | print 23 | try: 24 | _save_json(distro) 25 | except Exception, exc: 26 | print 'Error saving: %s' % distro 27 | print exc 28 | 29 | 30 | def _save_json(distro): 31 | ''' 32 | Returns json from vulner.com api for specified distro. 33 | Exceptions thrown are caught by main() 34 | ''' 35 | split = distro.split('-') 36 | if len(split) != 2: 37 | raise Exception('%s is improperly formatted.' % distro) 38 | version = split[1] 39 | distro_name = split[0] 40 | print 'Getting cve\'s for %s version %s' % (distro_name, version) 41 | url_final = 'http://www.vulners.com/api/v3/archive/distributive/?os=%s&version=%s' \ 42 | % (distro_name, version) 43 | cve_query = requests.get(url_final) 44 | # Filenames returned don't contain periods. 45 | version = version.replace('.', '') 46 | _zip = '%s_%s.zip' % (distro_name, version) 47 | _json = '%s_%s.json' % (distro_name, version) 48 | # Confirm that the request was valid. 49 | if cve_query.status_code != 200: 50 | raise Exception('Bad Request for url: %s' % url_final) 51 | # Save vulners zip attachment in cache location and extract json 52 | with open(_zip, 'w') as zip_attachment: 53 | zip_attachment.write(cve_query.content) 54 | zip_file = ZipFile(_zip) 55 | zip_file.extractall(os.path.dirname(_zip)) 56 | os.remove(_zip) 57 | print 'Saved: %s' % _json 58 | 59 | 60 | main() 61 | -------------------------------------------------------------------------------- /utils/update_tags.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import logging 3 | try: 4 | import xlrd 5 | except ImportError: 6 | print '\n\npackage "xlrd" is required. Try:\n# pip install xlrd\n\n' 7 | exit() 8 | 9 | def main(): 10 | help_message = \ 11 | ''' 12 | Usage: 13 | # python update_tags.py <optional_tag> <.yaml> <.xls> 14 | Optional tags: 15 | -t : templates cis standards that aren't included in yaml into the updated version. 
16 | 17 | This script has does four things: 18 | 1. Updates tags in yaml profile to match the cis standards, saved at <.yaml>_updated.yaml. 19 | It updates the tags based on the matching the tag description to cis standard title. 20 | 2. Finds format errors in yaml profile. 21 | 3. Finds outdated audits in the yaml profile. 22 | 4. Finds audits in the cis standards that aren't found in the yaml profile. 23 | It also saves a log of the results of each run at <.yaml>_updated.log\n 24 | ''' 25 | args = sys.argv 26 | if len(args) != 3 and len(args) != 4: 27 | print help_message 28 | exit() 29 | 30 | try: 31 | cis_xls = args[3] 32 | yaml_profile = args[2] 33 | optional = args[1] 34 | except IndexError: 35 | cis_xls = sys.argv[2] 36 | yaml_profile = sys.argv[1] 37 | optional = None 38 | 39 | if not cis_xls.endswith('.xls') or not yaml_profile.endswith('.yaml'): 40 | print help_message 41 | exit() 42 | if optional not in [None, '-t']: 43 | print help_message 44 | exit() 45 | 46 | report = create_report(yaml_profile, cis_xls) 47 | _update_yaml(report, yaml_profile, optional) 48 | _log_report(report, yaml_profile) 49 | 50 | def create_report(yaml_profile, cis_xls): 51 | ''' 52 | Return a report on what needs to be fixed in the yaml file 53 | based on the cis_standards. 
54 | ''' 55 | if 'rhelw' in yaml_profile: 56 | sheet_index = 3 57 | else: 58 | sheet_index=1 59 | 60 | cis_standards = _get_cis(cis_xls, sheet_index) 61 | yaml_lines = open(yaml_profile).readlines() 62 | 63 | ret = {'format_errors': [], 'updated_uids': [], 64 | 'missing_standards': [], 'outdated_uids': []} 65 | checked_standards = set() 66 | 67 | for i, line in enumerate(yaml_lines): 68 | if 'data:' in line: 69 | if i == 0: 70 | ret['format_errors'].append(('None', i)) 71 | continue 72 | uid = yaml_lines[i-1].strip().strip(':') 73 | desc, tag = _get_data(yaml_lines, i) 74 | if not desc or not tag: 75 | ret['format_errors'].append((uid, i)) 76 | continue 77 | if desc not in cis_standards: 78 | ret['outdated_uids'].append((uid, i)) 79 | continue 80 | cis_tag = cis_standards[desc] 81 | checked_standards.add(desc) 82 | if '_' in tag: 83 | _, addon = tag.split('_') 84 | cis_tag += '_' + addon 85 | if cis_tag != tag: 86 | ret['updated_uids'].append((uid, i, tag, cis_tag)) 87 | continue 88 | leftover_standards = cis_standards.keys() 89 | for desc in checked_standards: 90 | leftover_standards.remove(desc) 91 | for desc in leftover_standards: 92 | _tag = cis_standards[desc] 93 | ret['missing_standards'].append((desc, _tag)) 94 | return ret 95 | 96 | 97 | def _get_data(yaml_lines, data_index): 98 | ''' 99 | Return the tag and description for a given data layer, 100 | or (None, None) if there is a format error. 
101 | ''' 102 | data_indent = _get_indent(yaml_lines[data_index]) 103 | description = None 104 | tag = None 105 | for i, line in enumerate(yaml_lines[data_index+1:]): 106 | line_indent = _get_indent(line) 107 | if line_indent < data_indent: 108 | if tag and description: 109 | return (description.lower(), tag) 110 | else: 111 | return (None, None) 112 | if 'description:' in line: 113 | if description is not None: 114 | return (None, None) 115 | else: 116 | description = line.strip().lstrip('description:').strip() 117 | elif 'CIS-' in line: 118 | _, _tag = line.split('CIS-') 119 | _tag = 'CIS-' + _tag.strip() 120 | if tag is not None: 121 | if tag != _tag: 122 | return (None, None) 123 | else: 124 | tag = _tag 125 | # End of file 126 | else: 127 | if tag and description: 128 | return (description.lower(), tag) 129 | return (None, None) 130 | 131 | 132 | def _log_report(report, yaml_filename): 133 | yaml_filename = yaml_filename.rstrip('.yaml') + '_updated.log' 134 | logging.basicConfig( 135 | level=logging.INFO, 136 | filename=yaml_filename, 137 | filemode='w', 138 | format='%(name)s - %(message)s' 139 | ) 140 | format_log = logging.getLogger('format_error') 141 | outdated_log = logging.getLogger('outdated_yaml') 142 | missing_log = logging.getLogger('missing_audits') 143 | updated_log = logging.getLogger('updated_tags') 144 | linebreak = logging.getLogger(' ') 145 | 146 | err_message = 'uid: %s, line: %s' 147 | for uid, line in report['format_errors']: 148 | format_log.info(err_message, uid, line) 149 | linebreak.info('') 150 | for uid, line in report['outdated_uids']: 151 | outdated_log.info(err_message, uid, line) 152 | linebreak.info('') 153 | 154 | outdated_message = 'tag: %s, description: %s' 155 | for desc, tag in report['missing_standards']: 156 | missing_log.info(outdated_message, tag, desc) 157 | linebreak.info('') 158 | 159 | updated_message = '(%s --> %s), uid: %s, line %s' 160 | for uid, line, old_tag, new_tag in report['updated_uids']: 161 | 
updated_log.info(updated_message, old_tag, new_tag, uid, line) 162 | if report['updated_uids']: 163 | linebreak.info('') 164 | linebreak.info('Saved updates at %s', yaml_filename.replace('.log', '.yaml')) 165 | logfile = open(yaml_filename) 166 | print logfile.read() 167 | print 'Saved logfile at %s' % yaml_filename 168 | logfile.close() 169 | 170 | def _get_indent(line): 171 | '''Return length of indent of line''' 172 | return len(line) - len(line.lstrip(' ')) 173 | 174 | 175 | def _update_yaml(report, yaml_filename, optional): 176 | '''Write a new file with the updates as described by updated_uids''' 177 | updates = report['updated_uids'] 178 | yaml_lines = open(yaml_filename).readlines() 179 | _updates = [] 180 | for uid, data_index, old_tag, new_tag in updates: 181 | _updates.extend(_update_data(yaml_lines, data_index, new_tag=new_tag)) 182 | for i, updated_line in _updates: 183 | yaml_lines[i] = updated_line 184 | yaml_filename = yaml_filename.rstrip('.yaml') + '_updated.yaml' 185 | new_yaml = open(yaml_filename, 'w') 186 | for line in yaml_lines: 187 | new_yaml.write(line) 188 | if optional == '-t': 189 | template = \ 190 | ''' 191 | changeme: 192 | data: 193 | %s 194 | - changeme: 195 | tag: %s 196 | - changeme: %s 197 | description: %s 198 | ''' 199 | osfinger = _get_osfinger(yaml_lines) 200 | for tag, desc in report['missing_standards']: 201 | new_yaml.write(template % (osfinger, tag, tag, desc)) 202 | new_yaml.close() 203 | 204 | def _get_osfinger(yaml_lines): 205 | for i, line in enumerate(yaml_lines): 206 | if ' data:' in line: 207 | os_finger = yaml_lines[i+1].strip() 208 | return os_finger 209 | return 'changeme:' 210 | 211 | 212 | def _update_data(yaml_lines, data_index, new_tag=None): 213 | '''Return list of lines to update per data layer.''' 214 | updates = [] 215 | data_indent = _get_indent(yaml_lines[data_index]) 216 | for i, line in enumerate(yaml_lines[data_index:]): 217 | line_indent = _get_indent(line) 218 | if line_indent < data_indent: 219 
| break 220 | if 'CIS-' in line: 221 | if not new_tag: 222 | continue 223 | updated_line, _ = line.split('CIS-') 224 | updated_line += new_tag + '\n' 225 | updates.append((i + data_index, updated_line)) 226 | return updates 227 | 228 | 229 | def _get_cis(xls_filename, sheet_index=1): 230 | '''Return dictionary of cis title's and their corresponding cis tag''' 231 | tag_col = 1 232 | title_col = 2 233 | score_col = 4 234 | 235 | workbook = xlrd.open_workbook(xls_filename) 236 | worksheet = workbook.sheet_by_index(sheet_index) 237 | 238 | ret = {} 239 | 240 | for row_num in range(1,worksheet.nrows): 241 | scoring_status = worksheet.cell(row_num, score_col).value 242 | if scoring_status != 'scored': 243 | continue 244 | 245 | title = str(worksheet.cell(row_num, title_col).value).lower() 246 | rec_num = worksheet.cell(row_num, tag_col).value 247 | if isinstance(rec_num, float): 248 | rec_num = str(rec_num) + '0' 249 | rec_num = 'CIS-' + str(rec_num) 250 | ret[title] = rec_num 251 | return ret 252 | 253 | if __name__ == '__main__': 254 | main() 255 | --------------------------------------------------------------------------------