├── .gitignore ├── LICENSE ├── MANIFEST.in ├── README.rst ├── build.py ├── docs ├── Makefile ├── _static │ └── hold ├── conf.py ├── index.rst ├── make.bat ├── releases │ ├── 3.rst │ ├── 4.rst │ ├── 5.1.rst │ └── 5.rst └── topics │ ├── azure_quickstart.rst │ ├── extending.rst │ ├── intro.rst │ ├── migrate_salt.rst │ ├── takara.rst │ └── transparent_req.rst ├── idem ├── conf.py ├── exec │ └── test.py ├── idem │ ├── compiler │ │ ├── .init.py.kate-swp │ │ ├── 0010_extend.py │ │ ├── 0020_verify_high.py │ │ ├── 0030_req_in.py │ │ ├── 0040_exclude.py │ │ ├── 0050_compile.py │ │ └── 0060_treq.py │ ├── exclude.py │ ├── extend.py │ ├── get.py │ ├── init.py │ ├── low.py │ ├── req │ │ ├── init.py │ │ ├── onchanges.py │ │ ├── onfail.py │ │ ├── require.py │ │ └── watch.py │ ├── req_in.py │ ├── resolve.py │ ├── rules │ │ ├── changes.py │ │ ├── changes_post.py │ │ ├── init.py │ │ └── result.py │ ├── run │ │ ├── init.py │ │ ├── parallel.py │ │ └── serial.py │ ├── tools.py │ ├── treq.py │ └── verify.py ├── output │ └── idem.py ├── scripts.py ├── sls │ └── file_sls.py ├── states │ └── test.py └── version.py ├── requirements.txt ├── run.py ├── setup.py └── tests ├── conftest.py ├── nest └── nest │ ├── again │ ├── another │ │ └── test.py │ └── test.py │ ├── params.py │ └── test.py ├── sls ├── bang.sls ├── blocks.sls ├── changes.sls ├── dupkeys.sls ├── fails.sls ├── nest.sls ├── order.sls ├── params.sls ├── recreq.sls ├── req.sls ├── simple.sls ├── takara1.sls ├── treq.sls ├── ugly1.sls ├── update.sls └── watch.sls └── unit ├── __init__.py ├── test_basic.py └── test_takara.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[codx] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Ignore generated C files (Cython) 9 | *.c 10 | 11 | # iPython notebooks and checkpoints 12 | *.ipynb 13 | .ipynb_checkpoints 14 | 15 | # patch files 16 | *.orig 17 | 18 | # Distribution / packaging 19 | 
.Python 20 | env/ 21 | bin/ 22 | build/ 23 | develop-eggs/ 24 | dist/ 25 | eggs/ 26 | lib/ 27 | lib64/ 28 | parts/ 29 | sdist/ 30 | var/ 31 | *.egg-info/ 32 | .installed.cfg 33 | *.egg 34 | MANIFEST 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .cache 45 | .pytest_cache 46 | nosetests.xml 47 | coverage.xml 48 | 49 | # Pytest 50 | pytest.ini 51 | 52 | # Translations 53 | *.mo 54 | 55 | # macOS 56 | .DS_Store 57 | 58 | # Mr Developer 59 | .mr.developer.cfg 60 | .project 61 | .pydevproject 62 | 63 | # Rope 64 | .ropeproject 65 | 66 | # Django stuff: 67 | *.log 68 | *.pot 69 | 70 | # Sphinx documentation 71 | docs/_build/ 72 | 73 | # Local VIM RC 74 | .lvimrc 75 | 76 | # Swap files 77 | .*.swp 78 | 79 | # nvim 80 | *.un~ 81 | *~ 82 | 83 | # setup compile left-overs 84 | *.py_orig 85 | 86 | # Nuitka build 87 | nuitka/ 88 | nuitka_standalone/ 89 | 90 | # Tags 91 | TAGS 92 | tags 93 | 94 | # kdevelop 95 | *.kdev4 96 | 97 | # pycharm 98 | .idea* 99 | 100 | # VSCode 101 | .vscode* 102 | 103 | # pyenv 104 | .python-version 105 | 106 | .ci/.rootdir 107 | 108 | # Coverage data files 109 | .coverage.* 110 | 111 | runpytest.sh 112 | 113 | # vscode 114 | .vscode 115 | .pytest_cache 116 | 117 | 118 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [2019] [Thomas S Hatch] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | prune examples/sample?/build 3 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ==== 2 | Idem 3 | ==== 4 | 5 | MOVED TO GITLAB 6 | =============== 7 | 8 | POP projects developed by Saltstack are being moved to Gitlab. 9 | 10 | The new location of idem is here: 11 | 12 | https://gitlab.com/saltstack/pop/idem 13 | 14 | 15 | Intro 16 | ===== 17 | 18 | Idem is an idempotent dataflow programming language. It exposes stateful 19 | programming constructs that makes things like enforcing the state 20 | of an application, configuration, SaaS system, or others very 21 | simple. 22 | 23 | Since Idem is a programming language, it can also be used for data 24 | processing and pipelining. 
Idem can be used not only to manage 25 | the configuration of interfaces, but also for complex rule engines 26 | and processing files or workflows. 27 | 28 | Idem is a language to glue together management of all sorts of 29 | interfaces. You can think of it like having idempotent 30 | scripts. Automation that can be run over and over again that 31 | enforces a specific state or process. 32 | 33 | Idem is unique in that it is built purely as a language. It 34 | can be added to any type of management system out there and can 35 | be applied in a cross platform way easily. 36 | 37 | Idem's functionality can also be expanded easily. Instead of storing 38 | all of the language components in a single place, the libraries 39 | used by Idem can be written independently and seamlessly merged 40 | into Idem, just like a normal programming language! 41 | 42 | What does Idempotent mean? 43 | ========================== 44 | 45 | The concept of Idempotent is simple! It just means that every time 46 | something is run, it always has the same end result regardless of the state 47 | of a system when the run starts! 48 | 49 | At first glance this might seem useless, but think more deeply. Have you 50 | ever needed to make sure that something was set up in a consistent way? It 51 | can be very nice to be able to enforce that setup without worrying about 52 | breaking it. Or think about data pipelines, have you ever had input data 53 | that needed to be processed? Idempotent systems allow for data to be 54 | easily processed in a consistent way, over and over again! 55 | 56 | How Does This Language Work? 57 | ============================ 58 | 59 | Idem is built using two critical technologies, `Python` and `POP`. Since Idem 60 | is built on Python it should be easy to extend for most software developers. 61 | Extending Idem can be very easy because simple Python modules are all you need 62 | to add capabilities! 63 | 64 | The other technology, `POP`, may be new to you. 
This is the truly secret sauce 65 | behind Idem as well as a number of emerging exciting technologies. `POP` stands 66 | for Plugin Oriented Programming. It is the brainchild of `the creator of 67 | Salt `_ and a new way to write software. The `POP` 68 | system makes the creation of higher level paradigms like Idem possible, but also 69 | provides the needed components to make Idem extensible and flexible. If `POP` 70 | is a new concept to you, 71 | `check it out `_! 72 | 73 | Idem works by taking language files called `sls` files and compiling them 74 | down to data instructions. These data instructions are then run through the 75 | Idem runtime. These instructions inform Idem what routines to call to 76 | enforce state or process data. It allows you to take a high level dataset 77 | as your input, making the use of the system very easy. 78 | 79 | Paradigms and Languages, This Sounds Complicated! 80 | ================================================= 81 | 82 | Under the hood, it is complicated! The guts of a programming language are 83 | complicated, but it is all there to make your life easier! You don't need to 84 | understand complex computer science theory to benefit from Idem. You just need 85 | to learn a few simple things and you can start making your life easier today! 
86 | 87 | -------------------------------------------------------------------------------- /build.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import sys 3 | import os 4 | import shutil 5 | import subprocess 6 | import tempfile 7 | import venv 8 | import argparse 9 | 10 | OMIT = ('__pycache__', 'PyInstaller', 'pip', 'setuptools', 'pkg_resources', '__pycache__', 'dist-info', 'egg-info') 11 | 12 | 13 | def parse(): 14 | ''' 15 | Parse the cli args 16 | ''' 17 | parser = argparse.ArgumentParser() 18 | parser.add_argument( 19 | 'name', 20 | help='The name of the script to build from the run.py') 21 | args = parser.parse_args() 22 | return args.__dict__ 23 | 24 | 25 | class Builder: 26 | def __init__(self): 27 | self.opts = parse() 28 | self.name = self.opts['name'] 29 | self.cwd = os.getcwd() 30 | self.run = os.path.join(self.cwd, 'run.py') 31 | self.venv_dir = tempfile.mkdtemp(prefix='pop_', suffix='_venv') 32 | self.python_bin = os.path.join(self.venv_dir, 'bin', 'python') 33 | self.vroot = os.path.join(self.venv_dir, 'lib') 34 | self.all_paths = set() 35 | self.imports = set() 36 | self.datas = set() 37 | self.cmd = f'{self.python_bin} -B -OO -m PyInstaller ' 38 | self.s_path = os.path.join(self.venv_dir, 'bin', self.name) 39 | self.pyi_args = [ 40 | self.s_path, 41 | '--log-level=INFO', 42 | '--noconfirm', 43 | '--onefile', 44 | '--clean', 45 | ] 46 | 47 | def create(self): 48 | ''' 49 | Make a virtual environment based on the version of python used to call this script 50 | ''' 51 | venv.create(self.venv_dir, clear=True, with_pip=True) 52 | pip_bin = os.path.join(self.venv_dir, 'bin', 'pip') 53 | subprocess.call([pip_bin, 'install', '-r', 'requirements.txt']) 54 | subprocess.call([pip_bin, 'install', 'PyInstaller']) 55 | subprocess.call([pip_bin, '-v', 'install', self.cwd]) 56 | 57 | def omit(self, test): 58 | for bad in OMIT: 59 | if bad in test: 60 | return True 61 | return False 62 | 63 | def 
scan(self): 64 | ''' 65 | Scan the new venv for files and imports 66 | ''' 67 | for root, dirs, files in os.walk(self.vroot): 68 | if self.omit(root): 69 | continue 70 | for d in dirs: 71 | full = os.path.join(root, d) 72 | if self.omit(full): 73 | continue 74 | self.all_paths.add(full) 75 | for f in files: 76 | full = os.path.join(root, f) 77 | if self.omit(full): 78 | continue 79 | self.all_paths.add(full) 80 | 81 | def to_import(self, path): 82 | ret = path[path.index('site-packages') + 14:].replace(os.sep, '.') 83 | if ret.endswith('.py'): 84 | ret = ret[:-3] 85 | return ret 86 | 87 | def to_data(self, path): 88 | dest = path[path.index('site-packages') + 14:] 89 | src = path 90 | if not dest.strip(): 91 | return None 92 | ret = f'{src}{os.pathsep}{dest}' 93 | return ret 94 | 95 | def mk_adds(self): 96 | ''' 97 | make the imports and datas for pyinstaller 98 | ''' 99 | for path in self.all_paths: 100 | if not 'site-packages' in path: 101 | continue 102 | if os.path.isfile(path): 103 | if not path.endswith('.py'): 104 | continue 105 | if path.endswith('__init__.py'): 106 | # Skip it, we will get the dir 107 | continue 108 | imp = self.to_import(path) 109 | if imp: 110 | self.imports.add(imp) 111 | if os.path.isdir(path): 112 | data = self.to_data(path) 113 | imp = self.to_import(path) 114 | if imp: 115 | self.imports.add(imp) 116 | if data: 117 | self.datas.add(data) 118 | 119 | def mk_cmd(self): 120 | ''' 121 | Create the pyinstaller command 122 | ''' 123 | for imp in self.imports: 124 | self.pyi_args.append(f'--hidden-import={imp}') 125 | for data in self.datas: 126 | self.pyi_args.append(f'--add-data={data}') 127 | for arg in self.pyi_args: 128 | self.cmd += f'{arg} ' 129 | 130 | def pyinst(self): 131 | shutil.copy(self.run, self.s_path) 132 | subprocess.call(self.cmd, shell=True) 133 | 134 | def report(self): 135 | art = os.path.join(self.cwd, 'dist', self.name) 136 | print(f'Executable created in {art}') 137 | print('To create a more portable and fully 
static binary install run staticx against your new build') 138 | 139 | def clean(self): 140 | shutil.rmtree(self.venv_dir) 141 | shutil.rmtree(os.path.join(self.cwd, 'build')) 142 | os.remove(os.path.join(self.cwd, f'{self.name}.spec')) 143 | 144 | def build(self): 145 | self.create() 146 | self.scan() 147 | self.mk_adds() 148 | self.mk_cmd() 149 | self.pyinst() 150 | self.report() 151 | self.clean() 152 | 153 | 154 | if __name__ == '__main__': 155 | builder = Builder() 156 | builder.build() 157 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = . 8 | BUILDDIR = _build 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /docs/_static/hold: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/idem/aaef3789b30172864db8a6b03fd1b7914a9b3b27/docs/_static/hold -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # Configuration file for the Sphinx documentation builder. 4 | # 5 | # This file does only contain a selection of the most common options. 
For a 6 | # full list see the documentation: 7 | # http://www.sphinx-doc.org/en/master/config 8 | 9 | # -- Path setup -------------------------------------------------------------- 10 | 11 | # If extensions (or modules to document with autodoc) are in another directory, 12 | # add these directories to sys.path here. If the directory is relative to the 13 | # documentation root, use os.path.abspath to make it absolute, like shown here. 14 | # 15 | # import os 16 | # import sys 17 | # sys.path.insert(0, os.path.abspath('.')) 18 | 19 | 20 | # -- Project information ----------------------------------------------------- 21 | 22 | project = 'idem' 23 | copyright = '2020, Thomas S Hatch' 24 | author = 'Thomas S Hatch' 25 | 26 | # The short X.Y version 27 | version = '5.1' 28 | # The full version, including alpha/beta/rc tags 29 | release = '5.1' 30 | 31 | 32 | # -- General configuration --------------------------------------------------- 33 | 34 | # If your documentation needs a minimal Sphinx version, state it here. 35 | # 36 | # needs_sphinx = '1.0' 37 | 38 | # Add any Sphinx extension module names here, as strings. They can be 39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 40 | # ones. 41 | extensions = [ 42 | 'sphinx.ext.autodoc', 43 | ] 44 | 45 | # Add any paths that contain templates here, relative to this directory. 46 | templates_path = ['_templates'] 47 | 48 | # The suffix(es) of source filenames. 49 | # You can specify multiple suffix as a list of string: 50 | # 51 | # source_suffix = ['.rst', '.md'] 52 | source_suffix = '.rst' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # The language for content autogenerated by Sphinx. Refer to documentation 58 | # for a list of supported languages. 59 | # 60 | # This is also used if you do content translation via gettext catalogs. 61 | # Usually you set "language" from the command line for these cases. 
62 | language = None 63 | 64 | # List of patterns, relative to source directory, that match files and 65 | # directories to ignore when looking for source files. 66 | # This pattern also affects html_static_path and html_extra_path. 67 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 68 | 69 | # The name of the Pygments (syntax highlighting) style to use. 70 | pygments_style = None 71 | 72 | 73 | # -- Options for HTML output ------------------------------------------------- 74 | 75 | # The theme to use for HTML and HTML Help pages. See the documentation for 76 | # a list of builtin themes. 77 | # 78 | html_theme = 'alabaster' 79 | 80 | # Theme options are theme-specific and customize the look and feel of a theme 81 | # further. For a list of options available for each theme, see the 82 | # documentation. 83 | # 84 | # html_theme_options = {} 85 | 86 | # Add any paths that contain custom static files (such as style sheets) here, 87 | # relative to this directory. They are copied after the builtin static files, 88 | # so a file named "default.css" will overwrite the builtin "default.css". 89 | html_static_path = ['_static'] 90 | 91 | # Custom sidebar templates, must be a dictionary that maps document names 92 | # to template names. 93 | # 94 | # The default sidebars (for documents that don't match any pattern) are 95 | # defined by theme itself. Builtin themes are using these templates by 96 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 97 | # 'searchbox.html']``. 98 | # 99 | # html_sidebars = {} 100 | 101 | 102 | # -- Options for HTMLHelp output --------------------------------------------- 103 | 104 | # Output file base name for HTML help builder. 105 | htmlhelp_basename = 'idemdoc' 106 | 107 | 108 | # -- Options for LaTeX output ------------------------------------------------ 109 | 110 | latex_elements = { 111 | # The paper size ('letterpaper' or 'a4paper'). 
112 | # 113 | # 'papersize': 'letterpaper', 114 | 115 | # The font size ('10pt', '11pt' or '12pt'). 116 | # 117 | # 'pointsize': '10pt', 118 | 119 | # Additional stuff for the LaTeX preamble. 120 | # 121 | # 'preamble': '', 122 | 123 | # Latex figure (float) alignment 124 | # 125 | # 'figure_align': 'htbp', 126 | } 127 | 128 | # Grouping the document tree into LaTeX files. List of tuples 129 | # (source start file, target name, title, 130 | # author, documentclass [howto, manual, or own class]). 131 | latex_documents = [ 132 | (master_doc, 'idem.tex', 'idem Documentation', 133 | 'Thomas S Hatch', 'manual'), 134 | ] 135 | 136 | 137 | # -- Options for manual page output ------------------------------------------ 138 | 139 | # One entry per manual page. List of tuples 140 | # (source start file, name, description, authors, manual section). 141 | man_pages = [ 142 | (master_doc, 'idem', 'idem Documentation', 143 | [author], 1) 144 | ] 145 | 146 | 147 | # -- Options for Texinfo output ---------------------------------------------- 148 | 149 | # Grouping the document tree into Texinfo files. List of tuples 150 | # (source start file, target name, title, author, 151 | # dir menu entry, description, category) 152 | texinfo_documents = [ 153 | (master_doc, 'idem', 'idem Documentation', 154 | author, 'idem', 'One line description of project.', 155 | 'Miscellaneous'), 156 | ] 157 | 158 | 159 | # -- Options for Epub output ------------------------------------------------- 160 | 161 | # Bibliographic Dublin Core info. 162 | epub_title = project 163 | 164 | # The unique identifier of the text. This can be a ISBN number 165 | # or the project homepage. 166 | # 167 | # epub_identifier = '' 168 | 169 | # A unique identification for the text. 170 | # 171 | # epub_uid = '' 172 | 173 | # A list of files that should not be packed into the epub file. 
174 | epub_exclude_files = ['search.html'] 175 | 176 | 177 | # -- Extension configuration ------------------------------------------------- 178 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. idem documentation master file, created by 2 | sphinx-quickstart on Wed Feb 20 15:36:02 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to idem's documentation! 7 | ================================ 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | :glob: 12 | 13 | topics/intro 14 | topics/azure_quickstart 15 | topics/extending 16 | topics/migrate_salt 17 | releases/* 18 | 19 | 20 | Indices and tables 21 | ================== 22 | 23 | * :ref:`genindex` 24 | * :ref:`modindex` 25 | * :ref:`search` 26 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=. 11 | set BUILDDIR=_build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/releases/3.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | Idem Release 3 3 | ============== 4 | 5 | This is the initial public release of Idem, the release number 3 was chosen 6 | because the Salt State system should be considered version 1, with an internal 7 | version 2. 8 | 9 | This release introduces Idem to the world, it takes the Salt State system and 10 | migrates it to POP. In doing so the Salt State system has been simplified, 11 | extended, and revamped to become a standalone language and interface while 12 | following the ideals of POP to make it pluggable into other application 13 | stacks. 14 | 15 | Now Pluggable! 16 | ============== 17 | 18 | The Salt State system exists as a single large .py file inside of Salt, the 19 | compiler and runtime are all inside a couple of classes and the system is tightly 20 | coupled with the Salt minion and execution runtime and environment. This also 21 | made the Salt state system very static and difficult to extend. For instance, 22 | an old saying on the Salt developer team was "How do we create new requisites 23 | for Salt? Ask Tom to make it". 24 | 25 | My goal in Idem was to make it in such a way that it could be completely decoupled 26 | from Salt, modernize the foundation, add asyncio, and make the system easier 27 | to extend. Now the render, compile, and runtime have been separated out, the 28 | runtime has been completely rewritten and things like requisites can be added 29 | as plugins and runtime rules. 
Idem can also execute multiple runs concurrently 30 | within the same process, and can execute states in parallel or serially. 31 | 32 | Idem can execute states in an imperative way or in a declarative way using 33 | requisites. This gives developers the best of both worlds. The ability to 34 | optimize execution for time or for ease of development and debugging. 35 | 36 | Runs Standalone! 37 | ================ 38 | 39 | The Idem command can be executed against a code tree directly just like a 40 | programming language. Instead of setting up minions, masters etc, just 41 | make a code tree with sls files and run Idem with the sls file(s) you 42 | want to execute. 43 | 44 | Code Sources are Pluggable 45 | ========================== 46 | 47 | Instead of tying the runtime statically to grabbing sources via Salt, the 48 | sources are now pluggable. This release only has a local filesystem plugin 49 | but it will be easy to add code sources that are over network connections. 50 | This should make Idem execution function without needing to have any 51 | form of code deployment, but that Idem will be able to execute directly 52 | from any network source, like http, S3, or git. 53 | 54 | Rendering is Separate 55 | ===================== 56 | 57 | The render system in Salt turned out to be a generally useful system with 58 | virtually every attempt to read in files with structured data wanting to be 59 | processed through the render system. So for Idem the render system has been 60 | separated into a standalone project called `rend`. This project is written 61 | in POP and can be app-merged into any other POP project (like idem!). This 62 | makes the powerful render system from Salt available to other projects. In 63 | fact it is already being used by other projects like `heist`.
64 | 65 | Idem is a Language Runtime 66 | ========================== 67 | 68 | One of the main issues with configuration management tools is that we end 69 | up needing to re-write the backend components to work in additional languages 70 | and interfaces. The goal of Idem is to make this limitation go away! Instead 71 | of making yet another language, Idem ingests structured data. This means that 72 | any language can be written on top of Idem as an extension to `rend`. So Idem 73 | can be seen not as a yaml based language for idempotent management. But instead 74 | as assembly code that languages can be built on top of. 75 | 76 | I feel that the language war in configuration management is one of the primary 77 | limiting factors for the industry, and why we end up producing new languages 78 | to solve specific problems. My hope here is that support for all the managed 79 | interfaces can be built into Idem and then made available to any app that wants 80 | to use them. 81 | -------------------------------------------------------------------------------- /docs/releases/4.rst: -------------------------------------------------------------------------------- 1 | ==================== 2 | Idem 4 - Beyond Salt 3 | ==================== 4 | 5 | Idem 4 is a monumental release! This marks the first release where major 6 | support for an interface has been made available to app-merge into Idem. 7 | This release also marks the first major feature additions to Idem beyond 8 | the capabilities found in the Salt state system. 9 | 10 | Late Rendering With Render Blocks 11 | ================================= 12 | 13 | This release adds the ability to to execute late rendering using a new 14 | feature in `rend` called render blocks. This allows for blocks of code 15 | to be rendered during the runtime and added to the overall execution of 16 | the state. 
This makes it easy to break apart the execution to be able to 17 | take arbitrary data during the run and apply it to the execution. 18 | 19 | Transparent Requisites 20 | ====================== 21 | 22 | Transparent requisites is a powerhouse feature! This new capability allows 23 | for state plugins to define requisites that will be automatically added into 24 | the mix. This makes it possible for the author of a state plugin to define that 25 | if a certain state is ever used, Idem will search the runtime to determine if 26 | any of the states defined as transparent requisites have been used and apply 27 | them with the desired requite. 28 | -------------------------------------------------------------------------------- /docs/releases/5.1.rst: -------------------------------------------------------------------------------- 1 | ======== 2 | Idem 5.1 3 | ======== 4 | 5 | This is a bugfix release of Idem. This release fixes a few issues found inside 6 | the state runtime. 7 | 8 | For details on the repaired issues please see the following issues on Github: 9 | 10 | #11 11 | #12 12 | #13 13 | #14 14 | -------------------------------------------------------------------------------- /docs/releases/5.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Idem 5 - Encrypted Secrets 3 | ========================== 4 | 5 | Idem 5 comes with a much needed addition, the ability to store encrypted data at 6 | rest. This addition introduces a new dep and project that is used for the work 7 | of encrypted datastore - Takara. Takara is the standalone manager for keeping 8 | track of this data at rest, it allows for data to be easily stored in a pluggable 9 | and dynamic way. Takara has also been app-merged into Idem, so you can initiate, 10 | unseal, and use takara secret data stores from Idem. 
11 | -------------------------------------------------------------------------------- /docs/topics/azure_quickstart.rst: -------------------------------------------------------------------------------- 1 | ======================== 2 | Quickstart - Azure Cloud 3 | ======================== 4 | 5 | Setting up cloud resources using Idem is easy to do, Start by making sure that 6 | idem and the idem Azure providers are installed: 7 | 8 | .. code-block:: bash 9 | 10 | pip install idem 11 | 12 | This command will install idem from pypi as well as the azure cloud provider. 13 | With these in place start by making a new directory to place your Idem code 14 | tree: 15 | 16 | .. code-block:: bash 17 | 18 | mkdir idem_azure 19 | cd idem_azure 20 | 21 | This is where your automation for Azure will be placed. When using Idem you 22 | create automation formulas, these formulas are stored in files with the extension 23 | `.sls` and can be stored in subdirectories. For purposes of making this tutorial 24 | easy we will just use a single sls file to write our formula. 25 | 26 | Lets start by making some variables for idem to use, this data gets reused in the 27 | Azure formula. This is in plain text here but a secure secret store system will be 28 | made available soon. Open a file called azure.sls and add your login data: 29 | 30 | .. code-block:: yaml 31 | 32 | # azure.sls 33 | 34 | {% set profile = { 35 | 'client_id': '', 36 | 'secret': '', 37 | 'subscription_id': '', 38 | 'tenant': 'YOUR TENANT' }% } 39 | 40 | Now that your Azure login data is taken care of, we can focus on defining the 41 | interfaces that we need to build. In this example we will be creating a 42 | resource group, network security group, virtual network, and a virtual machine. 43 | Idem's Azure provider supports many more interfaces on Azure, but this is 44 | a good start to show how Idem can be used to get the ball rolling. 45 | 46 | Next add the resource group to the same file: 47 | 48 | .. 
code-block:: yaml 49 | 50 | # azure.sls 51 | 52 | {% set profile = { 53 | 'client_id': '', 54 | 'secret': '', 55 | 'subscription_id': '', 56 | 'tenant': 'YOUR TENANT' }% } 57 | 58 | Azure Resouse Group: 59 | azurerm.resource.group.present: 60 | - name: idem 61 | - location: eastus 62 | - tags: 63 | contact_name: Ashley Miller 64 | organiaztion: SuperCo 65 | - connection_auth: {{ profile }} 66 | 67 | This stanza defines a resource group. Every stanza needs an ID, in this example 68 | the ID is `Azure Resource Group`. Then we need to reference the underlying 69 | idempotent function to call. The function reference used here is 70 | `azurerm.resource.group.present`. After the function reference we pass the 71 | configuration for the given function. Every function takes a name option and 72 | then the options specific to the defined operation. In this case we are defining 73 | that this resource group needs to be in the eastus location, has some tags, and 74 | pass the connection authentication data. 75 | 76 | Now we can add something more complicated, security! The management of a 77 | security group can be complicated, but Idem makes applying the many components 78 | of a security group easy! 79 | 80 | .. 
code-block:: yaml 81 | 82 | # azure.sls 83 | 84 | {% set profile = { 85 | 'client_id': '', 86 | 'secret': '', 87 | 'subscription_id': '', 88 | 'tenant': 'YOUR TENANT' }% } 89 | 90 | Azure Resouse Group: 91 | azurerm.resource.group.present: 92 | - name: idem 93 | - location: eastus 94 | - tags: 95 | contact_name: Ashley Miller 96 | organiaztion: Acme 97 | - connection_auth: {{ profile }} 98 | 99 | Network Security Group: 100 | azurerm.network.network_security_group.present: 101 | - name: nsg1 102 | - resource_group: idem 103 | - security_rules: 104 | - name: nsg1_rule1 105 | priority: 100 106 | protocol: tcp 107 | access: allow 108 | direction: outbound 109 | source_address_prefix: virtualnetwork 110 | destination_address_prefix: internet 111 | source_port_range: '*' 112 | destination_port_range: '*' 113 | - name: nsg1_rule2 114 | priority: 101 115 | protocol: tcp 116 | access: allow 117 | direction: inbound 118 | source_address_prefix: internet 119 | destination_address_prefix: virtualnetwork 120 | source_port_range: '*' 121 | destination_port_ranges: 122 | - '22' 123 | - '443' 124 | - tags: 125 | contact_name: Ashley MIller 126 | organization: Acme 127 | - connection_auth: {{ profile }} 128 | 129 | The model continues, with another stanza, ID, function and arguments. This simple 130 | model gets re-used over and over again. Making the setup easy to learn and use. 131 | Even situations where very complicated datasets are required, like a security 132 | group, the data can be passed through! 133 | 134 | Now lets add the virtual network: 135 | 136 | .. 
code-block:: yaml 137 | 138 | # azure.sls 139 | 140 | {% set profile = { 141 | 'client_id': '', 142 | 'secret': '', 143 | 'subscription_id': '', 144 | 'tenant': 'YOUR TENANT' }% } 145 | 146 | Azure Resouse Group: 147 | azurerm.resource.group.present: 148 | - name: idem 149 | - location: eastus 150 | - tags: 151 | contact_name: Ashley Miller 152 | organiaztion: Acme 153 | - connection_auth: {{ profile }} 154 | 155 | Network Security Group: 156 | azurerm.network.network_security_group.present: 157 | - name: nsg1 158 | - resource_group: idem 159 | - security_rules: 160 | - name: nsg1_rule1 161 | priority: 100 162 | protocol: tcp 163 | access: allow 164 | direction: outbound 165 | source_address_prefix: virtualnetwork 166 | destination_address_prefix: internet 167 | source_port_range: '*' 168 | destination_port_range: '*' 169 | - name: nsg1_rule2 170 | priority: 101 171 | protocol: tcp 172 | access: allow 173 | direction: inbound 174 | source_address_prefix: internet 175 | destination_address_prefix: virtualnetwork 176 | source_port_range: '*' 177 | destination_port_ranges: 178 | - '22' 179 | - '443' 180 | - tags: 181 | contact_name: Ashley MIller 182 | organization: Acme 183 | - connection_auth: {{ profile }} 184 | 185 | Virtual Network: 186 | azurerm.network.virtual_network.present: 187 | - name: vnet1 188 | - resource_group: idem 189 | - address_prefixes: 190 | - '10.0.0.0/8' 191 | - subnets: 192 | - name: default 193 | address_prefix: '10.0.0.0/8' 194 | network_security_group: 195 | id: /subscriptions/{{ profile['subscription_id'] }}/resourceGroups/idem/providers/Microsoft.Network/networkSecurityGroups/nsg1 196 | - tags: 197 | contact_name: Elmer Fudd Gantry 198 | organization: Everest 199 | - connection_auth: {{ profile }} 200 | 201 | Finally, we can add a virtual machine, Idem can add availability sets and much 202 | more complicated systems, but this is a quickstart! So add the last stanza: 203 | 204 | .. 
code-block:: yaml 205 | 206 | # azure.sls 207 | 208 | {% set profile = { 209 | 'client_id': '', 210 | 'secret': '', 211 | 'subscription_id': '', 212 | 'tenant': 'YOUR TENANT' }% } 213 | 214 | Azure Resouse Group: 215 | azurerm.resource.group.present: 216 | - name: idem 217 | - location: eastus 218 | - tags: 219 | contact_name: Ashley Miller 220 | organiaztion: Acme 221 | - connection_auth: {{ profile }} 222 | 223 | Network Security Group: 224 | azurerm.network.network_security_group.present: 225 | - name: nsg1 226 | - resource_group: idem 227 | - security_rules: 228 | - name: nsg1_rule1 229 | priority: 100 230 | protocol: tcp 231 | access: allow 232 | direction: outbound 233 | source_address_prefix: virtualnetwork 234 | destination_address_prefix: internet 235 | source_port_range: '*' 236 | destination_port_range: '*' 237 | - name: nsg1_rule2 238 | priority: 101 239 | protocol: tcp 240 | access: allow 241 | direction: inbound 242 | source_address_prefix: internet 243 | destination_address_prefix: virtualnetwork 244 | source_port_range: '*' 245 | destination_port_ranges: 246 | - '22' 247 | - '443' 248 | - tags: 249 | contact_name: Ashley Miller 250 | organization: Acme 251 | - connection_auth: {{ profile }} 252 | 253 | Virtual Network: 254 | azurerm.network.virtual_network.present: 255 | - name: vnet1 256 | - resource_group: idem 257 | - address_prefixes: 258 | - '10.0.0.0/8' 259 | - subnets: 260 | - name: default 261 | address_prefix: '10.0.0.0/8' 262 | network_security_group: 263 | id: /subscriptions/{{ profile['subscription_id'] }}/resourceGroups/idem/providers/Microsoft.Network/networkSecurityGroups/nsg1 264 | - tags: 265 | contact_name: Ashley Miller 266 | organization: Acme 267 | - connection_auth: {{ profile }} 268 | 269 | Virtual Machine: 270 | azurerm.compute.virtual_machine.present: 271 | - name: idem-vm01 272 | - resource_group: idem 273 | - vm_size: Standard_B1s 274 | - image: 'Canonical|UbuntuServer|18.04-LTS|latest' 275 | - virtual_network: vnet1 276 | 
- subnet: default 277 | - allocate_public_ip: True 278 | - ssh_public_keys: 279 | - /home/localuser/.ssh/id_rsa.pub 280 | - tags: 281 | contact_name: Ashley Miller 282 | organization: Acme 283 | - connection_auth: {{ profile }} 284 | 285 | Here we see that we can define an image to use, resource group, vm options, 286 | tags, and ssh login credentials. Now that our formula is complete we can 287 | execute it! But not so fast! We can run the formula in test mode first so 288 | we can ensure that it will make the changes we expect: 289 | 290 | .. code-block:: bash 291 | 292 | idem --sls azure --test 293 | 294 | Now you can get a report on all of the resources you are about to create. 295 | If everything looks go go ahead and run it for real! 296 | 297 | .. code-block:: bash 298 | 299 | idem --sls azure 300 | 301 | 302 | That's it! Idem will now execute against the code defined in `azure.sls`. 303 | 304 | The `idem` command here assumes that you are in the code dir. This is not 305 | necessary, the idem command can be run with the `-T` option: 306 | 307 | .. code-block:: bash 308 | 309 | idem -T --sls azure 310 | -------------------------------------------------------------------------------- /docs/topics/extending.rst: -------------------------------------------------------------------------------- 1 | ============== 2 | Extending Idem 3 | ============== 4 | 5 | Extending Idem is simple, but it does require a few steps. To extend 6 | Idem you need to create a new Idem plugin project using `POP`. Now don't run 7 | away, this has been designed to be easy! 8 | 9 | What is POP? 10 | ============ 11 | 12 | You don't need to understand the inner workings of Plugin Oriented Programming 13 | or `pop` to extend Idem, just think about it as a system for writing and 14 | managing plugins. Idem is all about plugins! 15 | 16 | If you want to learn more about the details of `POP`, take a look at the docs. 
17 | It is powerful stuff and might change how you program forever: 18 | https://pop.readthedocs.io 19 | 20 | Lets Get Down to Business 21 | ========================= 22 | 23 | Start by installing `idem`: 24 | 25 | .. code-block:: bash 26 | 27 | pip install idem 28 | 29 | This will download and install both `idem` and `pop`. Now you can start your 30 | project by calling `pop-seed` to make the structure you need: 31 | 32 | .. code-block:: bash 33 | 34 | pop-seed idem_tester -t v -d exec states 35 | 36 | By passing `-t v` to `pop-seed` we are telling `pop-seed` that this is a 37 | *Vertical App Merge* project. By passing `-d exec states` we are asking 38 | `pop-seed` to add the 2 dynamic names `exec` and `states` to the project. 39 | 40 | This will create a new project called `idem_tester` with everything you need 41 | to get the ball rolling. 42 | 43 | 44 | Making Your First Idem State 45 | ============================ 46 | 47 | In your new project there will be a directory called `idem_tester/states`, in 48 | this directory add a file called `trial.py`: 49 | 50 | .. code-block:: python 51 | 52 | async def run(hub, name): 53 | ''' 54 | Do a simple trial run 55 | ''' 56 | return { 57 | 'name': name, 58 | 'result': True, 59 | 'changes': {}, 60 | 'comment': 'It Ran!', 61 | } 62 | 63 | For idem to run, `states` functions need to return a python dict that has 4 fields, 64 | `name`, `result`, `changes`, and `comment`. These fields are used by Idem to not 65 | only expose data to the user, but also to track the internal execution of the system. 66 | 67 | Next install your new project. For `idem` to be able to use your project, it 68 | needs to be in the python path. There are a lot of convenient ways to manage the 69 | installation and deployment of `POP` projects, but for now we can just use good 70 | old `pip`: 71 | 72 | .. code-block:: bash 73 | 74 | pip install -e . 75 | 76 | Now you can execute a state with `idem`.
As you will see, `pop` and `idem` are 77 | all about hierarchical code. `Idem` runs code out of a directory, you need to 78 | point `idem` to a directory that contains `sls` files. Go ahead and `cd` to 79 | another directory and make a new `sls` directory. 80 | 81 | .. code-block:: bash 82 | 83 | mkdir try 84 | cd try 85 | 86 | Now open a file called `try.sls`: 87 | 88 | .. code-block:: yaml 89 | 90 | try something: 91 | trial.run 92 | 93 | Now from that directory run idem: 94 | 95 | .. code-block:: bash 96 | 97 | idem --sls try 98 | 99 | And you will see the results from running your trial.run state! 100 | -------------------------------------------------------------------------------- /docs/topics/intro.rst: -------------------------------------------------------------------------------- 1 | ==== 2 | Idem 3 | ==== 4 | 5 | Idem is an idempotent dataflow programming language. It exposes stateful 6 | programming constructs that makes things like enforcing the state 7 | of an application, configuration, SaaS system, or others very 8 | simple. 9 | 10 | Since Idem is a programming language, it can also be used for data 11 | processing and pipelining. Idem can be used not only to manage 12 | the configuration of interfaces, but also for complex rule engines 13 | and processing files or workflows. 14 | 15 | Idem is a language to glue together management of all sorts of 16 | interfaces. You can think of it like having idempotent 17 | scripts. Automation that can be run over and over again that 18 | enforces a specific state or process. 19 | 20 | Idem is unique in that it is built purely as a language. It 21 | can be added to any type of management system out there and can 22 | be applied in a cross platform way easily. 23 | 24 | Idem's functionality can also be expanded easily. 
Instead of storing 25 | all of the language components in a single place, the libraries 26 | used by Idem can be written independently and seamlessly merged 27 | into Idem, just like a normal programming language! 28 | 29 | What does Idempotent mean? 30 | ========================== 31 | 32 | The concept of Idempotent is simple! It just means that every time 33 | something is run, it always has the same end result regardless of the state 34 | of a system when the run starts! 35 | 36 | At first glance this might seem useless, but think more deeply. Have you 37 | ever needed to make sure that something was set up in a consistent way? It 38 | can be very nice to be able to enforce that setup without worrying about 39 | breaking it. Or think about data pipelines, have you ever had input data 40 | that needed to be processed? Idempotent systems allow for data to be 41 | easily processed in a consistent way, over and over again! 42 | 43 | How Does This Language Work? 44 | ============================ 45 | 46 | Idem is built using two critical technologies, `Python` and `POP`. Since Idem 47 | is built on Python it should be easy to extend for most software developers. 48 | Extending Idem can be very easy because simple Python modules are all you need 49 | to add capabilities! 50 | 51 | The other technology, `POP`, may be new to you. This is the truly secret sauce 52 | behind Idem as well as a number of emerging exciting technologies. `POP` stands 53 | for Plugin Oriented Programming. It is the brainchild of `the creator of 54 | Salt `_ and a new way to write software. The `POP` 55 | system makes the creation of higher level paradigms like Idem possible, but also 56 | provides the needed components to make Idem extensible and flexible. If `POP` 57 | is a new concept to you, 58 | `check it out `_! 59 | 60 | Idem works by taking language files called `sls` files and compiling them 61 | down to data instructions. 
These data instructions are then run through the 62 | Idem runtime. These instructions inform Idem what routines to call to 63 | enforce state or process data. It allows you to take a high level dataset 64 | as your input, making the use of the system very easy. 65 | 66 | Paradigms and Languages, This Sounds Complicated! 67 | ================================================= 68 | 69 | Under the hood, it is complicated! The guts of a programming language are 70 | complicated, but it is all there to make your life easier! You don't need to 71 | understand complex computer science theory to benefit from Idem. You just need 72 | to learn a few simple things and you can start making your life easier today! 73 | -------------------------------------------------------------------------------- /docs/topics/migrate_salt.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | Migrating Support From Salt 3 | =========================== 4 | 5 | Idem is not too far from Salt States. Idem extends Salt State functionality 6 | though, and uses slightly different underlying interfaces. for instance, Idem 7 | does not use `__salt__` or any of the dunder constructs, all of this 8 | information is now on the hub. But migration is intended to be easy! 9 | 10 | Exec Modules and State Modules 11 | ============================== 12 | 13 | Idem follows the same constructs as Salt in seperating execution 14 | functionality from idempotent enforcement into two seperate subsystems. 15 | The idea is that these are seperate concerns and that raw execution 16 | presents value in itself making the code more reusable. 17 | 18 | salt/modules to exec 19 | -------------------- 20 | 21 | Modules inside of `salt/modules` should be implemented as 22 | `exec` modules in Idem. References on the hub should be changed from 23 | `__salt__['test.ping']` to references on the hub, like 24 | `hub.exec.test.ping`. 
25 | 26 | salt/states to states 27 | --------------------- 28 | 29 | Modules inside of `salt/states` should be implemented as `states` 30 | modules in Idem. References on the hub should be changed from 31 | `__states__['pkg.installed']` to `hub.states.pkg.installed`. 32 | 33 | salt/utils to exec 34 | ------------------ 35 | 36 | Many Salt modules use functions inside of utils. This grew in Salt out 37 | of limitations from the salt loader and how shared code was originally 38 | developed. 39 | 40 | For Idem anything that is in utils should be moved into `exec`. This makes 41 | those functions generally available for everything else on the hub which 42 | solves the problem that created the utils system in Salt to begin with. 43 | 44 | Namespaces 45 | ========== 46 | 47 | Unlike Salt's loader, POP allows for nested plugin subsystems. Idem 48 | recursively loads all lower subsystems for `exec` and `states` subsystems. 49 | 50 | This means that you can move `exec` and `states` plugins into subdirectories! 51 | So when porting a module called `salt/modules/boto_s3.py` it could be ported 52 | to `exec/boto/s3.py`, or it could be ported to `exec/aws/s3.py` or 53 | `exec/aws/storage/s3.py`. The location of the file reflects the location 54 | on the hub, so these locations get referenced on the hub as `hub.exec.boto.s3`, 55 | `hub.exec.aws.s3`, `hub.exec.aws.storage.s3` respectively. 56 | 57 | Exec Function Calls 58 | =================== 59 | 60 | All function calls now need to accept the hub as the first argument. Functions 61 | should also be changed to be async functions where appropriate. So this 62 | `exec` function signature: 63 | 64 | .. code-block:: python 65 | 66 | def upload_file( 67 | source, 68 | name, 69 | extra_args=None, 70 | region=None, 71 | key=None, 72 | keyid=None, 73 | profile=None, 74 | ): 75 | 76 | Gets changed to look like this: 77 | 78 | .. 
code-block:: python 79 | 80 | async def upload_file( 81 | hub, 82 | source, 83 | name, 84 | extra_args=None, 85 | region=None, 86 | key=None, 87 | keyid=None, 88 | profile=None, 89 | ): 90 | 91 | States Function Calls 92 | ===================== 93 | 94 | States function calls now accept a `ctx` argument. This allows us to send 95 | an execution context into the function. The `ctx` is a dict with the keys 96 | `test` and `run_name`. The `test` value is a boolean telling the state if it 97 | is running is test mode. The `run_name` is the name of the run as it is stored 98 | on the hub, using the `run_name` you can gain access to the internal tracking 99 | data for the execution of the Idem run located in `hub.idem.RUNS[ctx['run_name']]`. 100 | 101 | So a state function signature that looks like this in Salt: 102 | 103 | .. code-block:: python 104 | 105 | def object_present( 106 | name, 107 | source=None, 108 | hash_type=None, 109 | extra_args=None, 110 | extra_args_from_pillar='boto_s3_object_extra_args', 111 | region=None, 112 | key=None, 113 | keyid=None, 114 | profile=None): 115 | 116 | Will look like this in Idem: 117 | 118 | .. code-block:: python 119 | 120 | async def object_present( 121 | hub, 122 | ctx, 123 | name, 124 | source=None, 125 | hash_type=None, 126 | extra_args=None, 127 | extra_args_from_pillar='boto_s3_object_extra_args', 128 | region=None, 129 | key=None, 130 | keyid=None, 131 | profile=None): 132 | 133 | Full Function Example 134 | ===================== 135 | 136 | This example takes everything into account given a state function before and 137 | after. Doc strings are omitted for brevity but should be preserved. 138 | 139 | Salt Function 140 | ------------- 141 | 142 | .. 
code-block:: python 143 | 144 | def object_present( 145 | name, 146 | source=None, 147 | hash_type=None, 148 | extra_args=None, 149 | extra_args_from_pillar='boto_s3_object_extra_args', 150 | region=None, 151 | key=None, 152 | keyid=None, 153 | profile=None, 154 | ): 155 | ret = { 156 | 'name': name, 157 | 'comment': '', 158 | 'changes': {}, 159 | } 160 | 161 | if extra_args is None: 162 | extra_args = {} 163 | combined_extra_args = copy.deepcopy( 164 | __salt__['config.option'](extra_args_from_pillar, {}) 165 | ) 166 | __utils__['dictupdate.update'](combined_extra_args, extra_args) 167 | if combined_extra_args: 168 | supported_args = STORED_EXTRA_ARGS | UPLOAD_ONLY_EXTRA_ARGS 169 | combined_extra_args_keys = frozenset(six.iterkeys(combined_extra_args)) 170 | extra_keys = combined_extra_args_keys - supported_args 171 | if extra_keys: 172 | msg = 'extra_args keys {0} are not supported'.format(extra_keys) 173 | return {'error': msg} 174 | 175 | # Get the hash of the local file 176 | if not hash_type: 177 | hash_type = __opts__['hash_type'] 178 | try: 179 | digest = salt.utils.hashutils.get_hash(source, form=hash_type) 180 | except IOError as e: 181 | ret['result'] = False 182 | ret['comment'] = "Could not read local file {0}: {1}".format( 183 | source, 184 | e, 185 | ) 186 | return ret 187 | except ValueError as e: 188 | # Invalid hash type exception from get_hash 189 | ret['result'] = False 190 | ret['comment'] = 'Could not hash local file {0}: {1}'.format( 191 | source, 192 | e, 193 | ) 194 | return ret 195 | 196 | HASH_METADATA_KEY = 'salt_managed_content_hash' 197 | combined_extra_args.setdefault('Metadata', {}) 198 | if HASH_METADATA_KEY in combined_extra_args['Metadata']: 199 | # Be lenient, silently allow hash metadata key if digest value matches 200 | if combined_extra_args['Metadata'][HASH_METADATA_KEY] != digest: 201 | ret['result'] = False 202 | ret['comment'] = ( 203 | 'Salt uses the {0} metadata key internally,' 204 | 'do not pass it to the 
boto_s3.object_present state.' 205 | ).format(HASH_METADATA_KEY) 206 | return ret 207 | combined_extra_args['Metadata'][HASH_METADATA_KEY] = digest 208 | # Remove upload-only keys from full set of extra_args 209 | # to create desired dict for comparisons 210 | desired_metadata = dict( 211 | (k, v) for k, v in six.iteritems(combined_extra_args) 212 | if k not in UPLOAD_ONLY_EXTRA_ARGS 213 | ) 214 | 215 | # Some args (SSE-C, RequestPayer) must also be passed to get_metadata 216 | metadata_extra_args = dict( 217 | (k, v) for k, v in six.iteritems(combined_extra_args) 218 | if k in GET_METADATA_EXTRA_ARGS 219 | ) 220 | r = __salt__['boto_s3.get_object_metadata']( 221 | name, 222 | extra_args=metadata_extra_args, 223 | region=region, 224 | key=key, 225 | keyid=keyid, 226 | profile=profile, 227 | ) 228 | if 'error' in r: 229 | ret['result'] = False 230 | ret['comment'] = 'Failed to check if S3 object exists: {0}.'.format( 231 | r['error'], 232 | ) 233 | return ret 234 | 235 | if r['result']: 236 | # Check if content and metadata match 237 | # A hash of the content is injected into the metadata, 238 | # so we can combine both checks into one 239 | # Only check metadata keys specified by the user, 240 | # ignore other fields that have been set 241 | s3_metadata = dict( 242 | (k, r['result'][k]) for k in STORED_EXTRA_ARGS 243 | if k in desired_metadata and k in r['result'] 244 | ) 245 | if s3_metadata == desired_metadata: 246 | ret['result'] = True 247 | ret['comment'] = 'S3 object {0} is present.'.format(name) 248 | return ret 249 | action = 'update' 250 | else: 251 | s3_metadata = None 252 | action = 'create' 253 | 254 | def _yaml_safe_dump(attrs): 255 | ''' 256 | Safely dump YAML using a readable flow style 257 | ''' 258 | dumper_name = 'IndentedSafeOrderedDumper' 259 | dumper = __utils__['yaml.get_dumper'](dumper_name) 260 | return __utils__['yaml.dump']( 261 | attrs, 262 | default_flow_style=False, 263 | Dumper=dumper) 264 | 265 | changes_diff = 
''.join(difflib.unified_diff( 266 | _yaml_safe_dump(s3_metadata).splitlines(True), 267 | _yaml_safe_dump(desired_metadata).splitlines(True), 268 | )) 269 | 270 | if __opts__['test']: 271 | ret['result'] = None 272 | ret['comment'] = 'S3 object {0} set to be {1}d.'.format(name, action) 273 | ret['comment'] += '\nChanges:\n{0}'.format(changes_diff) 274 | ret['changes'] = {'diff': changes_diff} 275 | return ret 276 | 277 | r = __salt__['boto_s3.upload_file']( 278 | source, 279 | name, 280 | extra_args=combined_extra_args, 281 | region=region, 282 | key=key, 283 | keyid=keyid, 284 | profile=profile, 285 | ) 286 | 287 | if 'error' in r: 288 | ret['result'] = False 289 | ret['comment'] = 'Failed to {0} S3 object: {1}.'.format( 290 | action, 291 | r['error'], 292 | ) 293 | return ret 294 | 295 | ret['result'] = True 296 | ret['comment'] = 'S3 object {0} {1}d.'.format(name, action) 297 | ret['comment'] += '\nChanges:\n{0}'.format(changes_diff) 298 | ret['changes'] = {'diff': changes_diff} 299 | return ret 300 | 301 | Idem State Function 302 | ------------------- 303 | 304 | .. code-block:: python 305 | 306 | async def object_present( 307 | hub, 308 | ctx, 309 | name, 310 | source=None, 311 | hash_type=None, 312 | extra_args=None, 313 | region=None, 314 | key=None, 315 | keyid=None, 316 | profile=None): 317 | ret = { 318 | 'name': name, 319 | 'comment': '', 320 | 'changes': {}, 321 | } 322 | 323 | if extra_args is None: 324 | extra_args = {} 325 | # Pull out args for pillar 326 | 327 | # Get the hash of the local file 328 | if not hash_type: 329 | hash_type = hub.OPT['idem']['hash_type'] # Pull opts from hub.OPT 330 | try: 331 | # Some functions from utils will need to be ported over. Some general 332 | # Use functions should be sent upstream to be included in Idem. 
333 | digest = hub.exec.utils.hashutils.get_hash(source, form=hash_type) 334 | except IOError as e: 335 | ret['result'] = False 336 | # Idem requires Python 3.6 and higher, use f-strings 337 | ret['comment'] = f'Could not read local file {source}: {e}' 338 | return ret 339 | except ValueError as e: 340 | # Invalid hash type exception from get_hash 341 | ret['result'] = False 342 | ret['comment'] = f'Could not hash local file {source}: {e}' 343 | return ret 344 | 345 | HASH_METADATA_KEY = 'idem_managed_content_hash' # Change salt refs to idem 346 | combined_extra_args.setdefault('Metadata', {}) 347 | if HASH_METADATA_KEY in combined_extra_args['Metadata']: 348 | # Be lenient, silently allow hash metadata key if digest value matches 349 | if combined_extra_args['Metadata'][HASH_METADATA_KEY] != digest: 350 | ret['result'] = False 351 | ret['comment'] = ( 352 | f'Salt uses the {HASH_METADATA_KEY} metadata key internally,' 353 | 'do not pass it to the boto_s3.object_present state.' 354 | return ret 355 | combined_extra_args['Metadata'][HASH_METADATA_KEY] = digest 356 | # Remove upload-only keys from full set of extra_args 357 | # to create desired dict for comparisons 358 | desired_metadata = dict( 359 | (k, v) for k, v in combined_extra_args.items() # No need to six anymore 360 | if k not in UPLOAD_ONLY_EXTRA_ARGS 361 | ) 362 | 363 | # Some args (SSE-C, RequestPayer) must also be passed to get_metadata 364 | metadata_extra_args = dict( 365 | (k, v) for k, v in combined_extra_args.items() # No need for six anymore 366 | if k in GET_METADATA_EXTRA_ARGS 367 | ) 368 | r = await hub.exec.boto.s3.get_object_metadata( 369 | name, 370 | extra_args=metadata_extra_args, 371 | region=region, 372 | key=key, 373 | keyid=keyid, 374 | profile=profile, 375 | ) 376 | if 'error' in r: 377 | ret['result'] = False 378 | ret['comment'] = f'Failed to check if S3 object exists: {r["error"]}.' 
# Use fstrings 379 | return ret 380 | 381 | if r['result']: 382 | # Check if content and metadata match 383 | # A hash of the content is injected into the metadata, 384 | # so we can combine both checks into one 385 | # Only check metadata keys specified by the user, 386 | # ignore other fields that have been set 387 | s3_metadata = dict( 388 | (k, r['result'][k]) for k in STORED_EXTRA_ARGS 389 | if k in desired_metadata and k in r['result'] 390 | ) 391 | if s3_metadata == desired_metadata: 392 | ret['result'] = True 393 | ret['comment'] = f'S3 object {name} is present.' 394 | return ret 395 | action = 'update' 396 | else: 397 | s3_metadata = None 398 | action = 'create' 399 | 400 | # Some Salt code goes out of its way to use salt libs, often it 401 | # is more appropriate to just call the supporting lib directly 402 | changes_diff = ''.join(difflib.unified_diff( 403 | yaml.dump(s3_metadata, default_flow_style=False).splitlines(True), 404 | yaml.dump(desired_metadata, default_flow_style=False).splitlines(True), 405 | )) 406 | 407 | if ctx['test']: 408 | ret['result'] = None 409 | ret['comment'] = f'S3 object {name} set to be {action}d.' 410 | ret['comment'] += f'\nChanges:\n{changes_diff}' 411 | ret['changes'] = {'diff': changes_diff} 412 | return ret 413 | 414 | r = await hub.boto.s3.upload_file( 415 | source, 416 | name, 417 | extra_args=combined_extra_args, 418 | region=region, 419 | key=key, 420 | keyid=keyid, 421 | profile=profile, 422 | ) 423 | 424 | if 'error' in r: 425 | ret['result'] = False 426 | ret['comment'] = f'Failed to {action} S3 object: {r["error"]}.' 427 | return ret 428 | 429 | ret['result'] = True 430 | ret['comment'] = f'S3 object {name} {action}d.' 
431 | ret['comment'] += f'\nChanges:\n{changes_diff}' 432 | ret['changes'] = {'diff': changes_diff} 433 | return ret 434 | -------------------------------------------------------------------------------- /docs/topics/takara.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Encrypted Data Storage 3 | ====================== 4 | 5 | .. note:: 6 | 7 | Takara is very new and this interface is going to evolve over time. The 8 | basic interface presented here will not likely change, but many more 9 | options are planned 10 | 11 | It is very common that you need to store credentials on disk. Idem makes this 12 | easy via the `takara` system. Using `takara` you can store encrypted data 13 | securely and then call it up from within your `sls` files! 14 | 15 | Using `takara` from within Idem is easy! First take a look at the `takara` 16 | docs on how to set up a secret storage unit, and how to set and get secrets 17 | from that unit. 18 | 19 | Now make a new `sls` file that calls `takara.init.get` from the hub: 20 | 21 | .. code-block:: yaml 22 | 23 | takara_test: 24 | test.succeed_with_comment: 25 | - comment: {{ hub.takara.init.get(unit='main', path='foo/bar/baz') }} 26 | 27 | Now you can run `idem` with the `--takara-unit` or `-u` option to define what 28 | unit to unseal for the use of this `idem` run: 29 | 30 | .. code-block:: bash 31 | 32 | idem -u main --sls test 33 | 34 | Idem will prompt you to unseal the `takara` store, making the secrets in the named 35 | unit available to the sls files as they run. 36 | -------------------------------------------------------------------------------- /docs/topics/transparent_req.rst: -------------------------------------------------------------------------------- 1 | ====================== 2 | Transparent Requisites 3 | ====================== 4 | 5 | Transparent requisites is a powerful feature inside of Idem. 
It 6 | allows requisites to be defines on a function by function basis. This 7 | means that a given function can always requires any instance of 8 | another function, in the background. This makes it easy for 9 | state authors to ensure that executions are always executed in the 10 | correct order without the end user needing to define those orders. 11 | 12 | It is easy to do, at the top of your system module just define the 13 | `TREQ` dict, this dict defines what functions will require what 14 | other functions: 15 | 16 | .. code-block:: python 17 | 18 | TREQ = { 19 | 'treq': { 20 | 'require': [ 21 | 'test.nop', 22 | ] 23 | }, 24 | } 25 | 26 | This stanza will look for the function named `treq` inside of the module 27 | that it is deinfed in, then it will add `require : - test.nop` for every 28 | instance found of `test.nop` in the current run. If test.nop is never used, 29 | then no requisites are set. Any requisite can be used, and multiple requisites 30 | can be used. 31 | -------------------------------------------------------------------------------- /idem/conf.py: -------------------------------------------------------------------------------- 1 | CLI_CONFIG = { 2 | 'sls_sources': { 3 | 'default': ['file://'], 4 | 'nargs': '*', 5 | 'help': 'list off the sources that should be used for gathering sls files and data', 6 | }, 7 | 'test': { 8 | 'options': ['-t'], 9 | 'default': False, 10 | 'action': 'store_true', 11 | 'help': 'Set the idem run to execute in test mode. No changes will be made, idem will only detect if changes will be made in a real run.', 12 | }, 13 | 'tree': { 14 | 'default': '', 15 | 'options': ['-T'], 16 | 'help': 'The directory containing sls files', 17 | }, 18 | 'takara_unit': { 19 | 'options': ['-u'], 20 | 'default': None, 21 | 'help': 'The Takara unit to work with, This enables Takara as a backend for secret storage in your idem states', 22 | }, 23 | 'seal_raw': { 24 | 'default': None, 25 | 'help': 'DO NOT USE! 
This option allows you to pass Takara unsealing secrets as command line arguments! This should only be used for testing!!', 26 | }, 27 | 'cache_dir': { 28 | 'default': '/var/cache/idem', 29 | 'help': 'The location to use for the cache directory', 30 | }, 31 | 'root_dir': { 32 | 'default': '/', 33 | 'help': 'The root directory to run idem from. By default it will be "/", or in the case of running as non-root it is set to /.idem', 34 | }, 35 | 'render': { 36 | 'default': 'jinja|yaml', 37 | 'help': 'The render pipe to use, this allows for the language to be specified', 38 | }, 39 | 'runtime': { 40 | 'default': 'serial', 41 | 'help': 'Select which execution runtime to use', 42 | }, 43 | 'output': { 44 | 'default': 'idem', 45 | 'help': 'The putputter to use to display data', 46 | }, 47 | 'sls': { 48 | 'default': [], 49 | 'nargs': '*', 50 | 'help': 'A space delimited list of sls refs to execute', 51 | }, 52 | } 53 | CONFIG = {} 54 | GLOBAL = {} 55 | SUBS = {} 56 | DYNE = { 57 | 'exec': ['exec'], 58 | 'states': ['states'], 59 | 'output': ['output'], 60 | } 61 | -------------------------------------------------------------------------------- /idem/exec/test.py: -------------------------------------------------------------------------------- 1 | def ping(hub): 2 | return True 3 | -------------------------------------------------------------------------------- /idem/idem/compiler/.init.py.kate-swp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/idem/aaef3789b30172864db8a6b03fd1b7914a9b3b27/idem/idem/compiler/.init.py.kate-swp -------------------------------------------------------------------------------- /idem/idem/compiler/0010_extend.py: -------------------------------------------------------------------------------- 1 | def stage(hub, name): 2 | ''' 3 | Take the highdata and reconcoile the extend keyword 4 | ''' 5 | high, errors = hub.idem.extend.reconcile(hub.idem.RUNS[name]['high']) 6 | 
hub.idem.RUNS[name]['high'] = high 7 | hub.idem.RUNS[name]['errors'] = errors -------------------------------------------------------------------------------- /idem/idem/compiler/0020_verify_high.py: -------------------------------------------------------------------------------- 1 | def stage(hub, name): 2 | high, errors = hub.idem.verify.high(hub.idem.RUNS[name]['high']) 3 | hub.idem.RUNS[name]['high'] = high 4 | hub.idem.RUNS[name]['errors'] = errors 5 | -------------------------------------------------------------------------------- /idem/idem/compiler/0030_req_in.py: -------------------------------------------------------------------------------- 1 | def stage(hub, name): 2 | high, errors = hub.idem.req_in.reconcile(hub.idem.RUNS[name]['high']) 3 | hub.idem.RUNS[name]['high'] = high 4 | hub.idem.RUNS[name]['errors'] = errors 5 | -------------------------------------------------------------------------------- /idem/idem/compiler/0040_exclude.py: -------------------------------------------------------------------------------- 1 | def stage(hub, name): 2 | ''' 3 | Apply the exclude value 4 | ''' 5 | high = hub.idem.exclude.apply(hub.idem.RUNS[name]['high']) 6 | hub.idem.RUNS[name]['high'] = high 7 | -------------------------------------------------------------------------------- /idem/idem/compiler/0050_compile.py: -------------------------------------------------------------------------------- 1 | def stage(hub, name): 2 | ''' 3 | Apply the exclude value 4 | ''' 5 | low = hub.idem.low.compile(hub.idem.RUNS[name]['high'], hub.idem.RUNS[name]['add_low']) 6 | hub.idem.RUNS[name]['low'] = low 7 | -------------------------------------------------------------------------------- /idem/idem/compiler/0060_treq.py: -------------------------------------------------------------------------------- 1 | def stage(hub, name): 2 | ''' 3 | Apply the exclude value 4 | ''' 5 | low = hub.idem.treq.apply( 6 | hub.idem.RUNS[name]['subs'], 7 | hub.idem.RUNS[name]['low'], 8 | ) 9 | 
hub.idem.RUNS[name]['low'] = low 10 | -------------------------------------------------------------------------------- /idem/idem/exclude.py: -------------------------------------------------------------------------------- 1 | # Import python libs 2 | import fnmatch 3 | 4 | 5 | def apply(self, high): 6 | ''' 7 | Read in the __exclude__ list and remove all excluded objects from the 8 | high data 9 | ''' 10 | if '__exclude__' not in high: 11 | return high 12 | ex_sls = set() 13 | ex_id = set() 14 | exclude = high.pop('__exclude__') 15 | for exc in exclude: 16 | if isinstance(exc, str): 17 | # The exclude statement is a string, assume it is an sls 18 | ex_sls.add(exc) 19 | if isinstance(exc, dict): 20 | # Explicitly declared exclude 21 | if len(exc) != 1: 22 | continue 23 | key = next(exc.keys()) 24 | if key == 'sls': 25 | ex_sls.add(exc['sls']) 26 | elif key == 'id': 27 | ex_id.add(exc['id']) 28 | # Now the excludes have been simplified, use them 29 | if ex_sls: 30 | # There are sls excludes, find the associated ids 31 | for name, body in high.items(): 32 | if name.startswith('__'): 33 | continue 34 | sls = body.get('__sls__', '') 35 | if not sls: 36 | continue 37 | for ex_ in ex_sls: 38 | if fnmatch.fnmatch(sls, ex_): 39 | ex_id.add(name) 40 | for id_ in ex_id: 41 | if id_ in high: 42 | high.pop(id_) 43 | return high 44 | 45 | -------------------------------------------------------------------------------- /idem/idem/extend.py: -------------------------------------------------------------------------------- 1 | def reconcile(hub, high): 2 | ''' 3 | Take the extend statement and reconcile it back into the highdata 4 | ''' 5 | errors = [] 6 | if '__extend__' not in high: 7 | return high, errors 8 | ext = high.pop('__extend__') 9 | for ext_chunk in ext: 10 | for id_, body in ext_chunk: 11 | if id_ not in high: 12 | state_type = next( 13 | x for x in body if not x.startswith('__') 14 | ) 15 | # Check for a matching 'name' override in high data 16 | ids = 
hub.idem.tools.find_id(id_, state_type, high) 17 | if len(ids) != 1: 18 | errors.append( 19 | 'Cannot extend ID \'{0}\' in \'{1}:{2}\'. It is not ' 20 | 'part of the high state.\n' 21 | 'This is likely due to a missing include statement ' 22 | 'or an incorrectly typed ID.\nEnsure that a ' 23 | 'state with an ID of \'{0}\' is available\nin ' 24 | 'environment \'{1}\' and to SLS \'{2}\''.format( 25 | id_, 26 | body.get('__env__', 'base'), 27 | body.get('__sls__', 'base')) 28 | ) 29 | continue 30 | else: 31 | id_ = ids[0][0] 32 | 33 | for state, run in body.items(): 34 | if state.startswith('__'): 35 | continue 36 | if state not in high[id_]: 37 | high[id_][state] = run 38 | continue 39 | for arg in run: 40 | update = False 41 | for hind in range(len(high[id_][state])): 42 | if isinstance(arg, str) and isinstance(high[id_][state][hind], str): 43 | # replacing the function, replace the index 44 | high[id_][state].pop(hind) 45 | high[id_][state].insert(hind, arg) 46 | update = True 47 | continue 48 | if isinstance(arg, dict) and isinstance(high[id_][state][hind], dict): 49 | # It is an option, make sure the options match 50 | argfirst = next(iter(arg)) 51 | if argfirst == next(iter(high[id_][state][hind])): 52 | # If argfirst is a requisite then we must merge 53 | # our requisite with that of the target state 54 | if argfirst in STATE_REQUISITE_KEYWORDS: 55 | high[id_][state][hind][argfirst].extend(arg[argfirst]) 56 | # otherwise, its not a requisite and we are just extending (replacing) 57 | else: 58 | high[id_][state][hind] = arg 59 | update = True 60 | if (argfirst == 'name' and 61 | next(iter(high[id_][state][hind])) == 'names'): 62 | # If names are overwritten by name use the name 63 | high[id_][state][hind] = arg 64 | if not update: 65 | high[id_][state].append(arg) 66 | return high, errors 67 | -------------------------------------------------------------------------------- /idem/idem/get.py: 
-------------------------------------------------------------------------------- 1 | ''' 2 | This file contains routines to get sls files from references 3 | ''' 4 | # Import python libs 5 | import os 6 | 7 | 8 | async def ref(hub, name, sls): 9 | ''' 10 | Cache the given file from the named reference point 11 | ''' 12 | for source in hub.idem.RUNS[name]['sls_sources']: 13 | proto = source[:source.index(':')] 14 | path = sls.replace('.', '/') 15 | locs = [f'{path}.sls', f'{path}/init.sls'] 16 | for loc in locs: 17 | full = os.path.join(source, loc) 18 | cfn = await hub.pop.ref.last(f'sls.{proto}.cache')(hub.idem.RUNS[name]['cache_dir'], full) 19 | if cfn: 20 | return cfn 21 | -------------------------------------------------------------------------------- /idem/idem/init.py: -------------------------------------------------------------------------------- 1 | # The order of the sequence that needs to be implemented: 2 | # Start with a single sls file, just like you started with salt 3 | # Stub out the routines around gathering the initial sls file 4 | # Just use a yaml renderer and get it to where we can manage some basic 5 | # includes to drive to highdata 6 | 7 | # Then we can start to fill out renderers while at the same time 8 | # deepening the compiler 9 | 10 | # Import python libs 11 | import asyncio 12 | import os 13 | import copy 14 | 15 | __func_alias__ = {'compile_': 'compile'} 16 | 17 | 18 | def __init__(hub): 19 | hub.pop.sub.load_subdirs(hub.idem) 20 | hub.idem.RUNS = {} 21 | hub.pop.sub.add('idem.sls') 22 | hub.pop.sub.add(dyne_name='rend') 23 | hub.pop.sub.add(dyne_name='output') 24 | hub.pop.sub.add(dyne_name='exec') 25 | hub.pop.sub.load_subdirs(hub.exec, recurse=True) 26 | hub.pop.sub.add(dyne_name='states') 27 | hub.pop.sub.add(dyne_name='takara') 28 | hub.pop.sub.load_subdirs(hub.states, recurse=True) 29 | hub.idem.init.req_map() 30 | 31 | 32 | def req_map(hub): 33 | ''' 34 | Gather the requisite restrtrictions and populate the requisite behavior 
map 35 | ''' 36 | rmap = {} 37 | for mod in hub.idem.req: 38 | if mod.__sub_name__ == 'init': 39 | continue 40 | if hasattr(mod, 'define'): 41 | rmap[mod.__sub_name__] = mod.define() 42 | hub.idem.RMAP = rmap 43 | 44 | 45 | def cli(hub): 46 | ''' 47 | Execute a single idem run from the cli 48 | ''' 49 | hub.pop.conf.integrate(['idem', 'takara'], cli='idem', roots=True) 50 | hub.pop.loop.start(hub.idem.init.cli_apply()) 51 | 52 | 53 | async def cli_apply(hub): 54 | ''' 55 | Run the CLI routine in a loop 56 | ''' 57 | sls_sources = hub.OPT['idem']['sls_sources'] 58 | if hub.OPT['idem']['takara_unit']: 59 | hub.idem.init.init_takara( 60 | hub.OPT['idem']['takara_unit'], 61 | hub.OPT['idem']['seal_raw'], 62 | **hub.OPT['takara']) 63 | if hub.OPT['idem']['tree']: 64 | src = os.path.join('file://', hub.OPT['idem']['tree']) 65 | if len(sls_sources) == 1: 66 | if sls_sources[0] == 'file://': 67 | sls_sources = [src] 68 | else: 69 | sls_sources.append(src) 70 | await hub.idem.init.apply( 71 | 'cli', 72 | sls_sources, 73 | hub.OPT['idem']['render'], 74 | hub.OPT['idem']['runtime'], 75 | ['states'], 76 | hub.OPT['idem']['cache_dir'], 77 | hub.OPT['idem']['sls'], 78 | hub.OPT['idem']['test'], 79 | ) 80 | 81 | errors = hub.idem.RUNS['cli']['errors'] 82 | if errors: 83 | display = getattr(hub, 'output.nested.display')(errors) 84 | print(errors) 85 | return 86 | running = hub.idem.RUNS['cli']['running'] 87 | output = hub.OPT['idem']['output'] 88 | display = getattr(hub, f'output.{output}.display')(running) 89 | print(display) 90 | 91 | 92 | async def init_takara(hub, unit, seal_raw, **tkw): 93 | ''' 94 | Setup and unseal a connection to takara 95 | ''' 96 | tkw['unit'] = unit 97 | tkw['seal_raw'] = seal_raw 98 | await hub.takara.init.setup(**tkw) 99 | await hub.takara.init.unseal(**tkw) 100 | 101 | 102 | def create(hub, name, sls_sources, render, runtime, subs, cache_dir, test): 103 | ''' 104 | Create a new instance to execute against 105 | ''' 106 | hub.idem.RUNS[name] = { 107 | 
'sls_sources': sls_sources, 108 | 'render': render, 109 | 'runtime': runtime, 110 | 'subs': subs, 111 | 'cache_dir': cache_dir, 112 | 'states': {}, 113 | 'test': test, 114 | 'resolved': set(), 115 | 'files': set(), 116 | 'high': {}, 117 | 'errors': [], 118 | 'iorder': 100000, 119 | 'sls_refs': {}, 120 | 'blocks': {}, 121 | 'running': {}, 122 | 'run_num': 1, 123 | 'add_low': [], 124 | } 125 | 126 | 127 | async def apply( 128 | hub, 129 | name, 130 | sls_sources, 131 | render, 132 | runtime, 133 | subs, 134 | cache_dir, 135 | sls, 136 | test=False): 137 | ''' 138 | Run idem! 139 | ''' 140 | hub.idem.init.create(name, sls_sources, render, runtime, subs, cache_dir, test) 141 | # Get the sls file 142 | # render it 143 | # compile high data to "new" low data (bypass keyword issues) 144 | # Run the low data using act/idem 145 | await hub.idem.resolve.gather(name, *sls) 146 | if hub.idem.RUNS[name]['errors']: 147 | return 148 | await hub.idem.init.compile(name) 149 | if hub.idem.RUNS[name]['errors']: 150 | return 151 | ret = await hub.idem.run.init.start(name) 152 | 153 | 154 | async def compile_(hub, name): 155 | ''' 156 | Compile the data defined in the given run name 157 | ''' 158 | for mod in hub.idem.compiler: 159 | if hasattr(mod, 'stage'): 160 | ret = mod.stage(name) 161 | if asyncio.iscoroutine(ret): 162 | await ret 163 | -------------------------------------------------------------------------------- /idem/idem/low.py: -------------------------------------------------------------------------------- 1 | # Import python libs 2 | import copy 3 | 4 | 5 | def order_chunks(hub, chunks): 6 | ''' 7 | Sort the chunk list verifying that the chunks follow the order 8 | specified in the order options. 
9 | ''' 10 | cap = 1 11 | for chunk in chunks: 12 | if 'order' in chunk: 13 | if not isinstance(chunk['order'], int): 14 | continue 15 | 16 | chunk_order = chunk['order'] 17 | if chunk_order > cap - 1 and chunk_order > 0: 18 | cap = chunk_order + 100 19 | for chunk in chunks: 20 | if 'order' not in chunk: 21 | chunk['order'] = cap 22 | continue 23 | 24 | if not isinstance(chunk['order'], (int, float)): 25 | if chunk['order'] == 'last': 26 | chunk['order'] = cap + 1000000 27 | elif chunk['order'] == 'first': 28 | chunk['order'] = 0 29 | else: 30 | chunk['order'] = cap 31 | if 'name_order' in chunk: 32 | chunk['order'] = chunk['order'] + chunk.pop('name_order') / 10000.0 33 | if chunk['order'] < 0: 34 | chunk['order'] = cap + 1000000 + chunk['order'] 35 | chunks.sort(key=lambda chunk: (chunk['order'], '{0[state]}{0[name]}{0[fun]}'.format(chunk))) 36 | return chunks 37 | 38 | 39 | def compile(hub, high, add_low): 40 | ''' 41 | "Compile" the high data as it is retrieved from the CLI or YAML into 42 | the individual state executor structures 43 | ''' 44 | chunks = [] 45 | for name, body in high.items(): 46 | if name.startswith('__'): 47 | continue 48 | for state, run in body.items(): 49 | funcs = set() 50 | names = [] 51 | if state.startswith('__'): 52 | continue 53 | chunk = {} 54 | chunk['state'] = state 55 | chunk['name'] = name 56 | if '__sls__' in body: 57 | chunk['__sls__'] = body['__sls__'] 58 | if '__env__' in body: 59 | chunk['__env__'] = body['__env__'] 60 | chunk['__id__'] = name 61 | for arg in run: 62 | if isinstance(arg, str): 63 | funcs.add(arg) 64 | continue 65 | if isinstance(arg, dict): 66 | for key, val in arg.items(): 67 | if key == 'names': 68 | for _name in val: 69 | if _name not in names: 70 | names.append(_name) 71 | elif key == 'state': 72 | # Don't pass down a state override 73 | continue 74 | elif (key == 'name' and 75 | not isinstance(val, str)): 76 | # Invalid name, fall back to ID 77 | chunk[key] = name 78 | else: 79 | chunk[key] = val 80 | 
if names: 81 | name_order = 1 82 | for entry in names: 83 | live = copy.deepcopy(chunk) 84 | if isinstance(entry, dict): 85 | low_name = next(entry.keys()) 86 | live['name'] = low_name 87 | list(map(live.update, entry[low_name])) 88 | else: 89 | live['name'] = entry 90 | live['name_order'] = name_order 91 | name_order += 1 92 | for fun in funcs: 93 | live['fun'] = fun 94 | chunks.append(live) 95 | else: 96 | live = copy.deepcopy(chunk) 97 | for fun in funcs: 98 | live['fun'] = fun 99 | chunks.append(live) 100 | chunks.extend(add_low) 101 | chunks = hub.idem.low.order_chunks(chunks) 102 | return chunks 103 | -------------------------------------------------------------------------------- /idem/idem/req/init.py: -------------------------------------------------------------------------------- 1 | def seq(hub, low, running): 2 | ''' 3 | Return the sequence map that should be used to execute the lowstate 4 | The sequence needs to identify: 5 | 1. recursive requisites 6 | 2. what chunks are free to run 7 | 3. 
Behavior augments for the next chunk to run 8 | ''' 9 | ret = {} 10 | for ind, chunk in enumerate(low): 11 | tag = hub.idem.tools.gen_tag(chunk) 12 | if tag in running: 13 | # Already ran this one, don't add it to the sequence 14 | continue 15 | ret[ind] = {'chunk': chunk, 'reqrets': [], 'unmet': set()} 16 | for req in hub.idem.RMAP: 17 | if req in chunk: 18 | for rdef in chunk[req]: 19 | if not isinstance(rdef, dict): 20 | # TODO: Error check 21 | continue 22 | state = next(iter(rdef)) 23 | name = rdef[state] 24 | r_chunks = hub.idem.tools.get_chunks(low, state, name) 25 | if not r_chunks: 26 | ret[ind]['errors'].append(f'Requisite {req} {state}:{name} not found') 27 | for r_chunk in r_chunks: 28 | r_tag = hub.idem.tools.gen_tag(r_chunk) 29 | if r_tag in running: 30 | reqret = { 31 | 'req': req, 32 | 'name': name, 33 | 'state': state, 34 | 'r_tag': r_tag, 35 | 'ret': running[r_tag], 36 | } 37 | # it has been run, check the rules 38 | ret[ind]['reqrets'].append(reqret) 39 | else: 40 | ret[ind]['unmet'].add(r_tag) 41 | return ret 42 | -------------------------------------------------------------------------------- /idem/idem/req/onchanges.py: -------------------------------------------------------------------------------- 1 | def define(hub): 2 | ''' 3 | Define how the onchanges requisite should run 4 | ''' 5 | return { 6 | 'result': True, 7 | 'changes': True, 8 | } 9 | -------------------------------------------------------------------------------- /idem/idem/req/onfail.py: -------------------------------------------------------------------------------- 1 | def define(hub): 2 | ''' 3 | Define how the onfail requisite should behave 4 | ''' 5 | return { 6 | 'result': False, 7 | } 8 | -------------------------------------------------------------------------------- /idem/idem/req/require.py: -------------------------------------------------------------------------------- 1 | def define(hub): 2 | ''' 3 | Return the definition used by the runtime to insert the conditions 
of the 4 | given requisite 5 | ''' 6 | return { 7 | 'result': [True, None], 8 | } 9 | -------------------------------------------------------------------------------- /idem/idem/req/watch.py: -------------------------------------------------------------------------------- 1 | def define(hub): 2 | ''' 3 | Define how the watch requisite should behave 4 | ''' 5 | return { 6 | 'result': True, 7 | 'changes_post': 'mod_watch', 8 | } 9 | -------------------------------------------------------------------------------- /idem/idem/req_in.py: -------------------------------------------------------------------------------- 1 | def reconcile(hub, high, disabled_reqs=None): 2 | ''' 3 | Extend the data reference with requisite_in arguments 4 | ''' 5 | req_in = {'require_in', 'watch_in', 'onfail_in', 'onchanges_in', 'use', 'use_in', 'prereq', 'prereq_in'} 6 | req_in_all = req_in.union({'require', 'watch', 'onfail', 'onfail_stop', 'onchanges'}) 7 | extend = {} 8 | errors = [] 9 | if disabled_reqs is None: 10 | disabled_reqs = [] 11 | if not isinstance(disabled_reqs, list): 12 | disabled_reqs = [disabled_reqs] 13 | # Highdata iterator 14 | for id_, body, state, run, arg in hub.idem.tools.iter_high(high): 15 | # Iterator yields args 16 | if isinstance(arg, dict): 17 | # It is not a function, verify that the arg is a 18 | # requisite in statement 19 | if len(arg) < 1: 20 | # Empty arg dict 21 | # How did we get this far? 22 | continue 23 | # Split out the components 24 | key = next(iter(arg)) 25 | if key not in req_in: 26 | continue 27 | if key in disabled_reqs: 28 | continue 29 | rkey = key.split('_')[0] 30 | items = arg[key] 31 | if isinstance(items, dict): 32 | # Formatted as a single req_in 33 | for _state, name in items.items(): 34 | # Not a use requisite_in 35 | found = False 36 | if name not in extend: 37 | extend[name] = {} 38 | if '.' in _state: 39 | errors.append( 40 | f'Invalid requisite in {rkey}: {_state} for ' 41 | f'{name}, in SLS "{body["__sls__"]}". 
Requisites must ' 42 | f'not contain dots, did you mean "{_state[:_state.find(".")]}"?' 43 | ) 44 | _state = _state.split('.')[0] 45 | if _state not in extend[name]: 46 | extend[name][_state] = [] 47 | extend[name]['__env__'] = body['__env__'] 48 | extend[name]['__sls__'] = body['__sls__'] 49 | for ind in range(len(extend[name][_state])): 50 | if next(iter( 51 | extend[name][_state][ind])) == rkey: 52 | # Extending again 53 | extend[name][_state][ind][rkey].append( 54 | {state: id_} 55 | ) 56 | found = True 57 | if found: 58 | continue 59 | # The rkey is not present yet, create it 60 | extend[name][_state].append( 61 | {rkey: [{state: id_}]} 62 | ) 63 | if isinstance(items, list): 64 | # Formed as a list of requisite additions 65 | hinges = [] 66 | for ind in items: 67 | if not isinstance(ind, dict): 68 | # Malformed req_in 69 | if ind in high: 70 | _ind_high = [x for x 71 | in high[ind] 72 | if not x.startswith('__')] 73 | ind = {_ind_high[0]: ind} 74 | else: 75 | found = False 76 | for _id in iter(high): 77 | for state in [state for state 78 | in iter(high[_id]) 79 | if not state.startswith('__')]: 80 | for j in iter(high[_id][state]): 81 | if isinstance(j, dict) and 'name' in j: 82 | if j['name'] == ind: 83 | ind = {state: _id} 84 | found = True 85 | if not found: 86 | continue 87 | if len(ind) < 1: 88 | continue 89 | pstate = next(iter(ind)) 90 | pname = ind[pstate] 91 | if pstate == 'sls': 92 | # Expand hinges here 93 | hinges = find_sls_ids(pname, high) 94 | else: 95 | hinges.append((pname, pstate)) 96 | if '.' in pstate: 97 | errors.append( 98 | 'Invalid requisite in {0}: {1} for ' 99 | '{2}, in SLS \'{3}\'. Requisites must ' 100 | 'not contain dots, did you mean \'{4}\'?' 
101 | .format( 102 | rkey, 103 | pstate, 104 | pname, 105 | body['__sls__'], 106 | pstate[:pstate.find('.')] 107 | ) 108 | ) 109 | pstate = pstate.split(".")[0] 110 | for tup in hinges: 111 | name, _state = tup 112 | if key == 'prereq_in': 113 | # Add prerequired to origin 114 | if id_ not in extend: 115 | extend[id_] = {} 116 | if state not in extend[id_]: 117 | extend[id_][state] = [] 118 | extend[id_][state].append( 119 | {'prerequired': [{_state: name}]} 120 | ) 121 | if key == 'prereq': 122 | # Add prerequired to prereqs 123 | ext_ids = find_name(name, _state, high) 124 | for ext_id, _req_state in ext_ids: 125 | if ext_id not in extend: 126 | extend[ext_id] = {} 127 | if _req_state not in extend[ext_id]: 128 | extend[ext_id][_req_state] = [] 129 | extend[ext_id][_req_state].append( 130 | {'prerequired': [{state: id_}]} 131 | ) 132 | continue 133 | if key == 'use_in': 134 | # Add the running states args to the 135 | # use_in states 136 | ext_ids = find_name(name, _state, high) 137 | for ext_id, _req_state in ext_ids: 138 | if not ext_id: 139 | continue 140 | ext_args = state_args(ext_id, _state, high) 141 | if ext_id not in extend: 142 | extend[ext_id] = {} 143 | if _req_state not in extend[ext_id]: 144 | extend[ext_id][_req_state] = [] 145 | ignore_args = req_in_all.union(ext_args) 146 | for arg in high[id_][state]: 147 | if not isinstance(arg, dict): 148 | continue 149 | if len(arg) != 1: 150 | continue 151 | if next(iter(arg)) in ignore_args: 152 | continue 153 | # Don't use name or names 154 | if next(arg.keys()) == 'name': 155 | continue 156 | if next(arg.keys()) == 'names': 157 | continue 158 | extend[ext_id][_req_state].append(arg) 159 | continue 160 | if key == 'use': 161 | # Add the use state's args to the 162 | # running state 163 | ext_ids = find_name(name, _state, high) 164 | for ext_id, _req_state in ext_ids: 165 | if not ext_id: 166 | continue 167 | loc_args = state_args(id_, state, high) 168 | if id_ not in extend: 169 | extend[id_] = {} 170 | if 
state not in extend[id_]: 171 | extend[id_][state] = [] 172 | ignore_args = req_in_all.union(loc_args) 173 | for arg in high[ext_id][_req_state]: 174 | if not isinstance(arg, dict): 175 | continue 176 | if len(arg) != 1: 177 | continue 178 | if next(iter(arg)) in ignore_args: 179 | continue 180 | # Don't use name or names 181 | if next(arg.keys()) == 'name': 182 | continue 183 | if next(arg.keys()) == 'names': 184 | continue 185 | extend[id_][state].append(arg) 186 | continue 187 | found = False 188 | if name not in extend: 189 | extend[name] = {} 190 | if _state not in extend[name]: 191 | extend[name][_state] = [] 192 | extend[name]['__env__'] = body['__env__'] 193 | extend[name]['__sls__'] = body['__sls__'] 194 | for ind in range(len(extend[name][_state])): 195 | if next(iter( 196 | extend[name][_state][ind])) == rkey: 197 | # Extending again 198 | extend[name][_state][ind][rkey].append( 199 | {state: id_} 200 | ) 201 | found = True 202 | if found: 203 | continue 204 | # The rkey is not present yet, create it 205 | extend[name][_state].append( 206 | {rkey: [{state: id_}]} 207 | ) 208 | high['__extend__'] = [] 209 | for key, val in extend.items(): 210 | high['__extend__'].append({key: val}) 211 | req_in_high, req_in_errors = hub.idem.extend.reconcile(high) 212 | errors.extend(req_in_errors) 213 | return req_in_high, errors -------------------------------------------------------------------------------- /idem/idem/resolve.py: -------------------------------------------------------------------------------- 1 | ''' 2 | The sls resolver is used to gather sls files, render them and return the initial 3 | phase 1 highdata. This involves translating sls references into file paths, 4 | downloading those sls files and then rendering them. 5 | 6 | Once an sls file is rendered the include statements are resolved as well. 
'''
The sls resolver is used to gather sls files, render them and return the initial
phase 1 highdata. This involves translating sls references into file paths,
downloading those sls files and then rendering them.

Once an sls file is rendered the include statements are resolved as well.
'''
# Import python libs
import re


async def gather(hub, name, *sls):
    '''
    Gather the named sls references for the run ``name``: locate and cache
    the backing files, then render every block whose requisites are already
    satisfied.
    '''
    await hub.idem.resolve.get_blocks(name, sls)
    await hub.idem.resolve.render(name)


async def get_blocks(hub, name, sls):
    '''
    Resolve each sls reference into a cached file and split it into render
    blocks, recording everything on the named run.
    '''
    for sls_ref in sls:
        cfn = await hub.idem.get.ref(name, sls_ref)
        if not cfn:
            # BUG FIX: the f-prefix was missing here, so the error message
            # contained the literal text "{sls_ref}"
            hub.idem.RUNS[name]['errors'].append(
                f'SLS ref {sls_ref} did not resolve to a file')
            continue
        blocks = hub.rend.init.blocks(cfn)
        hub.idem.RUNS[name]['blocks'][sls_ref] = blocks
        hub.idem.RUNS[name]['sls_refs'][sls_ref] = cfn
        hub.idem.RUNS[name]['resolved'].add(sls_ref)
        hub.idem.RUNS[name]['files'].add(cfn)


async def render(hub, name):
    '''
    Pop the available blocks and render them if they have satisfied requisites
    '''
    rendered = {}
    for sls_ref, blocks in hub.idem.RUNS[name]['blocks'].items():
        cfn = hub.idem.RUNS[name]['sls_refs'][sls_ref]
        for bname, block in blocks.items():
            clear = True
            for key, val in block.get('keys', {}).items():
                # TODO: This should be an additional render requisite plugin
                # subsystem, change it to a subsystem as soon as any new
                # conditionals are added!!
                clear = False
                if key == 'require':
                    # A block-level require is met once a state with the given
                    # name appears in the running dataset
                    for tag, data in hub.idem.RUNS[name]['running'].items():
                        if data['name'] == val:
                            clear = True
                            break
                if clear:
                    continue
            if clear:
                state = await hub.rend.init.parse_bytes(
                    block, hub.idem.RUNS[name]['render'])
                await hub.idem.resolve.introduce(name, state, sls_ref, cfn)
                rendered[sls_ref] = bname
    for sls_ref, bname in rendered.items():
        hub.idem.RUNS[name]['blocks'][sls_ref].pop(bname)


async def introduce(hub, name, state, sls_ref, cfn):
    '''
    Introduce the raw state into the running dataset
    '''
    if not isinstance(state, dict):
        hub.idem.RUNS[name]['errors'].append(
            f'SLS {sls_ref} is not formed as a dict but as a {type(state)}')
        return
    include = []
    if 'include' in state:
        if not isinstance(state['include'], list):
            hub.idem.RUNS[name]['errors'].append(
                f'Include Declaration in SLS {sls_ref} is not formed as a list but as a {type(state["include"])}')
            # BUG FIX: a malformed include is now dropped instead of being
            # processed below anyway
            state.pop('include')
        else:
            # BUG FIX: this read `state.pop(include)` — a bare name that
            # raised NameError; the string key 'include' was intended
            include = state.pop('include')
    hub.idem.resolve.extend(name, state, sls_ref)
    hub.idem.resolve.exclude(name, state, sls_ref)
    hub.idem.resolve.decls(name, state, sls_ref)
    hub.idem.resolve.iorder(name, state)
    await hub.idem.resolve.includes(name, include, state, sls_ref, cfn)
    hub.idem.RUNS[name]['high'].update(state)


def iorder(hub, name, state):
    '''
    Take a state and apply the iorder system: each state declaration without
    an explicit 'order' argument receives the run's next monotonically
    increasing order number.
    '''
    for id_ in state:
        if not isinstance(state[id_], dict):
            # Include's or excludes as lists?
            continue
        for s_dec in state[id_]:
            if not isinstance(s_dec, str):
                # PyDSL OrderedDict?
                continue
            if s_dec.startswith('_'):
                continue
            if not isinstance(state[id_][s_dec], list):
                # Bad syntax, let the verify seq pick it up later on
                continue
            found = False
            for arg in state[id_][s_dec]:
                if isinstance(arg, dict):
                    if len(arg) > 0:
                        if next(iter(arg)) == 'order':
                            found = True
            if not found:
                state[id_][s_dec].append(
                    {'order': hub.idem.RUNS[name]['iorder']}
                )
            hub.idem.RUNS[name]['iorder'] += 1


def extend(hub, name, state, sls_ref):
    '''
    Resolve the extend statement
    '''
    if 'extend' not in state:
        return
    ext = state.pop('extend')
    if not isinstance(ext, dict):
        hub.idem.RUNS[name]['errors'].append(
            f'Extension value in SLS "{sls_ref}" is not a dictionary')
        return
    for id_ in ext:
        if not isinstance(ext[id_], dict):
            hub.idem.RUNS[name]['errors'].append(
                f'Extension ID "{id_}" in SLS "{sls_ref}" is not a dictionary')
            continue
        if '__sls__' not in ext[id_]:
            ext[id_]['__sls__'] = sls_ref
        #if '__env__' not in ext[id_]:
        #    ext[id_]['__env__'] = saltenv
        for key in list(ext[id_]):
            if key.startswith('_'):
                continue
            if not isinstance(ext[id_][key], list):
                continue
            if '.' in key:
                # Normalize dotted "state.fun" keys into state: [..., fun]
                comps = key.split('.')
                ext[id_][comps[0]] = ext[id_].pop(key)
                ext[id_][comps[0]].append(comps[1])
    # BUG FIX: append once after the loop — appending inside the ID loop
    # added the same ext dict repeatedly, once per extension ID
    state.setdefault('__extend__', []).append(ext)


def exclude(hub, name, state, sls_ref):
    '''
    Resolve any exclude statements
    '''
    if 'exclude' not in state:
        return
    exc = state.pop('exclude')
    if not isinstance(exc, list):
        hub.idem.RUNS[name]['errors'].append(
            f'Exclude Declaration in SLS {sls_ref} is not formed as a list')
        # BUG FIX: stop here — extending with a non-list either raised or
        # silently injected garbage keys
        return
    state.setdefault('__exclude__', []).extend(exc)


def decls(hub, name, state, sls_ref):
    '''
    Resolve and state formatting and data insertion
    '''
    for id_ in state:
        if not isinstance(state[id_], dict):
            if id_ == '__extend__':
                continue
            if id_ == '__exclude__':
                continue
            if isinstance(state[id_], str):
                # If this is a short state, it needs to be padded
                if '.' in state[id_]:
                    comps = state[id_].split('.')
                    state[id_] = {'__sls__': sls_ref,
                                  comps[0]: [comps[1]]}
                    continue
            hub.idem.RUNS[name]['errors'].append(
                f'ID {id_} in SLS {sls_ref} is not a dictionary')
            continue
        skeys = set()
        for key in list(state[id_]):
            if key.startswith('_'):
                continue
            if not isinstance(state[id_][key], list):
                continue
            if '.' in key:
                comps = key.split('.')
                # Idem doesn't support state files such as:
                #
                # /etc/redis/redis.conf:
                #   file.managed:
                #     - user: redis
                #     - group: redis
                #     - mode: 644
                #   file.comment:
                #     - regex: ^requirepass
                ref = '.'.join(comps[:-1])
                if ref in skeys:
                    hub.idem.RUNS[name]['errors'].append(
                        f'ID "{id_}" in SLS "{sls_ref}" contains multiple state declarations of the same type'
                    )
                    continue
                state[id_][ref] = state[id_].pop(key)
                state[id_][ref].append(comps[-1])
                skeys.add(ref)
                continue
            skeys.add(key)
        if '__sls__' not in state[id_]:
            state[id_]['__sls__'] = sls_ref


async def includes(hub, name, include, state, sls_ref, cfn):
    '''
    Parse through the includes and download not-yet-resolved includes
    '''
    for inc_sls in include:
        if inc_sls.startswith('.'):
            # Relative include: resolve the leading dots against this ref
            match = re.match(r'^(\.+)(.*)$', inc_sls)
            if match:
                # BUG FIX: this rebinding previously clobbered the `include`
                # parameter (the list being iterated)
                levels, include_ref = match.groups()
            else:
                hub.idem.RUNS[name]['errors'].append(
                    f'Badly formatted include {inc_sls} found in SLS "{sls_ref}"')
                continue
            level_count = len(levels)
            p_comps = sls_ref.split('.')
            if cfn.endswith('/init.sls'):
                p_comps.append('init')
            if level_count > len(p_comps):
                hub.idem.RUNS[name]['errors'].append(
                    f'Attempted relative include of "{inc_sls}" within SLS {sls_ref} goes beyond top level package')
                continue
            inc_sls = '.'.join(p_comps[:-level_count] + [include_ref])
        if inc_sls not in hub.idem.RUNS[name]['resolved']:
            # BUG FIX: gather lives on the idem.resolve sub, not hub.sls
            # (the sls sub only holds file backends like file_sls.cache)
            await hub.idem.resolve.gather(name, inc_sls)
def check(hub, condition, req_ret, chunk):
    '''
    If the requiring chunk reported changes, schedule the state function
    named by ``condition`` (looked up on the chunk's own state module) to be
    run as a post hook.

    Returns an empty dict when nothing is to be done, otherwise a dict with
    the callable under the 'post' key.
    '''
    ret = {}
    if req_ret['ret']['changes']:
        # BUG FIX: getattr now takes a None default so a missing or misnamed
        # post function no longer raises AttributeError and kills the run
        # (mirrors the graceful lookup in rules/init.py get_func)
        func = getattr(hub, f'states.{chunk["state"]}.{condition}', None)
        if func:
            ret['post'] = func
    return ret
def get_func(hub, name, chunk):
    '''
    Given the runtime name and the chunk in question, determine what function
    on the hub that can be run.

    Tries every sub registered on the named run until the ref
    "<sub>.<state>.<fun>" resolves to a real callable; returns None when no
    sub provides the function.
    '''
    s_ref = chunk['state']
    for sub in hub.idem.RUNS[name]['subs']:
        test = f'{sub}.{s_ref}.{chunk["fun"]}'
        try:
            func = getattr(hub, test)
        except AttributeError:
            # The ref does not resolve on the hub; try the next sub
            continue
        if isinstance(func, pop.loader.LoadedMod):
            # Resolved to a whole loaded module, not a function
            continue
        if func is None:
            continue
        return func
    return None


async def run(hub, name, ctx, low, seq_comp, running, run_num):
    '''
    All requisites have been met for this low chunk.

    Evaluate the requisite rules for the chunk, run any 'pre' hooks, execute
    the state function itself, run any 'post' hooks and record the result in
    ``running`` under the chunk's tag.
    '''
    chunk = seq_comp['chunk']
    tag = hub.idem.tools.gen_tag(chunk)
    rdats = []
    errors = []
    # Evaluate every requisite result against the rules mapped for its type
    for reqret in seq_comp.get('reqrets', []):
        req = reqret['req']
        rules = hub.idem.RMAP[req]
        for rule in rules:
            if hasattr(hub.idem.rules, rule):
                rdat = (getattr(hub.idem.rules, rule).check(rules[rule], reqret, chunk))
                if rdat.get('errors'):
                    errors.extend(rdat['errors'])
                rdats.append(rdat)
    if errors:
        # A requisite rule failed: record the failure, do not run the state
        running[tag] = {
            'name': chunk['name'],
            'changes': {},
            'comment': '\n'.join(errors),
            'result': False,
            '__run_num': run_num}
        return
    func = hub.idem.rules.init.get_func(name, chunk)
    if func is None:
        running[tag] = {
            'name': chunk['name'],
            'changes': {},
            'comment': f'The named state {chunk["state"]} is not available',
            'result': False,
            '__run_num': run_num}
        return
    chunk['ctx'] = ctx
    # Map the chunk's data onto the state function's signature, routing any
    # internal keywords through expected_extra_kws
    call = hub.idem.tools.format_call(func, chunk, expected_extra_kws=STATE_INTERNAL_KEYWORDS)
    for rdat in rdats:
        if 'pre' in rdat:
            ret = rdat['pre'](*call['args'], **call['kwargs'])
            if asyncio.iscoroutine(ret):
                ret = await ret
    ret = func(*call['args'], **call['kwargs'])
    if asyncio.iscoroutine(ret):
        ret = await ret
    for rdat in rdats:
        if 'post' in rdat:
            # NOTE(review): the post hook's return value replaces the state
            # function's own return — confirm this overwrite is intended
            ret = rdat['post'](*call['args'], **call['kwargs'])
            if asyncio.iscoroutine(ret):
                ret = await ret
    ret['__run_num'] = run_num
    running[tag] = ret
async def start(hub, name):
    '''
    Execute the named run. Called only after the named run has compiled low
    data; if no low data is present a ValueError is raised.

    Loops the configured runtime (serial/parallel) over the requisite
    sequence, re-rendering and recompiling between passes until every low
    chunk has a result, and aborts when a pass makes no progress (which
    indicates a recursive requisite).
    '''
    if not hub.idem.RUNS[name].get('low'):
        # BUG FIX: this ValueError previously carried no message at all
        raise ValueError(f'No low data compiled for run "{name}"')
    ctx = {'run_name': name, 'test': hub.idem.RUNS[name]['test']}
    rtime = hub.idem.RUNS[name]['runtime']
    low = hub.idem.RUNS[name].get('low')
    ref = f'idem.run.{rtime}.runtime'
    old_seq = {}
    old_seq_len = -1
    while True:
        # TODO: make the errors float up
        seq = hub.idem.req.init.seq(low, hub.idem.RUNS[name]['running'])
        if seq == old_seq:
            # BUG FIX: this bare Exception previously carried no message
            raise Exception(
                f'No progress made in run "{name}", possible recursive requisite')
        await getattr(hub, ref)(name, ctx, seq, low, hub.idem.RUNS[name]['running'])
        # Check for any new, available blocks to render, then recompile so
        # the fresh high data lands in the low state
        await hub.idem.resolve.render(name)
        await hub.idem.init.compile(name)
        low = hub.idem.RUNS[name].get('low')
        if len(low) <= len(hub.idem.RUNS[name]['running']):
            # Every low chunk has a result, the run is complete
            break
        if len(seq) == old_seq_len:
            # We made no progress! Recursive requisite!
            raise Exception(f'Recursive requisite found in run "{name}"')
        old_seq = seq
        old_seq_len = len(seq)
# Import python libs
import copy
import fnmatch
import inspect


def gen_tag(hub, chunk):
    '''
    Generate the unique tag used to track the execution of the chunk
    '''
    return f'{chunk["state"]}_|-{chunk["__id__"]}_|-{chunk["name"]}_|-{chunk["fun"]}'


def get_chunks(hub, low, state, name):
    '''
    Search in the low state for the chunks with the given designation.
    ``name`` is an fnmatch glob; when ``state`` is 'sls' it is matched
    against the chunk's __sls__, otherwise against the chunk's name or id.
    '''
    rets = []
    for chunk in low:
        if state == 'sls':
            if fnmatch.fnmatch(chunk['__sls__'], name):
                rets.append(chunk)
            continue
        if state == chunk['state']:
            if (fnmatch.fnmatch(chunk['name'], name) or fnmatch.fnmatch(chunk['__id__'], name)):
                rets.append(chunk)
    return rets


def find_name(hub, name, state, high):
    '''
    Scan high data for the id referencing the given name and return a list of
    (ID, state) tuples that match.
    Note: if `state` is sls, then we are looking for all IDs that match the
    given SLS
    '''
    ext_id = []
    if name in high:
        ext_id.append((name, state))
    # if we are requiring an entire SLS, then we need to add ourselves to
    # everything in that SLS
    elif state == 'sls':
        for nid, item in high.items():
            # Guard against non-dict entries (__exclude__/__extend__ lists)
            if isinstance(item, dict) and item.get('__sls__') == name:
                ext_id.append((nid, next(iter(item))))
    # otherwise we are requiring a single state, lets find it
    else:
        # We need to scan for the name
        for nid in high:
            if state in high[nid]:
                if isinstance(high[nid][state], list):
                    for arg in high[nid][state]:
                        if not isinstance(arg, dict):
                            continue
                        if len(arg) != 1:
                            continue
                        if arg[next(iter(arg))] == name:
                            ext_id.append((nid, state))
    return ext_id


def format_call(hub,
                fun,
                data,
                initial_ret=None,
                expected_extra_kws=(),
                is_class_method=None):
    '''
    Build the required arguments and keyword arguments required for the
    passed function.

    :param fun: The function to get the argspec from (must expose a
        ``signature`` attribute, as pop-loaded functions do)
    :param data: A dictionary containing the required data to build the
        arguments and keyword arguments.
    :param initial_ret: The initial return data pre-populated as dictionary
        or None
    :param expected_extra_kws: Any expected extra keyword argument names
        which should not trigger a :ref:`SaltInvocationError`
    :param is_class_method: Pass True if you are sure that the function being
        passed is a class method (kept for interface compatibility).
    :returns: A dictionary with the function required arguments and keyword
        arguments.
    '''
    ret = initial_ret if initial_ret is not None else {}

    ret['args'] = []
    ret['kwargs'] = {}
    args = []
    kwargs = {}
    keywords = False

    sig = fun.signature
    for name, param in sig.parameters.items():
        if name == 'hub':
            # hub is injected by pop, never supplied from chunk data
            continue
        if param.kind.name == 'POSITIONAL_OR_KEYWORD':
            # BUG FIX: this used isinstance(param.default, inspect._empty),
            # which is always False because the "no default" sentinel IS the
            # _empty class itself — every required parameter was mis-filed
            # as a keyword argument with the sentinel as its default
            if param.default is inspect.Parameter.empty:
                args.append(name)
            else:
                kwargs[name] = param.default
        if param.kind.name == 'KEYWORD_ONLY':
            kwargs[name] = param.default
        if param.kind.name == 'VAR_KEYWORD':
            keywords = True
    ret['avail_kwargs'] = copy.copy(kwargs)
    ret['avail_args'] = copy.copy(args)
    ret['keywords'] = keywords

    # Since we WILL be changing the data dictionary, let's change a copy of it
    data = data.copy()

    missing_args = []

    for key in kwargs:
        try:
            kwargs[key] = data.pop(key)
        except KeyError:
            # Let's leave the default value in place
            pass

    while args:
        arg = args.pop(0)
        try:
            ret['args'].append(data.pop(arg))
        except KeyError:
            missing_args.append(arg)

    if missing_args:
        used_args_count = len(ret['args']) + len(args)
        args_count = used_args_count + len(missing_args)
        #raise SaltInvocationError(
        #    '{0} takes at least {1} argument{2} ({3} given)'.format(
        #        fun.__name__,
        #        args_count,
        #        args_count > 1 and 's' or '',
        #        used_args_count
        #    )
        #)

    ret['kwargs'].update(kwargs)

    if keywords:
        # The function accepts **kwargs, any non expected extra keyword
        # arguments will made available.
        for key, value in data.items():
            if key in expected_extra_kws:
                continue
            ret['kwargs'][key] = value

        # No need to check for extra keyword arguments since they are all
        # **kwargs now. Return
        return ret

    # Did not return yet? Lets gather any remaining and unexpected keyword
    # arguments
    extra = {}
    for key, value in data.items():
        if key in expected_extra_kws:
            continue
        extra[key] = copy.deepcopy(value)

    if extra:
        # Found unexpected keyword arguments, raise an error to the user
        if len(extra) == 1:
            msg = '\'{0[0]}\' is an invalid keyword argument for \'{1}\''.format(
                list(extra.keys()),
                ret.get(
                    # In case this is being called for a state module
                    'full',
                    # Not a state module, build the name
                    '{0}.{1}'.format(fun.__module__, fun.__name__)
                )
            )
        else:
            msg = '{0} and \'{1}\' are invalid keyword arguments for \'{2}\''.format(
                ', '.join(['\'{0}\''.format(e) for e in extra][:-1]),
                list(extra.keys())[-1],
                ret.get(
                    # In case this is being called for a state module
                    'full',
                    # Not a state module, build the name
                    '{0}.{1}'.format(fun.__module__, fun.__name__)
                )
            )
        #raise SaltInvocationError(msg)
    return ret


def ishashable(hub, obj):
    '''
    A simple test to verify if a given object is hashable and can therefore
    be used as a key in a dict
    '''
    try:
        hash(obj)
    except TypeError:
        return False
    return True


def iter_high(hub, high):
    '''
    Take a highstate structure and iterate over it yielding the elements down
    to the execution args.
    Yields (id_, body, state, run, arg)
    '''
    for id_, body in high.items():
        if not isinstance(body, dict):
            continue
        for state, run in body.items():
            if state.startswith('__'):
                continue
            for arg in run:
                yield id_, body, state, run, arg
'''
This plugin is used to resolve transparent requisites and apply them to
the lowstate
'''
# TREQ = {func_D requires func_a, B, C}
#TREQ = {'func_D':
#        'require': [
#            'foo.bar.baz.func_A',
#            'test.func_B',
#        ],
#        'soft_require': [
#            'cheese.func_C',
#        ],
#    }


def gather(hub, subs, low):
    '''
    Collect the TREQ (transparent requisite) maps from every state module
    referenced by the low chunks, keyed by state ref.
    '''
    # Local import: pop.loader is only needed to recognize loaded modules,
    # keeping it here lets the module import without pop on the path
    import pop.loader
    ret = {}
    for chunk in low:
        s_ref = chunk['state']
        if s_ref in ret:
            continue
        for sub in subs:
            test = f'{sub}.{s_ref}'
            try:
                mod = getattr(hub, test)
            except AttributeError:
                continue
            if not isinstance(mod, pop.loader.LoadedMod):
                # Did not resolve to a loaded state module
                continue
            if hasattr(mod, 'TREQ'):
                ret.update({s_ref: mod.TREQ})
    return ret


def apply(hub, subs, low):
    '''
    Look up the transparent requisites as defined in state modules and apply
    them to the respective low chunks
    '''
    treq = hub.idem.treq.gather(subs, low)
    refs = {}
    for ind, chunk in enumerate(low):
        path = f'{chunk["state"]}.{chunk["fun"]}'
        if path not in refs:
            refs[path] = []
        # I am using a list to maintain requisite ordering. if a set is used
        # The we will have no deterministic ordering, which would be BAD!!!
        if ind not in refs[path]:
            refs[path].append(ind)
    for chunk in low:
        if not chunk['state'] in treq:
            continue
        if not chunk['fun'] in treq[chunk['state']]:
            continue
        rule = treq[chunk['state']][chunk['fun']]
        for req, r_refs in rule.items():
            for ref in r_refs:
                if ref not in refs:
                    continue
                # BUG FIX: this loop iterated refs[path], where `path` was a
                # stale leftover from the indexing loop above — the wrong
                # chunks (often the requiring chunk itself) got wired up as
                # requisites. refs[ref] is the required function's chunks.
                for rind in refs[ref]:
                    req_chunk = low[rind]
                    if req not in chunk:
                        chunk[req] = []
                    chunk[req].append({req_chunk['state']: req_chunk['__id__']})
    return low
def high(hub, high):
    '''
    Verify that the high data is viable and follows the data structure.

    Returns a (high, errors) tuple where errors is a list of human readable
    problem descriptions; an empty list means the high data verified clean.
    '''
    errors = []
    if not isinstance(high, dict):
        errors.append('High data is not a dictionary and is invalid')
        # BUG FIX: return early — iterating below requires a dict and
        # previously raised AttributeError instead of reporting the error
        return high, errors
    reqs = {}
    for id_, body in high.items():
        if isinstance(id_, str):
            if id_.startswith('__'):
                continue
        else:
            # BUG FIX: the startswith() call used to run before this type
            # check, raising TypeError on non-string IDs
            errors.append(
                f'ID "{id_}" in SLS "{body["__sls__"]}" is not formed as a string, but '
                f'is a {type(id_).__name__}'
            )
        if not isinstance(body, dict):
            err = f'The type {id_} in {body} is not formatted as a dictionary'
            errors.append(err)
            continue
        for state in body:
            if state.startswith('__'):
                continue
            if not isinstance(body[state], list):
                errors.append(
                    f'State "{id_}" in SLS "{body["__sls__"]}" is not formed as a list'
                )
                continue
            # Count function declarations: a dotted state key or a bare
            # string arg names the function; exactly one must be present
            fun = 0
            if '.' in state:
                fun += 1
            for arg in body[state]:
                if isinstance(arg, str):
                    fun += 1
                    if ' ' in arg.strip():
                        errors.append((
                            f'The function "{arg}" in state '
                            f'"{id_}" in SLS "{body["__sls__"]}" has '
                            'whitespace, a function with whitespace is '
                            'not supported, perhaps this is an argument '
                            'that is missing a ":"'))
                elif isinstance(arg, dict):
                    # The arg is a dict, if the arg is require or
                    # watch, it must be a list.
                    #
                    # Add the requires to the reqs dict and check them
                    # all for recursive requisites.
                    argfirst = next(iter(arg))
                    if argfirst in ('require', 'watch', 'prereq', 'onchanges'):
                        if not isinstance(arg[argfirst], list):
                            errors.append((f'The {argfirst}'
                                           f' statement in state "{id_}" in SLS "{body["__sls__"]}" '
                                           'needs to be formed as a list'))
                        # It is a list, verify that the members of the
                        # list are all single key dicts.
                        else:
                            reqs[id_] = {'state': state}
                            for req in arg[argfirst]:
                                if isinstance(req, str):
                                    req = {'id': req}
                                if not isinstance(req, dict):
                                    err = (f'Requisite declaration {req}'
                                           f' in SLS {body["__sls__"]} is not formed as a'
                                           ' single key dictionary')
                                    errors.append(err)
                                    continue
                                req_key = next(iter(req))
                                req_val = req[req_key]
                                if '.' in req_key:
                                    errors.append((
                                        f'Invalid requisite type "{req_key}" '
                                        f'in state "{id_}", in SLS '
                                        f'"{body["__sls__"]}". Requisite types must '
                                        'not contain dots, did you '
                                        f'mean "{req_key[:req_key.find(".")]}"?'
                                    ))
                                if not hub.idem.tools.ishashable(req_val):
                                    errors.append((
                                        f'Illegal requisite "{str(req_val)}", '
                                        f'is SLS {body["__sls__"]}\n'
                                    ))
                                    continue
                                # Check for global recursive requisites
                                reqs[id_][req_val] = req_key
                                if req_val in reqs:
                                    if id_ in reqs[req_val]:
                                        if reqs[req_val][id_] == state:
                                            if reqs[req_val]['state'] == reqs[id_][req_val]:
                                                err = ('A recursive '
                                                       'requisite was found, SLS '
                                                       f'"{body["__sls__"]}" ID "{id_}" ID "{req_val}"'
                                                       )
                                                errors.append(err)
                    # Make sure that there is only one key in the dict
                    if len(list(arg)) != 1:
                        errors.append(('Multiple dictionaries '
                                       f'defined in argument of state "{id_}" in SLS "{body["__sls__"]}"'
                                       ))
            if not fun:
                if state == 'require' or state == 'watch':
                    continue
                errors.append((f'No function declared in state "{state}" in'
                               f' SLS "{body["__sls__"]}"'))
            elif fun > 1:
                errors.append(
                    f'Too many functions declared in state "{state}" in '
                    f'SLS "{body["__sls__"]}"'
                )
    return high, errors
def display(hub, data):
    '''
    Display the data from an idem run as a colored, human readable report.

    Chunks are sorted by their __run_num and each is rendered as an
    ID/Function/Result/Comment/Changes stanza.
    '''
    endc = attr(0)
    strs = []
    for tag in sorted(
            data,
            key=lambda k: data[k].get('__run_num', 0)):
        ret = data[tag]
        comps = tag.split('_|-')
        state = comps[0]
        id_ = comps[1]
        fun = comps[3]
        name = ret['name']
        result = ret['result']
        comment = ret['comment']
        changes = hub.output.nested.display(ret['changes'])
        # BUG FIX: tcolor was left unbound (NameError) when result held an
        # unexpected value; default to no color
        tcolor = endc
        if result is True and changes:
            tcolor = fg(6)
        elif result is True:
            tcolor = fg(2)
        elif result is None:
            tcolor = fg(11)
        elif result is False:
            tcolor = fg(9)

        strs.append(f'{tcolor}--------{endc}')
        strs.append(f'{tcolor}      ID: {id_}{endc}')
        strs.append(f'{tcolor}Function: {state}.{fun}{endc}')
        strs.append(f'{tcolor}  Result: {result}{endc}')
        strs.append(f'{tcolor} Comment: {comment}{endc}')
        strs.append(f'{tcolor} Changes: {changes}{endc}')
    return '\n'.join(strs)
14 | Take a file from a location definition and cache it in the target location 15 | ''' 16 | if full.startswith('file://'): 17 | full = full[7:] 18 | c_tgt = os.path.join(cache_dir, full.lstrip(os.sep)) 19 | c_dir = os.path.dirname(c_tgt) 20 | try: 21 | os.makedirs(c_dir) 22 | except FileExistsError: 23 | pass 24 | if not os.path.isfile(full): 25 | return None 26 | shutil.copy(full, c_tgt) 27 | return c_tgt 28 | -------------------------------------------------------------------------------- /idem/states/test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | ''' 3 | Test States 4 | =========== 5 | 6 | Provide test case states that enable easy testing of things to do with state 7 | calls, e.g. running, calling, logging, output filtering etc. 8 | 9 | .. code-block:: yaml 10 | 11 | always-passes-with-any-kwarg: 12 | test.nop: 13 | - name: foo 14 | - something: else 15 | - foo: bar 16 | 17 | always-passes: 18 | test.succeed_without_changes: 19 | - name: foo 20 | 21 | always-fails: 22 | test.fail_without_changes: 23 | - name: foo 24 | 25 | always-changes-and-succeeds: 26 | test.succeed_with_changes: 27 | - name: foo 28 | 29 | always-changes-and-fails: 30 | test.fail_with_changes: 31 | - name: foo 32 | ''' 33 | # Import Python libs 34 | import random 35 | 36 | TREQ = { 37 | 'treq': { 38 | 'require': [ 39 | 'test.nop', 40 | ] 41 | }, 42 | } 43 | 44 | 45 | def treq(hub, ctx, name, **kwargs): 46 | ''' 47 | Ensure that a transparent requisite is applied 48 | ''' 49 | return succeed_without_changes(hub, ctx, name) 50 | 51 | 52 | def nop(hub, ctx, name, **kwargs): 53 | ''' 54 | A no-op state that does nothing. 
Useful in conjunction with the `use` 55 | requisite, or in templates which could otherwise be empty due to jinja 56 | rendering 57 | ''' 58 | if ctx['test']: 59 | return none_without_changes(hub, ctx, name) 60 | return succeed_without_changes(hub, ctx, name) 61 | 62 | 63 | def succeed_with_comment(hub, ctx, name, comment, **kwargs): 64 | ret = { 65 | 'name': name, 66 | 'changes': {}, 67 | 'result': True, 68 | 'comment': comment, 69 | } 70 | return ret 71 | 72 | 73 | def succeed_without_changes(hub, ctx, name, **kwargs): 74 | ''' 75 | name 76 | A unique string. 77 | ''' 78 | ret = { 79 | 'name': name, 80 | 'changes': {}, 81 | 'result': True, 82 | 'comment': 'Success!' 83 | } 84 | return ret 85 | 86 | 87 | def none_without_changes(hub, ctx, name, **kwargs): 88 | ''' 89 | name 90 | A unique string. 91 | ''' 92 | ret = { 93 | 'name': name, 94 | 'changes': {}, 95 | 'result': None, 96 | 'comment': 'Success!' 97 | } 98 | return ret 99 | 100 | 101 | def fail_without_changes(hub, ctx, name, **kwargs): 102 | ''' 103 | Returns failure. 104 | 105 | name: 106 | A unique string. 107 | ''' 108 | ret = { 109 | 'name': name, 110 | 'changes': {}, 111 | 'result': False, 112 | 'comment': 'Failure!' 113 | } 114 | 115 | return ret 116 | 117 | 118 | def succeed_with_changes(hub, ctx, name, **kwargs): 119 | ''' 120 | Returns successful and changes is not empty 121 | 122 | name: 123 | A unique string. 124 | ''' 125 | ret = { 126 | 'name': name, 127 | 'changes': {}, 128 | 'result': True, 129 | 'comment': 'Success!' 130 | } 131 | 132 | ret['changes'] = { 133 | 'testing': { 134 | 'old': 'Unchanged', 135 | 'new': 'Something pretended to change' 136 | } 137 | } 138 | 139 | return ret 140 | 141 | 142 | def fail_with_changes(hub, ctx, name, **kwargs): 143 | ''' 144 | Returns failure and changes is not empty. 145 | 146 | name: 147 | A unique string. 148 | ''' 149 | ret = { 150 | 'name': name, 151 | 'changes': {}, 152 | 'result': False, 153 | 'comment': 'Failure!' 
154 | } 155 | ret['changes'] = { 156 | 'testing': { 157 | 'old': 'Unchanged', 158 | 'new': 'Something pretended to change' 159 | } 160 | } 161 | return ret 162 | 163 | 164 | def update_low(hub, ctx, name): 165 | ''' 166 | Use the __run_name to add a run to the low 167 | ''' 168 | extra = { 169 | '__sls__': 'none', 170 | 'name': 'totally_extra_alls', 171 | '__id__': 'king_arthur', 172 | 'state': 'test', 173 | 'fun': 'nop'} 174 | hub.idem.RUNS[ctx['run_name']]['add_low'].append(extra) 175 | return succeed_without_changes(hub, ctx, name) 176 | 177 | 178 | def mod_watch(hub, ctx, name, **kwargs): 179 | ''' 180 | Return a mod_watch call for test 181 | ''' 182 | ret = { 183 | 'name': name, 184 | 'changes': {'watch': True}, 185 | 'result': True, 186 | 'comment': 'Watch ran!' 187 | } 188 | return ret 189 | 190 | 191 | def configurable_test_state(hub, ctx, name, changes=True, result=True, comment='', **kwargs): 192 | ''' 193 | A configurable test state which determines its output based on the inputs. 194 | 195 | name: 196 | A unique string. 197 | changes: 198 | Do we return anything in the changes field? 199 | Accepts True, False, and 'Random' 200 | Default is True 201 | result: 202 | Do we return successfully or not? 203 | Accepts True, False, and 'Random' 204 | Default is True 205 | If test is True and changes is True, this will be None. If test is 206 | True and and changes is False, this will be True. 207 | comment: 208 | String to fill the comment field with. 
209 | Default is '' 210 | ''' 211 | ret = { 212 | 'name': name, 213 | 'changes': {}, 214 | 'result': False, 215 | 'comment': comment 216 | } 217 | 218 | change_data = { 219 | 'testing': { 220 | 'old': 'Unchanged', 221 | 'new': 'Something pretended to change' 222 | } 223 | } 224 | 225 | # If changes is True, then we place our dummy change dictionary into it 226 | if changes == 'Random': 227 | if random.choice([True, False]): 228 | ret['changes'] = change_data 229 | elif changes is True: 230 | ret['changes'] = change_data 231 | elif changes is False: 232 | ret['changes'] = {} 233 | 234 | if result == 'Random': 235 | ret['result'] = random.choice([True, False]) 236 | elif result is True: 237 | ret['result'] = True 238 | elif result is False: 239 | ret['result'] = False 240 | 241 | if ctx['test']: 242 | ret['result'] = True if changes is False else None 243 | ret['comment'] = 'This is a test' if not comment else comment 244 | 245 | return ret 246 | -------------------------------------------------------------------------------- /idem/version.py: -------------------------------------------------------------------------------- 1 | version = '5.1' 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | PyYaml 2 | wheel 3 | jinja2 4 | pop>=7.5 5 | rend>=4 6 | toml 7 | takara 8 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import pop.hub 3 | 4 | def start(): 5 | hub = pop.hub.Hub() 6 | hub.pop.sub.add('idem.idem') 7 | hub.idem.init.cli() 8 | 9 | 10 | start() 11 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 
| 4 | # Import python libs 5 | import os 6 | import sys 7 | import shutil 8 | from setuptools import setup, Command 9 | 10 | NAME = 'idem' 11 | DESC = ('Transform configuration into idempotent action.') 12 | 13 | with open('README.rst', encoding='utf-8') as f: 14 | LONG_DESC = f.read() 15 | 16 | with open('requirements.txt') as f: 17 | REQUIREMENTS = f.read().splitlines() 18 | 19 | # Version info -- read without importing 20 | _locals = {} 21 | with open('{}/version.py'.format(NAME)) as fp: 22 | exec(fp.read(), None, _locals) 23 | VERSION = _locals['version'] 24 | SETUP_DIRNAME = os.path.dirname(__file__) 25 | if not SETUP_DIRNAME: 26 | SETUP_DIRNAME = os.getcwd() 27 | 28 | 29 | class Clean(Command): 30 | user_options = [] 31 | def initialize_options(self): 32 | pass 33 | 34 | def finalize_options(self): 35 | pass 36 | 37 | def run(self): 38 | for subdir in (NAME, 'tests'): 39 | for root, dirs, files in os.walk(os.path.join(os.path.dirname(__file__), subdir)): 40 | for dir_ in dirs: 41 | if dir_ == '__pycache__': 42 | shutil.rmtree(os.path.join(root, dir_)) 43 | 44 | 45 | def discover_packages(): 46 | modules = [] 47 | for package in (NAME, ): 48 | for root, _, files in os.walk(os.path.join(SETUP_DIRNAME, package)): 49 | pdir = os.path.relpath(root, SETUP_DIRNAME) 50 | modname = pdir.replace(os.sep, '.') 51 | modules.append(modname) 52 | return modules 53 | 54 | 55 | setup(name=NAME, 56 | author='Thomas S Hatch', 57 | author_email='thatch45@gmail.com', 58 | url='https://idem.readthedocs.io', 59 | version=VERSION, 60 | description=DESC, 61 | install_requires=REQUIREMENTS, 62 | long_description=LONG_DESC, 63 | long_description_content_type='text/x-rst', 64 | classifiers=[ 65 | 'Operating System :: OS Independent', 66 | 'Programming Language :: Python', 67 | 'Programming Language :: Python :: 3.6', 68 | 'Programming Language :: Python :: 3.7', 69 | 'Programming Language :: Python :: 3.8', 70 | 'Development Status :: 5 - Production/Stable', 71 | ], 72 | entry_points={ 
73 | 'console_scripts': [ 74 | 'idem = idem.scripts:start', 75 | ], 76 | }, 77 | packages=discover_packages(), 78 | cmdclass={'clean': Clean}, 79 | ) 80 | -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | # Import python libs 4 | import os 5 | import sys 6 | import glob 7 | import logging 8 | 9 | CODE_DIR = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) 10 | 11 | if CODE_DIR in sys.path: 12 | sys.path.remove(CODE_DIR) 13 | sys.path.insert(0, CODE_DIR) 14 | 15 | # Import 3rd-party libs 16 | import pytest 17 | 18 | 19 | log = logging.getLogger('idem.tests') 20 | 21 | def pytest_runtest_protocol(item, nextitem): 22 | ''' 23 | implements the runtest_setup/call/teardown protocol for 24 | the given test item, including capturing exceptions and calling 25 | reporting hooks. 26 | ''' 27 | log.debug('>>>>> START >>>>> {0}'.format(item.name)) 28 | 29 | 30 | def pytest_runtest_teardown(item): 31 | ''' 32 | called after ``pytest_runtest_call`` 33 | ''' 34 | log.debug('<<<<< END <<<<<<< {0}'.format(item.name)) 35 | 36 | 37 | @pytest.fixture 38 | def os_sleep_secs(): 39 | if 'CI_RUN' in os.environ: 40 | return 1.75 41 | return 0.5 42 | 43 | 44 | -------------------------------------------------------------------------------- /tests/nest/nest/again/another/test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | ''' 3 | Test States 4 | =========== 5 | 6 | Provide test case states that enable easy testing of things to do with state 7 | calls, e.g. running, calling, logging, output filtering etc. 8 | 9 | .. 
code-block:: yaml 10 | 11 | always-passes-with-any-kwarg: 12 | test.nop: 13 | - name: foo 14 | - something: else 15 | - foo: bar 16 | 17 | always-passes: 18 | test.succeed_without_changes: 19 | - name: foo 20 | 21 | always-fails: 22 | test.fail_without_changes: 23 | - name: foo 24 | 25 | always-changes-and-succeeds: 26 | test.succeed_with_changes: 27 | - name: foo 28 | 29 | always-changes-and-fails: 30 | test.fail_with_changes: 31 | - name: foo 32 | ''' 33 | # Import Python libs 34 | import random 35 | 36 | 37 | def nop(hub, ctx, name, **kwargs): 38 | ''' 39 | A no-op state that does nothing. Useful in conjunction with the `use` 40 | requisite, or in templates which could otherwise be empty due to jinja 41 | rendering 42 | ''' 43 | return succeed_without_changes(hub, ctx, name) 44 | 45 | 46 | def succeed_without_changes(hub, ctx, name, **kwargs): 47 | ''' 48 | name 49 | A unique string. 50 | ''' 51 | ret = { 52 | 'name': name, 53 | 'changes': {}, 54 | 'result': True, 55 | 'comment': 'Success!' 56 | } 57 | return ret 58 | 59 | 60 | def fail_without_changes(hub, ctx, name, **kwargs): 61 | ''' 62 | Returns failure. 63 | 64 | name: 65 | A unique string. 66 | ''' 67 | ret = { 68 | 'name': name, 69 | 'changes': {}, 70 | 'result': False, 71 | 'comment': 'Failure!' 72 | } 73 | 74 | return ret 75 | 76 | 77 | def succeed_with_changes(hub, ctx, name, **kwargs): 78 | ''' 79 | Returns successful and changes is not empty 80 | 81 | name: 82 | A unique string. 83 | ''' 84 | ret = { 85 | 'name': name, 86 | 'changes': {}, 87 | 'result': True, 88 | 'comment': 'Success!' 89 | } 90 | 91 | ret['changes'] = { 92 | 'testing': { 93 | 'old': 'Unchanged', 94 | 'new': 'Something pretended to change' 95 | } 96 | } 97 | 98 | return ret 99 | 100 | 101 | def fail_with_changes(hub, ctx, name, **kwargs): 102 | ''' 103 | Returns failure and changes is not empty. 104 | 105 | name: 106 | A unique string. 
107 | ''' 108 | ret = { 109 | 'name': name, 110 | 'changes': {}, 111 | 'result': False, 112 | 'comment': 'Failure!' 113 | } 114 | ret['changes'] = { 115 | 'testing': { 116 | 'old': 'Unchanged', 117 | 'new': 'Something pretended to change' 118 | } 119 | } 120 | return ret 121 | 122 | 123 | def update_low(hub, ctx, name): 124 | ''' 125 | Use the __run_name to add a run to the low 126 | ''' 127 | extra = { 128 | '__sls__': 'none', 129 | 'name': 'totally_extra_alls', 130 | '__id__': 'king_arthur', 131 | 'state': 'test', 132 | 'fun': 'nop'} 133 | hub.idem.RUNS[ctx['run_name']]['low'].append(extra) 134 | return succeed_without_changes(hub, ctx, name) 135 | 136 | 137 | def mod_watch(hub, ctx, name, **kwargs): 138 | ''' 139 | Return a mod_watch call for test 140 | ''' 141 | ret = { 142 | 'name': name, 143 | 'changes': {'watch': True}, 144 | 'result': True, 145 | 'comment': 'Watch ran!' 146 | } 147 | return ret 148 | -------------------------------------------------------------------------------- /tests/nest/nest/again/test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | ''' 3 | Test States 4 | =========== 5 | 6 | Provide test case states that enable easy testing of things to do with state 7 | calls, e.g. running, calling, logging, output filtering etc. 8 | 9 | .. code-block:: yaml 10 | 11 | always-passes-with-any-kwarg: 12 | test.nop: 13 | - name: foo 14 | - something: else 15 | - foo: bar 16 | 17 | always-passes: 18 | test.succeed_without_changes: 19 | - name: foo 20 | 21 | always-fails: 22 | test.fail_without_changes: 23 | - name: foo 24 | 25 | always-changes-and-succeeds: 26 | test.succeed_with_changes: 27 | - name: foo 28 | 29 | always-changes-and-fails: 30 | test.fail_with_changes: 31 | - name: foo 32 | ''' 33 | # Import Python libs 34 | import random 35 | 36 | 37 | def nop(hub, ctx, name, **kwargs): 38 | ''' 39 | A no-op state that does nothing. 
Useful in conjunction with the `use` 40 | requisite, or in templates which could otherwise be empty due to jinja 41 | rendering 42 | ''' 43 | return succeed_without_changes(hub, ctx, name) 44 | 45 | 46 | def succeed_without_changes(hub, ctx, name, **kwargs): 47 | ''' 48 | name 49 | A unique string. 50 | ''' 51 | ret = { 52 | 'name': name, 53 | 'changes': {}, 54 | 'result': True, 55 | 'comment': 'Success!' 56 | } 57 | return ret 58 | 59 | 60 | def fail_without_changes(hub, ctx, name, **kwargs): 61 | ''' 62 | Returns failure. 63 | 64 | name: 65 | A unique string. 66 | ''' 67 | ret = { 68 | 'name': name, 69 | 'changes': {}, 70 | 'result': False, 71 | 'comment': 'Failure!' 72 | } 73 | 74 | return ret 75 | 76 | 77 | def succeed_with_changes(hub, ctx, name, **kwargs): 78 | ''' 79 | Returns successful and changes is not empty 80 | 81 | name: 82 | A unique string. 83 | ''' 84 | ret = { 85 | 'name': name, 86 | 'changes': {}, 87 | 'result': True, 88 | 'comment': 'Success!' 89 | } 90 | 91 | ret['changes'] = { 92 | 'testing': { 93 | 'old': 'Unchanged', 94 | 'new': 'Something pretended to change' 95 | } 96 | } 97 | 98 | return ret 99 | 100 | 101 | def fail_with_changes(hub, ctx, name, **kwargs): 102 | ''' 103 | Returns failure and changes is not empty. 104 | 105 | name: 106 | A unique string. 107 | ''' 108 | ret = { 109 | 'name': name, 110 | 'changes': {}, 111 | 'result': False, 112 | 'comment': 'Failure!' 
113 | } 114 | ret['changes'] = { 115 | 'testing': { 116 | 'old': 'Unchanged', 117 | 'new': 'Something pretended to change' 118 | } 119 | } 120 | return ret 121 | 122 | 123 | def update_low(hub, ctx, name): 124 | ''' 125 | Use the __run_name to add a run to the low 126 | ''' 127 | extra = { 128 | '__sls__': 'none', 129 | 'name': 'totally_extra_alls', 130 | '__id__': 'king_arthur', 131 | 'state': 'test', 132 | 'fun': 'nop'} 133 | hub.idem.RUNS[ctx['run_name']]['low'].append(extra) 134 | return succeed_without_changes(hub, ctx, name) 135 | 136 | 137 | def mod_watch(hub, ctx, name, **kwargs): 138 | ''' 139 | Return a mod_watch call for test 140 | ''' 141 | ret = { 142 | 'name': name, 143 | 'changes': {'watch': True}, 144 | 'result': True, 145 | 'comment': 'Watch ran!' 146 | } 147 | return ret 148 | -------------------------------------------------------------------------------- /tests/nest/nest/params.py: -------------------------------------------------------------------------------- 1 | def kwargs(hub, ctx, name, one=None, two=None, three=None): 2 | return { 3 | 'name': name, 4 | 'result': True, 5 | 'comment': f'{one} {two} {three}', 6 | 'changes': {}, 7 | } 8 | -------------------------------------------------------------------------------- /tests/nest/nest/test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | ''' 3 | Test States 4 | =========== 5 | 6 | Provide test case states that enable easy testing of things to do with state 7 | calls, e.g. running, calling, logging, output filtering etc. 8 | 9 | .. 
code-block:: yaml 10 | 11 | always-passes-with-any-kwarg: 12 | test.nop: 13 | - name: foo 14 | - something: else 15 | - foo: bar 16 | 17 | always-passes: 18 | test.succeed_without_changes: 19 | - name: foo 20 | 21 | always-fails: 22 | test.fail_without_changes: 23 | - name: foo 24 | 25 | always-changes-and-succeeds: 26 | test.succeed_with_changes: 27 | - name: foo 28 | 29 | always-changes-and-fails: 30 | test.fail_with_changes: 31 | - name: foo 32 | ''' 33 | # Import Python libs 34 | import random 35 | 36 | 37 | def nop(hub, ctx, name, **kwargs): 38 | ''' 39 | A no-op state that does nothing. Useful in conjunction with the `use` 40 | requisite, or in templates which could otherwise be empty due to jinja 41 | rendering 42 | ''' 43 | return succeed_without_changes(hub, ctx, name) 44 | 45 | 46 | def succeed_without_changes(hub, ctx, name, **kwargs): 47 | ''' 48 | name 49 | A unique string. 50 | ''' 51 | ret = { 52 | 'name': name, 53 | 'changes': {}, 54 | 'result': True, 55 | 'comment': 'Success!' 56 | } 57 | return ret 58 | 59 | 60 | def fail_without_changes(hub, ctx, name, **kwargs): 61 | ''' 62 | Returns failure. 63 | 64 | name: 65 | A unique string. 66 | ''' 67 | ret = { 68 | 'name': name, 69 | 'changes': {}, 70 | 'result': False, 71 | 'comment': 'Failure!' 72 | } 73 | 74 | return ret 75 | 76 | 77 | def succeed_with_changes(hub, ctx, name, **kwargs): 78 | ''' 79 | Returns successful and changes is not empty 80 | 81 | name: 82 | A unique string. 83 | ''' 84 | ret = { 85 | 'name': name, 86 | 'changes': {}, 87 | 'result': True, 88 | 'comment': 'Success!' 89 | } 90 | 91 | ret['changes'] = { 92 | 'testing': { 93 | 'old': 'Unchanged', 94 | 'new': 'Something pretended to change' 95 | } 96 | } 97 | 98 | return ret 99 | 100 | 101 | def fail_with_changes(hub, ctx, name, **kwargs): 102 | ''' 103 | Returns failure and changes is not empty. 104 | 105 | name: 106 | A unique string. 
107 | ''' 108 | ret = { 109 | 'name': name, 110 | 'changes': {}, 111 | 'result': False, 112 | 'comment': 'Failure!' 113 | } 114 | ret['changes'] = { 115 | 'testing': { 116 | 'old': 'Unchanged', 117 | 'new': 'Something pretended to change' 118 | } 119 | } 120 | return ret 121 | 122 | 123 | def update_low(hub, ctx, name): 124 | ''' 125 | Use the __run_name to add a run to the low 126 | ''' 127 | extra = { 128 | '__sls__': 'none', 129 | 'name': 'totally_extra_alls', 130 | '__id__': 'king_arthur', 131 | 'state': 'test', 132 | 'fun': 'nop'} 133 | hub.idem.RUNS[ctx['run_name']]['low'].append(extra) 134 | return succeed_without_changes(hub, ctx, name) 135 | 136 | 137 | def mod_watch(hub, ctx, name, **kwargs): 138 | ''' 139 | Return a mod_watch call for test 140 | ''' 141 | ret = { 142 | 'name': name, 143 | 'changes': {'watch': True}, 144 | 'result': True, 145 | 'comment': 'Watch ran!' 146 | } 147 | return ret 148 | -------------------------------------------------------------------------------- /tests/sls/bang.sls: -------------------------------------------------------------------------------- 1 | #!jinja|yaml 2 | 3 | test: 4 | test.nop 5 | -------------------------------------------------------------------------------- /tests/sls/blocks.sls: -------------------------------------------------------------------------------- 1 | happy: 2 | test.nop 3 | #!require:happy 4 | wow: 5 | test.nop 6 | -------------------------------------------------------------------------------- /tests/sls/changes.sls: -------------------------------------------------------------------------------- 1 | changes: 2 | test.succeed_with_changes 3 | 4 | watch_changes: 5 | test.nop: 6 | - onchanges: 7 | - test: changes 8 | -------------------------------------------------------------------------------- /tests/sls/dupkeys.sls: -------------------------------------------------------------------------------- 1 | key: 2 | test.nop 3 | key: 4 | test.nop 5 | key: 6 | test.nop 7 | key: 8 | test.nop 9 | key: 
10 | test.nop 11 | key: 12 | test.nop 13 | key: 14 | test.nop 15 | key: 16 | test.nop 17 | key: 18 | test.nop 19 | key: 20 | test.nop 21 | key: 22 | test.nop 23 | -------------------------------------------------------------------------------- /tests/sls/fails.sls: -------------------------------------------------------------------------------- 1 | fails: 2 | test.fail_without_changes 3 | 4 | runs: 5 | test.nop: 6 | - onfail: 7 | - test: fails 8 | 9 | bad: 10 | test.nop: 11 | - require: 12 | - test: fails -------------------------------------------------------------------------------- /tests/sls/nest.sls: -------------------------------------------------------------------------------- 1 | req: 2 | test.nop: 3 | - require: 4 | - nest.again.another.test: baz 5 | 6 | foo: 7 | nest.test.nop: [] 8 | 9 | bar: 10 | nest.again.test.nop: [] 11 | 12 | baz: 13 | nest.again.another.test.nop: [] 14 | 15 | quo: 16 | idem.init.create: [] 17 | 18 | -------------------------------------------------------------------------------- /tests/sls/order.sls: -------------------------------------------------------------------------------- 1 | first: 2 | test.noop 3 | second: 4 | test.noop 5 | third: 6 | test.noop 7 | forth: 8 | test.noop 9 | fifth: 10 | test.noop 11 | sixth: 12 | test.noop 13 | seventh: 14 | test.noop 15 | eighth: 16 | test.noop 17 | ninth: 18 | test.noop 19 | tenth: 20 | test.noop 21 | -------------------------------------------------------------------------------- /tests/sls/params.sls: -------------------------------------------------------------------------------- 1 | positional_params: 2 | nest.params.kwargs: 3 | - one: bar 4 | - three: baz 5 | -------------------------------------------------------------------------------- /tests/sls/recreq.sls: -------------------------------------------------------------------------------- 1 | first thing: 2 | test.nop 3 | 4 | second thing: 5 | test.nop: 6 | - require: 7 | - test: first thing 8 | 9 | third thing: 10 | test.nop: 11 | 
- require: 12 | - test: second thing 13 | 14 | -------------------------------------------------------------------------------- /tests/sls/req.sls: -------------------------------------------------------------------------------- 1 | needed: 2 | test.nop 3 | 4 | needs: 5 | test.nop: 6 | - require: 7 | - test: needed 8 | 9 | fails: 10 | test.fail_without_changes 11 | 12 | needs_fail: 13 | test.nop: 14 | - require: 15 | - test: fails 16 | -------------------------------------------------------------------------------- /tests/sls/simple.sls: -------------------------------------------------------------------------------- 1 | happy: 2 | test.nop 3 | 4 | sad: 5 | test.fail_without_changes -------------------------------------------------------------------------------- /tests/sls/takara1.sls: -------------------------------------------------------------------------------- 1 | foo: 2 | test.succeed_with_comment: 3 | - comment: {{ hub.takara.init.get(unit='main', path='foo/bar/baz') }} 4 | -------------------------------------------------------------------------------- /tests/sls/treq.sls: -------------------------------------------------------------------------------- 1 | to_treq: 2 | test.treq 3 | 4 | nope: 5 | test.nop 6 | 7 | nope_the_flamethrower: 8 | test.nop 9 | 10 | nope_merchendising: 11 | test.nop 12 | -------------------------------------------------------------------------------- /tests/sls/ugly1.sls: -------------------------------------------------------------------------------- 1 | foo: 2 | - bar.baz: 3 | quo: qux 4 | -------------------------------------------------------------------------------- /tests/sls/update.sls: -------------------------------------------------------------------------------- 1 | update: 2 | test.update_low 3 | -------------------------------------------------------------------------------- /tests/sls/watch.sls: -------------------------------------------------------------------------------- 1 | changes: 2 | test.succeed_with_changes 3 
| 4 | watch_changes: 5 | test.nop: 6 | - watch: 7 | - test: changes 8 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /tests/unit/test_basic.py: -------------------------------------------------------------------------------- 1 | # Import python libs 2 | import os 3 | import tempfile 4 | 5 | # Import pop libs 6 | import pop.hub 7 | import idem.conf 8 | import rend.exc 9 | 10 | # Import third party libs 11 | import pytest 12 | 13 | 14 | def run_sls(sls, runtime='parallel', test=False): 15 | ''' 16 | Pass in an sls list and run it! 17 | ''' 18 | name = 'test' 19 | hub = pop.hub.Hub() 20 | hub.pop.sub.add('idem.idem', init=True) 21 | hub.pop.sub.add('nest') 22 | hub.pop.sub.load_subdirs(hub.nest) 23 | hub.pop.sub.load_subdirs(hub.nest.nest) 24 | hub.pop.sub.load_subdirs(hub.nest.nest.again) 25 | render = 'jinja|yaml' 26 | cache_dir = tempfile.mkdtemp() 27 | sls_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'sls') 28 | sls_sources = [f'file://{sls_dir}'] 29 | hub.pop.loop.start(hub.idem.init.apply(name, sls_sources, render, runtime, ['states', 'nest'], cache_dir, sls, test)) 30 | errors = hub.idem.RUNS[name]['errors'] 31 | if errors: 32 | return errors 33 | ret = hub.idem.RUNS[name]['running'] 34 | return ret 35 | 36 | 37 | def test_treq(): 38 | ret = run_sls(['treq']) 39 | assert ret['test_|-to_treq_|-to_treq_|-treq']['__run_num'] == 4 40 | 41 | 42 | def test_ugly1(): 43 | ret = run_sls(['ugly1']) 44 | assert ret == ['ID foo in SLS ugly1 is not a dictionary'] 45 | 46 | 47 | def test_shebang(): 48 | ret = run_sls(['bang']) 49 | assert 'test_|-test_|-test_|-nop' in ret 50 | 51 | 52 | def test_req_chain(): 53 | ''' 54 | Test that you can chain requisites, bug #11 55 | ''' 56 | ret = run_sls(['recreq']) 57 | assert 
ret.get('test_|-first thing_|-first thing_|-nop', {}).get('__run_num') == 1 58 | assert ret.get('test_|-second thing_|-second thing_|-nop', {}).get('__run_num') == 2 59 | assert ret.get('test_|-third thing_|-third thing_|-nop', {}).get('__run_num') == 3 60 | 61 | 62 | def test_nest(): 63 | ret = run_sls(['nest']) 64 | assert ret['nest.again.another.test_|-baz_|-baz_|-nop']['result'] 65 | assert ret['nest.again.test_|-bar_|-bar_|-nop']['result'] 66 | assert ret['nest.test_|-foo_|-foo_|-nop']['result'] 67 | # verify that the invalid state is not run 68 | assert not ret['idem.init_|-quo_|-quo_|-create']['result'] 69 | assert ret['test_|-req_|-req_|-nop']['__run_num'] == 5 70 | 71 | 72 | def test_basic(): 73 | ''' 74 | Test the basic funcitonality of Idem 75 | ''' 76 | ret = run_sls(['simple']) 77 | assert ret['test_|-happy_|-happy_|-nop']['result'] == True 78 | assert ret['test_|-happy_|-happy_|-nop']['changes'] == {} 79 | assert ret['test_|-happy_|-happy_|-nop']['name'] == 'happy' 80 | assert ret['test_|-sad_|-sad_|-fail_without_changes']['result'] == False 81 | assert ret['test_|-sad_|-sad_|-fail_without_changes']['name'] == 'sad' 82 | assert ret['test_|-sad_|-sad_|-fail_without_changes']['changes'] == {} 83 | 84 | 85 | def test_req(): 86 | ''' 87 | Test basic requisites 88 | ''' 89 | ret = run_sls(['req']) 90 | assert ret['test_|-needs_fail_|-needs_fail_|-nop']['result'] == False 91 | assert ret['test_|-needs_fail_|-needs_fail_|-nop']['__run_num'] == 4 92 | assert ret['test_|-needs_|-needs_|-nop']['__run_num'] == 3 93 | assert ret['test_|-needs_|-needs_|-nop']['result'] == True 94 | 95 | 96 | def test_req_test_mode(): 97 | ''' 98 | Test basic requisites in test mode 99 | ''' 100 | ret = run_sls(['req'], test=True) 101 | assert ret['test_|-needs_fail_|-needs_fail_|-nop']['result'] == False 102 | assert ret['test_|-needs_fail_|-needs_fail_|-nop']['__run_num'] == 4 103 | assert ret['test_|-needs_|-needs_|-nop']['__run_num'] == 3 104 | # "needed" returned None and 
needs did not fail to run 105 | assert ret['test_|-needs_|-needs_|-nop']['result'] == None 106 | 107 | 108 | def test_watch(): 109 | ''' 110 | Test basic requisites 111 | ''' 112 | ret = run_sls(['watch']) 113 | assert ret['test_|-watch_changes_|-watch_changes_|-nop']['__run_num'] == 2 114 | assert ret['test_|-watch_changes_|-watch_changes_|-nop']['comment'] == 'Watch ran!' 115 | assert ret['test_|-watch_changes_|-watch_changes_|-nop']['result'] == True 116 | assert ret['test_|-changes_|-changes_|-succeed_with_changes']['result'] == True 117 | assert ret['test_|-changes_|-changes_|-succeed_with_changes']['changes'] 118 | 119 | 120 | def test_onfail(): 121 | ''' 122 | Test basic requisites 123 | ''' 124 | ret = run_sls(['fails']) 125 | assert ret['test_|-runs_|-runs_|-nop']['__run_num'] == 2 126 | assert ret['test_|-runs_|-runs_|-nop']['result'] == True 127 | assert ret['test_|-bad_|-bad_|-nop']['result'] == False 128 | assert ret['test_|-bad_|-bad_|-nop']['__run_num'] == 3 129 | assert ret['test_|-fails_|-fails_|-fail_without_changes']['__run_num'] == 1 130 | assert ret['test_|-fails_|-fails_|-fail_without_changes']['result'] == False 131 | 132 | 133 | def test_onchanges(): 134 | ret = run_sls(['changes']) 135 | assert ret['test_|-watch_changes_|-watch_changes_|-nop']['__run_num'] == 2 136 | assert ret['test_|-watch_changes_|-watch_changes_|-nop']['result'] == True 137 | 138 | 139 | def test_run_name(): 140 | ret = run_sls(['update']) 141 | assert ret['test_|-king_arthur_|-totally_extra_alls_|-nop']['__run_num'] == 2 142 | 143 | 144 | def test_params(): 145 | ret = run_sls(['order'], runtime='serial') 146 | assert ret['test_|-first_|-first_|-noop']['__run_num'] == 1 147 | assert ret['test_|-second_|-second_|-noop']['__run_num'] == 2 148 | assert ret['test_|-third_|-third_|-noop']['__run_num'] == 3 149 | assert ret['test_|-forth_|-forth_|-noop']['__run_num'] == 4 150 | assert ret['test_|-fifth_|-fifth_|-noop']['__run_num'] == 5 151 | assert 
ret['test_|-sixth_|-sixth_|-noop']['__run_num'] == 6 152 | assert ret['test_|-seventh_|-seventh_|-noop']['__run_num'] == 7 153 | assert ret['test_|-eighth_|-eighth_|-noop']['__run_num'] == 8 154 | assert ret['test_|-ninth_|-ninth_|-noop']['__run_num'] == 9 155 | assert ret['test_|-tenth_|-tenth_|-noop']['__run_num'] == 10 156 | 157 | def test_blocks(): 158 | ret = run_sls(['blocks']) 159 | assert 'test_|-wow_|-wow_|-nop' in ret 160 | 161 | 162 | def test_dup_keys(): 163 | with pytest.raises(rend.exc.RenderException): 164 | ret = run_sls(['dupkeys']) 165 | -------------------------------------------------------------------------------- /tests/unit/test_takara.py: -------------------------------------------------------------------------------- 1 | # Import python libs 2 | import os 3 | import tempfile 4 | import shutil 5 | 6 | # Import rosso libs 7 | import pop.hub 8 | import idem.conf 9 | 10 | 11 | def run_sls(sls, runtime='parallel'): 12 | ''' 13 | Pass in an sls list and run it! 14 | ''' 15 | name = 'test' 16 | hub = pop.hub.Hub() 17 | hub.pop.sub.add('idem.idem', init=True) 18 | hub.pop.sub.add('nest') 19 | hub.pop.sub.add(dyne_name='takara') 20 | hub.pop.sub.load_subdirs(hub.nest) 21 | hub.pop.sub.load_subdirs(hub.nest.nest) 22 | hub.pop.sub.load_subdirs(hub.nest.nest.again) 23 | render = 'jinja|yaml' 24 | cache_dir = tempfile.mkdtemp() 25 | sls_dir = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'sls') 26 | sls_sources = [f'file://{sls_dir}'] 27 | hub.pop.loop.start(takara_sls(hub, name, sls_sources, render, runtime, ['states', 'nest'], cache_dir, sls)) 28 | errors = hub.idem.RUNS[name]['errors'] 29 | if errors: 30 | return errors 31 | ret = hub.idem.RUNS[name]['running'] 32 | return ret 33 | 34 | async def takara_sls(hub, name, sls_sources, render, runtime, subs, cache_dir, sls): 35 | unit_dir = tempfile.mkdtemp() 36 | data_dir = tempfile.mkdtemp() 37 | kw = { 38 | 'unit': 'main', 39 | 'seal_raw': 'foobar', 40 | 'unit_dir': unit_dir, 41 | 
'data_dir': data_dir, 42 | 'store': 'file', 43 | 'cipher': 'fernet', 44 | 'seal': 'passwd', 45 | 'path': 'foo/bar/baz', 46 | 'string': 'cheese', 47 | } 48 | await hub.takara.init.create(**kw) 49 | await hub.takara.init.set(**kw) 50 | await hub.idem.init.apply(name, sls_sources, render, runtime, subs, cache_dir, sls) 51 | shutil.rmtree(unit_dir) 52 | shutil.rmtree(data_dir) 53 | 54 | 55 | def test_takara(): 56 | ret = run_sls(['takara1']) 57 | assert ret['test_|-foo_|-foo_|-succeed_with_comment']['comment'] == 'cheese' 58 | --------------------------------------------------------------------------------