├── .dockerignore ├── .github └── workflows │ └── reuse.yml ├── .gitignore ├── .pylintrc ├── .reuse └── dep5 ├── LICENSE ├── LICENSES └── Apache-2.0.txt ├── MANIFEST.in ├── README.md ├── bin └── infrabox ├── examples ├── 1_hello_world │ ├── Dockerfile │ └── infrabox.json ├── 2_dependencies │ ├── Dockerfile_consumer │ ├── Dockerfile_producer │ └── infrabox.json ├── 3_testresult │ ├── Dockerfile │ ├── entrypoint.sh │ ├── infrabox.json │ └── result.json └── 6_dependency_conditions │ ├── Dockerfile_consumer1 │ ├── Dockerfile_consumer2 │ ├── Dockerfile_consumer3 │ ├── Dockerfile_consumer_error │ ├── Dockerfile_producer │ ├── Dockerfile_producer_error │ └── infrabox.json ├── infrabox.json ├── infrabox ├── infraboxcli │ ├── Dockerfile_python2 │ ├── Dockerfile_python3 │ └── entrypoint.sh └── pyinfrabox │ ├── Dockerfile │ └── entrypoint.sh ├── infraboxcli ├── __init__.py ├── console.py ├── dashboard │ ├── README.md │ ├── __init__.py │ ├── cli_client.py │ ├── external.py │ ├── local_config.py │ ├── project.py │ ├── remotes.py │ └── user.py ├── env.py ├── execute.py ├── graph.py ├── init.py ├── install.py ├── job_list.py ├── list_jobs.py ├── log.py ├── pull.py ├── push.py ├── run.py ├── validate.py └── workflow.py ├── pyinfrabox ├── __init__.py ├── badge │ └── __init__.py ├── docker_compose │ └── __init__.py ├── infrabox │ └── __init__.py ├── markup │ └── __init__.py ├── test.py ├── testresult │ └── __init__.py ├── tests │ ├── __init__.py │ ├── test │ │ ├── empty.yml │ │ ├── invalid_version.yml │ │ ├── no_services.yml │ │ ├── no_version.yml │ │ ├── unsupported_option.yml │ │ ├── unsupported_top_level.yml │ │ ├── unsupported_version.yml │ │ ├── valid_1.yml │ │ └── valid_markup.json │ ├── test_docker_compose.py │ ├── test_json.py │ ├── test_markup.py │ └── test_testresult.py └── utils.py ├── setup.cfg └── setup.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .infrabox 3 | build 4 | Dockerfile 5 | dist 6 | .dockerignore 7 | infraboxcli.egg-info 8 | *.pyc 9 | __pycache__ 10 | -------------------------------------------------------------------------------- /.github/workflows/reuse.yml: -------------------------------------------------------------------------------- 1 | name: REUSE 2 | 3 | # Controls when the action will run. 
Triggers the workflow on push or pull request 4 | # events but only for the master branch 5 | on: 6 | push: 7 | branches: [ master ] 8 | pull_request: 9 | branches: [ master ] 10 | 11 | jobs: 12 | reuse: 13 | # The type of runner that the job will run on 14 | runs-on: ubuntu-latest 15 | 16 | steps: 17 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 18 | - uses: actions/checkout@v2 19 | 20 | # Check REUSE Compliance 21 | - name: REUSE Compliance Check 22 | uses: fsfe/reuse-action@v1.1 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.infrabox* 2 | # Byte-compiled / optimized / DLL files 3 | __pycache__/ 4 | *.py[cod] 5 | *$py.class 6 | 7 | *.swp 8 | 9 | .idea/ 10 | 11 | # C extensions 12 | *.so 13 | 14 | # Distribution / packaging 15 | .Python 16 | env/ 17 | build/ 18 | develop-eggs/ 19 | dist/ 20 | downloads/ 21 | eggs/ 22 | .eggs/ 23 | lib/ 24 | lib64/ 25 | parts/ 26 | sdist/ 27 | var/ 28 | wheels/ 29 | *.egg-info/ 30 | .installed.cfg 31 | *.egg 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *.cover 52 | .hypothesis/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/_build/ 71 | 72 | # PyBuilder 73 | target/ 74 | 75 | # Jupyter Notebook 76 | .ipynb_checkpoints 77 | 78 | # pyenv 79 | .python-version 80 | 81 | # celery beat schedule file 82 | celerybeat-schedule 83 | 84 | # SageMath parsed files 85 | *.sage.py 86 | 87 | # dotenv 88 | .env 89 | 90 | # virtualenv 91 | .venv 92 | venv/ 93 | ENV/ 94 | 95 | # Spyder project settings 96 | .spyderproject 97 | .spyproject 98 | 99 | # Rope project settings 100 | .ropeproject 101 | 102 | # mkdocs documentation 103 | /site 104 | 105 | # mypy 106 | .mypy_cache/ 107 | 108 | # exuberant ctags 109 | tags 110 | 111 | .vscode 112 | 113 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code 6 | extension-pkg-whitelist= 7 | 8 | # Add files or directories to the blacklist. They should be base names, not 9 | # paths. 10 | ignore=CVS 11 | 12 | # Add files or directories matching the regex patterns to the blacklist. The 13 | # regex matches against base names, not paths. 14 | ignore-patterns= 15 | 16 | # Python code to execute, usually for sys.path manipulation such as 17 | # pygtk.require(). 18 | #init-hook= 19 | 20 | # Use multiple processes to speed up Pylint. 21 | jobs=1 22 | 23 | # List of plugins (as comma separated values of python modules names) to load, 24 | # usually to register additional checkers. 25 | load-plugins= 26 | 27 | # Pickle collected data for later comparisons. 
28 | persistent=yes 29 | 30 | # Specify a configuration file. 31 | #rcfile= 32 | 33 | # Allow loading of arbitrary C extensions. Extensions are imported into the 34 | # active Python interpreter and may run arbitrary code. 35 | unsafe-load-any-extension=no 36 | 37 | 38 | [MESSAGES CONTROL] 39 | 40 | # Only show warnings with the listed confidence levels. Leave empty to show 41 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 42 | confidence= 43 | 44 | # Disable the message, report, category or checker with the given id(s). You 45 | # can either give multiple identifiers separated by comma (,) or put this 46 | # option multiple times (only on the command line, not in the configuration 47 | # file where it should appear only once).You can also use "--disable=all" to 48 | # disable everything first and then reenable specific checks. For example, if 49 | # you want to run only the similarities checker, you can use "--disable=all 50 | # --enable=similarities". If you want to run only the classes checker, but have 51 | # no Warning level messages displayed, use"--disable=all --enable=classes 52 | # --disable=W" 53 | disable=print-statement,parameter-unpacking,unpacking-in-except,old-raise-syntax,backtick,long-suffix,old-ne-operator,old-octal-literal,import-star-module-level,raw-checker-failed,bad-inline-option,locally-disabled,locally-enabled,file-ignored,suppressed-message,useless-suppression,deprecated-pragma,apply-builtin,basestring-builtin,buffer-builtin,cmp-builtin,coerce-builtin,execfile-builtin,file-builtin,long-builtin,raw_input-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,no-absolute-import,old-division,dict-iter-method,dict-view-method,next-method-called,metaclass-assignment,indexing-exception,raising-string,reload-builtin,oct-method,hex-method,nonzero-method,cmp-method,input-builtin,round-builtin,intern-builtin,unichr-builtin,map-builtin-not-iterating,zip-builtin-not-iterating,range-builtin-not-iterating,filter-builtin-not-iterating,using-cmp-argument,eq-without-hash,div-method,idiv-method,rdiv-method,exception-message-attribute,invalid-str-codec,sys-max-int,bad-python3-import,deprecated-string-function,deprecated-str-translate-call,missing-docstring,invalid-name,bare-except,no-self-use,too-many-instance-attributes 54 | 55 | # Enable the message, report, category or checker with the given id(s). You can 56 | # either give multiple identifier separated by comma (,) or put this option 57 | # multiple time (only on the command line, not in the configuration file where 58 | # it should appear only once). See also the "--disable" option for examples. 59 | enable= 60 | 61 | 62 | [REPORTS] 63 | 64 | # Python expression which should return a note less than 10 (10 is the highest 65 | # note). You have access to the variables errors warning, statement which 66 | # respectively contain the number of errors / warnings messages and the total 67 | # number of statements analyzed. This is used by the global evaluation report 68 | # (RP0004). 69 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 70 | 71 | # Template used to display messages. This is a python new-style format string 72 | # used to format the message information. See doc for all details 73 | #msg-template= 74 | 75 | # Set the output format. 
Available formats are text, parseable, colorized, json 76 | # and msvs (visual studio).You can also give a reporter class, eg 77 | # mypackage.mymodule.MyReporterClass. 78 | output-format=text 79 | 80 | # Tells whether to display a full report or only the messages 81 | reports=no 82 | 83 | # Activate the evaluation score. 84 | score=yes 85 | 86 | 87 | [REFACTORING] 88 | 89 | # Maximum number of nested blocks for function / method body 90 | max-nested-blocks=5 91 | 92 | 93 | [BASIC] 94 | 95 | # Naming hint for argument names 96 | argument-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 97 | 98 | # Regular expression matching correct argument names 99 | argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 100 | 101 | # Naming hint for attribute names 102 | attr-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 103 | 104 | # Regular expression matching correct attribute names 105 | attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 106 | 107 | # Bad variable names which should always be refused, separated by a comma 108 | bad-names=foo,bar,baz,toto,tutu,tata 109 | 110 | # Naming hint for class attribute names 111 | class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 112 | 113 | # Regular expression matching correct class attribute names 114 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 115 | 116 | # Naming hint for class names 117 | class-name-hint=[A-Z_][a-zA-Z0-9]+$ 118 | 119 | # Regular expression matching correct class names 120 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 121 | 122 | # Naming hint for constant names 123 | const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 124 | 125 | # Regular expression matching correct constant names 126 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 127 | 128 | # Minimum line length for functions/classes that require docstrings, shorter 129 | # ones are exempt. 130 | docstring-min-length=-1 131 | 132 | # Naming hint for function names 133 | function-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 134 | 135 | # Regular expression matching correct function names 136 | function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 137 | 138 | # Good variable names which should always be accepted, separated by a comma 139 | good-names=i,j,k,ex,Run,_ 140 | 141 | # Include a hint for the correct naming format with invalid-name 142 | include-naming-hint=no 143 | 144 | # Naming hint for inline iteration names 145 | inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ 146 | 147 | # Regular expression matching correct inline iteration names 148 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 149 | 150 | # Naming hint for method names 151 | method-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 152 | 153 | # Regular expression matching correct method names 154 | method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 155 | 156 | # Naming hint for module names 157 | module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 158 | 159 | # Regular expression matching correct module names 160 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 161 | 162 | # Colon-delimited sets of names that determine each other's naming style when 163 | # the name regexes allow several styles. 164 | name-group= 165 | 166 | # Regular expression which should only match function or class names that do 167 | # not require a docstring. 168 | no-docstring-rgx=^_ 169 | 170 | # List of decorators that produce properties, such as abc.abstractproperty. Add 171 | # to this list to register other decorators that produce valid properties. 
172 | property-classes=abc.abstractproperty 173 | 174 | # Naming hint for variable names 175 | variable-name-hint=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 176 | 177 | # Regular expression matching correct variable names 178 | variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 179 | 180 | 181 | [FORMAT] 182 | 183 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 184 | expected-line-ending-format= 185 | 186 | # Regexp for a line that is allowed to be longer than the limit. 187 | ignore-long-lines=^\s*(# )?<?https?://\S+>?$ 188 | 189 | # Number of spaces of indent required inside a hanging or continued line. 190 | indent-after-paren=4 191 | 192 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 193 | # tab). 194 | indent-string=' ' 195 | 196 | # Maximum number of characters on a single line. 197 | max-line-length=120 198 | 199 | # Maximum number of lines in a module 200 | max-module-lines=1000 201 | 202 | # List of optional constructs for which whitespace checking is disabled. `dict- 203 | # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. 204 | # `trailing-comma` allows a space between comma and closing bracket: (a, ). 205 | # `empty-line` allows space-only lines. 206 | no-space-check=trailing-comma,dict-separator 207 | 208 | # Allow the body of a class to be on the same line as the declaration if body 209 | # contains single statement. 210 | single-line-class-stmt=no 211 | 212 | # Allow the body of an if to be on the same line as the test if there is no 213 | # else. 214 | single-line-if-stmt=no 215 | 216 | 217 | [MISCELLANEOUS] 218 | 219 | # List of note tags to take in consideration, separated by a comma. 220 | notes=FIXME,XXX,TODO 221 | 222 | 223 | [SIMILARITIES] 224 | 225 | # Ignore comments when computing similarities. 226 | ignore-comments=yes 227 | 228 | # Ignore docstrings when computing similarities. 229 | ignore-docstrings=yes 230 | 231 | # Ignore imports when computing similarities. 232 | ignore-imports=no 233 | 234 | # Minimum lines number of a similarity. 235 | min-similarity-lines=4 236 | 237 | 238 | [VARIABLES] 239 | 240 | # List of additional names supposed to be defined in builtins. Remember that 241 | # you should avoid to define new builtins when possible. 242 | additional-builtins= 243 | 244 | # Tells whether unused global variables should be treated as a violation. 245 | allow-global-unused-variables=yes 246 | 247 | # List of strings which can identify a callback function by name. A callback 248 | # name must start or end with one of those strings. 249 | callbacks=cb_,_cb 250 | 251 | # A regular expression matching the name of dummy variables (i.e. expectedly 252 | # not used). 253 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 254 | 255 | # Argument names that match this expression will be ignored. Default to name 256 | # with leading underscore 257 | ignored-argument-names=_.*|^ignored_|^unused_ 258 | 259 | # Tells whether we should check for unused import in __init__ files. 260 | init-import=no 261 | 262 | # List of qualified module names which can have objects that can redefine 263 | # builtins. 264 | redefining-builtins-modules=six.moves,future.builtins 265 | 266 | 267 | [LOGGING] 268 | 269 | # Logging modules to check that the string format arguments are in logging 270 | # function parameter format 271 | logging-modules=logging 272 | 273 | 274 | [TYPECHECK] 275 | 276 | # List of decorators that produce context managers, such as 277 | # contextlib.contextmanager.
Add to this list to register other decorators that 278 | # produce valid context managers. 279 | contextmanager-decorators=contextlib.contextmanager 280 | 281 | # List of members which are set dynamically and missed by pylint inference 282 | # system, and so shouldn't trigger E1101 when accessed. Python regular 283 | # expressions are accepted. 284 | generated-members= 285 | 286 | # Tells whether missing members accessed in mixin class should be ignored. A 287 | # mixin class is detected if its name ends with "mixin" (case insensitive). 288 | ignore-mixin-members=yes 289 | 290 | # This flag controls whether pylint should warn about no-member and similar 291 | # checks whenever an opaque object is returned when inferring. The inference 292 | # can return multiple potential results while evaluating a Python object, but 293 | # some branches might not be evaluated, which results in partial inference. In 294 | # that case, it might be useful to still emit no-member and other checks for 295 | # the rest of the inferred objects. 296 | ignore-on-opaque-inference=yes 297 | 298 | # List of class names for which member attributes should not be checked (useful 299 | # for classes with dynamically set attributes). This supports the use of 300 | # qualified names. 301 | ignored-classes=optparse.Values,thread._local,_thread._local 302 | 303 | # List of module names for which member attributes should not be checked 304 | # (useful for modules/projects where namespaces are manipulated during runtime 305 | # and thus existing member attributes cannot be deduced by static analysis. It 306 | # supports qualified module names, as well as Unix pattern matching. 307 | ignored-modules= 308 | 309 | # Show a hint with possible names when a member name was not found. The aspect 310 | # of finding the hint is based on edit distance. 311 | missing-member-hint=yes 312 | 313 | # The minimum edit distance a name should have in order to be considered a 314 | # similar match for a missing member name. 315 | missing-member-hint-distance=1 316 | 317 | # The total number of similar names that should be taken in consideration when 318 | # showing a hint for a missing member. 319 | missing-member-max-choices=1 320 | 321 | 322 | [SPELLING] 323 | 324 | # Spelling dictionary name. Available dictionaries: none. To make it working 325 | # install python-enchant package. 326 | spelling-dict= 327 | 328 | # List of comma separated words that should not be checked. 329 | spelling-ignore-words= 330 | 331 | # A path to a file that contains private dictionary; one word per line. 332 | spelling-private-dict-file= 333 | 334 | # Tells whether to store unknown words to indicated private dictionary in 335 | # --spelling-private-dict-file option instead of raising a message. 336 | spelling-store-unknown-words=no 337 | 338 | 339 | [IMPORTS] 340 | 341 | # Allow wildcard imports from modules that define __all__. 342 | allow-wildcard-with-all=no 343 | 344 | # Analyse import fallback blocks. This can be used to support both Python 2 and 345 | # 3 compatible code, which means that the block might have code that exists 346 | # only in one or another interpreter, leading to false positives when analysed. 347 | analyse-fallback-blocks=no 348 | 349 | # Deprecated modules which should not be used, separated by a comma 350 | deprecated-modules=regsub,TERMIOS,Bastion,rexec 351 | 352 | # Create a graph of external dependencies in the given file (report RP0402 must 353 | # not be disabled) 354 | ext-import-graph= 355 | 356 | # Create a graph of every (i.e. 
internal and external) dependencies in the 357 | # given file (report RP0402 must not be disabled) 358 | import-graph= 359 | 360 | # Create a graph of internal dependencies in the given file (report RP0402 must 361 | # not be disabled) 362 | int-import-graph= 363 | 364 | # Force import order to recognize a module as part of the standard 365 | # compatibility libraries. 366 | known-standard-library= 367 | 368 | # Force import order to recognize a module as part of a third party library. 369 | known-third-party=enchant 370 | 371 | 372 | [CLASSES] 373 | 374 | # List of method names used to declare (i.e. assign) instance attributes. 375 | defining-attr-methods=__init__,__new__,setUp 376 | 377 | # List of member names, which should be excluded from the protected access 378 | # warning. 379 | exclude-protected=_asdict,_fields,_replace,_source,_make 380 | 381 | # List of valid names for the first argument in a class method. 382 | valid-classmethod-first-arg=cls 383 | 384 | # List of valid names for the first argument in a metaclass class method. 385 | valid-metaclass-classmethod-first-arg=mcs 386 | 387 | 388 | [DESIGN] 389 | 390 | # Maximum number of arguments for function / method 391 | max-args=10 392 | 393 | # Maximum number of attributes for a class (see R0902). 394 | max-attributes=7 395 | 396 | # Maximum number of boolean expressions in a if statement 397 | max-bool-expr=5 398 | 399 | # Maximum number of branch for function / method body 400 | max-branches=20 401 | 402 | # Maximum number of locals for function / method body 403 | max-locals=25 404 | 405 | # Maximum number of parents for a class (see R0901). 406 | max-parents=7 407 | 408 | # Maximum number of public methods for a class (see R0904). 409 | max-public-methods=20 410 | 411 | # Maximum number of return / yield for function / method body 412 | max-returns=10 413 | 414 | # Maximum number of statements in function / method body 415 | max-statements=100 416 | 417 | # Minimum number of public methods for a class (see R0903). 418 | min-public-methods=2 419 | 420 | 421 | [EXCEPTIONS] 422 | 423 | # Exceptions that will emit a warning when being caught. Defaults to 424 | # "Exception" 425 | overgeneral-exceptions= 426 | -------------------------------------------------------------------------------- /.reuse/dep5: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: InfraBox-cli 3 | Source: https://github.com/SAP/InfraBox-cli 4 | Disclaimer: The code in this project may include calls to APIs (“API Calls”) of 5 | SAP or third-party products or services developed outside of this project 6 | (“External Products”). 7 | “APIs” means application programming interfaces, as well as their respective 8 | specifications and implementing code that allows software to communicate with 9 | other software. 10 | API Calls to External Products are not licensed under the open source license 11 | that governs this project. The use of such API Calls and related External 12 | Products are subject to applicable additional agreements with the relevant 13 | provider of the External Products. In no event shall the open source license 14 | that governs this project grant any rights in or to any External Products, or 15 | alter, expand or supersede any terms of the applicable additional agreements.
16 | If you have a valid license agreement with SAP for the use of a particular SAP 17 | External Product, then you may make use of any API Calls included in this 18 | project’s code for that SAP External Product, subject to the terms of such 19 | license agreement. If you do not have a valid license agreement for the use of 20 | a particular SAP External Product, then you may only make use of any API Calls 21 | in this project for that SAP External Product for your internal, non-productive 22 | and non-commercial test and evaluation of such API Calls. Nothing herein grants 23 | you any rights to use or access any SAP External Product, or provide any third 24 | parties the right to use or access any SAP External Product, through API Calls. 25 | 26 | Files: * 27 | Copyright: 2018-2020 SAP SE or an SAP affiliate company and InfraBox-cli contributors 28 | License: Apache-2.0 -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof.
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2018-2020 SAP SE or an SAP affiliate company and InfraBox-cli contributors. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /LICENSES/Apache-2.0.txt: -------------------------------------------------------------------------------- 1 | Apache License 2 | 3 | Version 2.0, January 2004 4 | 5 | http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, 6 | AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | 11 | 12 | "License" shall mean the terms and conditions for use, reproduction, and distribution 13 | as defined by Sections 1 through 9 of this document. 
14 | 15 | 16 | 17 | "Licensor" shall mean the copyright owner or entity authorized by the copyright 18 | owner that is granting the License. 19 | 20 | 21 | 22 | "Legal Entity" shall mean the union of the acting entity and all other entities 23 | that control, are controlled by, or are under common control with that entity. 24 | For the purposes of this definition, "control" means (i) the power, direct 25 | or indirect, to cause the direction or management of such entity, whether 26 | by contract or otherwise, or (ii) ownership of fifty percent (50%) or more 27 | of the outstanding shares, or (iii) beneficial ownership of such entity. 28 | 29 | 30 | 31 | "You" (or "Your") shall mean an individual or Legal Entity exercising permissions 32 | granted by this License. 33 | 34 | 35 | 36 | "Source" form shall mean the preferred form for making modifications, including 37 | but not limited to software source code, documentation source, and configuration 38 | files. 39 | 40 | 41 | 42 | "Object" form shall mean any form resulting from mechanical transformation 43 | or translation of a Source form, including but not limited to compiled object 44 | code, generated documentation, and conversions to other media types. 45 | 46 | 47 | 48 | "Work" shall mean the work of authorship, whether in Source or Object form, 49 | made available under the License, as indicated by a copyright notice that 50 | is included in or attached to the work (an example is provided in the Appendix 51 | below). 52 | 53 | 54 | 55 | "Derivative Works" shall mean any work, whether in Source or Object form, 56 | that is based on (or derived from) the Work and for which the editorial revisions, 57 | annotations, elaborations, or other modifications represent, as a whole, an 58 | original work of authorship. For the purposes of this License, Derivative 59 | Works shall not include works that remain separable from, or merely link (or 60 | bind by name) to the interfaces of, the Work and Derivative Works thereof. 61 | 62 | 63 | 64 | "Contribution" shall mean any work of authorship, including the original version 65 | of the Work and any modifications or additions to that Work or Derivative 66 | Works thereof, that is intentionally submitted to Licensor for inclusion in 67 | the Work by the copyright owner or by an individual or Legal Entity authorized 68 | to submit on behalf of the copyright owner. For the purposes of this definition, 69 | "submitted" means any form of electronic, verbal, or written communication 70 | sent to the Licensor or its representatives, including but not limited to 71 | communication on electronic mailing lists, source code control systems, and 72 | issue tracking systems that are managed by, or on behalf of, the Licensor 73 | for the purpose of discussing and improving the Work, but excluding communication 74 | that is conspicuously marked or otherwise designated in writing by the copyright 75 | owner as "Not a Contribution." 76 | 77 | 78 | 79 | "Contributor" shall mean Licensor and any individual or Legal Entity on behalf 80 | of whom a Contribution has been received by Licensor and subsequently incorporated 81 | within the Work. 82 | 83 | 2. Grant of Copyright License. 
Subject to the terms and conditions of this 84 | License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, 85 | no-charge, royalty-free, irrevocable copyright license to reproduce, prepare 86 | Derivative Works of, publicly display, publicly perform, sublicense, and distribute 87 | the Work and such Derivative Works in Source or Object form. 88 | 89 | 3. Grant of Patent License. Subject to the terms and conditions of this License, 90 | each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, 91 | no-charge, royalty-free, irrevocable (except as stated in this section) patent 92 | license to make, have made, use, offer to sell, sell, import, and otherwise 93 | transfer the Work, where such license applies only to those patent claims 94 | licensable by such Contributor that are necessarily infringed by their Contribution(s) 95 | alone or by combination of their Contribution(s) with the Work to which such 96 | Contribution(s) was submitted. If You institute patent litigation against 97 | any entity (including a cross-claim or counterclaim in a lawsuit) alleging 98 | that the Work or a Contribution incorporated within the Work constitutes direct 99 | or contributory patent infringement, then any patent licenses granted to You 100 | under this License for that Work shall terminate as of the date such litigation 101 | is filed. 102 | 103 | 4. Redistribution. You may reproduce and distribute copies of the Work or 104 | Derivative Works thereof in any medium, with or without modifications, and 105 | in Source or Object form, provided that You meet the following conditions: 106 | 107 | (a) You must give any other recipients of the Work or Derivative Works a copy 108 | of this License; and 109 | 110 | (b) You must cause any modified files to carry prominent notices stating that 111 | You changed the files; and 112 | 113 | (c) You must retain, in the Source form of any Derivative Works that You distribute, 114 | all copyright, patent, trademark, and attribution notices from the Source 115 | form of the Work, excluding those notices that do not pertain to any part 116 | of the Derivative Works; and 117 | 118 | (d) If the Work includes a "NOTICE" text file as part of its distribution, 119 | then any Derivative Works that You distribute must include a readable copy 120 | of the attribution notices contained within such NOTICE file, excluding those 121 | notices that do not pertain to any part of the Derivative Works, in at least 122 | one of the following places: within a NOTICE text file distributed as part 123 | of the Derivative Works; within the Source form or documentation, if provided 124 | along with the Derivative Works; or, within a display generated by the Derivative 125 | Works, if and wherever such third-party notices normally appear. The contents 126 | of the NOTICE file are for informational purposes only and do not modify the 127 | License. You may add Your own attribution notices within Derivative Works 128 | that You distribute, alongside or as an addendum to the NOTICE text from the 129 | Work, provided that such additional attribution notices cannot be construed 130 | as modifying the License. 
131 | 132 | You may add Your own copyright statement to Your modifications and may provide 133 | additional or different license terms and conditions for use, reproduction, 134 | or distribution of Your modifications, or for any such Derivative Works as 135 | a whole, provided Your use, reproduction, and distribution of the Work otherwise 136 | complies with the conditions stated in this License. 137 | 138 | 5. Submission of Contributions. Unless You explicitly state otherwise, any 139 | Contribution intentionally submitted for inclusion in the Work by You to the 140 | Licensor shall be under the terms and conditions of this License, without 141 | any additional terms or conditions. Notwithstanding the above, nothing herein 142 | shall supersede or modify the terms of any separate license agreement you 143 | may have executed with Licensor regarding such Contributions. 144 | 145 | 6. Trademarks. This License does not grant permission to use the trade names, 146 | trademarks, service marks, or product names of the Licensor, except as required 147 | for reasonable and customary use in describing the origin of the Work and 148 | reproducing the content of the NOTICE file. 149 | 150 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to 151 | in writing, Licensor provides the Work (and each Contributor provides its 152 | Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 153 | KIND, either express or implied, including, without limitation, any warranties 154 | or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR 155 | A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness 156 | of using or redistributing the Work and assume any risks associated with Your 157 | exercise of permissions under this License. 158 | 159 | 8. Limitation of Liability. In no event and under no legal theory, whether 160 | in tort (including negligence), contract, or otherwise, unless required by 161 | applicable law (such as deliberate and grossly negligent acts) or agreed to 162 | in writing, shall any Contributor be liable to You for damages, including 163 | any direct, indirect, special, incidental, or consequential damages of any 164 | character arising as a result of this License or out of the use or inability 165 | to use the Work (including but not limited to damages for loss of goodwill, 166 | work stoppage, computer failure or malfunction, or any and all other commercial 167 | damages or losses), even if such Contributor has been advised of the possibility 168 | of such damages. 169 | 170 | 9. Accepting Warranty or Additional Liability. While redistributing the Work 171 | or Derivative Works thereof, You may choose to offer, and charge a fee for, 172 | acceptance of support, warranty, indemnity, or other liability obligations 173 | and/or rights consistent with this License. However, in accepting such obligations, 174 | You may act only on Your own behalf and on Your sole responsibility, not on 175 | behalf of any other Contributor, and only if You agree to indemnify, defend, 176 | and hold each Contributor harmless for any liability incurred by, or claims 177 | asserted against, such Contributor by reason of your accepting any such warranty 178 | or additional liability. END OF TERMS AND CONDITIONS 179 | 180 | APPENDIX: How to apply the Apache License to your work. 
181 | 182 | To apply the Apache License to your work, attach the following boilerplate 183 | notice, with the fields enclosed by brackets "[]" replaced with your own identifying 184 | information. (Don't include the brackets!) The text should be enclosed in 185 | the appropriate comment syntax for the file format. We also recommend that 186 | a file or class name and description of purpose be included on the same "printed 187 | page" as the copyright notice for easier identification within third-party 188 | archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | 194 | you may not use this file except in compliance with the License. 195 | 196 | You may obtain a copy of the License at 197 | 198 | http://www.apache.org/licenses/LICENSE-2.0 199 | 200 | Unless required by applicable law or agreed to in writing, software 201 | 202 | distributed under the License is distributed on an "AS IS" BASIS, 203 | 204 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 205 | 206 | See the License for the specific language governing permissions and 207 | 208 | limitations under the License. 209 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include LICENSE 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![REUSE status](https://api.reuse.software/badge/github.com/SAP/InfraBox-cli)](https://api.reuse.software/info/github.com/SAP/InfraBox-cli) 2 | 3 | # InfraBox CLI 4 | With the InfraBox CLI you can run your InfraBox jobs on your local machine and configure your project. 5 | 6 | ## Install 7 | To install infraboxcli you need to have these requirements already installed: 8 | 9 | - git 10 | - docker 11 | - python & pip 12 | 13 | Then simply run: 14 | 15 | pip install infraboxcli 16 | 17 | You can validate your installation by running: 18 | 19 | infrabox version 20 | 21 | ## List Jobs 22 | If you have a more complex project it may be helpful to list all available jobs in it. For this you may use: 23 | 24 | infrabox list 25 | 26 | It outputs the names of all available jobs. An example output may look like this: 27 | 28 | tutorial-1 29 | tutorial-1/step1 30 | tutorial-1/step2 31 | tutorial-1/step3 32 | tutorial-1/step4 33 | tutorial-1/step5 34 | tutorial-1/tutorial-1/step1/tests 35 | tutorial-1/tutorial-1/step2/tests 36 | tutorial-1/tutorial-1/step3/tests 37 | tutorial-1/tutorial-1/step4/tests 38 | tutorial-1/tutorial-1/step5/tests 39 | 40 | ## Run a Job 41 | InfraBox CLI may be used to run your jobs on your local machine. It will also respect all the dependencies and run the jobs in the correct order. Available options are: 42 | 43 | usage: infrabox run [-h] [--no-rm] [-t TAG] [--local-cache LOCAL_CACHE] 44 | [job_name] 45 | 46 | positional arguments: 47 | job_name Job name to execute 48 | 49 | optional arguments: 50 | -h, --help show this help message and exit 51 | --no-rm Does not run 'docker-compose rm' before building 52 | -t TAG Docker image tag 53 | --local-cache LOCAL_CACHE 54 | Path to the local cache 55 |
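56 | For example, you can combine these options to tag the resulting images and keep the local cache in a custom directory (the tag and cache path below are illustrative): 57 | 58 | infrabox run -t my-tag --local-cache /tmp/infrabox-cache 59 |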
60 | To run all jobs defined in your _infrabox.json_ file simply do: 61 | 62 | infrabox run 63 | 64 | 65 | In case you have multiple jobs defined and want to run only one of them you can do: 66 | 67 | infrabox run <job-name> 68 | 69 | ## Push a Job 70 | To be able to use infrabox push you have to create a project in the InfraBox Dashboard and create an auth token for it. 71 | 72 | The auth token and the InfraBox API host must be set as environment variables: 73 | 74 | export INFRABOX_CLI_TOKEN=<your-token> 75 | export INFRABOX_URL=<infrabox-url> 76 | 77 | To push your local project simply do: 78 | 79 | infrabox push 80 | 81 | This will compress your local project and upload it to InfraBox. Now you can open the InfraBox Dashboard and navigate to your project. You should see the jobs running on InfraBox. 82 | 83 | You can also watch the console output of your pushed jobs locally. Just use the _--show-console_ option. 84 | 85 | infrabox push --show-console 86 | 87 | ## Pull a Job 88 | In case you would like to run a job which has already been executed on InfraBox you can use _infrabox pull_. It will download the docker container and all its inputs so you can run the same container locally and investigate any issues. 89 | 90 | infrabox pull --job-id <job-id> 91 | 92 | You can find the exact command for each job on the job detail page of InfraBox under _Run local_. 93 | 94 | ## Secrets 95 | If you reference secrets in your job definition (e.g. as an environment variable) then you can add a _.infraboxsecrets.json_ file to your project right next to the _infrabox.json_ file. This file should then contain all your secrets referenced in your job definition as a simple object: 96 | 97 | { 98 | "SECRET_NAME1": "my secret value", 99 | "Another secret": "another value" 100 | } 101 |
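102 | A job in your _infrabox.json_ could then consume such a secret as an environment variable. The following snippet is only a sketch: it assumes InfraBox's _$secret_ reference syntax, so check the job schema of your InfraBox version for the exact field names. 103 | 104 | { 105 | "type": "docker", 106 | "name": "job-with-secret", 107 | "docker_file": "Dockerfile", 108 | "build_only": false, 109 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 110 | "environment": {"MY_SECRET": {"$secret": "SECRET_NAME1"}} 111 | } 112 |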
113 | ## How to get support 114 | If you need help please post your questions to [Stack Overflow](https://stackoverflow.com/questions/tagged/infrabox). 115 | In case you found a bug please open a [GitHub issue](https://github.com/SAP/InfraBox-cli/issues). 116 | Follow us on Twitter: [@Infra_Box](https://twitter.com/Infra_Box) or have a look at our Slack channel [infrabox.slack.com](https://infrabox.slack.com/). 117 | -------------------------------------------------------------------------------- /bin/infrabox: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | import infraboxcli 3 | infraboxcli.main() 4 | -------------------------------------------------------------------------------- /examples/1_hello_world/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | RUN adduser -S testuser 4 | USER testuser 5 | 6 | CMD echo "hello world" 7 | -------------------------------------------------------------------------------- /examples/1_hello_world/infrabox.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "jobs": [{ 4 | "type": "docker", 5 | "name": "example1", 6 | "docker_file": "Dockerfile", 7 | "build_only": false, 8 | "resources": {"limits": {"cpu": 1, "memory": 1024}} 9 | }] 10 | } 11 | -------------------------------------------------------------------------------- /examples/2_dependencies/Dockerfile_consumer: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN adduser -S testuser 3 | USER testuser 4 | CMD cat /infrabox/inputs/producer/data.txt 5 | -------------------------------------------------------------------------------- /examples/2_dependencies/Dockerfile_producer: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN adduser -S testuser 3 | USER testuser 4 | CMD echo "hello world" > /infrabox/output/data.txt 5 | -------------------------------------------------------------------------------- /examples/2_dependencies/infrabox.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "jobs": [{ 4 | "type": "docker", 5 | "name": "producer", 6 | "docker_file": "Dockerfile_producer", 7 | "build_only": false, 8 | "resources": {"limits": {"cpu": 1, "memory": 1024}} 9 | }, { 10 | "type": "docker", 11 | "name": "consumer", 12 | "docker_file": "Dockerfile_consumer", 13 | "build_only": false, 14 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 15 | "depends_on": ["producer"] 16 | }] 17 | } 18 | -------------------------------------------------------------------------------- /examples/3_testresult/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | ADD result.json /result.json 4 | ADD entrypoint.sh /entrypoint.sh 5 | 6 | RUN adduser -S tester 7 | USER tester 8 | 9 | CMD /entrypoint.sh 10 | -------------------------------------------------------------------------------- /examples/3_testresult/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | # Run your tests and generate the result json file 4 | 5 | cp /result.json /infrabox/upload/testresult 6 | -------------------------------------------------------------------------------- /examples/3_testresult/infrabox.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "jobs": [{ 4 | "type": "docker", 5 | "name": "testresult", 6 | "docker_file": "Dockerfile", 7 | "build_only": false, 8 | "resources": {"limits": {"cpu": 1, "memory": 1024}} 9 | }] 10 | } 11 | -------------------------------------------------------------------------------- /examples/3_testresult/result.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3
| "tests": [{ 4 | "name": "test 1", 5 | "suite": "Test suite", 6 | "status": "ok", 7 | "duration": 123, 8 | "measurements": [{ 9 | "name": "Custom measurement", 10 | "value": "123", 11 | "unit": "hz" 12 | }] 13 | }, { 14 | "name": "test 2", 15 | "suite": "Test suite", 16 | "status": "error", 17 | "duration": 43, 18 | "message": "TypeError: Again !?!", 19 | "stack": "Traceback (most recent call last):\n File \"e.py\", line 7, in \n raise TypeError(\"Again !?!\")" 20 | }] 21 | } 22 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/Dockerfile_consumer1: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN adduser -S testuser 3 | USER testuser 4 | CMD cat /infrabox/inputs/producer/data.txt 5 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/Dockerfile_consumer2: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN adduser -S testuser 3 | USER testuser 4 | 5 | # should not be executed 6 | CMD exit 1 7 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/Dockerfile_consumer3: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN adduser -S testuser 3 | USER testuser 4 | CMD cat /infrabox/inputs/producer/data.txt 5 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/Dockerfile_consumer_error: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | CMD echo "Cleanup after error" 3 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/Dockerfile_producer: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | RUN adduser -S testuser 3 | USER testuser 4 | CMD echo "hello world" > /infrabox/output/data.txt 5 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/Dockerfile_producer_error: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | CMD exit 1 3 | -------------------------------------------------------------------------------- /examples/6_dependency_conditions/infrabox.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "jobs": [{ 4 | "type": "docker", 5 | "name": "producer", 6 | "docker_file": "Dockerfile_producer", 7 | "build_only": false, 8 | "resources": {"limits": {"cpu": 1, "memory": 1024}} 9 | }, { 10 | "type": "docker", 11 | "name": "consumer1", 12 | "docker_file": "Dockerfile_consumer1", 13 | "build_only": false, 14 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 15 | "depends_on": [ 16 | "producer" 17 | ] 18 | }, { 19 | "type": "docker", 20 | "name": "consumer2", 21 | "docker_file": "Dockerfile_consumer2", 22 | "build_only": false, 23 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 24 | "depends_on": [ 25 | {"job": "producer", "on": ["error", "failure"]} 26 | ] 27 | }, { 28 | "type": "docker", 29 | "name": "consumer3", 30 | "docker_file": "Dockerfile_consumer3", 31 | "build_only": false, 32 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 33 | "depends_on": [ 34 | {"job": "producer", "on": ["*"]} 35 | ] 36 | }, { 37 | "type": 
"docker", 38 | "name": "producer-error", 39 | "docker_file": "Dockerfile_producer_error", 40 | "build_only": false, 41 | "resources": {"limits": {"cpu": 1, "memory": 1024}} 42 | }, { 43 | "type": "docker", 44 | "name": "consumer-error", 45 | "docker_file": "Dockerfile_consumer_error", 46 | "build_only": false, 47 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 48 | "depends_on": [ 49 | {"job": "producer-error", "on": ["*"]} 50 | ] 51 | }] 52 | } 53 | -------------------------------------------------------------------------------- /infrabox.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": 1, 3 | "jobs": [{ 4 | "type": "docker", 5 | "name": "pyinfrabox", 6 | "docker_file": "infrabox/pyinfrabox/Dockerfile", 7 | "build_only": false, 8 | "resources": { "limits": { "cpu": 0.5, "memory": 1024 } } 9 | }, { 10 | "type": "docker", 11 | "name": "infraboxcli-python2", 12 | "docker_file": "infrabox/infraboxcli/Dockerfile_python2", 13 | "build_only": false, 14 | "resources": { "limits": { "cpu": 0.5, "memory": 1024 } } 15 | }, { 16 | "type": "docker", 17 | "name": "infraboxcli-python3", 18 | "docker_file": "infrabox/infraboxcli/Dockerfile_python3", 19 | "build_only": false, 20 | "resources": { "limits": { "cpu": 0.5, "memory": 1024 } } 21 | }] 22 | } 23 | -------------------------------------------------------------------------------- /infrabox/infraboxcli/Dockerfile_python2: -------------------------------------------------------------------------------- 1 | FROM alpine:3.6 2 | 3 | RUN apk add --no-cache py2-pip py2-yaml py2-cryptography gcc python2-dev musl-dev g++ 4 | RUN pip install coverage pyyaml 5 | 6 | RUN adduser -S tester 7 | 8 | COPY . /infraboxcli 9 | RUN chown -R tester /infraboxcli 10 | 11 | WORKDIR /infraboxcli 12 | 13 | RUN pip install . 14 | RUN dos2unix /infraboxcli/infrabox/infraboxcli/entrypoint.sh 15 | 16 | USER tester 17 | 18 | CMD /infraboxcli/infrabox/infraboxcli/entrypoint.sh 19 | -------------------------------------------------------------------------------- /infrabox/infraboxcli/Dockerfile_python3: -------------------------------------------------------------------------------- 1 | FROM alpine:3.6 2 | 3 | RUN apk add --no-cache python3 py3-cryptography gcc python3-dev musl-dev g++ 4 | RUN pip3 install coverage pyyaml 5 | 6 | RUN adduser -S tester 7 | 8 | COPY . /infraboxcli 9 | RUN chown -R tester /infraboxcli 10 | 11 | WORKDIR /infraboxcli 12 | 13 | RUN pip3 install . 14 | 15 | USER tester 16 | 17 | RUN dos2unix /infraboxcli/infrabox/infraboxcli/entrypoint.sh 18 | 19 | CMD /infraboxcli/infrabox/infraboxcli/entrypoint.sh 20 | -------------------------------------------------------------------------------- /infrabox/infraboxcli/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | find /infraboxcli -name \*.pyc -delete 3 | 4 | # test help 5 | echo "## Test --help" 6 | infrabox --help 7 | 8 | # test list 9 | echo "## Test list" 10 | infrabox list 11 | 12 | # test validate 13 | echo "## Test validate" 14 | infrabox validate 15 | 16 | cd examples 17 | 18 | # validate examples 19 | echo "## validate 1_hello_world" 20 | cd 1_hello_world 21 | infrabox validate 22 | cd .. 23 | 24 | echo "## validate 2_dependencies" 25 | cd 2_dependencies 26 | infrabox validate 27 | cd .. 28 | 29 | echo "## validate 3_testresult" 30 | cd 3_testresult 31 | infrabox validate 32 | cd .. 
33 | 34 | echo "## validate 6_dependency_conditions" 35 | cd 6_dependency_conditions 36 | infrabox validate 37 | cd .. 38 | -------------------------------------------------------------------------------- /infrabox/pyinfrabox/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM stege/baseimage:1 2 | 3 | RUN apk add --no-cache py-pip py-yaml git 4 | RUN pip install coverage future xmlrunner pyyaml 5 | 6 | ENV PYTHONPATH=/ 7 | 8 | RUN adduser -S tester 9 | 10 | COPY pyinfrabox /pyinfrabox 11 | COPY infrabox/pyinfrabox/entrypoint.sh /pyinfrabox/entrypoint.sh 12 | 13 | RUN chown -R tester /pyinfrabox 14 | 15 | USER tester 16 | 17 | WORKDIR /pyinfrabox 18 | 19 | RUN dos2unix /pyinfrabox/entrypoint.sh 20 | 21 | CMD /pyinfrabox/entrypoint.sh 22 | -------------------------------------------------------------------------------- /infrabox/pyinfrabox/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | echo "## Run tests" 3 | 4 | coverage run --source=.,/pyinfrabox --branch test.py 5 | 6 | rc=$? 7 | 8 | set -e 9 | coverage report -m 10 | coverage xml 11 | 12 | cp coverage.xml /infrabox/upload/coverage/ 13 | 14 | exit $rc 15 | -------------------------------------------------------------------------------- /infraboxcli/__init__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import sys 4 | 5 | from infraboxcli.graph import graph 6 | from infraboxcli.init import init 7 | from infraboxcli.list_jobs import list_jobs 8 | from infraboxcli.log import logger 9 | from infraboxcli.pull import pull 10 | from infraboxcli.push import push 11 | from infraboxcli.run import run 12 | from infraboxcli.validate import validate 13 | 14 | from infraboxcli.dashboard import user 15 | from infraboxcli.dashboard import project 16 | from infraboxcli.dashboard import remotes 17 | from infraboxcli.dashboard import local_config 18 | from infraboxcli.install import install_infrabox 19 | 20 | version = '0.9.0' 21 | 22 | def main(): 23 | username = 'unknown' 24 | 25 | if os.name != 'nt': 26 | import pwd 27 | username = pwd.getpwuid(os.getuid()).pw_name 28 | 29 | parser = argparse.ArgumentParser(prog="infrabox") 30 | parser.add_argument("--url", 31 | required=False, 32 | default=os.environ.get('INFRABOX_URL', None), 33 | help="Address of the API server") 34 | parser.add_argument("--ca-bundle", 35 | required=False, 36 | default=os.environ.get('INFRABOX_CA_BUNDLE', None), 37 | help="Path to a CA_BUNDLE file or directory with certificates of trusted CAs") 38 | parser.add_argument("-f", dest='infrabox_file', required=False, type=str, 39 | help="Path to an infrabox.json or infrabox.yaml file") 40 | sub_parser = parser.add_subparsers(help='sub-command help') 41 | 42 | # version 43 | version_init = sub_parser.add_parser('version', help='Show the current version') 44 | version_init.set_defaults(version=version) 45 | 46 | # init 47 | parser_init = sub_parser.add_parser('init', help='Create a simple project') 48 | parser_init.set_defaults(is_init=True) 49 | parser_init.set_defaults(func=init) 50 | 51 | # push 52 | parser_push = sub_parser.add_parser('push', help='Push a local project to InfraBox') 53 | parser_push.add_argument("--show-console", action='store_true', required=False, 54 | help="Show the console output of the jobs") 55 | parser_push.set_defaults(show_console=False) 56 | parser_push.set_defaults(validate_only=False) 57 | 
parser_push.set_defaults(func=push) 58 | 59 | # pull 60 | parser_pull = sub_parser.add_parser('pull', help='Pull a remote job') 61 | parser_pull.set_defaults(is_pull=True) 62 | parser_pull.add_argument("--job-id", required=True) 63 | parser_pull.add_argument("--no-container", required=False, dest='pull_container', action='store_false', 64 | help="Only the inputs will be downloaded but not the actual container. Implies --no-run.") 65 | parser_pull.set_defaults(pull_container=True) 66 | 67 | parser_pull.add_argument("--no-run", required=False, dest='run_container', action='store_false', 68 | help="The container will not be run.") 69 | parser_pull.set_defaults(run_container=True) 70 | parser_pull.set_defaults(func=pull) 71 | 72 | # graph 73 | parser_graph = sub_parser.add_parser('graph', help='Generate a graph of your local jobs') 74 | parser_graph.set_defaults(func=graph) 75 | 76 | # validate 77 | validate_graph = sub_parser.add_parser('validate', help='Validate infrabox.json or infrabox.yaml') 78 | validate_graph.set_defaults(func=validate) 79 | 80 | # list 81 | list_job = sub_parser.add_parser('list', help='List all available jobs') 82 | list_job.set_defaults(func=list_jobs) 83 | 84 | # install 85 | install = sub_parser.add_parser('install', help='Setup InfraBox') 86 | install.set_defaults(is_install=True) 87 | install.set_defaults(func=install_infrabox) 88 | 89 | # run 90 | parser_run = sub_parser.add_parser('run', help='Run your jobs locally') 91 | parser_run.add_argument("job_name", nargs="?", type=str, 92 | help="Job name to execute") 93 | parser_run.add_argument("--no-rm", action='store_true', required=False, 94 | help="Does not run 'docker-compose rm' before building") 95 | parser_run.add_argument("--build-arg", required=False, type=str, nargs='?', 96 | help="Set docker build arguments", action='append') 97 | parser_run.add_argument("--env", required=False, type=str, nargs='?', 98 | help="Override environment variables", action='append') 99 | parser_run.add_argument("--env-file", required=False, type=str, default=None, 100 | help="Environment file to override environment values") 101 | parser_run.add_argument("-t", dest='tag', required=False, type=str, 102 | help="Docker image tag") 103 | parser_run.add_argument("-c", "--children", action='store_true', 104 | help="Also run children of a job") 105 | parser_run.add_argument("--local-cache", required=False, type=str, 106 | default="/tmp/{}/infrabox/local-cache".format(username), 107 | help="Path to the local cache") 108 | parser_run.add_argument("--memory", required=False, type=float, 109 | help="Override a memory limit for your job") 110 | parser_run.add_argument("--cpu", required=False, type=float, 111 | help="Override a cpu limit for your job") 112 | parser_run.add_argument("--unlimited", action='store_true', required=False, 113 | help="Do not apply cpu and mem limits.") 114 | parser_run.set_defaults(no_rm=False) 115 | parser_run.set_defaults(func=run) 116 | 117 | # Project 118 | parser_project = sub_parser.add_parser('project', help='Manage your project') 119 | parser_project.add_argument('--project-name', dest='remote_project_name', required=False, type=str) 120 | parser_project.set_defaults(project_command=True) 121 | sub_project = parser_project.add_subparsers(dest='project') 122 | 123 | # Project list 124 | parser_projects_list = sub_project.add_parser('list', help='Get a list of all your projects') 125 | parser_projects_list.add_argument('--verbose', required=False, default=True, type=str2bool) 126 | 
parser_projects_list.set_defaults(func=project.list_projects) 127 | 128 | # Project status 129 | parser_projects_list = sub_project.add_parser('status', help='Get some info about your current project') 130 | parser_projects_list.add_argument('--verbose', required=False, default=True, type=str2bool) 131 | parser_projects_list.set_defaults(func=project.print_status) 132 | 133 | # Create project 134 | parser_project_create = sub_project.add_parser('create', help='Create a new project') 135 | parser_project_create.add_argument('--name', required=True, type=str, 136 | help='Name of the project you want to create') 137 | parser_project_create.add_argument('--type', required=True, type=str, 138 | help='Type of the project { upload, github, gerrit } you want to create') 139 | parser_project_create.add_argument('--public', required=False, default=False, action='store_true', 140 | help='Make your project public') 141 | parser_project_create.add_argument('--private', required=False, default=False, action='store_true', 142 | help='Make your project private') 143 | parser_project_create.set_defaults(func=project.create_project) 144 | 145 | parser_project_delete = sub_project.add_parser('delete', help='Delete a project') 146 | parser_project_delete.add_argument('--name', required=False, type=str, 147 | help='Name of the project you want to delete') 148 | parser_project_delete.add_argument('--id', required=False, type=str, 149 | help='Id of the project you want to delete') 150 | parser_project_delete.set_defaults(func=project.delete_project) 151 | 152 | # Collaborators 153 | parser_collaborators = sub_project.add_parser('collaborators', help='Add or remove collaborators for your project') 154 | sub_collaborators = parser_collaborators.add_subparsers() 155 | 156 | parser_list_collaborators = sub_collaborators.add_parser('list', help='Show collaborators list') 157 | parser_list_collaborators.add_argument('--verbose', required=False, default=True, type=str2bool) 158 | parser_list_collaborators.set_defaults(func=project.list_collaborators) 159 | 160 | parser_add_collaborator = sub_collaborators.add_parser('add', help='Add a collaborator') 161 | parser_add_collaborator.add_argument('--username', required=True, type=str, 162 | help='Username of the collaborator you want to add') 163 | parser_add_collaborator.set_defaults(func=project.add_collaborator) 164 | 165 | parser_remove_collaborator = sub_collaborators.add_parser('remove', help='Remove a collaborator') 166 | parser_remove_collaborator.add_argument('--username', required=True, type=str, 167 | help='Username of the collaborator you want to remove') 168 | parser_remove_collaborator.set_defaults(func=project.remove_collaborator) 169 | 170 | # Secrets 171 | parser_secrets = sub_project.add_parser('secrets', help='Create or delete secrets') 172 | sub_secrets = parser_secrets.add_subparsers() 173 | 174 | parser_list_secrets = sub_secrets.add_parser('list', help='Show all your secrets') 175 | parser_list_secrets.add_argument('--verbose', required=False, default=True, type=str2bool) 176 | parser_list_secrets.set_defaults(func=project.list_secrets) 177 | 178 | parser_create_secret = sub_secrets.add_parser('create', help='Create a secret') 179 | parser_create_secret.add_argument('--name', required=True, type=str, help='Name of the secret') 180 | parser_create_secret.add_argument('--value', required=True, type=str, help='Value of the secret') 181 | parser_create_secret.set_defaults(func=project.add_secret) 182 | 183 | parser_delete_secret =
sub_secrets.add_parser('delete', help='Delete a secret') 184 | parser_delete_secret.add_argument('--name', required=False, type=str, 185 | help='Name of the secret you want to delete') 186 | parser_delete_secret.add_argument('--id', required=False, type=str, 187 | help='Id of the secret you want to delete') 188 | parser_delete_secret.set_defaults(func=project.delete_secret) 189 | 190 | # Tokens 191 | parsers_project_tokens = sub_project.add_parser('tokens', help='Manage your project tokens') 192 | sub_project_tokens = parsers_project_tokens.add_subparsers() 193 | 194 | parser_list_project_tokens = sub_project_tokens.add_parser('list', help='Show all your project tokens') 195 | parser_list_project_tokens.add_argument('--verbose', required=False, default=True, type=str2bool) 196 | parser_list_project_tokens.set_defaults(func=project.list_project_tokens) 197 | 198 | parser_add_project_token = sub_project_tokens.add_parser('create', help='Create a project token') 199 | parser_add_project_token.add_argument('--description', required=True, type=str, 200 | help='Description of the project token you want to create') 201 | #TODO when scope push/pull functionality is implemented, uncomment following 2 lines 202 | #parser_add_project_token.add_argument('--scope_push', required=False, default=True, type=str2bool, help='Scope push') 203 | #parser_add_project_token.add_argument('--scope_pull', required=False, default=True, type=str2bool, help='Scope pull') 204 | parser_add_project_token.set_defaults(func=project.add_project_token) 205 | 206 | parser_remove_project_token = sub_project_tokens.add_parser('delete', help='Delete a project token') 207 | parser_remove_project_token.add_argument('--id', required=False, type=str, 208 | help='Id of the project token you want to delete') 209 | parser_remove_project_token.add_argument('--description', required=False, type=str, 210 | help='Description of the project token you want to delete') 211 | parser_remove_project_token.set_defaults(func=project.delete_project_token) 212 | 213 | # Login 214 | parser_login = sub_parser.add_parser('login', help='Log in to InfraBox') 215 | parser_login.add_argument('remote_url', nargs='?', type=str, help='Name of remote') 216 | parser_login.add_argument('--email', required=False, default=None, type=str, help='Email of the user') 217 | parser_login.add_argument('--password', required=False, default=None, type=str, help='Password of the user') 218 | parser_login.set_defaults(func=user.login) 219 | 220 | # Logout 221 | parser_logout = sub_parser.add_parser('logout', help='Log out from the current remote') 222 | parser_logout.set_defaults(func=user.logout) 223 | 224 | # Config 225 | parser_config = sub_parser.add_parser('config', help='Configure your InfraBox') 226 | sub_config = parser_config.add_subparsers(dest='config') 227 | 228 | parser_config_current_project = sub_config.add_parser('set-current-project', help='Set new current project') 229 | parser_config_current_project.add_argument('project_name', nargs='?', type=str, help='Name of the project') 230 | parser_config_current_project.set_defaults(func=local_config.set_current_project_name) 231 | 232 | # Remotes 233 | parser_remotes = sub_parser.add_parser('remotes', help='Current remotes') 234 | sub_remotes = parser_remotes.add_subparsers() 235 | parser_remotes_list = sub_remotes.add_parser('list', help='Show all your remotes') 236 | parser_remotes_list.add_argument('--verbose', required=False, default=True, type=str2bool) 237 |
parser_remotes_list.set_defaults(func=remotes.list_remotes) 238 | 239 | # Parse args 240 | args = parser.parse_args() 241 | 242 | if 'version' in args: 243 | print('infraboxcli %s' % version) 244 | return 245 | 246 | if "DOCKER_HOST" in os.environ: 247 | logger.error("DOCKER_HOST is set") 248 | logger.error("infrabox can't be used to run jobs on a remote machine") 249 | sys.exit(1) 250 | 251 | if args.ca_bundle: 252 | if args.ca_bundle.lower() == "false": 253 | args.ca_bundle = False 254 | # according to: https://stackoverflow.com/a/28002687/131120 255 | import requests.packages.urllib3 as urllib3 256 | urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) 257 | else: 258 | if not os.path.exists(args.ca_bundle): 259 | logger.error("INFRABOX_CA_BUNDLE: %s not found" % args.ca_bundle) 260 | sys.exit(1) 261 | 262 | if args.infrabox_file: 263 | if not os.path.exists(args.infrabox_file): 264 | logger.error('%s does not exist' % args.infrabox_file) 265 | sys.exit(1) 266 | 267 | p = os.path.abspath(args.infrabox_file) 268 | 269 | args.project_root = p[0:p.rfind('/')] 270 | args.infrabox_file_path = p 271 | args.project_name = os.path.basename(p) 272 | else: 273 | # Find infrabox.json 274 | p = os.getcwd() 275 | 276 | while p: 277 | tb = os.path.join(p, 'infrabox.json') 278 | if not os.path.exists(tb): 279 | tb = os.path.join(p, 'infrabox.yaml') 280 | if not os.path.exists(tb): 281 | p = p[0:p.rfind('/')] 282 | else: 283 | args.project_root = p 284 | args.infrabox_file_path = tb 285 | args.project_name = os.path.basename(p) 286 | break 287 | 288 | if 'job_name' not in args: 289 | args.children = True 290 | 291 | if 'project_root' not in args and 'is_init' not in args and 'is_pull' not in args and 'is_install' not in args: 292 | logger.error("infrabox.json or infrabox.yaml not found in current or any parent directory") 293 | sys.exit(1) 294 | 295 | # Run command 296 | args.func(args) 297 | 298 | 299 | def str2bool(v): 300 | if v.lower() in ('yes', 'true', 't', 'y', '1'): 301 | return True 302 | elif v.lower() in ('no', 'false', 'f', 'n', '0'): 303 | return False 304 | else: 305 | raise argparse.ArgumentTypeError('Boolean value expected.') 306 | -------------------------------------------------------------------------------- /infraboxcli/console.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | 4 | from socketIO_client import SocketIO 5 | from colorama import Fore 6 | 7 | from infraboxcli.log import logger 8 | 9 | logging.basicConfig(format='%(asctime)-15s %(message)s', level=logging.WARNING) 10 | 11 | colors = [ 12 | Fore.RED, 13 | Fore.GREEN, 14 | Fore.YELLOW, 15 | Fore.BLUE, 16 | Fore.MAGENTA, 17 | Fore.CYAN, 18 | Fore.WHITE 19 | ] 20 | 21 | job_name_len = 0 22 | jobs = {} 23 | 24 | def on_console_update(*args): 25 | u = args[0] 26 | job_id = u['job_id'] 27 | output = u['data'] 28 | 29 | if not output: 30 | return 31 | 32 | job_name = jobs[job_id]['name'] 33 | color = jobs[job_id]['color'] 34 | 35 | lines = output.splitlines() 36 | f = '{:%s}' % job_name_len 37 | for l in lines: 38 | print('%s%s:%s %s' % (color, f.format(job_name), Fore.RESET, l)) 39 | 40 | def on_disconnect(*_args): 41 | logger.info('Disconnected') 42 | 43 | def show_console(build_id, args): 44 | logger.info("Starting console output for build %s" % build_id) 45 | cookies = {'token': args.token} 46 | 47 | with SocketIO(args.url + '/api/v1/socket.io', 48 | cookies=cookies, 49 | wait_for_connection=False, 50 | verify=args.ca_bundle) as s: 51 | 
def on_job_update(*args): 52 | u = args[0]['data'] 53 | job = u['job'] 54 | job_id = job['id'] 55 | 56 | if job_id not in jobs: 57 | s.emit('listen:console', job_id) 58 | color = colors[len(jobs) % len(colors)] 59 | job['color'] = color 60 | jobs[job_id] = job 61 | global job_name_len 62 | job_name_len = max(job_name_len, len(job['name'])) 63 | else: 64 | jobs[job_id]['state'] = job['state'] 65 | 66 | # no jobs yet 67 | if not jobs: 68 | return 69 | 70 | # check if create job failed 71 | if len(jobs) == 1: 72 | for job_id in jobs: 73 | state = jobs[job_id]['state'] 74 | name = jobs[job_id]['name'] 75 | if state == 'failure' or state == 'error' or state == 'killed': 76 | logger.error("Job %s failed with '%s'" % (name, state)) 77 | sys.exit(1) 78 | 79 | # wait until we received the real jobs 80 | if len(jobs) < 2: 81 | return 82 | 83 | active = False 84 | for job_id in jobs: 85 | state = jobs[job_id]['state'] 86 | if state in ('scheduled', 'queued', 'running'): 87 | active = True 88 | 89 | if active: 90 | return 91 | 92 | rc = 0 93 | for job_id in jobs: 94 | state = jobs[job_id]['state'] 95 | name = jobs[job_id]['name'] 96 | 97 | if state == 'finished': 98 | logger.info("Job %s finished successfully" % name) 99 | else: 100 | logger.error("Job %s failed with '%s'" % (name, state)) 101 | rc = 1 102 | 103 | sys.exit(rc) 104 | 105 | s.on('disconnect', on_disconnect) 106 | s.on('notify:job', on_job_update) 107 | s.on('notify:console', on_console_update) 108 | 109 | s.emit('listen:build', build_id) 110 | s.wait() 111 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/README.md: -------------------------------------------------------------------------------- 1 | # New CLI Usage 2 | ## Remotes management 3 | 4 | __Get remotes list__: 5 | ```sh 6 | $ infrabox remotes list 7 | ``` 8 | 9 | __Log in to a remote, e.g. http://exampleremote.com__: 10 | ```sh 11 | $ infrabox login http://exampleremote.com 12 | ``` 13 | You will then be asked to enter your email and password. 14 | You can also pass these via the `--email` and `--password` parameters. 15 | 16 | If the login succeeds, the remote is set as the current one, meaning all further commands are performed in that remote's context. 17 | 18 | __Log out from the current remote__: 19 | ```sh 20 | $ infrabox logout 21 | ``` 22 | This deletes the current remote's token from the local config. 23 | 24 | ## Configuration 25 | __Set current project__: 26 | ```sh 27 | $ infrabox config set-current-project PROJECT_NAME 28 | ``` 29 | If the project name is valid, all further commands are performed in that project's context.
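For example, a typical first session against a remote might look like this (the remote URL and project name below are placeholders, not real values):
```sh
$ infrabox login http://exampleremote.com
$ infrabox config set-current-project MyProject
$ infrabox project status
```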
30 | 31 | __You can also specify a non-current project name for any project command to perform it in another project's context, e.g.__: 32 | Get the collaborators list of the project named _ANOTHER_PROJECT_NAME_: 33 | ```sh 34 | $ infrabox project --project-name ANOTHER_PROJECT_NAME collaborators list 35 | ``` 36 | 37 | 38 | ## Project 39 | ### * General project management 40 | __Get the project list__: 41 | ```sh 42 | $ infrabox project list 43 | ``` 44 | 45 | __Create a new project__: 46 | Available project types: 47 | - upload 48 | - github (WIP) 49 | - gerrit (WIP) 50 | 51 | Available visibility options: 52 | - -\-private 53 | - -\-public 54 | 55 | E.g., create a private project named "PrivateUploadProject" of the _upload_ type: 56 | ```sh 57 | $ infrabox project create --name PrivateUploadProject --type upload --private 58 | ``` 59 | __Delete a project__: 60 | By name: 61 | ```sh 62 | $ infrabox project delete --name PROJECT_NAME 63 | ``` 64 | By id: 65 | ```sh 66 | $ infrabox project delete --id PROJECT_ID 67 | ``` 68 | 69 | ### * Collaborators 70 | __Get collaborators list__: 71 | ```sh 72 | $ infrabox project collaborators list 73 | ``` 74 | __Add a collaborator__: 75 | ```sh 76 | $ infrabox project collaborators add --username USERNAME 77 | ``` 78 | __Remove a collaborator__: 79 | ```sh 80 | $ infrabox project collaborators remove --username USERNAME 81 | ``` 82 | 83 | ### * Secrets 84 | __Get secrets list__: 85 | ```sh 86 | $ infrabox project secrets list 87 | ``` 88 | __Create a new secret__: 89 | ```sh 90 | $ infrabox project secrets create --name SECRET_NAME --value SECRET_VALUE 91 | ``` 92 | __Delete a secret__: 93 | By name: 94 | ```sh 95 | $ infrabox project secrets delete --name SECRET_NAME 96 | ``` 97 | By id: 98 | ```sh 99 | $ infrabox project secrets delete --id SECRET_ID 100 | ``` 101 | 102 | ### * Project tokens 103 | __Get tokens list__: 104 | ```sh 105 | $ infrabox project tokens list 106 | ``` 107 | __Create a new token__: 108 | ```sh 109 | $ infrabox project tokens create --description DESCRIPTION 110 | ``` 111 | __Delete a token__: 112 | By description: 113 | ```sh 114 | $ infrabox project tokens delete --description DESCRIPTION 115 | ``` 116 | By id: 117 | ```sh 118 | $ infrabox project tokens delete --id TOKEN_ID 119 | ``` 120 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SAP/InfraBox-cli/b08804a1e91f2e454b5a28a21b37317d28081dd7/infraboxcli/dashboard/__init__.py -------------------------------------------------------------------------------- /infraboxcli/dashboard/cli_client.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from infraboxcli.log import logger 4 | 5 | session = requests.Session() 6 | connection_error_message = 'Can\'t connect to the remote. Please, check your connection or the remote URL.'
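# Note: all helpers below share the single module-level requests.Session created above,
# so cookies set by the server (e.g. the auth token returned on login) persist across calls.
# The optional cookies_handler callback receives the request URL and the session's current
# cookie dict after each call; the login flow uses it to capture and store the token locally.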
7 | 8 | 9 | def get(url, headers=None, cookies_handler=None, verify=None, timeout=60): 10 | try: 11 | response = session.get(url, headers=headers, verify=verify, timeout=timeout) 12 | except: 13 | logger.exception() 14 | exit(1) 15 | 16 | if cookies_handler: 17 | cookies_handler(url, session.cookies.get_dict()) 18 | return response 19 | 20 | 21 | def post(url, data, headers=None, cookies_handler=None, verify=None, timeout=60): 22 | try: 23 | response = session.post(url, json=data, headers=headers, verify=verify, timeout=timeout) 24 | except: 25 | logger.exception() 26 | exit(1) 27 | 28 | if cookies_handler: 29 | cookies_handler(url, session.cookies.get_dict()) 30 | return response 31 | 32 | 33 | def delete(url, headers=None, cookies_handler=None, verify=None, timeout=60): 34 | try: 35 | response = session.delete(url, headers=headers, verify=verify, timeout=timeout) 36 | except: 37 | logger.exception() 38 | exit(1) 39 | 40 | if cookies_handler: 41 | cookies_handler(url, session.cookies.get_dict()) 42 | return response 43 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/external.py: -------------------------------------------------------------------------------- 1 | from infraboxcli.log import logger 2 | from infraboxcli.dashboard import local_config 3 | 4 | from pyinfrabox.utils import get_remote_url 5 | 6 | 7 | def save_user_token(url, cookies_dict): 8 | config = local_config.get_config() 9 | if config is None: 10 | config = {} 11 | 12 | config.setdefault('remotes', {}) 13 | 14 | is_new_remote_or_null = False 15 | remote_url = get_remote_url(url) 16 | if remote_url not in config['remotes'] \ 17 | or config['remotes'][remote_url] is None: 18 | is_new_remote_or_null = True 19 | 20 | # Decide what are we going to do if user entered invalid username or password: 21 | # either use `current_user_token` if it exists or raise an error 22 | allow_login_if_current_user_token_is_set = False 23 | 24 | user_token = None 25 | if 'token' not in cookies_dict: 26 | if is_new_remote_or_null or not allow_login_if_current_user_token_is_set: 27 | logger.error('Unauthorized: invalid username and/or password.') 28 | exit(1) 29 | else: 30 | user_token = config['remotes'][remote_url]['current_user_token'] 31 | else: 32 | user_token = cookies_dict['token'] 33 | 34 | config['current_remote'] = remote_url 35 | config['remotes'].setdefault(remote_url, {}) 36 | config['remotes'][remote_url]['current_user_token'] = user_token 37 | 38 | local_config.save_config(config) 39 | logger.info('Logged in successfully.') 40 | 41 | 42 | def get_current_user_token(): 43 | try: 44 | config = local_config.get_config() 45 | 46 | current_remote = config['current_remote'] 47 | if not current_remote: 48 | raise Exception('Current remote not set') 49 | 50 | current_user_token = config['remotes'][current_remote]['current_user_token'] 51 | if current_user_token is None or not current_user_token: 52 | raise Exception('Current user token not found') 53 | 54 | return current_user_token 55 | except: 56 | logger.error('Could not load current user token. 
Please, log in.') 57 | exit(1) 58 | 59 | 60 | def delete_current_user_token(): 61 | try: 62 | config = local_config.get_config() 63 | 64 | current_remote = config['current_remote'] 65 | if not current_remote: 66 | raise Exception('current remote not set') 67 | 68 | if not config['remotes'][current_remote]['current_user_token']: 69 | return False 70 | 71 | config['remotes'][current_remote]['current_user_token'] = "" 72 | local_config.save_config(config) 73 | 74 | return True 75 | except: 76 | return False 77 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/local_config.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | 4 | from os.path import expanduser 5 | from infraboxcli.log import logger 6 | 7 | config_file_path = '.infrabox/config.json' 8 | home = expanduser("~") 9 | 10 | def set_current_project_name(args): 11 | from infraboxcli.dashboard import project 12 | all_projects = project.get_projects(args).json() 13 | 14 | project_exists = False 15 | for project in all_projects: 16 | if args.project_name == project['name']: 17 | project_exists = True 18 | break 19 | 20 | if not project_exists: 21 | logger.error('Project with such a name does not exist.') 22 | exit(1) 23 | 24 | try: 25 | config = get_config() 26 | 27 | config['remotes'][get_current_remote_url()]['current_project'] = args.project_name 28 | save_config(config) 29 | 30 | return True 31 | except: 32 | return False 33 | 34 | 35 | def get_current_project_name(): 36 | try: 37 | return get_config()['remotes'][get_current_remote_url()]['current_project'] 38 | except: 39 | return None 40 | 41 | 42 | def get_current_remote_url(): 43 | try: 44 | return get_config()['current_remote'] 45 | except: 46 | return None 47 | 48 | 49 | def get_all_remotes(): 50 | try: 51 | config = get_config() 52 | 53 | remotes = config['remotes'].keys() 54 | if not remotes: 55 | raise Exception('No remotes') 56 | 57 | return remotes 58 | except: 59 | logger.error('No available remotes. 
Please, log in.') 60 | exit(1) 61 | 62 | 63 | def get_config(): 64 | p = os.path.join(home, config_file_path) 65 | 66 | if not os.path.exists(p): 67 | return None 68 | 69 | with open(p, 'r') as config_file: 70 | config = json.load(config_file) 71 | 72 | return config 73 | 74 | def save_config(config): 75 | p = os.path.join(home, config_file_path) 76 | bp = os.path.dirname(p) 77 | 78 | if not os.path.exists(bp): 79 | os.makedirs(bp) 80 | 81 | with open(p, 'w+') as config_file: 82 | json.dump(config, config_file) 83 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/project.py: -------------------------------------------------------------------------------- 1 | from infraboxcli.dashboard.cli_client import get, post, delete 2 | from infraboxcli.dashboard.user import get_user_headers 3 | import infraboxcli.env 4 | 5 | from infraboxcli.log import logger 6 | 7 | api_projects_endpoint_url = '/api/v1/projects/' 8 | allowed_project_types = ['upload'] #TODO: add ['github', 'gitlab', 'gerrit'] 9 | 10 | 11 | def check_project_is_set(args): 12 | infraboxcli.env.check_env_cli_token(args) 13 | 14 | if args.remote_project_name: 15 | args.project_id = get_project_id_by_name(args) 16 | 17 | if args.project_id is not None: 18 | if 'project_name_printed' not in args \ 19 | and 'using_default_project' not in args: 20 | logger.info('Project: {project_name}'.format(project_name=args.remote_project_name)) 21 | args.project_name_printed = True 22 | 23 | return True 24 | 25 | exit(1) 26 | 27 | 28 | def get_projects(args): 29 | infraboxcli.env.check_env_url(args) 30 | 31 | url = args.url + api_projects_endpoint_url 32 | response = get(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 33 | 34 | return response 35 | 36 | 37 | def list_projects(args): 38 | if args.verbose: 39 | all_projects = get_projects(args).json() 40 | 41 | logger.info('Projects:') 42 | msg = "" 43 | for project in all_projects: 44 | msg += 'Name: {}\nId: {}\nType: {}\nPublic: {}\n---\n'\ 45 | .format(project['name'], project['id'], project['type'], project['public']) 46 | logger.log(msg, print_header=False) 47 | 48 | 49 | def print_status(args): 50 | if args.verbose: 51 | infraboxcli.env.check_env_cli_token(args) 52 | 53 | if args.remote_project_name: 54 | project = get_project_by_name(args) 55 | elif args.project_id: 56 | project = get_project_by_id(args) 57 | 58 | if project is None: 59 | logger.error('Current project is not set.') 60 | exit(1) 61 | 62 | num_collaborators = len(get_collaborators(args).json()) 63 | num_tokens = len(get_project_tokens(args).json()) 64 | num_secrets = len(get_secrets(args).json()) 65 | 66 | logger.info('Project status:') 67 | msg = 'Name: {}\nId: {}\nType: {}\nPublic: {}\n---\n' \ 68 | + 'Total collaborators: {}\nTotal tokens: {}\nTotal secrets: {}\n---\n' 69 | logger.log(msg.format(project['name'], project['id'], project['type'], project['public'], 70 | num_collaborators, num_tokens, num_secrets), print_header=False) 71 | 72 | 73 | def create_project(args): 74 | infraboxcli.env.check_env_url(args) 75 | 76 | if not args.private and not args.public: 77 | logger.error('Specify if your project is going to be public or private, please.') 78 | return 79 | 80 | if args.private and args.public: 81 | logger.error('Project can\'t be public and private simultaneously. 
' 82 | + 'Choose only one option, please.') 83 | return 84 | 85 | is_private_project = True 86 | if args.public: 87 | is_private_project = False 88 | 89 | args.type = args.type.lower() 90 | if args.type not in allowed_project_types: 91 | logger.error('Provided project type is not supported.' 92 | + '\nAllowed project types are: [ {allowed_types} ]' 93 | .format(allowed_types=', '.join(allowed_project_types))) 94 | return 95 | 96 | url = args.url + api_projects_endpoint_url 97 | 98 | data = { 99 | 'name': args.name, 100 | 'type': args.type, 101 | 'private': is_private_project 102 | } 103 | response = post(url, data=data, headers=get_user_headers(), verify=args.ca_bundle, timeout=60) 104 | 105 | if response.status_code != 200: 106 | logger.error(response.json()['message']) 107 | else: 108 | logger.info(response.json()['message']) 109 | 110 | return response 111 | 112 | 113 | def get_project_id_by_name(args): 114 | all_projects = get_projects(args).json() 115 | 116 | for project in all_projects: 117 | if args.remote_project_name == project['name']: 118 | return project['id'] 119 | 120 | logger.info('Project with such a name does not exist.') 121 | return None 122 | 123 | 124 | def get_project_name_by_id(args): 125 | project = get_project_by_id(args) 126 | if project: 127 | return project['name'] 128 | 129 | 130 | def get_project_by_id(args): 131 | all_projects = get_projects(args).json() 132 | 133 | for project in all_projects: 134 | if args.project_id == project['id']: 135 | return project 136 | 137 | logger.info('Project with such an id does not exist.') 138 | return None 139 | 140 | 141 | def get_project_by_name(args): 142 | project_id = get_project_id_by_name(args) 143 | 144 | if project_id: 145 | args.project_id = project_id 146 | return get_project_by_id(args) 147 | 148 | 149 | def delete_project(args): 150 | if args.id: 151 | delete_project_by_id(args) 152 | elif args.name: 153 | delete_project_by_name(args) 154 | else: 155 | logger.error('Please, provide either project id or name.') 156 | 157 | 158 | def delete_project_by_name(args): 159 | infraboxcli.env.check_env_url(args) 160 | 161 | args.remote_project_name = args.name 162 | project_id = get_project_id_by_name(args) 163 | 164 | if not project_id: 165 | return 166 | 167 | args.id = project_id 168 | return delete_project_by_id(args) 169 | 170 | 171 | def delete_project_by_id(args): 172 | infraboxcli.env.check_env_url(args) 173 | url = args.url + api_projects_endpoint_url + args.id 174 | response = delete(url, headers=get_user_headers(), verify=args.ca_bundle, timeout=60) 175 | 176 | if response.status_code != 200: 177 | logger.error(response.json()['message']) 178 | else: 179 | logger.info(response.json()['message']) 180 | 181 | return response 182 | 183 | 184 | def get_collaborators(args): 185 | check_project_is_set(args) 186 | 187 | url = args.url + api_projects_endpoint_url + args.project_id + '/collaborators' 188 | response = get(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 189 | return response 190 | 191 | 192 | def list_collaborators(args): 193 | if args.verbose: 194 | all_collaborators = get_collaborators(args).json() 195 | 196 | logger.info('Collaborators:') 197 | msg = "" 198 | for collaborator in all_collaborators: 199 | msg += 'Username: %s' % collaborator['username']\ 200 | + '\nE-mail: %s' % collaborator['email']\ 201 | + '\n---\n' 202 | logger.log(msg, print_header=False) 203 | 204 | 205 | def add_collaborator(args): 206 | check_project_is_set(args) 207 | 208 | url = args.url + api_projects_endpoint_url +
args.project_id + '/collaborators' 209 | data = { 'username': args.username } 210 | response = post(url, data, get_user_headers(), verify=args.ca_bundle, timeout=60) 211 | 212 | logger.info(response.json()['message']) 213 | return response 214 | 215 | 216 | def remove_collaborator(args): 217 | check_project_is_set(args) 218 | 219 | all_project_collaborators = get_collaborators(args).json() 220 | collaborator_id = None 221 | for collaborator in all_project_collaborators: 222 | if collaborator['username'] == args.username: 223 | collaborator_id = collaborator['id'] 224 | break 225 | 226 | if collaborator_id is None: 227 | logger.info('Specified user is not in the collaborators list.') 228 | return 229 | 230 | url = args.url + api_projects_endpoint_url + args.project_id + '/collaborators/' + collaborator_id 231 | response = delete(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 232 | 233 | logger.info(response.json()['message']) 234 | return response 235 | 236 | 237 | def get_secrets(args): 238 | check_project_is_set(args) 239 | 240 | url = args.url + api_projects_endpoint_url + args.project_id + '/secrets' 241 | response = get(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 242 | 243 | return response 244 | 245 | 246 | def list_secrets(args): 247 | if args.verbose: 248 | all_secrets = get_secrets(args).json() 249 | 250 | logger.info('Secrets:') 251 | msg = "" 252 | for secret in all_secrets: 253 | msg += 'Name: %s' % secret['name']\ 254 | + '\nId: %s' % secret['id']\ 255 | + '\n---\n' 256 | logger.log(msg, print_header=False) 257 | 258 | 259 | def get_secret_id_by_name(args): 260 | all_secrets = get_secrets(args).json() 261 | 262 | for secret in all_secrets: 263 | if args.name == secret['name']: 264 | return secret['id'] 265 | 266 | logger.info('Secret with such a name does not exist.') 267 | return None 268 | 269 | 270 | def add_secret(args): 271 | check_project_is_set(args) 272 | 273 | url = args.url + api_projects_endpoint_url + args.project_id + '/secrets' 274 | data = {'name': args.name, 'value': args.value} 275 | response = post(url, data, get_user_headers(), verify=args.ca_bundle, timeout=60) 276 | 277 | logger.info(response.json()['message']) 278 | return response 279 | 280 | 281 | def delete_secret(args): 282 | if args.id: 283 | delete_secret_by_id(args) 284 | elif args.name: 285 | delete_secret_by_name(args) 286 | else: 287 | logger.error('Please, provide either secret id or name.') 288 | 289 | 290 | def delete_secret_by_name(args): 291 | check_project_is_set(args) 292 | 293 | secret_id = get_secret_id_by_name(args) 294 | 295 | if not secret_id: 296 | return 297 | 298 | args.id = secret_id 299 | return delete_secret_by_id(args) 300 | 301 | 302 | def delete_secret_by_id(args): 303 | check_project_is_set(args) 304 | 305 | url = args.url + api_projects_endpoint_url + args.project_id + '/secrets/' + args.id 306 | response = delete(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 307 | 308 | logger.info(response.json()['message']) 309 | return response 310 | 311 | 312 | def get_project_tokens(args): 313 | check_project_is_set(args) 314 | 315 | url = args.url + api_projects_endpoint_url + args.project_id + '/tokens' 316 | response = get(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 317 | 318 | return response 319 | 320 | 321 | def list_project_tokens(args): 322 | if args.verbose: 323 | all_project_tokens = get_project_tokens(args).json() 324 | 325 | logger.info('Project tokens:') 326 | msg = "" 327 | for project_token in
all_project_tokens: 328 | msg += 'Description: %s' % project_token['description']\ 329 | + '\nId: %s' % project_token['id']\ 330 | + '\nScope push: %s' % project_token['scope_push']\ 331 | + '\nScope pull: %s' % project_token['scope_pull']\ 332 | + '\n---\n' 333 | logger.log(msg, print_header=False) 334 | 335 | 336 | def get_project_token_id_by_description(args): 337 | all_project_tokens = get_project_tokens(args).json() 338 | 339 | for project_token in all_project_tokens: 340 | if args.description == project_token['description']: 341 | return project_token['id'] 342 | 343 | logger.info('Token with such a description does not exist.') 344 | return None 345 | 346 | 347 | def add_project_token(args): 348 | check_project_is_set(args) 349 | 350 | url = args.url + api_projects_endpoint_url + args.project_id + '/tokens' 351 | 352 | data = { 353 | 'description': args.description, 354 | #TODO when scope push/pull functionality is implemented, 355 | # delete following 2 lines and uncomment next 2 lines 356 | 'scope_push': True, 357 | 'scope_pull': True 358 | #'scope_push': args.scope_push, 359 | #'scope_pull': args.scope_pull 360 | } 361 | 362 | response = post(url, data, get_user_headers(), verify=args.ca_bundle, timeout=60) 363 | 364 | if response.status_code != 200: 365 | logger.error(response.json()['message']) 366 | return 367 | 368 | # Print project token to the CLI 369 | logger.info('Authentication Token:' 370 | + '\nPlease save your token at a secure place. We will not show it to you again.\n') 371 | logger.log(response.json()['data']['token'], print_header=False) 372 | 373 | return response 374 | 375 | 376 | def delete_project_token(args): 377 | if args.id: 378 | delete_project_token_by_id(args) 379 | elif args.description: 380 | delete_project_token_by_description(args) 381 | else: 382 | logger.error('Please, provide either token id or description.') 383 | 384 | 385 | def delete_project_token_by_description(args): 386 | check_project_is_set(args) 387 | 388 | token_id = get_project_token_id_by_description(args) 389 | 390 | if not token_id: 391 | return 392 | 393 | args.id = token_id 394 | return delete_project_token_by_id(args) 395 | 396 | 397 | def delete_project_token_by_id(args): 398 | check_project_is_set(args) 399 | 400 | url = args.url + api_projects_endpoint_url + args.project_id + '/tokens/' + args.id 401 | response = delete(url, get_user_headers(), verify=args.ca_bundle, timeout=60) 402 | 403 | logger.info(response.json()['message']) 404 | return response 405 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/remotes.py: -------------------------------------------------------------------------------- 1 | from infraboxcli.dashboard import local_config 2 | from infraboxcli.log import logger 3 | 4 | 5 | def list_remotes(args): 6 | if args.verbose: 7 | remotes = local_config.get_all_remotes() 8 | 9 | msg = '\n: '.join(remotes) 10 | logger.info('Remotes:') 11 | logger.log(msg, print_header=False) 12 | -------------------------------------------------------------------------------- /infraboxcli/dashboard/user.py: -------------------------------------------------------------------------------- 1 | import getpass 2 | from infraboxcli.log import logger 3 | from infraboxcli.dashboard.cli_client import post 4 | from infraboxcli.dashboard.external import get_current_user_token, save_user_token, delete_current_user_token 5 | import infraboxcli.env 6 | 7 | from pyinfrabox.utils import validate_url 8 | 9 | 10 | api_endpoint_url = '/api/v1/' 
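# Authenticated requests carry an 'Authorization: token <user token>' header
# (built by get_user_headers below). The token itself is read from the local
# config that save_user_token writes after a successful login.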
11 | 12 | def get_user_token(): 13 | return get_current_user_token() 14 | 15 | def get_user_headers(): 16 | return {'Authorization': 'token %s' % get_user_token()} 17 | 18 | def login(args): 19 | if args.remote_url: 20 | args.url = args.remote_url 21 | 22 | if args.remote_url and not validate_url(args.remote_url): 23 | logger.error('Invalid URL.') 24 | exit(1) 25 | 26 | infraboxcli.env.check_env_url(args) 27 | 28 | email = args.email 29 | password = args.password 30 | 31 | if not email: 32 | email = raw_input("Email: ") if str is bytes else input("Email: ") # raw_input on Python 2, input on Python 3 33 | # Don't allow to pass password without email 34 | password = None 35 | 36 | if not password: 37 | password = getpass.getpass('Password: ') 38 | 39 | data = {"email": email, "password": password} 40 | 41 | url = args.url + api_endpoint_url + 'account/login' 42 | response = post(url, data, cookies_handler=save_user_token, verify=args.ca_bundle) 43 | 44 | return response 45 | 46 | 47 | def logout(args): 48 | token_deleted = delete_current_user_token() 49 | 50 | if token_deleted: 51 | logger.info('Successfully logged out.') 52 | else: 53 | logger.info('Already logged out.') 54 | -------------------------------------------------------------------------------- /infraboxcli/env.py: -------------------------------------------------------------------------------- 1 | import os 2 | import textwrap 3 | 4 | import jwt 5 | 6 | from infraboxcli.log import logger 7 | from infraboxcli.dashboard.local_config import get_current_remote_url, get_current_project_name 8 | 9 | 10 | def check_project_root(args): 11 | if 'project_root' not in args: 12 | logger.error("infrabox.json not found in current or any parent directory") 13 | exit(1) 14 | 15 | 16 | def check_env_url(args): 17 | if not args.url: 18 | current_remote_url = get_current_remote_url() 19 | if current_remote_url: 20 | args.url = current_remote_url 21 | return True 22 | 23 | error_msg = textwrap.dedent("\ 24 | Remote URL is not specified.
Either set the INFRABOX_URL env var or specify a URL via the `--url` argument.") 25 | logger.error(error_msg) 26 | exit(1) 27 | 28 | 29 | def check_env_cli_token(args): 30 | check_env_url(args) 31 | 32 | token = os.environ.get('INFRABOX_CLI_TOKEN', None) 33 | if not token: 34 | logger.error('INFRABOX_CLI_TOKEN env var must be set') 35 | exit(1) 36 | 37 | args.token = token 38 | 39 | t = jwt.decode(token, verify=False, options={"verify_signature": False}) 40 | args.project_id = t['project']['id'] 41 | 42 | if 'remote_project_name' not in args: 43 | current_config_project_name = get_current_project_name() 44 | if current_config_project_name: 45 | args.remote_project_name = current_config_project_name 46 | args.using_default_project = True 47 | 48 | 49 | return True 50 | -------------------------------------------------------------------------------- /infraboxcli/execute.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | from infraboxcli.log import logger 4 | 5 | def execute(command, cwd=None, env=None, ignore_error=False, ignore_output=False): 6 | logger.info('Running external process (cwd=%s): %s' % (cwd, ' '.join(command))) 7 | 8 | if env is None: 9 | env = os.environ 10 | 11 | process = subprocess.Popen(command, 12 | shell=False, 13 | stdout=subprocess.PIPE, 14 | stderr=subprocess.STDOUT, 15 | cwd=cwd, 16 | env=env, 17 | universal_newlines=True) 18 | 19 | # Poll process for new output until finished 20 | while True: 21 | line = process.stdout.readline() 22 | if not line: 23 | break 24 | 25 | if ignore_output: 26 | continue 27 | 28 | print(line.rstrip()) 29 | 30 | process.wait() 31 | 32 | if ignore_error: 33 | return 34 | 35 | exitCode = process.returncode 36 | if exitCode != 0: 37 | raise Exception(exitCode) 38 | -------------------------------------------------------------------------------- /infraboxcli/graph.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from infraboxcli.log import logger 5 | from infraboxcli.job_list import load_infrabox_file, get_job_list 6 | from infraboxcli.workflow import WorkflowCache 7 | from infraboxcli.env import check_project_root 8 | 9 | 10 | def graph(args): 11 | check_project_root(args) 12 | args.project_root = os.path.abspath(args.project_root) 13 | infrabox_file_path = args.infrabox_file_path 14 | if not os.path.isfile(infrabox_file_path): 15 | logger.error('%s does not exist' % infrabox_file_path) 16 | sys.exit(1) 17 | 18 | data = load_infrabox_file(args.infrabox_file_path) 19 | jobs = get_job_list(data, args, infrabox_context=args.project_root) 20 | 21 | cache = WorkflowCache(args) 22 | cache.add_jobs(jobs) 23 | cache.print_graph() 24 | -------------------------------------------------------------------------------- /infraboxcli/init.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import sys 4 | 5 | from infraboxcli.log import logger 6 | 7 | def init(_): 8 | p = os.getcwd() 9 | logger.info("Initializing %s" % p) 10 | 11 | infrabox_json = os.path.join(p, 'infrabox.json') 12 | if os.path.exists(infrabox_json): 13 | logger.error("%s already exists" % infrabox_json) 14 | sys.exit(1) 15 | 16 | dockerfile = os.path.join(p, 'infrabox', 'test', 'Dockerfile') 17 | infrabox_test = os.path.join(p, 'infrabox', 'test') 18 | if os.path.exists(dockerfile): 19 | logger.error("%s already exists" % dockerfile) 20 | sys.exit(1) 21 | 22 | 23 |
logger.info("Creating infrabox.json") 24 | 25 | with open(infrabox_json, 'w+') as f: 26 | json.dump({ 27 | "version": 1, 28 | "jobs": [{ 29 | "name": "test", 30 | "type": "docker", 31 | "build_only": False, 32 | "resources": {"limits": {"memory": 1024, "cpu": 1}}, 33 | "docker_file": "infrabox/test/Dockerfile" 34 | }] 35 | }, f, sort_keys=True, indent=4) 36 | 37 | logger.info("Creating infrabox/test/Dockerfile") 38 | os.makedirs(infrabox_test) 39 | 40 | with open(dockerfile, 'w+') as f: 41 | f.write(""" 42 | FROM alpine 43 | 44 | RUN adduser -S testuser 45 | USER testuser 46 | 47 | CMD echo "hello world" 48 | """) 49 | 50 | gitignore = os.path.join(p, '.gitignore') 51 | if os.path.exists(gitignore): 52 | with open(gitignore, 'a') as f: 53 | f.write("\n.infrabox/") 54 | f.write("\n.infraboxsecrets.json") 55 | 56 | dockerignore = os.path.join(p, '.dockerignore') 57 | with open(dockerignore, 'a') as f: 58 | f.write("\n.infrabox/") 59 | f.write("\n.infraboxsecrets.json") 60 | 61 | logger.info("Successfully initialized project") 62 | logger.info("Use 'infrabox run' to execute your jobs") 63 | -------------------------------------------------------------------------------- /infraboxcli/install.py: -------------------------------------------------------------------------------- 1 | import json 2 | import string 3 | import random 4 | import base64 5 | import sys 6 | import os 7 | import subprocess 8 | import yaml 9 | 10 | import inquirer 11 | 12 | CLUSTER_PROVIDERS = [ 13 | 'Google Kubernetes Engine', 14 | 'Use cluster from kubeconfig' 15 | ] 16 | 17 | def execute(cmd, cwd=None, shell=False, ignore_error=False): 18 | try: 19 | return subprocess.check_output(cmd, stderr=subprocess.STDOUT, cwd=cwd, shell=shell) 20 | except subprocess.CalledProcessError as e: 21 | if ignore_error: 22 | raise 23 | else: 24 | print(e.output) 25 | sys.exit(1) 26 | 27 | def create_gke_cluster(name): 28 | print('# Creating GKE Cluster') 29 | execute(['gcloud', 'container', 'clusters', 'create', name, 30 | '--zone', 'us-east1-b', 31 | '--machine-type', 'n1-standard-4']) 32 | 33 | def create_namespace(name): 34 | try: 35 | execute(['kubectl', 'get', 'ns', name], ignore_error=True) 36 | except: 37 | execute(['kubectl', 'create', 'ns', name]) 38 | 39 | def create_namespaces(): 40 | print('# Creating Namespaces') 41 | create_namespace('infrabox-system') 42 | create_namespace('infrabox-worker') 43 | 44 | def install_helm(): 45 | print('# Installing helm') 46 | try: 47 | execute(['kubectl', 'get', '-n', 'kube-system', 'sa', 'tiller'], ignore_error=True) 48 | return 49 | except: 50 | pass 51 | 52 | execute(['helm', 'repo', 'update']) 53 | execute(['kubectl', '-n', 'kube-system', 'create', 'sa', 'tiller']) 54 | execute(['kubectl', 'create', 'clusterrolebinding', 'tiller', 55 | '--clusterrole', 'cluster-admin', '--serviceaccount=kube-system:tiller']) 56 | execute(['helm', 'init', '--service-account', 'tiller', '--wait']) 57 | 58 | def install_postgres(): 59 | print('# Installing PostgreSQL') 60 | try: 61 | execute(['kubectl', 'get', '-n', 'infrabox-system', 'deployments', 'postgres-postgresql'], ignore_error=True) 62 | return 63 | except: 64 | pass 65 | 66 | execute(['helm', 'install', '--name', 'postgres', 'stable/postgresql', '--version', '1.0.0', 67 | '--set', 'imageTag=9.6.2,postgresPassword=postgres,probes.readiness.periodSeconds=5', 68 | '--namespace', 'infrabox-system']) 69 | 70 | def install_minio(): 71 | print('# Installing Minio') 72 | try: 73 | execute(['kubectl', 'get', '-n', 'infrabox-system', 'deployments', 
'infrabox-minio'], ignore_error=True) 74 | return 75 | except: 76 | pass 77 | 78 | execute(['helm', 'install', '--set', 'serviceType=ClusterIP,replicas=1,persistence.enabled=false', 79 | '-n', 'infrabox-minio', '--wait', '--namespace', 'infrabox-system', 'stable/minio']) 80 | 81 | def preflight_checks(): 82 | print('# Preflight checks') 83 | execute(['helm', 'version', '--client']) 84 | execute(['kubectl', 'version', '--client']) 85 | execute(['gcloud', '--version']) 86 | execute(['git', 'version']) 87 | 88 | def install_nginx_ingress(): 89 | print('# Install nginx ingress') 90 | try: 91 | execute(['kubectl', 'get', '-n', 'kube-system', 'deployments', 'nic-nginx-ingress-controller'], 92 | ignore_error=True) 93 | return 94 | except: 95 | pass 96 | 97 | execute(['helm', 'install', '-n', 'nic', '--namespace', 'kube-system', 98 | '--wait', 'stable/nginx-ingress']) 99 | 100 | def get_host(): 101 | o = execute(['kubectl', 'get', 'services', '-n', 'kube-system', 'nic-nginx-ingress-controller', '-o', 'json']) 102 | j = json.loads(o) 103 | ip = j['status']['loadBalancer']['ingress'][0]['ip'] 104 | return "%s.nip.io" % ip 105 | 106 | 107 | def clone_repo(a): 108 | print('# Clone InfraBox repository') 109 | execute(['mkdir', '-p', a['workdir']]) 110 | repo_dir = os.path.join(a['workdir'], 'InfraBox') 111 | 112 | execute(['rm', '-rf', repo_dir]) 113 | execute(['git', 'clone', 'https://github.com/SAP/InfraBox.git'], cwd=a['workdir']) 114 | execute(['git', 'checkout', a['infrabox-version']], cwd=repo_dir) 115 | 116 | def generate_keys(a): 117 | print('# Generate keys') 118 | if os.path.exists(os.path.join(a['workdir'], 'id_rsa')): 119 | return 120 | 121 | execute(['ssh-keygen', '-N', '', '-t', 'rsa', '-f', 'id_rsa'], cwd=a['workdir']) 122 | execute('ssh-keygen -f id_rsa.pub -e -m pem > id_rsa.pem', shell=True, cwd=a['workdir']) 123 | 124 | 125 | def helm_install_infrabox(a): 126 | print('# Install InfraBox') 127 | config = { 128 | 'image': { 129 | 'tag': a['infrabox-version'], 130 | }, 131 | 'general': { 132 | 'dont_check_certificates': True, 133 | }, 134 | 'admin': { 135 | 'email': a['admin-email'], 136 | 'password': a['admin-password'], 137 | 'private_key': base64.b64encode(open(os.path.join(a['workdir'], 'id_rsa'), 'rb').read()).decode(), 138 | 'public_key': base64.b64encode(open(os.path.join(a['workdir'], 'id_rsa.pub'), 'rb').read()).decode() 139 | }, 140 | 'host': str(a['host']), 141 | 'database': { 142 | 'postgres': { 143 | 'enabled': True, 144 | 'username': 'postgres', 145 | 'password': 'postgres', 146 | 'db': 'postgres', 147 | 'host': 'postgres-postgresql.infrabox-system' 148 | } 149 | }, 150 | 'storage': { 151 | 's3': { 152 | 'enabled': True, 153 | 'endpoint': 'infrabox-minio.infrabox-system', 154 | 'bucket': 'infrabox', 155 | 'secure': False, 156 | 'secret_access_key': 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY', 157 | 'access_key_id': 'AKIAIOSFODNN7EXAMPLE', 158 | 'region': 'us-east1', 159 | 'port': '9000', 160 | } 161 | }, 162 | 'job': { 163 | 'docker_daemon_config': str('{"insecure-registries": ["%s"]}' % a['host']) 164 | } 165 | } 166 | 167 | if 'Github' in a['components']: 168 | config['github'] = { 169 | 'enabled': True, 170 | 'client_id': a['github-client-id'], 171 | 'client_secret': a['github-client-secret'], 172 | 'webhook_secret': ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(20)), 173 | 'login': { 174 | 'enabled': True, 175 | 'allowed_organizations': a['github-allowed-orgs'] 176 | } 177 | } 178 | 179 | with open(os.path.join(a['workdir'], 'values.yaml'), 'w+') as outfile:
yaml.dump(config, outfile, default_flow_style=False) 181 | 182 | execute(['helm', 'install', '-n', 'infrabox', 183 | os.path.join(a['workdir'], 'InfraBox', 'deploy', 'infrabox'), 184 | '-f', 'values.yaml', '--wait', 185 | ], cwd=a['workdir']) 186 | 187 | def install_certificates(a): 188 | print('# Install certificates') 189 | try: 190 | execute(['kubectl', 'get', '-n', 'infrabox-system', 'secrets', 'infrabox-tls-certs'], ignore_error=True) 191 | return 192 | except: 193 | pass 194 | 195 | execute(['openssl', 'req', '-x509', '-nodes', '-days', '365', '-newkey', 'rsa:2048', 196 | '-keyout', '/tmp/tls.key', '-out', '/tmp/tls.crt', '-subj', '/CN=%s' % a['host']]) 197 | execute(['kubectl', 'create', '-n', 'infrabox-system', 'secret', 'tls', 'infrabox-tls-certs', 198 | '--key', '/tmp/tls.key', '--cert', '/tmp/tls.crt']) 199 | 200 | def install(): 201 | preflight_checks() 202 | 203 | questions = [ 204 | inquirer.Text('cluster-name', message="Name of the cluster"), 205 | inquirer.List('cluster-provider', 206 | message="On which provider should we create the kubernetes cluster", 207 | choices=CLUSTER_PROVIDERS, 208 | ), 209 | inquirer.List('infrabox-version', 210 | message="Which version of InfraBox do you want to install", 211 | choices=['1.1.4'], 212 | ), 213 | inquirer.Text('admin-email', message="Admin email"), 214 | inquirer.Checkbox('components', 215 | message="Which component would you like to configure", 216 | choices=['Github'], 217 | ), 218 | ] 219 | config = inquirer.prompt(questions) 220 | 221 | if 'Github' in config['components']: 222 | questions = [ 223 | inquirer.Text('github-client-id', message="Github Client ID"), 224 | inquirer.Text('github-client-secret', message="Github Client Secret"), 225 | inquirer.Text('github-allowed-orgs', 226 | message="Comma separated list of Github Organizations allowed to login"), 227 | ] 228 | 229 | answers = inquirer.prompt(questions) 230 | config.update(answers) 231 | 232 | questions = [ 233 | inquirer.List('start', 234 | message="Select yes to start installation", 235 | choices=['no', 'yes'], 236 | ), 237 | ] 238 | 239 | answers = inquirer.prompt(questions) 240 | if answers['start'] != 'yes': 241 | return 242 | 243 | 244 | config['admin-password'] = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10)) 245 | 246 | if config['cluster-provider'] == CLUSTER_PROVIDERS[0]: 247 | create_gke_cluster(config['cluster-name']) 248 | 249 | config['workdir'] = '/tmp/install-infrabox/%s' % config['cluster-name'] 250 | 251 | create_namespaces() 252 | install_helm() 253 | install_postgres() 254 | install_minio() 255 | install_nginx_ingress() 256 | config['host'] = get_host() 257 | install_certificates(config) 258 | clone_repo(config) 259 | generate_keys(config) 260 | helm_install_infrabox(config) 261 | 262 | print("Your InfraBox is ready: https://%s" % config['host']) 263 | print() 264 | 265 | if 'Github' in config['components']: 266 | print("IMPORTANT: Update your Github callback url to: https://%s/github/auth/callback" % config['host']) 267 | print() 268 | 269 | print("The configuration has been stored here: %s" % config['workdir']) 270 | print("Please keep a backup of it at a secure place.") 271 | print("It contains secret data like the encryption key and your admin password.") 272 | 273 | def install_infrabox(_): 274 | install() 275 | -------------------------------------------------------------------------------- /infraboxcli/job_list.py: -------------------------------------------------------------------------------- 1 | import os 
2 | import json 3 | import uuid 4 | import sys 5 | import yaml 6 | 7 | from builtins import range 8 | from pyinfrabox.infrabox import validate_json 9 | from infraboxcli.log import logger 10 | from infraboxcli.execute import execute 11 | 12 | LOADED_FILES = {} 13 | 14 | def load_infrabox_file(path): 15 | if path in LOADED_FILES: 16 | logger.error('Recursive include detected in %s' % path) 17 | sys.exit(1) 18 | 19 | LOADED_FILES[path] = path 20 | 21 | with open(path) as f: 22 | try: 23 | data = json.load(f) 24 | except ValueError: 25 | f.seek(0) 26 | if (sys.version_info.major == 2) or (yaml.__version__ < "5.1"): 27 | data = yaml.load(f) 28 | else: 29 | data = yaml.load(f, Loader=yaml.FullLoader) 30 | validate_json(data) 31 | return data 32 | 33 | def get_parent_name(parents): 34 | name = "" 35 | for i in range(0, len(parents)): 36 | p = parents[i] 37 | 38 | if i > 0: 39 | name += '/' 40 | 41 | name += p 42 | 43 | return name 44 | 45 | def rewrite_job_dependencies(job): 46 | # rewrite depends_on from 47 | # "jobname" -> {"job": "jobname", "on": ["finished"]} 48 | # "*" -> ["finished", "error", "failure", "skipped"] 49 | if 'depends_on' in job: 50 | for x in range(0, len(job['depends_on'])): 51 | dep = job['depends_on'][x] 52 | if not isinstance(dep, dict): 53 | job['depends_on'][x] = {"job": dep, "on": ["finished"]} 54 | else: 55 | for o in dep['on']: 56 | if o != "*": 57 | continue 58 | 59 | job['depends_on'][x] = { 60 | "job": dep['job'], 61 | "on": ["finished", "error", "failure", "skipped"] 62 | } 63 | 64 | 65 | def get_job_list(data, args, parents=None, infrabox_context=None): 66 | jobs = [] 67 | 68 | if not parents: 69 | parents = [] 70 | 71 | parent_name = get_parent_name(parents) 72 | 73 | for job in data['jobs']: 74 | job['id'] = str(uuid.uuid4()) 75 | job['parents'] = parents 76 | job['infrabox_context'] = os.path.normpath(infrabox_context) 77 | 78 | if 'build_context' in job: 79 | job['build_context'] = os.path.normpath(os.path.join(infrabox_context, job['build_context'])) 80 | else: 81 | job['build_context'] = os.path.normpath(infrabox_context) 82 | 83 | if parent_name != '': 84 | job['name'] = parent_name + "/" + job['name'] 85 | 86 | deps = job.get('depends_on', []) 87 | for x in range(0, len(deps)): 88 | dep = deps[x] 89 | if isinstance(dep, dict): 90 | dep = dep['job'] 91 | 92 | deps[x] = parent_name + "/" + dep 93 | 94 | rewrite_job_dependencies(job) 95 | 96 | job_name = job['name'] 97 | 98 | if job['type'] != "workflow" and job['type'] != 'git': 99 | jobs.append(job) 100 | continue 101 | 102 | new_parents = parents[:] 103 | new_parents.append(job_name) 104 | 105 | if job['type'] == "git": 106 | repo_path = os.path.join('/tmp', job_name) 107 | clone_branch = job.get('branch', None) 108 | execute(['rm', '-rf', repo_path]) 109 | if clone_branch: 110 | execute(['git', 'clone', '--depth=50', '--branch', clone_branch, job['clone_url'], repo_path]) 111 | else: 112 | execute(['git', 'clone', '--depth=50', job['clone_url'], repo_path]) 113 | execute(['git', 'config', 'remote.origin.url', job['clone_url']], cwd=repo_path) 114 | execute(['git', 'config', 'remote.origin.fetch', '+refs/heads/*:refs/remotes/origin/*'], cwd=repo_path) 115 | execute(['git', 'fetch', 'origin', job['commit']], cwd=repo_path) 116 | 117 | execute(['git', 'checkout', job['commit']], cwd=repo_path) 118 | 119 | ib_path = os.path.join(repo_path, job.get('infrabox_file', 'infrabox.json')) 120 | if not os.path.exists(ib_path): 121 | ib_path = os.path.join(repo_path, job.get('infrabox_file', 'infrabox.yaml')) 122 |
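# load_infrabox_file() above accepts both JSON and YAML definitions: it first
# tries json.load() and, on a ValueError, rewinds the file and parses it as
# YAML (passing Loader=yaml.FullLoader on PyYAML >= 5.1). A minimal definition
# it accepts, shown here in YAML form (the job name is illustrative):
#
#   version: 1
#   jobs:
#     - name: test
#       type: docker
#       docker_file: infrabox/test/Dockerfile
#       build_only: false
#       resources: {limits: {memory: 1024, cpu: 1}}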
123 | data = load_infrabox_file(ib_path) 124 | sub = get_job_list(data, args, new_parents, 125 | infrabox_context=os.path.dirname(ib_path)) 126 | 127 | # Set the build context to dirname of the infrabox.json 128 | # if no build context is specified 129 | for s in sub: 130 | if 'build_context' not in s: 131 | s['build_context'] = os.path.normpath(os.path.dirname(ib_path)) 132 | 133 | else: 134 | p = os.path.join(infrabox_context, job['infrabox_file']) 135 | p = os.path.normpath(p) 136 | data = load_infrabox_file(p) 137 | sub = get_job_list(data, args, new_parents, 138 | infrabox_context=os.path.dirname(p)) 139 | 140 | # every sub job which does not have a parent 141 | # should be a child of the current job 142 | job_with_children = {} 143 | for s in sub: 144 | deps = s.get('depends_on', []) 145 | if not deps: 146 | s['depends_on'] = job.get('depends_on', []) 147 | 148 | for d in deps: 149 | job_with_children[d['job']] = True 150 | 151 | jobs += sub 152 | 153 | # add a wait job to all sub jobs 154 | # which don't have a child, so we have 155 | # one 'final' job 156 | final_job = { 157 | "type": "wait", 158 | "name": job_name, 159 | "depends_on": [], 160 | "id": str(uuid.uuid4()), 161 | "parents": new_parents 162 | } 163 | 164 | for s in sub: 165 | sub_name = s['name'] 166 | if sub_name not in job_with_children: 167 | final_job['depends_on'].append({"job": sub_name, "on": ["finished"]}) 168 | 169 | jobs.append(final_job) 170 | 171 | return jobs 172 | -------------------------------------------------------------------------------- /infraboxcli/list_jobs.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | 4 | from infraboxcli.log import logger 5 | from infraboxcli.job_list import load_infrabox_file, get_job_list 6 | from infraboxcli.workflow import WorkflowCache 7 | from infraboxcli.env import check_project_root 8 | 9 | def list_jobs(args): 10 | check_project_root(args) 11 | 12 | args.project_root = os.path.abspath(args.project_root) 13 | infrabox_file_path = args.infrabox_file_path 14 | if not os.path.isfile(infrabox_file_path): 15 | logger.error('%s does not exist' % infrabox_file_path) 16 | sys.exit(1) 17 | 18 | data = load_infrabox_file(args.infrabox_file_path) 19 | jobs = get_job_list(data, args, infrabox_context=args.project_root) 20 | 21 | cache = WorkflowCache(args) 22 | cache.add_jobs(jobs) 23 | cache.print_tree() 24 | -------------------------------------------------------------------------------- /infraboxcli/log.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | from colorama import Fore, init 4 | init() 5 | 6 | 7 | class Logger(object): 8 | def __init__(self): 9 | pass 10 | 11 | def _print(self, color, s): 12 | print("%s[infrabox] %s%s" % (color, s, Fore.RESET)) 13 | 14 | def log(self, s, print_header=True): 15 | print("%s%s" % ("[infrabox] " if print_header else "", s)) 16 | 17 | def info(self, s): 18 | self._print(Fore.BLUE, s) 19 | 20 | def warn(self, s): 21 | self._print(Fore.YELLOW, s) 22 | 23 | def error(self, s): 24 | self._print(Fore.RED, s) 25 | 26 | def exception(self): 27 | msg = traceback.format_exc() 28 | self.error(msg) 29 | 30 | 31 | 32 | logger = Logger() 33 | -------------------------------------------------------------------------------- /infraboxcli/pull.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import tempfile 4 | import tarfile 5 | import shutil 6 | import
subprocess 7 | 8 | import requests 9 | 10 | import infraboxcli.env 11 | from infraboxcli.log import logger 12 | 13 | def download_file(url, filename, args): 14 | headers = {'Authorization': 'token ' + args.token} 15 | r = requests.get(url, headers=headers, stream=True, timeout=5, verify=args.ca_bundle) 16 | 17 | if r.status_code == 404: 18 | # no file exists 19 | return 20 | 21 | if r.status_code != 200: 22 | logger.error("Failed to download output of job") 23 | sys.exit(1) 24 | 25 | with open(filename, 'wb') as f: 26 | for chunk in r.iter_content(chunk_size=1024): 27 | if chunk: 28 | f.write(chunk) 29 | 30 | def pull(args): 31 | infraboxcli.env.check_project_root(args) 32 | infraboxcli.env.check_env_cli_token(args) 33 | 34 | if not args.url: 35 | logger.error('either --url or INFRABOX_URL must be set') 36 | sys.exit(1) 37 | 38 | headers = {'Authorization': 'token ' + args.token} 39 | url = '%s/api/v1/projects/%s/jobs/%s/manifest' % (args.url, args.project_id, args.job_id) 40 | r = requests.get(url, headers=headers, timeout=5, verify=args.ca_bundle) 41 | 42 | if r.status_code != 200: 43 | logger.error("Failed to download job manifest") 44 | logger.error(r.text) 45 | sys.exit(1) 46 | 47 | manifest = r.json() 48 | 49 | # Create directories 50 | path = os.path.join(tempfile.gettempdir(), 'infrabox', manifest['id']) 51 | if os.path.exists(path): 52 | shutil.rmtree(path) 53 | 54 | download_path = os.path.join(path, 'downloads') 55 | os.makedirs(download_path) 56 | inputs_path = os.path.join(path, 'inputs') 57 | os.makedirs(inputs_path) 58 | cache_path = os.path.join(path, 'cache') 59 | os.makedirs(cache_path) 60 | output_path = os.path.join(path, 'output') 61 | os.makedirs(output_path) 62 | 63 | # download inputs 64 | for d in manifest['dependencies']: 65 | p = os.path.join(inputs_path, d['name']) 66 | logger.info('Downloading output of %s to %s' % (d['name'], p)) 67 | os.makedirs(p) 68 | package_path = os.path.join(download_path, '%s.%s' % (d['id'], d['output']['format'])) 69 | download_file(d['output']['url'], package_path, args) 70 | 71 | if not os.path.exists(package_path): 72 | logger.info('No output found') 73 | continue 74 | 75 | # unpack 76 | tar = tarfile.open(package_path) 77 | tar.extractall(p) 78 | 79 | # download output 80 | logger.info('Downloading output of %s to %s' % (manifest['name'], output_path)) 81 | 82 | package_path = os.path.join(download_path, '%s.%s' % (manifest['id'], manifest['output']['format'])) 83 | download_file(manifest['output']['url'], package_path, args) 84 | 85 | if os.path.exists(package_path): 86 | tar = tarfile.open(package_path) 87 | tar.extractall(output_path) 88 | 89 | # remove download dir again 90 | shutil.rmtree(download_path) 91 | 92 | if not args.pull_container: 93 | return 94 | 95 | # login 96 | logger.info("Login to registry") 97 | image = manifest['image'].replace("//", "/") 98 | subprocess.check_call(('docker', 'login', image, '-p', args.token, '-u', 'infrabox')) 99 | 100 | # pulling images 101 | logger.info("Pulling image") 102 | subprocess.check_call(('docker', 'pull', image)) 103 | 104 | if not args.run_container: 105 | return 106 | 107 | # running it 108 | logger.info("Running container") 109 | 110 | cmd = ['docker', 'run', '-v', '%s:/infrabox' % path] 111 | cmd.append(image) 112 | 113 | subprocess.check_call(cmd) 114 | -------------------------------------------------------------------------------- /infraboxcli/push.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import 
zipfile 4 | import fnmatch 5 | import tempfile 6 | import requests 7 | 8 | from infraboxcli.console import show_console 9 | from infraboxcli.validate import validate_infrabox_file 10 | from infraboxcli.log import logger 11 | import infraboxcli.env 12 | 13 | def ignore_file(ignore_list, path): 14 | for i in ignore_list: 15 | if fnmatch.fnmatch(path, i): 16 | return True 17 | 18 | return False 19 | 20 | def add_files(args, ignore_list, path, ziph): 21 | c = os.listdir(path) 22 | 23 | for f in c: 24 | p = os.path.join(path, f) 25 | rp = os.path.relpath(p, args.project_root) 26 | 27 | if os.path.isfile(p) and not ignore_file(ignore_list, rp): 28 | ziph.write(p, rp) 29 | continue 30 | 31 | if os.path.isdir(p) and not ignore_file(ignore_list, rp): 32 | add_files(args, ignore_list, p, ziph) 33 | continue 34 | 35 | def zipdir(args): 36 | logger.info('compressing %s' % args.project_root) 37 | 38 | dockerignore = os.path.join(args.project_root, '.dockerignore') 39 | 40 | ignore_list = [] 41 | if os.path.exists(dockerignore): 42 | logger.info('Using .dockerignore') 43 | 44 | with open(dockerignore) as di: 45 | ignore = di.read().splitlines() 46 | 47 | for i in ignore: 48 | i = i.strip() 49 | if not i.startswith("#"): 50 | ignore_list.append(i) 51 | 52 | ft = tempfile.TemporaryFile() 53 | ziph = zipfile.ZipFile(ft, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) 54 | 55 | add_files(args, ignore_list, args.project_root, ziph) 56 | 57 | ziph.close() 58 | ft.seek(0, os.SEEK_END) 59 | size = ft.tell() 60 | logger.info('finished, file size is %s kb' % (size / 1024)) 61 | ft.seek(0) 62 | return ft 63 | 64 | def upload_zip(args, f): 65 | logger.info('Uploading ...') 66 | url = '%s/api/v1/projects/%s/upload/' % (args.url, args.project_id) 67 | files = {'project.zip': f} 68 | headers = {'Authorization': 'bearer ' + args.token} 69 | r = requests.post(url, files=files, headers=headers, timeout=120, verify=args.ca_bundle) 70 | 71 | try: 72 | d = r.json() 73 | except: 74 | print(r.text) 75 | raise 76 | 77 | if r.status_code != 200: 78 | logger.error("Upload failed: %s" % d['message']) 79 | sys.exit(1) 80 | 81 | return d['data'] 82 | 83 | def push(args): 84 | infraboxcli.env.check_project_root(args) 85 | infraboxcli.env.check_env_cli_token(args) 86 | 87 | if not args.url: 88 | logger.error('either --url or INFRABOX_URL must be set') 89 | sys.exit(1) 90 | 91 | if not os.path.isdir(args.project_root): 92 | logger.error('%s does not exist or is not a directory' % args.project_root) 93 | sys.exit(1) 94 | 95 | validate_infrabox_file(args) 96 | 97 | if args.validate_only: 98 | return 99 | 100 | zip_file = zipdir(args) 101 | result = upload_zip(args, zip_file) 102 | 103 | logger.info(result['url']) 104 | 105 | if args.show_console: 106 | show_console(result['build']['id'], args) 107 | -------------------------------------------------------------------------------- /infraboxcli/run.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import signal 4 | import shutil 5 | import sys 6 | import copy 7 | from datetime import datetime 8 | import traceback 9 | import yaml 10 | 11 | from infraboxcli.execute import execute 12 | from infraboxcli.job_list import get_job_list, load_infrabox_file 13 | from infraboxcli.log import logger 14 | from infraboxcli.workflow import WorkflowCache 15 | from infraboxcli.env import check_project_root 16 | from pyinfrabox import docker_compose 17 | 18 | from past.builtins import basestring 19 | 20 | parent_jobs = [] 21 | 22 | def 
makedirs(path): 23 | os.makedirs(path) 24 | os.chmod(path, 0o777) 25 | 26 | def makedirs_if_not_exists(path): 27 | if not os.path.exists(path): 28 | makedirs(path) 29 | 30 | def recreate_sym_link(source, link_name): 31 | if os.name == 'nt': 32 | if os.path.exists(link_name): 33 | shutil.rmtree(link_name) 34 | shutil.copytree(source, link_name) 35 | else: 36 | if os.path.exists(link_name): 37 | os.remove(link_name) 38 | os.symlink(source, link_name) 39 | 40 | def get_build_context(job, args): 41 | job_build_context = job.get('build_context', None) 42 | job_infrabox_context = job['infrabox_context'] 43 | 44 | # Default build context is the infrabox context 45 | build_context = job_infrabox_context 46 | 47 | if job_build_context: 48 | # a job-specified build context is always relative to the infrabox context 49 | build_context = os.path.join(job_infrabox_context, job_build_context) 50 | 51 | build_context = os.path.join(args.project_root, build_context) 52 | return os.path.normpath(build_context) 53 | 54 | def recreate_directories(dirs): 55 | for d in dirs: 56 | if os.path.exists(d): 57 | try: 58 | shutil.rmtree(d) 59 | except: 60 | execute(['docker', 'run', '-v', '%s:/to_delete' % d, 'alpine', 'rm', '-rf', '/to_delete'], 61 | ignore_error=True, 62 | ignore_output=True) 63 | shutil.rmtree(d) 64 | 65 | makedirs(d) 66 | 67 | 68 | def create_infrabox_directories(args, job, service=None, services=None, compose_file=None): 69 | #pylint: disable=too-many-locals 70 | job_name = job['name'].replace('/', '_') 71 | 72 | if service: 73 | job_name += "/" + service 74 | 75 | # Create dirs 76 | work_dir = os.path.join(args.project_root, '.infrabox', 'work') 77 | job_dir = os.path.join(work_dir, 'jobs', job_name) 78 | infrabox = os.path.join(job_dir, 'infrabox') 79 | infrabox_cache = os.path.join(infrabox, 'cache') 80 | infrabox_output = os.path.join(infrabox, 'output') 81 | infrabox_inputs = os.path.join(infrabox, 'inputs') 82 | infrabox_upload = os.path.join(infrabox, 'upload') 83 | infrabox_testresult = os.path.join(infrabox_upload, 'testresult') 84 | infrabox_coverage = os.path.join(infrabox_upload, 'coverage') 85 | infrabox_markup = os.path.join(infrabox_upload, 'markup') 86 | infrabox_badge = os.path.join(infrabox_upload, 'badge') 87 | infrabox_archive = os.path.join(infrabox_upload, 'archive') 88 | infrabox_job_json = os.path.join(infrabox, 'job.json') 89 | infrabox_gosu = os.path.join(infrabox, 'gosu.sh') 90 | infrabox_local_cache = args.local_cache 91 | 92 | # If any directories used as volumes in docker do not exist prior to the docker run call, 93 | # docker will create them as root!
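# A sketch of the failure mode the comment above describes (paths are
# illustrative): if .infrabox/work/jobs/test/infrabox/cache does not exist and
# is passed to "docker run -v <path>:/infrabox/cache", the docker daemon
# creates it owned by root, and a later shutil.rmtree() by a non-root user
# fails. makedirs() therefore pre-creates every volume directory with mode 0777.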
94 | makedirs_if_not_exists(infrabox_cache) 95 | makedirs_if_not_exists(infrabox_local_cache) 96 | makedirs_if_not_exists(infrabox_upload) 97 | makedirs_if_not_exists(infrabox_archive) 98 | 99 | logger.info('Recreating directories') 100 | 101 | recreate_dirs = [ 102 | infrabox_output, 103 | infrabox_inputs, 104 | infrabox_testresult, 105 | infrabox_coverage, 106 | infrabox_markup, 107 | infrabox_badge 108 | ] 109 | 110 | recreate_directories(recreate_dirs) 111 | 112 | job['directories'] = { 113 | "output": infrabox_output, 114 | "upload/testresult": infrabox_testresult, 115 | "upload/coverage": infrabox_coverage, 116 | "upload/markup": infrabox_markup, 117 | "upload/badge": infrabox_badge, 118 | "upload/archive": infrabox_archive, 119 | "cache": infrabox_cache, 120 | "local-cache": args.local_cache, 121 | "job.json:ro": infrabox_job_json, 122 | "gosu.sh:ro": infrabox_gosu 123 | } 124 | 125 | if service: 126 | service_build = services[service].get('build', None) 127 | 128 | if service_build: 129 | service_build_context = service_build.get('context', None) 130 | 131 | if service_build_context: 132 | context = os.path.join(os.path.dirname(compose_file), service_build_context) 133 | job['directories']['context'] = context 134 | else: 135 | job['directories']['context'] = get_build_context(job, args) 136 | 137 | # create job.json 138 | with open(infrabox_job_json, 'w') as out: 139 | o = { 140 | "parent_jobs": parent_jobs, 141 | "local": True, 142 | "job": { 143 | "name": job_name, 144 | }, 145 | "project": { 146 | "name": args.project_name, 147 | } 148 | } 149 | 150 | json.dump(o, out) 151 | 152 | 153 | if os.path.exists(os.path.join(args.project_root, '.infrabox', 'inputs')): 154 | shutil.rmtree(os.path.join(args.project_root, '.infrabox', 'inputs')) 155 | 156 | for dep in job.get('depends_on', []): 157 | source_path = os.path.join(args.project_root, '.infrabox', 'work', 158 | 'jobs', dep['job'].replace('/', '_'), 'infrabox', 'output') 159 | 160 | if not os.path.exists(source_path): 161 | continue 162 | 163 | dep = dep['job'].split("/")[-1] 164 | destination_path = os.path.join(args.project_root, '.infrabox', 'inputs', dep) 165 | 166 | shutil.copytree(source_path, destination_path, symlinks=True) 167 | 168 | job['directories']['inputs/%s' % dep] = source_path 169 | 170 | # Create symlinks 171 | recreate_sym_link(infrabox_output, os.path.join(args.project_root, '.infrabox', 'output')) 172 | recreate_sym_link(infrabox_upload, os.path.join(args.project_root, '.infrabox', 'upload')) 173 | recreate_sym_link(infrabox_cache, os.path.join(args.project_root, '.infrabox', 'cache')) 174 | 175 | def get_secret(args, name): 176 | secrets_file = os.path.join(args.project_root, '.infraboxsecrets.json') 177 | if not os.path.exists(secrets_file): 178 | logger.error("No secrets file found") 179 | sys.exit(1) 180 | 181 | with open(secrets_file) as f: 182 | secrets = json.load(f) 183 | 184 | if name not in secrets: 185 | logger.error("%s not found in .infraboxsecrets.json" % name) 186 | sys.exit(1) 187 | 188 | return secrets[name] 189 | 190 | def build_and_run_docker_compose(args, job): 191 | create_infrabox_directories(args, job) 192 | 193 | compose_file = os.path.join(job['infrabox_context'], job['docker_compose_file']) 194 | compose_file = os.path.normpath(compose_file) 195 | compose_file_new = compose_file + ".infrabox" 196 | 197 | # rewrite compose file 198 | compose_file_content = docker_compose.create_from(compose_file) 199 | for service in compose_file_content['services']: 200 | 
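# For each service, the loop below rewrites the compose file: the InfraBox
# directories are mounted as volumes, /infrabox/context is pointed at the
# service's build context (or the project root), and INFRABOX_BUILD_NUMBER=local
# is injected as a build argument. The rewritten file is written next to the
# original with an ".infrabox" suffix and passed to docker-compose via -f.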
create_infrabox_directories(args, job, 201 | service=service, 202 | services=compose_file_content['services'], 203 | compose_file=compose_file) 204 | 205 | volumes = [] 206 | for v in compose_file_content['services'][service].get('volumes', []): 207 | if isinstance(v, basestring): 208 | v = v.replace('/infrabox/context', args.project_root) 209 | volumes.append(v) 210 | 211 | for name, path in job['directories'].items(): 212 | volumes.append(str('%s:/infrabox/%s' % (path, name))) 213 | 214 | # Mount /infrabox/context to the build context of the service if build.context 215 | # is set in the compose file for the service 216 | service_build = compose_file_content['services'][service].get('build', None) 217 | if service_build: 218 | service_build_context = service_build.get('context', None) 219 | if service_build_context: 220 | build_context = os.path.join(os.path.dirname(compose_file), service_build_context) 221 | volumes += ['%s:/infrabox/context' % str(build_context)] 222 | else: 223 | volumes += ['%s:/infrabox/context' % args.project_root] 224 | else: 225 | volumes += ['%s:/infrabox/context' % args.project_root] 226 | volumes = list(dict.fromkeys(volumes)) 227 | 228 | compose_file_content['services'][service]['volumes'] = volumes 229 | 230 | build = compose_file_content['services'][service].get('build', None) 231 | if build: 232 | if not build.get('args', None): 233 | build['args'] = [] 234 | elif not any([build_arg.startswith("INFRABOX_BUILD_NUMBER=") for build_arg in build['args']]): 235 | build['args'] += ['INFRABOX_BUILD_NUMBER=local'] 236 | 237 | with open(compose_file_new, "w+") as out: 238 | yaml.dump(compose_file_content, out, default_flow_style=False) 239 | 240 | env = { 241 | 'PATH': os.environ['PATH'], 242 | 'INFRABOX_CLI': 'true', 243 | 'INFRABOX_BUILD_NUMBER': 'local' 244 | } 245 | 246 | if 'environment' in job: 247 | for name, value in job['environment'].items(): 248 | if isinstance(value, dict): 249 | env[name] = get_secret(args, value['$secret']) 250 | else: 251 | env[name] = value 252 | 253 | if not args.no_rm: 254 | execute(['docker-compose', '-p', args.project_name, 255 | '-f', compose_file_new, 'rm', '-f'], env=env, cwd=job['build_context']) 256 | 257 | execute(['docker-compose', '-p', args.project_name, 258 | '-f', compose_file_new, 'build'], env=env, cwd=job['build_context']) 259 | 260 | def signal_handler(_, __): 261 | logger.info("Stopping docker containers") 262 | execute(['docker-compose', '-f', compose_file_new, 'stop'], env=env, cwd=job['build_context']) 263 | os.remove(compose_file_new) 264 | sys.exit(0) 265 | 266 | signal.signal(signal.SIGINT, signal_handler) 267 | 268 | execute(['docker-compose', '-p', args.project_name, 269 | '-f', compose_file_new, 'up', '--abort-on-container-exit'], env=env) 270 | signal.signal(signal.SIGINT, signal.SIG_DFL) 271 | 272 | # Print the return code of all the containers 273 | execute(['docker-compose', '-p', args.project_name, 274 | '-f', compose_file_new, 'ps'], env=env, cwd=job['build_context']) 275 | 276 | os.remove(compose_file_new) 277 | 278 | def build_docker_image(args, job, image_name, target=None): 279 | # Build the image 280 | logger.info("Build docker image") 281 | 282 | docker_file = os.path.normpath(os.path.join(get_build_context(job, args), 283 | job['docker_file'])) 284 | 285 | cmd = ['docker', 'build', '-t', image_name, '.', '-f', docker_file] 286 | if 'build_arguments' in job: 287 | for name, value in job['build_arguments'].items(): 288 | cmd += ['--build-arg', '%s=%s' %(name, value)] 289 | 290 | if 
args.build_arg: 291 | for a in args.build_arg: 292 | cmd += ['--build-arg', a] 293 | 294 | if not args.build_arg or not any([build_arg.startswith("INFRABOX_BUILD_NUMBER=") for build_arg in args.build_arg]): 295 | cmd += ['--build-arg', 'INFRABOX_BUILD_NUMBER=local'] 296 | 297 | # memory limit 298 | if not args.unlimited: 299 | cmd += ['-m', '%sm' % job['resources']['limits']['memory']] 300 | 301 | if target: 302 | cmd += ['--target', target] 303 | 304 | execute(cmd, cwd=get_build_context(job, args)) 305 | 306 | def run_container(args, job, image_name): 307 | container_name = 'ib_' + job['name'].replace("/", "-") 308 | 309 | if not args.no_rm: 310 | execute(['docker', 'rm', container_name], 311 | cwd=args.project_root, 312 | ignore_error=True, 313 | ignore_output=True) 314 | 315 | # Run the container 316 | cmd = ['docker', 'run', '--name', container_name] 317 | 318 | # Security context 319 | security_context = job.get('security_context', {}) 320 | 321 | privileged = security_context.get('privileged', False) 322 | if privileged: 323 | cmd += ['--privileged', '-v', '/tmp/docker:/var/lib/docker'] 324 | 325 | for name, path in job['directories'].items(): 326 | cmd += ['-v', '%s:/infrabox/%s' % (path, name)] 327 | 328 | if 'environment' in job: 329 | for name, value in job['environment'].items(): 330 | if isinstance(value, dict): 331 | cmd += ['-e', '%s=%s' % (name, get_secret(args, value['$secret']))] 332 | else: 333 | cmd += ['-e', '%s=%s' % (name, value)] 334 | 335 | cmd += ['-e', 'INFRABOX_CLI=true', '-e', 'INFRABOX_BUILD_NUMBER=local'] 336 | 337 | if args.env: 338 | for e in args.env: 339 | cmd += ['-e', e] 340 | 341 | if args.env_file: 342 | cmd += ['--env-file', args.env_file] 343 | 344 | if os.name != 'nt': 345 | cmd += ['-e', 'INFRABOX_UID=%s' % os.geteuid()] 346 | cmd += ['-e', 'INFRABOX_GID=%s' % os.getegid()] 347 | 348 | if not args.unlimited: 349 | # memory limit 350 | cmd += ['-m', '%sm' % job['resources']['limits']['memory']] 351 | 352 | # CPU limit 353 | cmd += ['--cpus', str(job['resources']['limits']['cpu'])] 354 | 355 | cmd.append(image_name) 356 | 357 | if (job['type'] == 'docker-image' or job['type'] == 'docker') and job.get('command', None): 358 | cmd += job['command'] 359 | 360 | logger.info("Run docker container") 361 | try: 362 | execute(cmd, cwd=args.project_root) 363 | except: 364 | try: 365 | execute(['docker', 'stop', container_name]) 366 | except: 367 | pass 368 | raise 369 | 370 | logger.info("Committing container") 371 | execute(['docker', 'commit', container_name, image_name], cwd=args.project_root) 372 | 373 | def tag_docker_image(image_name, deployments): 374 | new_images = [] 375 | for d in deployments: 376 | new_image_name = "%s/%s:%s" % (d['host'], d['repository'], d.get('tag', 'build_local')) 377 | execute(['docker', 'tag', image_name, new_image_name]) 378 | new_images.append(new_image_name) 379 | return new_images 380 | 381 | def run_docker_image(args, job): 382 | create_infrabox_directories(args, job) 383 | 384 | if args.build_arg: 385 | args_infrabox_build_nr = [build_arg for build_arg in args.build_arg if build_arg.startswith("INFRABOX_BUILD_NUMBER=")] 386 | else: 387 | args_infrabox_build_nr = [] 388 | 389 | if args_infrabox_build_nr: 390 | arg_infrabox_build_nr = args_infrabox_build_nr[-1].replace("INFRABOX_BUILD_NUMBER=", "") 391 | else: 392 | arg_infrabox_build_nr = 'local' 393 | image_name = job['image'].replace('$INFRABOX_BUILD_NUMBER', arg_infrabox_build_nr) 394 | 395 | if job.get('run', True): 396 | run_container(args, job, image_name) 397 | 398 |
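# For orientation, a job named "test" with the default limits makes
# run_container() above invoke roughly the following (a sketch, volume list
# shortened, image name illustrative):
#
#   docker run --name ib_test \
#       -v <job_dir>/infrabox/output:/infrabox/output ... \
#       -e INFRABOX_CLI=true -e INFRABOX_BUILD_NUMBER=local \
#       -m 1024m --cpus 1 <image_name>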
deployments = job.get('deployments', []) 399 | tag_docker_image(image_name, deployments) 400 | 401 | def build_and_run_docker(args, job): 402 | create_infrabox_directories(args, job) 403 | 404 | image_name = None 405 | if args.tag: 406 | image_name = args.tag 407 | else: 408 | image_name = args.project_name + '_' + job['name'] 409 | image_name = image_name.replace("/", "-") 410 | image_name = image_name.lower() 411 | 412 | deployments = job.get('deployments', []) 413 | new_images = [] 414 | if deployments: 415 | for d in deployments: 416 | target = d.get('target', None) 417 | 418 | if not target and not job.get('build_only', True): 419 | continue 420 | 421 | build_docker_image(args, job, image_name, target=target) 422 | new_images.extend(tag_docker_image(image_name, [d])) # tag when target is set 423 | 424 | build_docker_image(args, job, image_name) 425 | if not job.get('build_only', True): 426 | run_container(args, job, image_name) 427 | 428 | new_images.extend(tag_docker_image(image_name, filter(lambda d: 'target' not in d, deployments))) # tag when target is _not_ set 429 | for new_image in new_images: 430 | logger.info(new_image) 431 | 432 | def get_parent_job(name): 433 | for job in parent_jobs: 434 | if job['name'] == name: 435 | return job 436 | 437 | return None 438 | 439 | def track_as_parent(job, state, start_date=datetime.now(), end_date=datetime.now()): 440 | parent_jobs.append({ 441 | "name": job['name'], 442 | "state": state, 443 | "start_date": str(start_date), 444 | "end_date": str(end_date), 445 | "depends_on": job.get('depends_on', []) 446 | }) 447 | 448 | def build_and_run(args, job, cache): 449 | # check if dependency conditions are met 450 | for dep in job.get("depends_on", []): 451 | on = dep['on'] 452 | parent = get_parent_job(dep['job']) 453 | 454 | if not parent: 455 | continue 456 | 457 | if parent['state'] not in on: 458 | logger.info('Skipping job %s' % job['name']) 459 | track_as_parent(job, 'skipped') 460 | return 461 | 462 | job_type = job['type'] 463 | start_date = datetime.now() 464 | 465 | logger.info("Starting job %s" % job['name']) 466 | 467 | state = 'finished' 468 | 469 | try: 470 | if job_type == "docker-compose": 471 | build_and_run_docker_compose(args, job) 472 | elif job_type == "docker": 473 | build_and_run_docker(args, job) 474 | elif job_type == "docker-image": 475 | run_docker_image(args, job) 476 | elif job_type == "wait": 477 | # do nothing 478 | pass 479 | else: 480 | logger.error("Unknown job type") 481 | sys.exit(1) 482 | except Exception as e: 483 | state = 'failure' 484 | traceback.print_exc(file=sys.stdout) 485 | logger.warn("Job failed: %s" % e) 486 | sys.exit(1) 487 | 488 | if not job.get('directories', None): 489 | return 490 | 491 | # Dynamic child jobs 492 | infrabox_file = os.path.join(job['directories']['output'], 'infrabox.json') 493 | if not os.path.exists(infrabox_file): 494 | infrabox_file = os.path.join(job['directories']['output'], 'infrabox.yaml') 495 | 496 | jobs = [] 497 | if os.path.exists(infrabox_file): 498 | logger.info("Loading generated jobs") 499 | 500 | data = load_infrabox_file(infrabox_file) 501 | jobs = get_job_list(data, args, infrabox_context=os.path.join(args.project_root, '.infrabox', 'output')) 502 | 503 | end_date = datetime.now() 504 | 505 | track_as_parent(job, state, start_date, end_date) 506 | logger.info("Finished job %s" % job['name']) 507 | 508 | for j in jobs: 509 | # Prefix name with parent 510 | j['name'] = job['name'] + '/' + j['name'] 511 | 512 | # Add dependencies to all root jobs 513 | if 
not j.get('depends_on', None): 514 | j['depends_on'] = [{"on": ["finished"], "job": job['name']}] 515 | else: 516 | dependencies = copy.deepcopy(j['depends_on']) 517 | 518 | for d in dependencies: 519 | d['job'] = job['name'] + '/' + d['job'] 520 | 521 | j['depends_on'] = dependencies 522 | 523 | cache.add_job(j) 524 | 525 | if args.children: 526 | for j in jobs: 527 | build_and_run(args, j, cache) 528 | 529 | def run(args): 530 | check_project_root(args) 531 | 532 | # Init workflow cache 533 | cache = WorkflowCache(args) 534 | 535 | # validate infrabox.json 536 | data = load_infrabox_file(args.infrabox_file_path) 537 | if args.memory: 538 | logger.warn('WARNING: only int resource limits are supported right now. The provided memory value is truncated to an int.') 539 | for job in data['jobs']: 540 | job['resources']['limits']['memory'] = int(args.memory) 541 | if args.cpu: 542 | logger.warn('WARNING: only int resource limits are supported right now. The provided cpu value is truncated to an int.') 543 | for job in data['jobs']: 544 | job['resources']['limits']['cpu'] = int(args.cpu) 545 | 546 | jobs = get_job_list(data, args, infrabox_context=args.project_root) 547 | 548 | if not args.job_name: 549 | # We run all jobs, so clear all cached jobs 550 | cache.clear() 551 | 552 | # Cache all jobs 553 | cache.add_jobs(jobs) 554 | 555 | for j in cache.get_jobs(job_name=args.job_name, children=args.children): 556 | build_and_run(args, j, cache) 557 | -------------------------------------------------------------------------------- /infraboxcli/validate.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import os 3 | import json 4 | import yaml 5 | 6 | from pyinfrabox.infrabox import validate_json 7 | from infraboxcli.env import check_project_root 8 | from infraboxcli.log import logger 9 | 10 | def validate_infrabox_file(args): 11 | args.project_root = os.path.abspath(args.project_root) 12 | infrabox_file_path = args.infrabox_file_path 13 | if not os.path.isfile(infrabox_file_path): 14 | logger.error('%s does not exist' % infrabox_file_path) 15 | sys.exit(1) 16 | 17 | with open(infrabox_file_path, 'r') as f: 18 | try: 19 | data = json.load(f) 20 | except ValueError: 21 | f.seek(0) 22 | if (sys.version_info.major == 2) or (yaml.__version__ < "5.1"): 23 | data = yaml.load(f) 24 | else: 25 | data = yaml.load(f, Loader=yaml.FullLoader) 26 | validate_json(data) 27 | 28 | def validate(args): 29 | check_project_root(args) 30 | validate_infrabox_file(args) 31 | logger.info("No issues found in infrabox file %s" % args.infrabox_file_path) 32 | -------------------------------------------------------------------------------- /infraboxcli/workflow.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | import sys 4 | 5 | from infraboxcli.log import logger 6 | 7 | class WorkflowCache(object): 8 | def __init__(self, args): 9 | self.jobs = [] 10 | self.persist = True 11 | 12 | work_dir = os.path.join(args.project_root, '.infrabox', 'work') 13 | 14 | if not os.path.exists(work_dir): 15 | os.makedirs(work_dir) 16 | 17 | self.infrabox_full_workflow = os.path.join(work_dir, 'full_workflow.json') 18 | 19 | if os.path.exists(self.infrabox_full_workflow): 20 | with open(self.infrabox_full_workflow) as f: 21 | data = json.load(f) 22 | self.jobs = data['jobs'] 23 | 24 | if args.infrabox_file: 25 | # an infrabox.json file was specified with -f, 26 | # so we don't persist the cache, to avoid overwriting 27 | # the full workflow graph 28 | self.persist = False 29 | self.jobs = [] 30 |
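# A sketch of how the cache is used by the CLI commands (see run.py and
# list_jobs.py): jobs parsed from the infrabox file are registered with
# add_jobs(), persisted to .infrabox/work/full_workflow.json, and later looked
# up by name:
#
#   cache = WorkflowCache(args)
#   cache.add_jobs(jobs)
#   for job in cache.get_jobs(job_name=args.job_name, children=args.children):
#       build_and_run(args, job, cache)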
31 | def clear(self): 32 | if os.path.exists(self.infrabox_full_workflow): 33 | os.remove(self.infrabox_full_workflow) 34 | 35 | self.jobs = [] 36 | 37 | def get_jobs(self, job_name=None, children=False): 38 | if not job_name: 39 | return self.jobs 40 | 41 | jobs = [] 42 | for j in self.jobs: 43 | if j['name'] == job_name: 44 | jobs.append(j) 45 | 46 | if children: 47 | for p in j.get('depends_on', []): 48 | jobs += self.get_jobs(p['job'], children) 49 | 50 | if not jobs: 51 | logger.error("job %s not found in infrabox.json" % job_name) 52 | sys.exit(1) 53 | 54 | return jobs 55 | 56 | def add_jobs(self, jobs): 57 | for j in jobs: 58 | self.add_job(j) 59 | 60 | def add_job(self, job): 61 | updated = False 62 | 63 | for i in range(0, len(self.jobs)): 64 | if self.jobs[i]['name'] == job['name']: 65 | updated = True 66 | self.jobs[i] = job 67 | break 68 | 69 | if not updated: 70 | self.jobs.append(job) 71 | 72 | self._write() 73 | 74 | def _write(self): 75 | if not self.persist: 76 | return 77 | 78 | with open(self.infrabox_full_workflow, 'w') as out: 79 | json.dump({'version': 1, 'jobs': self.jobs}, out, indent=4) 80 | 81 | def print_tree(self): 82 | jobs = sorted(self.jobs, key=lambda k: k['name']) 83 | for j in jobs: 84 | print(j['name']) 85 | 86 | def print_graph(self): 87 | index = {} 88 | def place(index, name, job): 89 | if name.startswith('/'): 90 | name = name[1:] 91 | 92 | if name == "": 93 | index[""] = job 94 | return 95 | 96 | prefix = name.split('/')[0] 97 | if prefix not in index: 98 | index[prefix] = {} 99 | place(index[prefix], name[len(prefix):], job) 100 | 101 | for job in self.jobs: 102 | place(index, job['name'], job) 103 | 104 | print('digraph "Jobs" {') 105 | 106 | def print_cluster(name, cluster, indent=' '): 107 | if 'name' in cluster: 108 | # then this is a job and not a cluster 109 | print('{indent}"{name}" [label="{label}" shape=box]'.format( 110 | name=cluster['name'], 111 | label=cluster['name'].split('/')[-1], 112 | indent=indent)) 113 | elif len(cluster) == 1: 114 | if (sys.version_info.major == 2): 115 | for inner_name, inner_cluster in cluster.iteritems(): 116 | print_cluster(inner_name, inner_cluster, indent) 117 | elif (sys.version_info.major == 3): 118 | for inner_name, inner_cluster in cluster.items(): 119 | print_cluster(inner_name, inner_cluster, indent) 120 | else: 121 | raise Exception('Unsupported python version') 122 | else: 123 | print('{indent}subgraph "cluster_{name}" {{'.format(name=name, indent=indent)) 124 | 125 | if (sys.version_info.major == 2): 126 | for inner_name, inner_cluster in cluster.iteritems(): 127 | print_cluster(inner_name, inner_cluster, indent + ' ') 128 | elif (sys.version_info.major == 3): 129 | for inner_name, inner_cluster in cluster.items(): 130 | print_cluster(inner_name, inner_cluster, indent + ' ') 131 | else: 132 | raise Exception('Unsupported python version') 133 | 134 | print('{indent}}}'.format(indent=indent)) 135 | 136 | if (sys.version_info.major == 2): 137 | for name, cluster in index.iteritems(): 138 | print_cluster(name, cluster) 139 | elif (sys.version_info.major == 3): 140 | for name, cluster in index.items(): 141 | print_cluster(name, cluster) 142 | else: 143 | raise Exception('Unsupported python version') 144 | 145 | for j in self.jobs: 146 | name = j['name'] 147 | 148 | for dep in j.get('depends_on', []): 149 | if isinstance(dep, str): 150 | print(' "{a}" -> "{b}"'.format(a=dep, b=name)) 151 | else: 152 | print(' "{a}" -> "{b}"
[label="{on}"]'.format( 153 | a=dep['job'], 154 | b=name, 155 | on=", ".join(dep['on']))) 156 | 157 | print('}') 158 | -------------------------------------------------------------------------------- /pyinfrabox/__init__.py: -------------------------------------------------------------------------------- 1 | class ValidationError(Exception): 2 | def __init__(self, path, message): 3 | super(ValidationError, self).__init__("%s: %s" % (path, message)) 4 | -------------------------------------------------------------------------------- /pyinfrabox/badge/__init__.py: -------------------------------------------------------------------------------- 1 | from pyinfrabox import ValidationError 2 | from pyinfrabox.utils import * 3 | from builtins import int 4 | 5 | def check_version(v, path): 6 | if not isinstance(v, int): 7 | raise ValidationError(path, "must be an int") 8 | 9 | if v != 1: 10 | raise ValidationError(path, "unsupported version") 11 | 12 | 13 | def parse_badge(d): 14 | check_allowed_properties(d, "#", ("version", "subject", "status", "color")) 15 | check_required_properties(d, "#", ("version", "subject", "status", "color")) 16 | check_version(d['version'], "#version") 17 | 18 | check_text(d['subject'], "#subject") 19 | check_text(d['status'], "#status") 20 | check_color(d['color'], "#color") 21 | 22 | def validate_badge(d): 23 | parse_badge(d) 24 | -------------------------------------------------------------------------------- /pyinfrabox/docker_compose/__init__.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | 3 | def handle_version(d, r): 4 | supported_versions = ("3.2",) 5 | v = str(d['version']) 6 | if v not in supported_versions: 7 | raise Exception("version not supported, supported version is 3.2") 8 | 9 | r["version"] = v 10 | 11 | def handle_service(name, d, r): 12 | r['services'][name] = {} 13 | 14 | for key, value in d[name].items(): 15 | allowed_fields = [ 16 | 'links', 17 | 'environment', 18 | 'networks', 19 | 'tty', 20 | 'volumes', 21 | 'ports', 22 | 'restart', 23 | 'build', 24 | 'command', 25 | 'image', 26 | 'container_name', 27 | 'depends_on', 28 | 'entrypoint', 29 | 'links', 30 | 'extra_hosts', 31 | 'network_mode' 32 | ] 33 | 34 | if key in allowed_fields: 35 | r['services'][name][key] = value 36 | else: 37 | raise Exception("[services][%s][%s] not supported" % (name, key)) 38 | 39 | def handle_services(d, r): 40 | d = d['services'] 41 | r['services'] = {} 42 | for key in d.keys(): 43 | handle_service(key, d, r) 44 | 45 | 46 | def parse(d): 47 | r = {} 48 | 49 | if not d: 50 | raise Exception("invalid file") 51 | 52 | if "version" not in d: 53 | raise Exception("version not found") 54 | 55 | if "services" not in d: 56 | raise Exception("services not found") 57 | 58 | for key in d.keys(): 59 | if key == "version": 60 | handle_version(d, r) 61 | elif key == "services": 62 | handle_services(d, r) 63 | elif key == "networks": 64 | r[key] = d[key] 65 | else: 66 | raise Exception("[%s] not supported" % key) 67 | 68 | return r 69 | 70 | def create_from(path): 71 | with open(path) as f: 72 | d = yaml.load(f.read()) 73 | return parse(d) 74 | -------------------------------------------------------------------------------- /pyinfrabox/markup/__init__.py: -------------------------------------------------------------------------------- 1 | from builtins import int, range 2 | 3 | from pyinfrabox import ValidationError 4 | from pyinfrabox.utils import check_allowed_properties, check_required_properties, check_number, 
check_color, check_text 5 | 6 | def check_version(v, path): 7 | if not isinstance(v, int): 8 | raise ValidationError(path, "must be an int") 9 | 10 | if v != 1: 11 | raise ValidationError(path, "unsupported version") 12 | 13 | def parse_heading(d, path): 14 | check_allowed_properties(d, path, ("type", "text")) 15 | check_required_properties(d, path, ("type", "text")) 16 | check_text(d['text'], path + ".text") 17 | 18 | def parse_hline(d, path): 19 | check_allowed_properties(d, path, ("type",)) 20 | check_required_properties(d, path, ("type",)) 21 | 22 | def parse_text(d, path): 23 | check_allowed_properties(d, path, ("type", "text", "emphasis", "color")) 24 | check_required_properties(d, path, ("type", "text")) 25 | 26 | check_text(d['text'], path + ".text") 27 | 28 | if 'emphasis' in d: 29 | if d['emphasis'] not in ("bold", "italic"): 30 | raise ValidationError(path + ".emphasis", "not a valid value") 31 | 32 | if 'color' in d: 33 | check_color(d['color'], path + ".color") 34 | 35 | def parse_icon(d, path): 36 | check_allowed_properties(d, path, ("type", "name", "color")) 37 | check_required_properties(d, path, ("type", "name")) 38 | check_text(d['name'], path + ".name") 39 | 40 | if 'color' in d: 41 | check_color(d['color'], path + ".color") 42 | 43 | def parse_pie(d, path): 44 | check_allowed_properties(d, path, ("type", "data", "name")) 45 | check_required_properties(d, path, ("type", "data", "name")) 46 | check_text(d['name'], path + ".name") 47 | 48 | for i in range(0, len(d['data'])): 49 | elem = d['data'][i] 50 | p = "%s.data[%s]" % (path, i) 51 | 52 | check_allowed_properties(elem, p, ("label", "value", "color")) 53 | check_required_properties(elem, p, ("label", "value", "color")) 54 | 55 | check_text(elem['label'], p + ".label") 56 | check_number(elem['value'], p + ".value") 57 | check_color(elem['color'], p + ".color") 58 | 59 | def parse_ordered_list(d, path): 60 | check_allowed_properties(d, path, ("type", "elements")) 61 | check_required_properties(d, path, ("type", "elements")) 62 | parse_elements(d['elements'], path + ".elements") 63 | 64 | def parse_unordered_list(d, path): 65 | check_allowed_properties(d, path, ("type", "elements")) 66 | check_required_properties(d, path, ("type", "elements")) 67 | parse_elements(d['elements'], path + ".elements") 68 | 69 | def parse_group(d, path): 70 | check_allowed_properties(d, path, ("type", "elements")) 71 | check_required_properties(d, path, ("type", "elements")) 72 | parse_elements(d['elements'], path + ".elements") 73 | 74 | def parse_paragraph(d, path): 75 | check_allowed_properties(d, path, ("type", "elements")) 76 | check_required_properties(d, path, ("type", "elements")) 77 | parse_elements(d['elements'], path + ".elements") 78 | 79 | def parse_grid(d, path): 80 | check_allowed_properties(d, path, ("type", "rows")) 81 | check_required_properties(d, path, ("type", "rows")) 82 | 83 | if not isinstance(d['rows'], list): 84 | raise ValidationError(path + ".rows", "must be an array") 85 | 86 | if not d['rows']: 87 | raise ValidationError(path + ".rows", "must not be empty") 88 | 89 | for i in range(0, len(d['rows'])): 90 | r = d['rows'][i] 91 | parse_elements(r, "%s.rows[%s]" % (path, i)) 92 | 93 | def parse_table(d, path): 94 | check_allowed_properties(d, path, ("type", "rows", "headers")) 95 | check_required_properties(d, path, ("type", "rows")) 96 | 97 | if 'headers' in d: 98 | if not isinstance(d['headers'], list): 99 | raise ValidationError(path + ".headers", "must be an array") 100 | 101 | col_count = len(d['headers']) 102 | 
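# A table that satisfies these checks (values are illustrative): with headers
# present, every row must have exactly col_count cells, and each cell is an
# element with its own "type":
#
#   {"type": "table",
#    "headers": [{"type": "text", "text": "Name"}, {"type": "text", "text": "Status"}],
#    "rows": [[{"type": "text", "text": "job-a"}, {"type": "text", "text": "ok"}]]}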
if col_count == 0: 103 | raise ValidationError(path + ".headers", "must not be empty") 104 | 105 | for i in range(0, col_count): 106 | h = d['headers'][i] 107 | parse_text(h, "%s.headers[%s]" % (path, i)) 108 | 109 | 110 | if not isinstance(d['rows'], list): 111 | raise ValidationError(path + ".rows", "must be an array") 112 | 113 | if not d['rows']: 114 | raise ValidationError(path + ".rows", "must not be empty") 115 | 116 | for i in range(0, len(d['rows'])): 117 | r = d['rows'][i] 118 | p = "%s.rows[%s]" % (path, i) 119 | 120 | if 'headers' in d: 121 | if len(r) != col_count: 122 | raise ValidationError(p, "does not have the correct number of columns") 123 | 124 | parse_elements(r, p) 125 | 126 | 127 | def parse_elements(e, path): 128 | if not isinstance(e, list): 129 | raise ValidationError(path, "must be an array") 130 | 131 | if not e: 132 | raise ValidationError(path, "must not be empty") 133 | 134 | for i in range(0, len(e)): 135 | elem = e[i] 136 | p = "%s[%s]" % (path, i) 137 | 138 | if 'type' not in elem: 139 | raise ValidationError(p, "does not contain a 'type'") 140 | 141 | t = elem['type'] 142 | 143 | if t == 'h1' or t == 'h2' or t == 'h3' or t == 'h4' or t == 'h5': 144 | parse_heading(elem, p) 145 | elif t == 'hline': 146 | parse_hline(elem, p) 147 | elif t == 'paragraph': 148 | parse_paragraph(elem, p) 149 | elif t == 'text': 150 | parse_text(elem, p) 151 | elif t == 'ordered_list': 152 | parse_ordered_list(elem, p) 153 | elif t == 'unordered_list': 154 | parse_unordered_list(elem, p) 155 | elif t == 'group': 156 | parse_group(elem, p) 157 | elif t == 'pie': 158 | parse_pie(elem, p) 159 | elif t == 'grid': 160 | parse_grid(elem, p) 161 | elif t == 'table': 162 | parse_table(elem, p) 163 | elif t == 'icon': 164 | parse_icon(elem, p) 165 | else: 166 | raise ValidationError(p, "type '%s' not supported" % t) 167 | 168 | def parse_document(d): 169 | check_allowed_properties(d, "#", ("version", "title", "elements")) 170 | check_required_properties(d, "#", ("version", "title", "elements")) 171 | 172 | check_version(d['version'], "#version") 173 | check_text(d['title'], "#title") 174 | parse_elements(d['elements'], "#elements") 175 | 176 | def validate_markup(d): 177 | parse_document(d) 178 | -------------------------------------------------------------------------------- /pyinfrabox/test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import sys 3 | import xmlrunner 4 | 5 | if __name__ == '__main__': 6 | s = unittest.defaultTestLoader.discover('.') 7 | r = xmlrunner.XMLTestRunner(output='/infrabox/upload/testresult/').run(s) 8 | sys.exit(not r.wasSuccessful()) 9 | -------------------------------------------------------------------------------- /pyinfrabox/testresult/__init__.py: -------------------------------------------------------------------------------- 1 | from builtins import int, range 2 | 3 | from pyinfrabox import ValidationError 4 | from pyinfrabox.utils import * 5 | 6 | def check_version(v, path): 7 | if not isinstance(v, int): 8 | raise ValidationError(path, "must be an int") 9 | 10 | if v != 1: 11 | raise ValidationError(path, "unsupported version") 12 | 13 | def parse_measurement(d, path): 14 | check_allowed_properties(d, path, ("name", "unit", "value")) 15 | check_required_properties(d, path, ("name", "unit", "value")) 16 | check_text(d['unit'], path + ".unit") 17 | check_text(d['name'], path + ".name") 18 | check_text(d['value'], path + ".value") 19 | 20 | def parse_measurements(e, path): 21 | if not 
isinstance(e, list): 22 | raise ValidationError(path, "must be an array") 23 | 24 | for i in range(0, len(e)): 25 | elem = e[i] 26 | p = "%s[%s]" % (path, i) 27 | parse_measurement(elem, p) 28 | 29 | def parse_t(d, path): 30 | check_allowed_properties(d, path, 31 | ("suite", "name", "status", "duration", "message", 32 | "stack", "measurements")) 33 | check_required_properties(d, path, ("suite", "name", "status", "duration")) 34 | check_text(d['suite'], path + ".suite") 35 | check_text(d['name'], path + ".name") 36 | check_text(d['status'], path + ".status") 37 | check_number(d['duration'], path + ".duration") 38 | 39 | if 'message' in d: 40 | check_text(d['message'], path + ".message") 41 | 42 | if 'stack' in d: 43 | check_text(d['stack'], path + ".stack") 44 | 45 | if 'measurements' in d: 46 | parse_measurements(d['measurements'], path + ".measurements") 47 | 48 | def parse_ts(e, path): 49 | if not isinstance(e, list): 50 | raise ValidationError(path, "must be an array") 51 | 52 | if not e: 53 | raise ValidationError(path, "must not be empty") 54 | 55 | for i in range(0, len(e)): 56 | elem = e[i] 57 | p = "%s[%s]" % (path, i) 58 | parse_t(elem, p) 59 | 60 | def parse_document(d): 61 | check_allowed_properties(d, "#", ("version", "tests")) 62 | check_required_properties(d, "#", ("version", "tests")) 63 | 64 | check_version(d['version'], "#version") 65 | parse_ts(d['tests'], "#tests") 66 | 67 | def validate_result(d): 68 | parse_document(d) 69 | --------------------------------------------------------------------------------
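A minimal test result document accepted by validate_result() above (suite, test name and measurement are illustrative):

    {
        "version": 1,
        "tests": [{
            "suite": "unit",
            "name": "test_parse",
            "status": "ok",
            "duration": 12,
            "measurements": [{"name": "rss", "unit": "mb", "value": "42"}]
        }]
    }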
/pyinfrabox/tests/__init__.py: -------------------------------------------------------------------------------- 1 | import pyinfrabox.tests.test_docker_compose 2 | import pyinfrabox.tests.test_json 3 | import pyinfrabox.tests.test_markup 4 | import pyinfrabox.tests.test_testresult 5 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/empty.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SAP/InfraBox-cli/b08804a1e91f2e454b5a28a21b37317d28081dd7/pyinfrabox/tests/test/empty.yml -------------------------------------------------------------------------------- /pyinfrabox/tests/test/invalid_version.yml: -------------------------------------------------------------------------------- 1 | version: "asd" 2 | services: 3 | test: 4 | image: asd 5 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/no_services.yml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/no_version.yml: -------------------------------------------------------------------------------- 1 | services: 2 | - name: asd 3 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/unsupported_option.yml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | services: 3 | test: 4 | image: asd 5 | expose: 6 | - "3000" 7 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/unsupported_top_level.yml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | services: 3 | blub: 4 | image: asd 5 | blub: asd 6 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/unsupported_version.yml: -------------------------------------------------------------------------------- 1 | version: 3 2 | services: 3 | test: 4 | image: blub 5 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test/valid_1.yml: -------------------------------------------------------------------------------- 1 | version: "3.2" 2 | 3 | services: 4 | postgres: 5 | image: postgres 6 | 7 | test: 8 | build: 9 | context: ../../../../ 10 | dockerfile: src/server/test/test/Dockerfile 11 | command: /project/src/utils/wait-for-postgres.sh -- /project/src/server/test/test/start_test.sh 12 | environment: 13 | - NODE_ENV=test 14 | - INFRABOX_JOB_ID=${INFRABOX_JOB_ID} 15 | links: 16 | - postgres 17 | 18 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test_docker_compose.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | 4 | from pyinfrabox.docker_compose import create_from 5 | 6 | class TestDockerCompose(unittest.TestCase): 7 | def run_exception(self, path, message): 8 | path = os.path.join(os.path.dirname(os.path.realpath(__file__)), path) 9 | 10 | try: 11 | create_from(path) 12 | assert False 13 | except Exception as e: 14 | print(e) 15 | self.assertEqual(str(e), message) 16 | 17 | def test_empty(self): 18 | self.run_exception('./test/empty.yml', 'invalid file') 19 | 20 | def test_no_version(self): 21 | self.run_exception('./test/no_version.yml', 'version not found') 22 | 23 | def test_unsupported_version(self): 24 | self.run_exception('./test/unsupported_version.yml', 'version not supported, supported version is 3.2') 25 | 26 | def test_invalid_version(self): 27 | self.run_exception('./test/invalid_version.yml', 'version not supported, supported version is 3.2') 28 | 29 | def test_unsupported_option(self): 30 | self.run_exception('./test/unsupported_option.yml', '[services][test][expose] not supported') 31 | 32 | def test_no_services(self): 33 | self.run_exception('./test/no_services.yml', 'services not found') 34 | 35 | def test_unsupported_top_level(self): 36 | self.run_exception('./test/unsupported_top_level.yml', '[blub] not supported') 37 | 38 | def test_valid_1(self): 39 | create_from(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'test', 'valid_1.yml')) 40 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test_json.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from pyinfrabox import ValidationError 4 | from pyinfrabox.infrabox import validate_json 5 | 6 | class TestInfraboxJson(unittest.TestCase): 7 | def raises_expect(self, data, expected): 8 | try: 9 | validate_json(data) 10 | assert False 11 | except ValidationError as e: 12 | self.assertEqual(str(e), expected) 13 | 14 | def test_version(self): 15 | self.raises_expect({}, "#: property 'version' is required") 16 | self.raises_expect({'version': 'asd', 'jobs': []}, "#version: must be an int") 17 | self.raises_expect({'version': '1', 'jobs': []}, "#version: must be an int") 18 | self.raises_expect({'version': 2, 'jobs': []}, "#version: unsupported version") 19 | 20 | def test_jobs(self): 21 | self.raises_expect({'version': 1, 'jobs': 'asd'}, "#jobs: must be an array") 22 | self.raises_expect({'version': 1, 'jobs': [{}]}, "#jobs[0]: does not contain a 'type'") 23 | 24 | def test_empty_jobs(self): 25 | validate_json({'version': 1, 'jobs': []}) 26 | 27 | def
test_dep_defined_later(self): 28 | d = { 29 | "version": 1, 30 | "jobs": [{ 31 | "type": "docker", 32 | "name": "source", 33 | "docker_file": "Dockerfile", 34 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 35 | "build_only": False, 36 | "depends_on": ["compile"] 37 | }, { 38 | "type": "docker", 39 | "name": "compile", 40 | "docker_file": "Dockerfile", 41 | "build_only": False, 42 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 43 | }] 44 | } 45 | 46 | validate_json(d) 47 | 48 | def test_dep_detect_circular_dependency(self): 49 | d = { 50 | "version": 1, 51 | "jobs": [{ 52 | "type": "docker", 53 | "name": "a", 54 | "docker_file": "Dockerfile", 55 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 56 | "build_only": False, 57 | "depends_on": ["b"] 58 | }, { 59 | "type": "docker", 60 | "name": "b", 61 | "docker_file": "Dockerfile", 62 | "build_only": False, 63 | "depends_on": ["c"], 64 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 65 | }, { 66 | "type": "docker", 67 | "name": "c", 68 | "docker_file": "Dockerfile", 69 | "build_only": False, 70 | "depends_on": ["a"], 71 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 72 | }] 73 | } 74 | 75 | self.raises_expect(d, "Jobs: Circular dependency detected.") 76 | 77 | def test_dep_not_found(self): 78 | d = { 79 | "version": 1, 80 | "jobs": [{ 81 | "type": "docker", 82 | "name": "compile", 83 | "docker_file": "test/Dockerfile_benchmarks", 84 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 85 | "build_only": False, 86 | "depends_on": ["not_found"] 87 | }] 88 | } 89 | 90 | self.raises_expect(d, "#jobs[0].depends_on: Job 'not_found' not found") 91 | 92 | def test_deps_must_be_unique(self): 93 | d = { 94 | "version": 1, 95 | "jobs": [{ 96 | "type": "docker", 97 | "name": "source", 98 | "docker_file": "Dockerfile", 99 | "build_only": False, 100 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 101 | }, { 102 | "type": "docker", 103 | "name": "compile", 104 | "docker_file": "Dockerfile", 105 | "build_only": False, 106 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 107 | "depends_on": ["source", "source"] 108 | }] 109 | } 110 | 111 | self.raises_expect(d, "#jobs[1].depends_on: 'source' duplicate dependencies") 112 | 113 | def test_duplicate_job_name(self): 114 | d = { 115 | "version": 1, 116 | "jobs": [{ 117 | "type": "docker", 118 | "name": "compile", 119 | "docker_file": "test/Dockerfile_benchmarks", 120 | "build_only": False, 121 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 122 | }, { 123 | "type": "docker", 124 | "name": "compile", 125 | "docker_file": "test/Dockerfile_benchmarks", 126 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 127 | "build_only": False, 128 | "depends_on": ["compile"] 129 | }] 130 | } 131 | 132 | self.raises_expect(d, "#jobs[1].name: Job name 'compile' already exists") 133 | 134 | def test_dependency_conditions(self): 135 | d = { 136 | "version": 1, 137 | "jobs": [{ 138 | "type": "docker", 139 | "name": "compile", 140 | "docker_file": "test/Dockerfile_benchmarks", 141 | "build_only": False, 142 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 143 | }, { 144 | "type": "docker", 145 | "name": "compile2", 146 | "docker_file": "test/Dockerfile_benchmarks", 147 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 148 | "build_only": False, 149 | "depends_on": [{"job": "compile", "on": True}] 150 | }] 151 | } 152 | 153 | self.raises_expect(d, "#jobs[1].depends_on[0].on: must be a list") 154 | 155 | d['jobs'][1]['depends_on'] = [{"job": "compile", "on": []}] 
156 | self.raises_expect(d, "#jobs[1].depends_on[0].on: must not be empty") 157 | 158 | d['jobs'][1]['depends_on'] = [{"job": "compile", "on": [True]}] 159 | self.raises_expect(d, "#jobs[1].depends_on[0].on: True is not a valid value") 160 | 161 | d['jobs'][1]['depends_on'] = [{"job": "compile", "on": ["not valid"]}] 162 | self.raises_expect(d, "#jobs[1].depends_on[0].on: not valid is not a valid value") 163 | 164 | d['jobs'][1]['depends_on'] = [{"job": "not-valid", "on": ["*"]}] 165 | self.raises_expect(d, "#jobs[1].depends_on: Job 'not-valid' not found") 166 | 167 | d['jobs'][1]['depends_on'] = [{"job": "compile", "on": ["error", "error"]}] 168 | self.raises_expect(d, "#jobs[1].depends_on[0].on: error used twice") 169 | 170 | 171 | def test_empty_dep_array(self): 172 | d = { 173 | "version": 1, 174 | "jobs": [{ 175 | "type": "docker", 176 | "name": "compile", 177 | "docker_file": "test/Dockerfile_benchmarks", 178 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 179 | "build_only": False, 180 | "depends_on": [] 181 | }] 182 | } 183 | 184 | self.raises_expect(d, "#jobs[0].depends_on: must not be empty") 185 | 186 | def test_invalid_name(self): 187 | d = { 188 | "version": 1, 189 | "jobs": [{ 190 | "type": "docker", 191 | "name": "../blub", 192 | "docker_file": "Dockerfile", 193 | "build_only": False, 194 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 195 | }] 196 | } 197 | 198 | self.raises_expect(d, "#jobs[0].name: '../blub' not a valid value") 199 | 200 | d = { 201 | "version": 1, 202 | "jobs": [{ 203 | "type": "docker", 204 | "name": "blub'", 205 | "docker_file": "Dockerfile", 206 | "build_only": False, 207 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 208 | }] 209 | } 210 | 211 | self.raises_expect(d, "#jobs[0].name: 'blub\'' not a valid value") 212 | 213 | def test_may_not_depend_on_itself(self): 214 | d = { 215 | "version": 1, 216 | "jobs": [{ 217 | "type": "docker", 218 | "name": "compile", 219 | "docker_file": "Dockerfile", 220 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 221 | "build_only": False, 222 | "depends_on": ["compile"] 223 | }] 224 | } 225 | 226 | self.raises_expect(d, "#jobs[0]: Job 'compile' may not depend on itself") 227 | 228 | def test_may_not_create_jobs(self): 229 | d = { 230 | "version": 1, 231 | "jobs": [{ 232 | "type": "docker", 233 | "name": "Create Jobs", 234 | "docker_file": "test/Dockerfile_benchmarks", 235 | "build_only": False, 236 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 237 | }] 238 | } 239 | 240 | self.raises_expect(d, "#jobs[0].name: 'Create Jobs' not a valid value") 241 | 242 | def test_environment(self): 243 | d = { 244 | "version": 1, 245 | "jobs": [{ 246 | "type": "docker", 247 | "name": "test", 248 | "docker_file": "Dockerfile", 249 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 250 | "environment": None 251 | }] 252 | } 253 | 254 | self.raises_expect(d, "#jobs[0].environment: must be an object") 255 | 256 | d['jobs'][0]['environment'] = [] 257 | self.raises_expect(d, "#jobs[0].environment: must be an object") 258 | 259 | d['jobs'][0]['environment'] = {'key': 123} 260 | self.raises_expect(d, "#jobs[0].environment.key: must be a string or object") 261 | 262 | d['jobs'][0]['environment'] = {'key': {}} 263 | self.raises_expect(d, "#jobs[0].environment.key: must contain a $secret") 264 | 265 | d['jobs'][0]['environment'] = {'key': {'$secret': None}} 266 | self.raises_expect(d, "#jobs[0].environment.key.$secret: is not a string") 267 | 268 | d['jobs'][0]['environment'] = {} 269 | validate_json(d) 
270 | 271 | def test_deployments(self): 272 | d = { 273 | "version": 1, 274 | "jobs": [{ 275 | "type": "docker", 276 | "name": "test", 277 | "docker_file": "Dockerfile", 278 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 279 | "deployments": None 280 | }] 281 | } 282 | 283 | self.raises_expect(d, "#jobs[0].deployments: must be an array") 284 | 285 | d['jobs'][0]['deployments'] = [] 286 | self.raises_expect(d, "#jobs[0].deployments: must not be empty") 287 | 288 | d['jobs'][0]['deployments'] = [{}] 289 | self.raises_expect(d, "#jobs[0].deployments[0]: does not contain a 'type'") 290 | 291 | d['jobs'][0]['deployments'] = [{'type': 'unknown'}] 292 | self.raises_expect(d, "#jobs[0].deployments[0]: type 'unknown' not supported") 293 | 294 | d['jobs'][0]['deployments'] = [{'type': 'docker-registry', 'host': 'hostname', 295 | 'repository': 'repo', 'username': 'user', 'password': 'value'}] 296 | self.raises_expect(d, "#jobs[0].deployments[0].password: must be an object") 297 | 298 | d['jobs'][0]['deployments'] = [{'type': 'docker-registry', 'host': 'hostname', 'repository': 'repo', 299 | 'username': 'user', 'password': {'$secret': 'blub'}}] 300 | validate_json(d) 301 | 302 | def test_build_arguments(self): 303 | d = { 304 | "version": 1, 305 | "jobs": [{ 306 | "type": "docker", 307 | "name": "test", 308 | "docker_file": "Dockerfile", 309 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 310 | "build_arguments": None 311 | }] 312 | } 313 | 314 | self.raises_expect(d, "#jobs[0].build_arguments: must be an object") 315 | 316 | d['jobs'][0]['build_arguments'] = [] 317 | self.raises_expect(d, "#jobs[0].build_arguments: must be an object") 318 | 319 | d['jobs'][0]['build_arguments'] = {'key': 123} 320 | self.raises_expect(d, "#jobs[0].build_arguments.key: is not a string") 321 | 322 | d['jobs'][0]['build_arguments'] = {'key': {}} 323 | self.raises_expect(d, "#jobs[0].build_arguments.key: is not a string") 324 | 325 | d['jobs'][0]['build_arguments'] = {} 326 | validate_json(d) 327 | 328 | def test_valid(self): 329 | d = { 330 | "version": 1, 331 | "jobs": [{ 332 | "type": "docker", 333 | "name": "compile", 334 | "docker_file": "test/Dockerfile_benchmarks", 335 | "build_only": False, 336 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 337 | }, { 338 | "type": "docker", 339 | "name": "benchmark_server", 340 | "docker_file": "test/Dockerfile_benchmarks", 341 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 342 | "build_only": False, 343 | "depends_on": ["compile"] 344 | }, { 345 | "type": "docker", 346 | "name": "test_server", 347 | "docker_file": "test/Dockerfile_test_server", 348 | "resources": {"limits": {"cpu": 1, "memory": 1024}}, 349 | "build_only": False, 350 | "depends_on": ["compile"] 351 | }] 352 | } 353 | 354 | validate_json(d) 355 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test_markup.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import unittest 4 | 5 | from pyinfrabox.markup import parse_document, parse_text, parse_ordered_list 6 | from pyinfrabox.markup import parse_unordered_list, parse_group, parse_paragraph 7 | from pyinfrabox.markup import parse_grid, parse_table 8 | from pyinfrabox import ValidationError 9 | 10 | class TestMarkup(unittest.TestCase): 11 | def raises_expect(self, data, expected): 12 | try: 13 | parse_document(data) 14 | assert False 15 | except ValidationError as e: 16 | self.assertEqual(e.message, expected) 17 | 
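    # For reference, a minimal markup document that parse_document accepts;
    # this is an illustrative sketch, and every type and property used here
    # appears in the test cases below:
    #
    #   {
    #       "version": 1,
    #       "title": "Results",
    #       "elements": [
    #           {"type": "h1", "text": "Summary"},
    #           {"type": "text", "text": "all green", "color": "green", "emphasis": "bold"}
    #       ]
    #   }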
18 | 19 | def test_version(self): 20 | self.raises_expect({}, "#: property 'version' is required") 21 | self.raises_expect({'version': 'asd', 'elements': [], 'title': "t"}, "#version: must be an int") 22 | self.raises_expect({'version': '1', 'elements': [], 'title': "t"}, "#version: must be an int") 23 | self.raises_expect({'version': 2, 'elements': [], 'title': "t"}, "#version: unsupported version") 24 | 25 | def test_title(self): 26 | self.raises_expect({'version': 1}, "#: property 'title' is required") 27 | self.raises_expect({'version': 1, 'elements': [], 'title': 123}, "#title: is not a string") 28 | self.raises_expect({'version': 1, 'elements': [], 'title': ""}, "#title: empty string not allowed") 29 | 30 | def test_elements(self): 31 | self.raises_expect({'version': 1, 'title': 'title'}, "#: property 'elements' is required") 32 | self.raises_expect({'version': 1, 'title': 'title', 'elements': 'asd'}, "#elements: must be an array") 33 | self.raises_expect({'version': 1, 'title': 'title', 'elements': []}, "#elements: must not be empty") 34 | self.raises_expect({'version': 1, 'title': 'title', 'elements': [{}]}, 35 | "#elements[0]: does not contain a 'type'") 36 | 37 | def test_heading(self): 38 | for h in ("h1", "h2", "h3", "h4", "h5"): 39 | d = {'version': 1, 'title': 'title', 'elements': []} 40 | 41 | d['elements'] = [{'type': h}] 42 | self.raises_expect(d, "#elements[0]: property 'text' is required") 43 | 44 | d['elements'] = [{'type': h, "text": ""}] 45 | self.raises_expect(d, "#elements[0].text: empty string not allowed") 46 | 47 | d['elements'] = [{'type': h, "text": 1}] 48 | self.raises_expect(d, "#elements[0].text: is not a string") 49 | 50 | def test_unsupported_type(self): 51 | d = {'version': 1, 'title': 'title', 'elements': []} 52 | d['elements'] = [{'type': 'somethingweird'}] 53 | self.raises_expect(d, "#elements[0]: type 'somethingweird' not supported") 54 | 55 | def test_hline(self): 56 | d = {'version': 1, 'title': 'title', 'elements': []} 57 | 58 | d['elements'] = [{'type': 'hline', "addition": True}] 59 | self.raises_expect(d, "#elements[0]: invalid property 'addition'") 60 | 61 | 62 | def test_text(self): 63 | def raises_expect_text(data, expected): 64 | try: 65 | parse_text(data, "#") 66 | assert False 67 | except ValidationError as e: 68 | self.assertEqual(e.message, expected) 69 | 70 | raises_expect_text({}, "#: property 'type' is required") 71 | raises_expect_text({"type": "text"}, "#: property 'text' is required") 72 | raises_expect_text({"type": "text", "text": 123}, "#.text: is not a string") 73 | raises_expect_text({"type": "text", "text": ""}, "#.text: empty string not allowed") 74 | raises_expect_text({"type": "text", "text": "t", "color": "dunno"}, "#.color: not a valid value") 75 | raises_expect_text({"type": "text", "text": "t", "emphasis": "dunno"}, "#.emphasis: not a valid value") 76 | 77 | parse_text({"type": "text", "text": "t", "color": "red", "emphasis": "bold"}, "") 78 | 79 | d = {'version': 1, 'title': 'title', 'elements': [{"type": "text", "text": "t", 80 | "color": "red", "emphasis": "bold"}]} 81 | parse_document(d) 82 | 83 | def test_ordered_list(self): 84 | def raises_expect_list(data, expected): 85 | try: 86 | parse_ordered_list(data, "#") 87 | assert False 88 | except ValidationError as e: 89 | self.assertEqual(e.message, expected) 90 | 91 | raises_expect_list({}, "#: property 'type' is required") 92 | raises_expect_list({"type": "ordered_list"}, "#: property 'elements' is required") 93 | raises_expect_list({"type": "ordered_list", 
"elements": 123}, "#.elements: must be an array") 94 | raises_expect_list({"type": "ordered_list", "elements": []}, "#.elements: must not be empty") 95 | 96 | parse_ordered_list({"type": "ordered_list", "elements": [{"type": "hline"}]}, "") 97 | 98 | d = {'version': 1, 'title': 'title', 'elements': [{"type": "ordered_list", "elements": [{"type": "hline"}]}]} 99 | parse_document(d) 100 | 101 | def test_unordered_list(self): 102 | def raises_expect_list(data, expected): 103 | try: 104 | parse_unordered_list(data, "#") 105 | assert False 106 | except ValidationError as e: 107 | self.assertEqual(e.message, expected) 108 | 109 | raises_expect_list({}, "#: property 'type' is required") 110 | raises_expect_list({"type": "unordered_list"}, "#: property 'elements' is required") 111 | raises_expect_list({"type": "unordered_list", "elements": 123}, "#.elements: must be an array") 112 | raises_expect_list({"type": "unordered_list", "elements": []}, "#.elements: must not be empty") 113 | 114 | parse_ordered_list({"type": "unordered_list", "elements": [{"type": "hline"}]}, "") 115 | 116 | d = {'version': 1, 'title': 'title', 'elements': [{"type": "unordered_list", "elements": [{"type": "hline"}]}]} 117 | parse_document(d) 118 | 119 | def test_group(self): 120 | def raises_expect_group(data, expected): 121 | try: 122 | parse_unordered_list(data, "#") 123 | assert False 124 | except ValidationError as e: 125 | self.assertEqual(e.message, expected) 126 | 127 | raises_expect_group({}, "#: property 'type' is required") 128 | raises_expect_group({"type": "group"}, "#: property 'elements' is required") 129 | raises_expect_group({"type": "group", "elements": 123}, "#.elements: must be an array") 130 | raises_expect_group({"type": "group", "elements": []}, "#.elements: must not be empty") 131 | 132 | parse_group({"type": "group", "elements": [{"type": "hline"}]}, "") 133 | 134 | d = {'version': 1, 'title': 'title', 'elements': [{"type": "group", "elements": [{"type": "hline"}]}]} 135 | parse_document(d) 136 | 137 | def test_paragraph(self): 138 | def raises_expect_p(data, expected): 139 | try: 140 | parse_unordered_list(data, "#") 141 | assert False 142 | except ValidationError as e: 143 | self.assertEqual(e.message, expected) 144 | 145 | raises_expect_p({}, "#: property 'type' is required") 146 | raises_expect_p({"type": "paragraph"}, "#: property 'elements' is required") 147 | raises_expect_p({"type": "paragraph", "elements": 123}, "#.elements: must be an array") 148 | raises_expect_p({"type": "paragraph", "elements": []}, "#.elements: must not be empty") 149 | 150 | parse_paragraph({"type": "paragraph", "elements": [{"type": "hline"}]}, "") 151 | 152 | d = {'version': 1, 'title': 'title', 'elements': [{"type": "paragraph", "elements": [{"type": "hline"}]}]} 153 | parse_document(d) 154 | 155 | def test_grid(self): 156 | def raises_expect_grid(data, expected): 157 | try: 158 | parse_grid(data, "#") 159 | assert False 160 | except ValidationError as e: 161 | self.assertEqual(e.message, expected) 162 | 163 | raises_expect_grid({}, "#: property 'type' is required") 164 | raises_expect_grid({"type": "grid"}, "#: property 'rows' is required") 165 | raises_expect_grid({"type": "grid", "rows": 123}, "#.rows: must be an array") 166 | raises_expect_grid({"type": "grid", "rows": []}, "#.rows: must not be empty") 167 | raises_expect_grid({"type": "grid", "rows": [{"type": "hline"}]}, "#.rows[0]: must be an array") 168 | 169 | parse_grid({"type": "grid", "rows": [[{"type": "hline"}]]}, "") 170 | 171 | d = {'version': 1, 
'title': 'title', 172 | 'elements': [{"type": "grid", "rows": [[{"type": "hline"}], [{"type": "hline"}]]}]} 173 | parse_document(d) 174 | 175 | def test_table(self): 176 | def raises_expect_table(data, expected): 177 | try: 178 | parse_table(data, "#") 179 | assert False 180 | except ValidationError as e: 181 | self.assertEqual(e.message, expected) 182 | 183 | raises_expect_table({}, "#: property 'type' is required") 184 | raises_expect_table({"type": "table"}, "#: property 'rows' is required") 185 | raises_expect_table({"type": "table", "rows": 123, "headers": [{"type": "text", "text": "t"}]}, 186 | "#.rows: must be an array") 187 | 188 | raises_expect_table({"type": "table", "rows": [], "headers": [{"type": "text", "text": "t"}]}, 189 | "#.rows: must not be empty") 190 | raises_expect_table({"type": "table", "rows": [{"type": "hline"}], "headers": [{"type": "text", "text": "t"}]}, 191 | "#.rows[0]: must be an array") 192 | raises_expect_table({"type": "table", "rows": [[{"type": "hline"}]], 193 | "headers": [[{"type": "text", "text": " "}]]}, 194 | "#.headers[0]: must be an object") 195 | 196 | parse_table({"type": "table", "rows": [[{"type": "hline"}]], "headers": [{"type": "text", "text": " "}]}, "") 197 | 198 | d = {'version': 1, 'title': 'title', 'elements': [ 199 | {"type": "table", "rows": [[{"type": "hline"}]], "headers": [{"type": "text", "text": " "}]} 200 | ]} 201 | parse_document(d) 202 | 203 | def test_valid(self): 204 | p = os.path.dirname(os.path.realpath(__file__)) 205 | fp = os.path.join(p, "./test/valid_markup.json") 206 | with open(fp) as f: 207 | d = json.load(f) 208 | parse_document(d) 209 | -------------------------------------------------------------------------------- /pyinfrabox/tests/test_testresult.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from pyinfrabox.testresult import validate_result 4 | from pyinfrabox import ValidationError 5 | 6 | class TestTestResult(unittest.TestCase): 7 | def raises_expect(self, data, expected): 8 | try: 9 | validate_result(data) 10 | assert False 11 | except ValidationError as e: 12 | self.assertEqual(e.message, expected) 13 | 14 | def test_version(self): 15 | self.raises_expect({}, "#: property 'version' is required") 16 | self.raises_expect({'version': 'asd', 'tests': []}, "#version: must be an int") 17 | self.raises_expect({'version': '1', 'tests': []}, "#version: must be an int") 18 | self.raises_expect({'version': 2, 'tests': []}, "#version: unsupported version") 19 | 20 | def test_ts(self): 21 | self.raises_expect({'version': 1}, "#: property 'tests' is required") 22 | self.raises_expect({'version': 1, 'tests': 'asd'}, "#tests: must be an array") 23 | self.raises_expect({'version': 1, 'tests': []}, "#tests: must not be empty") 24 | -------------------------------------------------------------------------------- /pyinfrabox/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import errno 3 | import uuid 4 | 5 | from builtins import int, range, str 6 | from past.builtins import basestring 7 | 8 | from pyinfrabox import ValidationError 9 | 10 | try: 11 | #python2 12 | from urlparse import urlparse 13 | except ImportError: 14 | #python3 15 | from urllib.parse import urlparse 16 | 17 | 18 | def check_text(t, path, allowEmpty=False): 19 | if not isinstance(t, basestring): 20 | raise ValidationError(path, "is not a string") 21 | 22 | if not allowEmpty and not t: 23 | raise ValidationError(path, "empty string not allowed")
allowed") 24 | 25 | 26 | def check_allowed_properties(d, path, allowed): 27 | if not isinstance(d, dict): 28 | raise ValidationError(path, "must be an object") 29 | 30 | for key in d: 31 | if key not in allowed: 32 | if 'name' in d.keys(): 33 | raise ValidationError('%s(%s)' % (path, d['name']), "invalid property '%s'" % key) 34 | else: 35 | raise ValidationError(path, "invalid property '%s'" % key) 36 | 37 | 38 | def check_required_properties(d, path, required): 39 | if not isinstance(d, dict): 40 | raise ValidationError(path, "must be an object") 41 | 42 | for key in required: 43 | if key not in d: 44 | if 'name' in d.keys(): 45 | raise ValidationError('%s(%s)' % (path, d['name']), "property '%s' is required" % key) 46 | else: 47 | raise ValidationError(path, "property '%s' is required" % key) 48 | 49 | def check_string_array(e, path): 50 | if not isinstance(e, list): 51 | raise ValidationError(path, "must be an array") 52 | 53 | if not e: 54 | raise ValidationError(path, "must not be empty") 55 | 56 | for i in range(0, len(e)): 57 | elem = e[i] 58 | path = "%s[%s]" % (path, i) 59 | check_text(elem, path) 60 | 61 | 62 | def check_boolean(d, path): 63 | if not isinstance(d, bool): 64 | raise ValidationError(path, "must be a boolean") 65 | 66 | 67 | def check_number(d, path): 68 | if not isinstance(d, int): 69 | raise ValidationError(path, "must be a integer") 70 | 71 | 72 | def check_int_or_float(d, path): 73 | if not isinstance(d, float) and not isinstance(d, int): 74 | raise ValidationError(path, "must be a float") 75 | 76 | 77 | def check_color(d, path): 78 | if d not in ("red", "green", "blue", "yellow", "orange", "white", "black", 79 | "grey"): 80 | raise ValidationError(path, "not a valid value") 81 | 82 | 83 | def get_remote_url(url): 84 | parsed_url = urlparse(url) 85 | return parsed_url.scheme + '://' + parsed_url.netloc 86 | 87 | 88 | def validate_url(url): 89 | try: 90 | result = urlparse(url) 91 | return result.scheme and result.netloc 92 | except: 93 | return False 94 | 95 | 96 | def validate_uuid(uuid_string): 97 | try: 98 | val = uuid.UUID(uuid_string) 99 | except ValueError: 100 | return False 101 | 102 | return val.hex == uuid_string.replace('-', '') 103 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | universal=1 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | def readme(): 4 | with open('README.md') as f: 5 | return f.read() 6 | 7 | setup(name='infraboxcli', 8 | version='0.9.0', 9 | url='https://github.com/infrabox/cli', 10 | description='Command Line Interface for InfraBox', 11 | long_description=readme(), 12 | long_description_content_type="text/markdown", 13 | author='infrabox', 14 | license='MIT', 15 | packages=['infraboxcli', 16 | 'infraboxcli.dashboard', 17 | 'pyinfrabox', 18 | 'pyinfrabox.infrabox', 19 | 'pyinfrabox.badge', 20 | 'pyinfrabox.docker_compose', 21 | 'pyinfrabox.markup', 22 | 'pyinfrabox.testresult'], 23 | install_requires=[ 24 | 'future', 25 | 'jsonschema==2.6.0', 26 | 'requests', 27 | 'colorama', 28 | 'socketIO_client', 29 | 'PyJWT', 30 | 'cryptography', 31 | 'inquirer', 32 | 'pyyaml' 33 | ], 34 | classifiers=[ 35 | 'Development Status :: 4 - Beta', 36 | 'Intended Audience :: Developers', 37 | 'Topic :: Software Development :: Build 
38 | 'License :: OSI Approved :: Apache Software License', 39 | 'Programming Language :: Python :: 2', 40 | 'Programming Language :: Python :: 2.7', 41 | 'Programming Language :: Python :: 3', 42 | 'Programming Language :: Python :: 3.5', 43 | 'Programming Language :: Python :: 3.6', 44 | ], 45 | scripts=['bin/infrabox'], 46 | include_package_data=True, 47 | zip_safe=False) 48 | --------------------------------------------------------------------------------
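As a closing usage sketch, here is the smallest job definition accepted by the bundled validator, taken directly from the valid cases exercised in pyinfrabox/tests/test_json.py above (validate_json raises a ValidationError when the document is invalid):

    from pyinfrabox.infrabox import validate_json

    # a single docker job; field names and values mirror the test suite's valid cases
    validate_json({
        "version": 1,
        "jobs": [{
            "type": "docker",
            "name": "compile",
            "docker_file": "Dockerfile",
            "build_only": False,
            "resources": {"limits": {"cpu": 1, "memory": 1024}}
        }]
    })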