├── .ci-operator.yaml ├── .gitattributes ├── .gitignore ├── LICENSE ├── Makefile ├── OWNERS ├── README.md ├── ansible ├── rebuild_module.digest ├── rebuild_module.sh ├── roles │ └── openshift_client_python │ │ ├── library │ │ ├── openshift_client_python.py │ │ └── openshift_client_python.template.py │ │ ├── meta │ │ └── main.yml │ │ ├── readme.md │ │ └── tasks │ │ └── main.yml ├── run_sample_playbook.sh └── sample_playbook.yml ├── docs └── PACKAGING.md ├── doctoc.sh ├── examples ├── cluster_tests.py ├── coverage.py ├── custom_apiobjects.py ├── dump.py ├── ephemeral_project.py ├── etcd_status.py ├── exception_tracking.py ├── login.py ├── modify_and_apply.py ├── multiple_contexts.py ├── oc_action.py ├── quotas.py ├── report.py ├── simple.py └── templates.py ├── hack └── verify-ansible-module.sh ├── images └── Dockerfile ├── jobs └── ci │ └── pr.groovy ├── lambda ├── model │ └── python │ │ └── model.py └── package.sh ├── packages ├── monitoring.py └── openshift_client │ ├── __init__.py │ ├── action.py │ ├── ansible.py │ ├── apiobject.py │ ├── base_verbs.py │ ├── config.py │ ├── context.py │ ├── decorators.py │ ├── model.py │ ├── naming.py │ ├── result.py │ ├── selector.py │ ├── status.py │ ├── test_apiobject.py │ ├── test_model.py │ ├── test_selector.py │ ├── test_util.py │ └── util.py ├── pyproject.toml ├── requirements.txt └── run_unittests.sh /.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | namespace: openshift 3 | name: release 4 | tag: golang-1.18 5 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Tell github that this file is generated and can be collapsed by default 2 | ansible/roles/openshift_client_python/library/openshift_client_python.py linguist-generated -------------------------------------------------------------------------------- 
/.gitignore: -------------------------------------------------------------------------------- 1 | .idea/ 2 | *.pyc 3 | *.zip 4 | examples/dumps 5 | dist/ 6 | packages/openshift_client.egg-info/ 7 | build/ 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2020 Red Hat, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 
183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | ./run_unittests.sh 3 | .PHONY: test 4 | 5 | clean: 6 | find . -type f -name '*.py[co]' -delete -o -type d -name __pycache__ -delete 7 | rm -rf dist packages/openshift_client.egg-info build 8 | .PHONY: clean 9 | 10 | release: clean 11 | python -m build 12 | .PHONY: release 13 | 14 | publish-testpypi: 15 | twine upload --repository testpypi dist/* 16 | .PHONY: publish-testpypi 17 | 18 | publish-pypi: 19 | twine upload --repository pypi dist/* 20 | .PHONY: publish-pypi 21 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | approvers: 2 | - jupierce 3 | - bradmwilliams 4 | reviewers: 5 | - jupierce 6 | - bradmwilliams 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Openshift Python Client 2 | 3 | 4 | 5 | 6 | 7 | **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* 8 | 9 | - [Overview](#overview) 10 | - [Reader Prerequisites](#reader-prerequisites) 11 | - [Setup](#setup) 12 | - [Prerequisites](#prerequisites) 13 | - [Installation Instructions](#installation-instructions) 14 | - [Using PIP](#using-pip) 15 | - [For development](#for-development) 16 | - 
[Usage](#usage) 17 | - [Quickstart](#quickstart) 18 | - [Selectors](#selectors) 19 | - [APIObjects](#apiobjects) 20 | - [Making changes to APIObjects](#making-changes-to-apiobjects) 21 | - [Running within a Pod](#running-within-a-pod) 22 | - [Tracking oc invocations](#tracking-oc-invocations) 23 | - [Time limits](#time-limits) 24 | - [Advanced contexts](#advanced-contexts) 25 | - [Something missing?](#something-missing) 26 | - [Running oc on a bastion host](#running-oc-on-a-bastion-host) 27 | - [Gathering reports and logs with selectors](#gathering-reports-and-logs-with-selectors) 28 | - [Advanced verbs:](#advanced-verbs) 29 | - [Examples](#examples) 30 | - [Environment Variables](#environment-variables) 31 | - [Defaults when invoking `oc`](#defaults-when-invoking-oc) 32 | - [Master timeout](#master-timeout) 33 | - [SSH Client Host](#ssh-client-host) 34 | 35 | 36 | 37 | ## Overview 38 | 39 | The [openshift-client-python](https://www.github.com/openshift/openshift-client-python) library aims to provide a readable, concise, comprehensive, and fluent 40 | API for rich interactions with an [OpenShift](https://www.openshift.com) cluster. Unlike other clients, this library exclusively uses the command 41 | line tool (oc) to achieve the interactions. This approach comes with important benefits and disadvantages when compared 42 | to other client libraries. 43 | 44 | Pros: 45 | 46 | - No additional software needs to be installed on the cluster. If a system with python support can (1) invoke `oc` 47 | locally OR (2) ssh to a host and invoke `oc`, you can use the library. 48 | - Portable. If you have python and `oc` working, you don't need to worry about OpenShift versions or machine architectures. 49 | - Custom resources are supported and treated just like any other resource. There is no need to generate code to support them. 50 | - Quick to learn. If you understand the `oc` command line interface, you can use this library. 
51 | 52 | Cons: 53 | 54 | - This API is not intended to implement something as complex as a controller. For example, it does not implement 55 | watch functionality. If you can't imagine accomplishing your use case through CLI interactions, this API is probably 56 | not the right starting point for it. 57 | - If you care about whether a REST API returns a particular error code, this API is probably not for you. Since it 58 | is based on the CLI, high level return codes are used to determine success or failure. 59 | 60 | ## Reader Prerequisites 61 | 62 | * Familiarity with OpenShift [command line interface](https://docs.okd.io/latest/cli_reference/index.html) 63 | is highly encouraged before exploring the API's features. The API leverages 64 | the [oc](https://docs.okd.io/latest/cli_reference/openshift_cli/getting-started-cli.html#cli-getting-started) 65 | binary and, in many cases, passes method arguments directly on to the command line. This document cannot, therefore, 66 | provide a complete description of all possible OpenShift interactions -- the user may need to reference 67 | the CLI documentation to find the pass-through arguments a given interaction requires. 68 | 69 | * A familiarity with Python is assumed. 70 | 71 | ## Setup 72 | 73 | ### Prerequisites 74 | 75 | 1. Download and install the OpenShift [command-line Tools](https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/) needed to access your OpenShift cluster. 76 | 77 | ### Installation Instructions 78 | 79 | #### Using PIP 80 | 81 | 1. Install the `openshift-client` module from PyPI. 82 | ```bash 83 | sudo pip install openshift-client 84 | ``` 85 | 86 | #### For development 87 | 88 | 1. Git clone https://github.com/openshift/openshift-client-python.git (or your fork). 89 | 2. Add required libraries 90 | ```bash 91 | sudo pip install -r requirements.txt 92 | ``` 93 | 3. Append ./packages to your PYTHONPATH environment variable (e.g. export PYTHONPATH=$(pwd)/packages:$PYTHONPATH). 94 | 4. 
Write and run your python script! 95 | 96 | ## Usage 97 | 98 | ### Quickstart 99 | 100 | Any standard Python application should be able to use the API if it imports the openshift package. The simplest 101 | possible way to begin using the API is login to your target cluster before running your first application. 102 | 103 | Can you run `oc project` successfully from the command line? Then write your app! 104 | 105 | ```python 106 | #!/usr/bin/python 107 | import openshift_client as oc 108 | 109 | print('OpenShift client version: {}'.format(oc.get_client_version())) 110 | print('OpenShift server version: {}'.format(oc.get_server_version())) 111 | 112 | # Set a project context for all inner `oc` invocations and limit execution to 10 minutes 113 | with oc.project('openshift-infra'), oc.timeout(10 * 60): 114 | # Print the list of qualified pod names (e.g. ['pod/xyz', 'pod/abc', ...] in the current project 115 | print('Found the following pods in {}: {}'.format(oc.get_project_name(), oc.selector('pods').qnames())) 116 | 117 | # Read in the current state of the pod resources and represent them as python objects 118 | for pod_obj in oc.selector('pods').objects(): 119 | 120 | # The APIObject class exposes several convenience methods for interacting with objects 121 | print('Analyzing pod: {}'.format(pod_obj.name())) 122 | pod_obj.print_logs(timestamps=True, tail=15) 123 | 124 | # If you need access to the underlying resource definition, get a Model instance for the resource 125 | pod_model = pod_obj.model 126 | 127 | # Model objects enable dot notation and allow you to navigate through resources 128 | # to an arbitrary depth without checking if any ancestor elements exist. 129 | # In the following example, there is no need for boilerplate like: 130 | # `if .... 'ownerReferences' in pod_model['metadata'] ....` 131 | # Fields that do not resolve will always return oc.Missing which 132 | # is a singleton and can also be treated as an empty dict. 
133 | for owner in pod_model.metadata.ownerReferences: # ownerReferences == oc.Missing if not present in resource 134 | # elements of a Model are also instances of Model or ListModel 135 | if owner.kind is not oc.Missing: # Compare as singleton 136 | print(' pod owned by a {}'.format(owner.kind)) # e.g. pod was created by a StatefulSet 137 | 138 | ``` 139 | 140 | ### Selectors 141 | 142 | Selectors are a central concept used by the API to interact with collections 143 | of OpenShift resources. As the name implies, a "selector" selects zero or 144 | more resources on a server which satisfy user specified criteria. An apt 145 | metaphor for a selector might be a prepared SQL statement which can be 146 | used again and again to select rows from a database. 147 | 148 | ```python 149 | # Create a selector which selects all projects. 150 | project_selector = oc.selector("projects") 151 | 152 | # Print the qualified name (i.e. "kind/name") of each resource selected. 153 | print("Project names: " + project_selector.qnames()) 154 | 155 | # Count the number of projects on the server. 156 | print("Number of projects: " + project_selector.count_existing()) 157 | 158 | # Selectors can also be created with a list of names. 159 | sa_selector = oc.selector(["serviceaccount/deployer", "serviceaccount/builder"]) 160 | 161 | # Performing an operation will act on all selected resources. In this case, 162 | # both serviceaccounts are labeled. 163 | sa_selector.label({"mylabel": "myvalue"}) 164 | 165 | # Selectors can also select based on kind and labels. 166 | sa_label_selector = oc.selector("sa", labels={"mylabel": "myvalue"}) 167 | 168 | # We should find the service accounts we just labeled. 169 | print("Found labeled serviceaccounts: " + sa_label_selector.names()) 170 | 171 | # Create a selector for a set of kinds. 
172 | print(oc.selector(['dc', 'daemonset']).describe()) 173 | ``` 174 | 175 | The output should look something like this: 176 | 177 | ``` 178 | Project names: [u'projects/default', u'projects/kube-system', u'projects/myproject', u'projects/openshift', u'projects/openshift-infra', u'projects/temp-1495937701365', u'projects/temp-1495937860505', u'projects/temp-1495937908009'] 179 | Number of projects: 8 180 | Found labeled serviceaccounts: [u'serviceaccounts/builder', u'serviceaccounts/deployer'] 181 | ``` 182 | 183 | ### APIObjects 184 | 185 | Selectors allow you to perform "verb" level operations on a set of objects, but 186 | what if you want to interact objects at a schema level? 187 | 188 | ```python 189 | projects_sel = oc.selector("projects") 190 | 191 | # .objects() will perform the selection and return a list of APIObjects 192 | # which model the selected resources. 193 | projects = projects_sel.objects() 194 | 195 | print("Selected " + len(projects) + " projects") 196 | 197 | # Let's store one of the project APIObjects for easy access. 198 | project = projects[0] 199 | 200 | # The APIObject exposes methods providing simple access to metadata and common operations. 201 | print('The project is: {}/{}'.format(project.kind(), project.name())) 202 | project.label({'mylabel': 'myvalue'}) 203 | 204 | # And the APIObject allow you to interact with an object's data via the 'model' attribute. 205 | # The Model is similar to a standard dict, but also allows dot notation to access elements 206 | # of the structured data. 207 | print('Annotations:\n{}\n'.format(project.model.metadata.annotations)) 208 | 209 | # There is no need to perform the verbose 'in' checking you may be familiar with when 210 | # exploring a Model object. Accessing Model attributes will always return a value. If the 211 | # any component of a path into the object does not exist in the underlying model, the 212 | # singleton 'Missing' will be returned. 
213 | 214 | if project.model.metadata.annotations.myannotation is oc.Missing: 215 | print("This object has not been annotated yet") 216 | 217 | # If a field in the model contains special characters, use standard Python notation 218 | # to access the key instead of dot notation. 219 | if project.model.metadata.annotations['my-annotation'] is oc.Missing: 220 | print("This object has not been annotated yet") 221 | 222 | # For debugging, you can always see the state of the underlying model by printing the 223 | # APIObject as JSON. 224 | print('{}'.format(project.as_json())) 225 | 226 | # Or getting deep copy dict. Changes made to this dict will not affect the APIObject. 227 | d = project.as_dict() 228 | 229 | # Model objects also simplify looking through kubernetes style lists. For example, can_match 230 | # returns True if the modeled list contains an object with the subset of attributes specified. 231 | # If this example, we are checking if the a node's kubelet is reporting Ready: 232 | oc.selector('node/alpha').object().model.status.conditions.can_match( 233 | { 234 | 'type': 'Ready', 235 | 'status': "True", 236 | } 237 | ) 238 | 239 | 240 | # can_match can also ensure nest objects and list are present within a resource. Several 241 | # of these types of checks are already implemented in the openshift.status module. 242 | def is_route_admitted(apiobj): 243 | return apiobj.model.status.can_match({ 244 | 'ingress': [ 245 | { 246 | 'conditions': [ 247 | { 248 | 'type': 'Admitted', 249 | 'status': 'True', 250 | } 251 | ] 252 | } 253 | ] 254 | }) 255 | ``` 256 | 257 | ### Making changes to APIObjects 258 | 259 | ```python 260 | # APIObject exposes simple interfaces to delete and patch the resource it represents. 261 | # But, more interestingly, you can make detailed changes to the model and apply those 262 | # changes to the API. 
263 | 264 | project.model.metadata.labels['my_label'] = 'myvalue' 265 | project.apply() 266 | 267 | 268 | # If modifying the underlying API resources could be contentious, use the more robust 269 | # modify_and_apply method which can retry the operation multiple times -- refreshing 270 | # with the current object state between failures. 271 | 272 | # First, define a function that will make changes to the model. 273 | def make_model_change(apiobj): 274 | apiobj.model.data['somefile.yaml'] = 'wyxz' 275 | return True 276 | 277 | 278 | # modify_and_apply will call the function and attempt to apply its changes to the model 279 | # if it returns True. If the apply is rejected by the API, the function will pull 280 | # the latest object content, call make_model_change again, and try the apply again 281 | # up to the specified retry account. 282 | configmap.modify_and_apply(make_model_change, retries=5) 283 | 284 | 285 | # For best results, ensure the function passed to modify_and_apply is idempotent: 286 | 287 | def set_unmanaged_in_cvo(apiobj): 288 | desired_entry = { 289 | 'group': 'config.openshift.io/v1', 290 | 'kind': 'ClusterOperator', 291 | 'name': 'openshift-samples', 292 | 'unmanaged': True, 293 | } 294 | 295 | if apiobj.model.spec.overrides.can_match(desired_entry): 296 | # No change required 297 | return False 298 | 299 | if not apiobj.model.spec.overrides: 300 | apiobj.model.spec.overrides = [] 301 | 302 | context.progress('Attempting to disable CVO interest in openshift-samples operator') 303 | apiobj.model.spec.overrides.append(desired_entry) 304 | return True 305 | 306 | 307 | result, changed = oc.selector('clusterversion.config.openshift.io/version').object().modify_and_apply(set_unmanaged_in_cvo) 308 | if changed: 309 | context.report_change('Instructed CVO to ignore openshift-samples operator') 310 | 311 | ``` 312 | 313 | ### Running within a Pod 314 | 315 | It is simple to use the API within a Pod. 
The `oc` binary automatically 316 | detects it is running within a container and automatically uses the Pod's serviceaccount token/cacert. 317 | 318 | ### Tracking oc invocations 319 | 320 | It is good practice to setup at least one tracking context within your application so that 321 | you will be able to easily analyze what `oc` invocations were made on your behalf and the result 322 | of those operations. *Note that details about all `oc` invocations performed within the context will 323 | be stored within the tracker. Therefore, do not use a single tracker for a continuously running 324 | process -- it will consume memory for every oc invocation.* 325 | 326 | ```python 327 | #!/usr/bin/python 328 | import openshift_client as oc 329 | 330 | with oc.tracking() as tracker: 331 | try: 332 | print('Current user: {}'.format(oc.whoami())) 333 | except: 334 | print('Error acquiring current username') 335 | 336 | # Print out details about the invocations made within this context. 337 | print(tracker.get_result()) 338 | ``` 339 | 340 | In this case, the tracking output would look something like: 341 | 342 | ```json 343 | { 344 | "status": 0, 345 | "operation": "tracking", 346 | "actions": [ 347 | { 348 | "status": 0, 349 | "verb": "project", 350 | "references": {}, 351 | "in": null, 352 | "out": "aos-cd\n", 353 | "err": "", 354 | "cmd": [ 355 | "oc", 356 | "project", 357 | "-q" 358 | ], 359 | "elapsed_time": 0.15344810485839844, 360 | "internal": false, 361 | "timeout": false, 362 | "last_attempt": true 363 | }, 364 | { 365 | "status": 0, 366 | "verb": "whoami", 367 | "references": {}, 368 | "in": null, 369 | "out": "aos-ci-jenkins\n", 370 | "err": "", 371 | "cmd": [ 372 | "oc", 373 | "whoami" 374 | ], 375 | "elapsed_time": 0.6328380107879639, 376 | "internal": false, 377 | "timeout": false, 378 | "last_attempt": true 379 | } 380 | ] 381 | } 382 | ``` 383 | 384 | Alternatively, you can record actions yourself by passing an action_handler to the tracking 385 | 
contextmanager. Your action handler will be invoked each time an `oc` invocation completes. 386 | 387 | ```python 388 | def print_action(action): 389 | print('Performed: {} - status={}'.format(action.cmd, action.status)) 390 | 391 | 392 | with oc.tracking(action_handler=print_action): 393 | try: 394 | print('Current project: {}'.format(oc.get_project_name())) 395 | print('Current user: {}'.format(oc.whoami())) 396 | except: 397 | print('Error acquiring details about project/user') 398 | 399 | ``` 400 | 401 | ### Time limits 402 | 403 | Have a script you want to ensure succeeds or fails within a specific period of time? Use 404 | a `timeout` context. Timeout contexts can be nested - if any timeout context expires, 405 | the current oc invocation will be killed. 406 | 407 | ```python 408 | #!/usr/bin/python 409 | import openshift_client as oc 410 | 411 | 412 | def node_is_ready(node): 413 | ready = node.model.status.conditions.can_match({ 414 | 'type': 'Ready', 415 | 'status': 'True', 416 | }) 417 | return ready 418 | 419 | 420 | print("Waiting for up to 15 minutes for at least 6 nodes to be ready...") 421 | with oc.timeout(15 * 60): 422 | oc.selector('nodes').until_all(6, success_func=node_is_ready) 423 | print("All detected nodes are reporting ready") 424 | ``` 425 | 426 | You will be able to see in `tracking` context results that a timeout occurred for an affected 427 | invocation. The `timeout` field will be set to `True`. 428 | 429 | ### Advanced contexts 430 | 431 | If you are unable to use a KUBECONFIG environment variable or need fine grained control over the 432 | server/credentials you communicate with for each invocation, use openshift-client-python contexts. 433 | Contexts can be nested and cause oc invocations within them to use the most recently established 434 | context information. 435 | 436 | ```python 437 | with oc.api_server('https:///....'): # use the specified api server for nested oc invocations. 
438 | 439 | with oc.token('abc..'): # --server=... --token=abc... will be included in inner oc invocations. 440 | print("Current project: " + oc.get_project_name()) 441 | 442 | with oc.token('def..'): # --server=... --token=def... will be included in inner oc invocations. 443 | print("Current project: " + oc.get_project_name()) 444 | ``` 445 | 446 | You can control the loglevel specified for `oc` invocations. 447 | 448 | ```python 449 | with oc.loglevel(6): 450 | # all oc invocations within this context will be invoked with --loglevel=6 451 | oc... 452 | ``` 453 | 454 | You ask `oc` to skip TLS verification if necessary. 455 | 456 | ```python 457 | with oc.tls_verify(enable=False): 458 | # all oc invocations within this context will be invoked with --insecure-skip-tls-verify 459 | oc... 460 | ``` 461 | 462 | ### Something missing? 463 | 464 | Most common API iterations have abstractions, but if there is no openshift-client-python API 465 | exposing the `oc` function you want to run, you can always use `oc.invoke` to directly pass arguments to 466 | an `oc` invocation on your host. 467 | 468 | ```python 469 | # oc adm policy add-scc-to-user privileged -z my-sa-name 470 | oc.invoke('adm', ['policy', 'add-scc-to-user', 'privileged', '-z', 'my-sa-name']) 471 | ``` 472 | 473 | ### Running oc on a bastion host 474 | 475 | Is your oc binary on a remote host? No problem. Easily remote all CLI interactions over SSH using the client_host 476 | context. Before running this command, you will need to load your ssh agent up with a key 477 | appropriate to the target client host. 478 | 479 | ```python 480 | with openshift_client.client_host(hostname="my.cluster.com", username="root", auto_add_host=True): 481 | # oc invocations will take place on my.cluster.com host as the root user. 
482 | print("Current project: " + oc.get_project_name()) 483 | ``` 484 | 485 | Using this model, your Python script will run exactly where you launch it, but all oc invocations will 486 | occur on the remote host. 487 | 488 | ### Gathering reports and logs with selectors 489 | 490 | Various objects within OpenShift have logs associated with them: 491 | 492 | - pods 493 | - deployments 494 | - daemonsets 495 | - statefulsets 496 | - builds 497 | - etc.. 498 | 499 | A selector can gather logs from pods associated with each (and for each container within those pods). Each 500 | log will be a unique value in the dictionary returned. 501 | 502 | ```python 503 | # Print logs for all pods associated with all daemonsets & deployments in openshift-monitoring namespace. 504 | with oc.project('openshift-monitoring'): 505 | for k, v in oc.selector(['daemonset', 'deployment']).logs(tail=500).iteritems(): 506 | print('Container: {}\n{}\n\n'.format(k, v)) 507 | ``` 508 | 509 | The above example would output something like: 510 | 511 | ``` 512 | Container: openshift-monitoring:pod/node-exporter-hw5r5(node-exporter) 513 | time="2018-10-22T21:07:36Z" level=info msg="Starting node_exporter (version=0.16.0, branch=, revision=)" source="node_exporter.go:82" 514 | time="2018-10-22T21:07:36Z" level=info msg="Enabled collectors:" source="node_exporter.go:90" 515 | time="2018-10-22T21:07:36Z" level=info msg=" - arp" source="node_exporter.go:97" 516 | ... 517 | ``` 518 | 519 | Note that these logs are held in memory. Use tail or other available method parameters to ensure 520 | predictable and efficient results. 521 | 522 | To simplify even further, you can ask the library to pretty-print the logs for you: 523 | 524 | ```python 525 | oc.selector(['daemonset', 'deployment']).print_logs() 526 | ``` 527 | 528 | And to quickly pull together significant diagnostic data on selected objects, use `report()` or `print_report()`. 
529 | A report includes the following information for each selected object, if available: 530 | 531 | - `object` - The current state of the object. 532 | - `describe` - The output of describe on the object. 533 | - `logs` - If applicable, a map of logs -- one of each container associated with the object. 534 | 535 | ```python 536 | # Pretty-print a detail set of data about all deploymentconfigs, builds, and configmaps in the 537 | # current namespace context. 538 | oc.selector(['dc', 'build', 'configmap']).print_report() 539 | ``` 540 | 541 | ### Advanced verbs: 542 | 543 | Running oc exec on a pod. 544 | 545 | ```python 546 | result = oc.selector('pod/alertmanager-main-0').object().execute(['cat'], 547 | container_name='alertmanager', 548 | stdin='stdin for cat') 549 | print(result.out()) 550 | ``` 551 | 552 | Finding all pods running on a node: 553 | 554 | ```python 555 | with oc.client_host(): 556 | for node_name in oc.selector('nodes').qnames(): 557 | print('Pods running on node: {}'.format(node_name)) 558 | for pod_obj in oc.get_pods_by_node(node_name): 559 | print(' {}'.format(pod_obj.fqname())) 560 | ``` 561 | 562 | Example output: 563 | 564 | ``` 565 | ... 566 | Pods running on node: node/ip-172-31-18-183.ca-central-1.compute.internal 567 | 72-sus:pod/sus-1-vgnmx 568 | ameen-blog:pod/ameen-blog-2-t68qn 569 | appejemplo:pod/ejemplo-1-txdt7 570 | axxx:pod/mysql-5-lx2bc 571 | ... 572 | ``` 573 | 574 | ## Examples 575 | 576 | - [Some unit tests](examples/cluster_tests.py) 577 | 578 | ## Environment Variables 579 | 580 | To allow openshift-client-python applications to be portable between environments without needing to be modified, 581 | you can specify many default contexts in the environment. 582 | 583 | ### Defaults when invoking `oc` 584 | 585 | Establishing explicit contexts within an application will override these environment defaults. 
586 | 587 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_OC_PATH` - default path to use when invoking `oc` 588 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_CONFIG_PATH` - default `--kubeconfig` argument 589 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_API_SERVER` - default `--server` argument 590 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_CA_CERT_PATH` - default `--cacert` argument 591 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_PROJECT` - default `--namespace` argument 592 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_OC_LOGLEVEL` - default `--loglevel` argument 593 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_SKIP_TLS_VERIFY` - default `--insecure-skip-tls-verify` 594 | 595 | ### Master timeout 596 | 597 | Defines an implicit outer timeout(..) context for the entire application. This allows you to ensure 598 | that an application terminates within a reasonable time, even if the author of the application has 599 | not included explicit timeout contexts. Like any `timeout` context, this value is not overridden 600 | by subsequent `timeout` contexts within the application. It provides an upper bound for the entire 601 | application's oc interactions. 602 | 603 | - `OPENSHIFT_CLIENT_PYTHON_MASTER_TIMEOUT` 604 | 605 | ### SSH Client Host 606 | 607 | In some cases, it is desirable to run an openshift-client-python application using a local `oc` binary and 608 | in other cases, the `oc` binary resides on a remote client. Encoding this decision in the application 609 | itself is unnecessary. 610 | 611 | Simply wrap your application in a `client_host` context without arguments. This will try to pull 612 | client host information from environment variables if they are present. If they are not present, 613 | the application will execute on the local host. 614 | 615 | For example, the following application will ssh to `OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_HOSTNAME` if it is defined 616 | in the environment. Otherwise, `oc` interactions will be executed on the host running the python application.
617 | 618 | ```python 619 | with oc.client_host(): # if OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_HOSTNAME if not defined in the environment, this is a no-op 620 | print('Found nodes: {}'.format(oc.selector('nodes').qnames())) 621 | ``` 622 | 623 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_HOSTNAME` - The hostname on which the `oc` binary resides 624 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_USERNAME` - Username to use for the ssh connection (optional) 625 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_PORT` - SSH port to use (optional; defaults to 22) 626 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_AUTO_ADD` - Defaults to `false`. If set to `true`, unknown hosts will automatically be trusted. 627 | - `OPENSHIFT_CLIENT_PYTHON_DEFAULT_LOAD_SYSTEM_HOST_KEYS` - Defaults to `true`. If true, the local known hosts information will be used. 628 | -------------------------------------------------------------------------------- /ansible/rebuild_module.digest: -------------------------------------------------------------------------------- 1 | 22eb78d8060029c138e7bd3c465e6ca4 - 2 | -------------------------------------------------------------------------------- /ansible/rebuild_module.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Directory in which this script resides 4 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" 5 | 6 | TEMPLATE_FILE="$DIR/roles/openshift_client_python/library/openshift_client_python.template.py" 7 | OUTPUT_FILE="$DIR/roles/openshift_client_python/library/openshift_client_python.py" 8 | 9 | if [[ ! -f "$TEMPLATE_FILE" ]]; then 10 | echo "Unable to find template file: $TEMPLATE_FILE" 11 | exit 1 12 | fi 13 | 14 | PACKAGES_DIR="$DIR/../packages" 15 | if [[ ! 
-d "$PACKAGES_DIR" ]]; then 16 | echo "Unable to find packages directory: $PACKAGES_DIR" 17 | exit 1 18 | fi 19 | 20 | pushd "$PACKAGES_DIR" 21 | # Update module digest so that pr.groovy can ensure it is run after each module change 22 | cat $(find openshift_client/ -name '*.py' | sort -d) | md5sum > $DIR/rebuild_module.digest 23 | ENCODED_TGZ=$(tar c --owner=0 --numeric-owner --group=0 --mtime='UTC 2019-01-01' $(find openshift_client/ -name '*.py' | sort -d) | gzip -c -n | base64 --wrap=0) 24 | popd 25 | 26 | echo "#!/usr/bin/env python" > $OUTPUT_FILE 27 | echo "# THIS IS A GENERATED FILE. DO NOT MODIFY IT" >> $OUTPUT_FILE 28 | echo "# Modify: openshift_client_python.template.py and then run rebuild_module.sh to affect this file" >> $OUTPUT_FILE 29 | 30 | replaced=0 31 | 32 | while IFS= read -r line 33 | do 34 | if [[ "$line" == "#!"* ]]; then # Skip the shebang, we write it manually above 35 | continue 36 | fi 37 | if [[ "$line" == " REPLACED_BY_REBUILD_MODULE = '{}'" ]]; then 38 | echo " REPLACED_BY_REBUILD_MODULE = '${ENCODED_TGZ}'" >> "${OUTPUT_FILE}" 39 | replaced=1 40 | else 41 | echo "$line" >> "${OUTPUT_FILE}" 42 | fi 43 | done < "$TEMPLATE_FILE" 44 | 45 | if [[ "$replaced" != "1" ]]; then 46 | echo "Unable to find replacement pattern in template" 47 | exit 1 48 | fi 49 | -------------------------------------------------------------------------------- /ansible/roles/openshift_client_python/library/openshift_client_python.template.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | 5 | from __future__ import absolute_import 6 | from ansible.module_utils.basic import AnsibleModule 7 | 8 | import os 9 | import six 10 | import tempfile 11 | import shutil 12 | import tarfile 13 | import base64 14 | import sys 15 | import pprint 16 | 17 | 18 | # Allows modules to trigger errors 19 | def error(msg, **kwargs): 20 | import openshift_client as oc 21 | raise 
oc.OpenShiftPythonException(msg, **kwargs) 22 | 23 | 24 | def main(): 25 | import openshift_client as oc 26 | script = module.params["script"] 27 | time = module.params["timeout"] 28 | oc.ansible.reset() 29 | oc.ansible.vars = module.params["vars"] 30 | 31 | if time is not None: 32 | time = int(time) # Allow time to come in as a string 33 | 34 | if module.params["project"] is not None: 35 | oc.context.default_project = module.params["project"] 36 | 37 | with oc.timeout(time): 38 | with oc.tracking() as ct: 39 | try: 40 | with oc.util.OutputCapture() as capture: 41 | exec(script) 42 | 43 | module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result())) 44 | module.exit_json(rc=ct.get_result().status(), 45 | changed=module.params['changes'], 46 | ansible_facts=oc.ansible.new_facts, 47 | stdout=capture.out.getvalue().decode('UTF-8'), 48 | stderr=capture.err.getvalue().decode('UTF-8'), 49 | result=ct.get_result().as_dict() 50 | ) 51 | except oc.OpenShiftPythonException as ose: 52 | module.debug("openshift_client_python module invocation exception: " + str(ose)) 53 | module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result())) 54 | module.fail_json(msg=ose.msg, 55 | rc=ose.result.status(), 56 | exception_attributes=ose.attributes(), 57 | changed=module.params['changes'] or oc.ansible.changed, 58 | ansible_facts=oc.ansible.new_facts, 59 | stdout=capture.out.getvalue().decode('UTF-8'), 60 | stderr=capture.err.getvalue().decode('UTF-8'), 61 | result=ct.get_result().as_dict() 62 | ) 63 | except KeyboardInterrupt: 64 | print('Received KeyboardInterrupt during module', file=sys.stderr) 65 | pprint.pprint(ct.get_result().as_dict(), stream=sys.stderr) 66 | raise 67 | 68 | 69 | if __name__ == '__main__': 70 | # When openshift-client-python/ansible/rebuild_module.sh is executed, it will read in this template 71 | # and replace the following variable with a b64 encoded tarball of the openshift-client-library 72 | # package. 
The client_python_extract_dir path will contain the 'openshift' package directory. 73 | REPLACED_BY_REBUILD_MODULE = '{}' 74 | OPENSHIFT_CLIENT_PYTHON_TGZ = six.BytesIO(base64.b64decode(REPLACED_BY_REBUILD_MODULE)) 75 | 76 | module = AnsibleModule( 77 | argument_spec=dict( 78 | script=dict(required=True), 79 | vars=dict(required=False, default={}, type='dict'), 80 | project=dict(required=False, default=None), 81 | timeout=dict(required=False, default=None, type='int'), 82 | changes=dict(required=False, default=False, type='bool') 83 | ) 84 | ) 85 | 86 | client_python_extract_dir = tempfile.mkdtemp() 87 | module.debug('Extracting openshift-client-python module to: {}'.format(client_python_extract_dir)) 88 | 89 | try: 90 | tf = tarfile.open(fileobj=OPENSHIFT_CLIENT_PYTHON_TGZ, mode='r:gz') 91 | tf.extractall(client_python_extract_dir) 92 | # Add the newly extacted directory to the python path to resolve the openshift package 93 | sys.path.append(client_python_extract_dir) 94 | # Import openshift_client as oc so that we can delete the extract directory. module.exit_ type methods 95 | # call sys.exit, so this is our only chance to leave no trace. 
96 | import openshift_client as oc 97 | shutil.rmtree(client_python_extract_dir) 98 | main() 99 | finally: 100 | if os.path.exists(client_python_extract_dir): 101 | shutil.rmtree(client_python_extract_dir) 102 | 103 | -------------------------------------------------------------------------------- /ansible/roles/openshift_client_python/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Justin Pierce 3 | description: Ansible role supporting openshift-client-python scripts 4 | company: Red hat 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: Apache 2.0 18 | 19 | min_ansible_version: 1.2 20 | 21 | # Optionally specify the branch Galaxy will use when accessing the GitHub 22 | # repo for this role. During role install, if no tags are available, 23 | # Galaxy will use this branch. During import Galaxy will access files on 24 | # this branch. If travis integration is cofigured, only notification for this 25 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 26 | # (usually master) will be used. 27 | #github_branch: 28 | 29 | # 30 | # Below are all platforms currently available. Just uncomment 31 | # the ones that apply to your role. If you don't see your 32 | # platform on this list, let us know and we'll get it added! 
33 | # 34 | #platforms: 35 | #- name: OpenBSD 36 | # versions: 37 | # - all 38 | # - 5.6 39 | # - 5.7 40 | # - 5.8 41 | # - 5.9 42 | # - 6.0 43 | # - 6.1 44 | #- name: Fedora 45 | # versions: 46 | # - all 47 | # - 16 48 | # - 17 49 | # - 18 50 | # - 19 51 | # - 20 52 | # - 21 53 | # - 22 54 | # - 23 55 | # - 24 56 | # - 25 57 | #- name: DellOS 58 | # versions: 59 | # - all 60 | # - 10 61 | # - 6 62 | # - 9 63 | #- name: MacOSX 64 | # versions: 65 | # - all 66 | # - 10.10 67 | # - 10.11 68 | # - 10.12 69 | # - 10.7 70 | # - 10.8 71 | # - 10.9 72 | #- name: Synology 73 | # versions: 74 | # - all 75 | # - any 76 | #- name: Junos 77 | # versions: 78 | # - all 79 | # - any 80 | #- name: GenericBSD 81 | # versions: 82 | # - all 83 | # - any 84 | #- name: Void Linux 85 | # versions: 86 | # - all 87 | # - any 88 | #- name: GenericLinux 89 | # versions: 90 | # - all 91 | # - any 92 | #- name: NXOS 93 | # versions: 94 | # - all 95 | # - any 96 | #- name: IOS 97 | # versions: 98 | # - all 99 | # - any 100 | #- name: Amazon 101 | # versions: 102 | # - all 103 | # - 2013.03 104 | # - 2013.09 105 | # - 2016.03 106 | # - 2016.09 107 | #- name: ArchLinux 108 | # versions: 109 | # - all 110 | # - any 111 | #- name: FreeBSD 112 | # versions: 113 | # - all 114 | # - 10.0 115 | # - 10.1 116 | # - 10.2 117 | # - 10.3 118 | # - 11.0 119 | # - 8.0 120 | # - 8.1 121 | # - 8.2 122 | # - 8.3 123 | # - 8.4 124 | # - 9.0 125 | # - 9.1 126 | # - 9.1 127 | # - 9.2 128 | # - 9.3 129 | #- name: Ubuntu 130 | # versions: 131 | # - all 132 | # - lucid 133 | # - maverick 134 | # - natty 135 | # - oneiric 136 | # - precise 137 | # - quantal 138 | # - raring 139 | # - saucy 140 | # - trusty 141 | # - utopic 142 | # - vivid 143 | # - wily 144 | # - xenial 145 | # - yakkety 146 | # - zesty 147 | #- name: Debian 148 | # versions: 149 | # - all 150 | # - etch 151 | # - jessie 152 | # - lenny 153 | # - sid 154 | # - squeeze 155 | # - stretch 156 | # - wheezy 157 | #- name: Alpine 158 | # versions: 159 | # - 
all 160 | # - any 161 | #- name: EL 162 | # versions: 163 | # - all 164 | # - 5 165 | # - 6 166 | # - 7 167 | #- name: Windows 168 | # versions: 169 | # - all 170 | # - 2012R2 171 | #- name: SmartOS 172 | # versions: 173 | # - all 174 | # - any 175 | #- name: opensuse 176 | # versions: 177 | # - all 178 | # - 12.1 179 | # - 12.2 180 | # - 12.3 181 | # - 13.1 182 | # - 13.2 183 | #- name: SLES 184 | # versions: 185 | # - all 186 | # - 10SP3 187 | # - 10SP4 188 | # - 11 189 | # - 11SP1 190 | # - 11SP2 191 | # - 11SP3 192 | # - 11SP4 193 | # - 12 194 | # - 12SP1 195 | #- name: GenericUNIX 196 | # versions: 197 | # - all 198 | # - any 199 | #- name: Solaris 200 | # versions: 201 | # - all 202 | # - 10 203 | # - 11.0 204 | # - 11.1 205 | # - 11.2 206 | # - 11.3 207 | #- name: eos 208 | # versions: 209 | # - all 210 | # - Any 211 | 212 | galaxy_tags: [] 213 | # List tags for your role here, one per line. A tag is 214 | # a keyword that describes and categorizes the role. 215 | # Users find roles by searching for tags. Be sure to 216 | # remove the '[]' above if you add tags to this list. 217 | # 218 | # NOTE: A tag is limited to a single word comprised of 219 | # alphanumeric characters. Maximum 20 tags per role. 220 | 221 | dependencies: [] 222 | # List your role dependencies here, one per line. 223 | # Be sure to remove the '[]' above if you add dependencies 224 | # to this list. -------------------------------------------------------------------------------- /ansible/roles/openshift_client_python/readme.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | This role exposes the openshift_client_python module which allows you to use python leveraging 5 | the openshift-client-python library directly within ansible playbooks. 
6 | 7 | Example Playbook 8 | ---------------- 9 | 10 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 11 | 12 | ```snakeyaml 13 | - hosts: servers 14 | gather_facts: False 15 | 16 | roles: 17 | - openshift_client_python 18 | 19 | tasks: 20 | 21 | - name: Set a fact to be used in script 22 | set_fact: 23 | op_types: "pods" 24 | 25 | - name: Await ingress 26 | 27 | openshift_client_python: 28 | # Default project scope unless overridden with oc.project. 29 | project: 'openshift-monitoring' 30 | 31 | # Timeout (seconds) applies to overall script / all oc interactions must complete. 32 | timeout: 15 33 | 34 | # If you know the script will make changes to the cluster, you can indicate it as a parameter. 35 | # Otherwise, set oc.ansible.changed inside of the script. 36 | changes: True 37 | 38 | # These values will be populated into oc.ansible.vars, which can be accessed within the script. 39 | vars: 40 | some_var_name: 'abc' 41 | another: 5 42 | 43 | script: | 44 | print('You can use an arg: {} and {}'.format(oc.ansible.vars['some_var_name'], oc.ansible.vars['another'])) 45 | 46 | # "oc.ansible.new_facts" is a dict into which you can store new facts. 47 | # These facts will be set by ansible when the script exits. 48 | oc.ansible.new_facts['pods'] = oc.selector("{{op_types}}").qnames() 49 | 50 | oc.selector('route/prometheus-k8s').until_all(1, oc.status.is_route_admitted) 51 | 52 | # An alternate way of reporting a change occurred to the openshift_client_python ansible module. 53 | oc.ansible.changed = True 54 | 55 | # An oc.tracker object will be stored in the register variable. It will detail all 56 | # oc interactions performed by the script. 
57 | register: result 58 | 59 | - name: Show tracking result (all oc interactions) 60 | debug: 61 | msg: "{{result}}" 62 | 63 | - name: Use those facts 64 | openshift_client_python: 65 | timeout: 60 66 | script: | 67 | with oc.project('openshift-monitoring'): 68 | 69 | def print_phase(pod_apiobj): 70 | print('Phase for {} = {}'.format(pod_apiobj.qname(), pod_apiobj.model.status.phase)) 71 | 72 | oc.selector({{pods}}).for_each(print_phase) 73 | ``` 74 | 75 | 76 | License 77 | ------- 78 | 79 | Apache License 2.0 80 | -------------------------------------------------------------------------------- /ansible/roles/openshift_client_python/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | 4 | 5 | -------------------------------------------------------------------------------- /ansible/run_sample_playbook.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ -z "$1" ]]; then 4 | echo "Specify the hostname of a bastion with oc/kubeconfig ready for use." 5 | echo "Example: $0 my.bastion.hostname" 6 | exit 1 7 | fi 8 | 9 | ansible-playbook -vvvv sample_playbook.yml -i $1, 10 | -------------------------------------------------------------------------------- /ansible/sample_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # If you want to run 'oc' locally, just use "hosts: localhost" here. Just make sure you can 3 | # run 'oc' with a valid/local kubeconfig. 
4 | - hosts: all 5 | gather_facts: False 6 | 7 | roles: 8 | - openshift_client_python 9 | 10 | tasks: 11 | 12 | - name: Set a fact to be used in script 13 | set_fact: 14 | op_types: "pods" 15 | 16 | - name: Await ingress 17 | openshift_client_python: 18 | # Default project scope unless overridden with oc.project 19 | project: 'openshift-monitoring' 20 | # Timeout applies to overall script / all oc interactions must complete 21 | timeout: 15 22 | # If you know the script will make changes, you can indicate it as a parameter 23 | changes: true 24 | 25 | vars: 26 | some_var_name: 'abc' 27 | another: 5 28 | 29 | script: | 30 | print('You can use an arg: {} and {}'.format(oc.ansible.vars['some_var_name'], oc.ansible.vars['another'])) 31 | 32 | # "oc.ansible.new_facts" is a dict into which you can store new facts. 33 | # These facts will be set by ansible when the script exits. 34 | oc.ansible.new_facts['pods'] = oc.selector("{{op_types}}").qnames() 35 | 36 | oc.selector('route/prometheus-k8s').until_all(1, oc.status.is_route_admitted) 37 | 38 | # An alternate way of reporting a change occurred to the module. 
39 | oc.ansible.changed = True 40 | 41 | oc.selector('route/prometheus-k8s').until_all(1, oc.status.is_route_admitted) 42 | 43 | register: result 44 | 45 | - name: Show tracking result (all oc interactions) 46 | debug: 47 | msg: "{{result}}" 48 | 49 | - name: Use those facts 50 | openshift_client_python: 51 | timeout: 60 52 | script: | 53 | with oc.project('openshift-monitoring'): 54 | 55 | def print_phase(pod_apiobj): 56 | print('Phase for {} = {}'.format(pod_apiobj.qname(), pod_apiobj.model.status.phase)) 57 | 58 | oc.selector({{pods}}).for_each(print_phase) 59 | 60 | -------------------------------------------------------------------------------- /docs/PACKAGING.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | **Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)* 4 | 5 | - [Openshift Python Client Packaging](#openshift-python-client-packaging) 6 | - [Introduction](#introduction) 7 | - [Recommended Setup](#recommended-setup) 8 | - [Create User Accounts](#create-user-accounts) 9 | - [PyPI - The Python Package Index](#pypi---the-python-package-index) 10 | - [TestPyPI - The Test Python Package Index](#testpypi---the-test-python-package-index) 11 | - [Generate API Tokens](#generate-api-tokens) 12 | - [setup.cfg](#setupcfg) 13 | - [Building](#building) 14 | - [Publishing](#publishing) 15 | - [TestPyPI](#testpypi) 16 | - [PyPI](#pypi) 17 | - [Installation](#installation) 18 | - [TestPyPI](#testpypi-1) 19 | - [PyPI](#pypi-1) 20 | - [Cleanup](#cleanup) 21 | - [Helpful Links](#helpful-links) 22 | 23 | 24 | 25 | # Openshift Python Client Packaging 26 | 27 | ## Introduction 28 | This document primarily serves as a reference for us to publish the openshift-client module to PyPI for general consumption by our consumers. It can also be used by anyone interested in getting started with Python Packaging as all the documented steps and configurations can easily be migrated to any other package/module. 
29 | 30 | ## Recommended Setup 31 | ### Create User Accounts 32 | To work with packaging, you will need to create user accounts on one or both of the following sites: 33 | 34 | #### PyPI - The Python Package Index 35 | For **official** releases that are available for installation 36 | * https://pypi.org/ 37 | 38 | #### TestPyPI - The Test Python Package Index 39 | For **testing** python packaging without impacting the official index 40 | * https://test.pypi.org/ 41 | 42 | ### Generate API Tokens 43 | For each account that you create, you can generate API Tokens that make publishing your packages/modules easier. Once the tokens have been generated, you can add them to your `~/.pypirc` file: 44 | 45 | ```text 46 | [pypi] 47 | username = __token__ 48 | password = pypi- 49 | 50 | [testpypi] 51 | repository: https://test.pypi.org/legacy/ 52 | username = __token__ 53 | password = pypi- 54 | ``` 55 | 56 | ## Building 57 | For openshift-client, build both a source distribution and a universal wheel: 58 | ```bash 59 | python -m build 60 | ``` 61 | or: 62 | ```bash 63 | make release 64 | ``` 65 | 66 | ## Publishing 67 | Publishing to either package index is accomplished by using [Twine](https://pypi.org/project/twine/). Because we setup our local `~/.pypirc` above, we can reference the repository by the name defined therein instead of passing the full URL on the commandline. 68 | 69 | ### TestPyPI 70 | ```bash 71 | twine upload --repository testpypi dist/* 72 | ``` 73 | or 74 | ```bash 75 | make publish-testpypi 76 | ``` 77 | 78 | ### PyPI 79 | ```bash 80 | twine upload --repository pypi dist/* 81 | ``` 82 | or 83 | ```bash 84 | make publish-pypi: 85 | ``` 86 | 87 | ## Installation 88 | 89 | ### TestPyPI 90 | Installation from TestPyPI must be performed using one of the following methods: 91 | 92 | 1. Latest version 93 | ```bash 94 | pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple openshift-client 95 | ``` 96 | 2. 
Specific version 97 | ```bash 98 | pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple openshift-client==1.0.2 99 | ``` 100 | 101 | ### PyPI 102 | 1. Latest version 103 | ```bash 104 | pip install openshift-client 105 | ``` 106 | 107 | 2. Specific version 108 | ```bash 109 | pip install openshift-client==1.0.2 110 | ``` 111 | 112 | ## Cleanup 113 | If you're working on changes, you'll need to bump the version string for every publish to either index (releases are unique). To cleanup the artifacts from previous builds, you can execute the following: 114 | ```bash 115 | rm -rf dist/ packages/openshift_client.egg-info/ build/ 116 | ``` 117 | 118 | ## Helpful Links 119 | * https://packaging.python.org/guides/distributing-packages-using-setuptools/ 120 | * https://setuptools.readthedocs.io/en/latest/index.html 121 | * https://packaging.python.org/guides/single-sourcing-package-version/ 122 | * https://packaging.python.org/guides/using-testpypi/ 123 | * https://packaging.python.org/tutorials/packaging-projects/ 124 | * https://github.com/pypa/sampleproject 125 | * https://realpython.com/pypi-publish-python-package/ 126 | * https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/index.html -------------------------------------------------------------------------------- /doctoc.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | doctoc --github README.md 4 | 5 | -------------------------------------------------------------------------------- /examples/cluster_tests.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | from __future__ import absolute_import 5 | import argparse 6 | import time 7 | import logging 8 | import traceback 9 | 10 | import openshift_client as oc 11 | from contextlib import contextmanager 12 | 13 | 14 | def report_progress(msg): 
15 | logging.info('PROGRESS: {}'.format(msg)) 16 | 17 | 18 | def report_verified(msg): 19 | logging.info('VERIFIED: {}'.format(msg)) 20 | 21 | 22 | @contextmanager 23 | def temp_project(name, adm=False, cleanup=True): 24 | """ 25 | Useful context manager for testing purposes. Creates a temporary project, 26 | runs the context within oc.project, and then deletes the project on exit. 27 | Exceptions thrown by content are thrown by contextmanager as well. 28 | :param name: The name of the project to create. 29 | :param adm: If True, the project will be created with 'oc adm ...' 30 | :param cleanup: If True, project will be deleted on return. Only set to False if you 31 | are trying to leave behind some sort of debug breadcrumb. 32 | :return: 33 | """ 34 | oc.delete_project(name, ignore_not_found=True, grace_period=1) 35 | try: 36 | with oc.new_project(name, adm=adm): 37 | yield 38 | finally: 39 | if cleanup: 40 | report_progress('Cleaning up test project: {}'.format(name)) 41 | oc.delete_project(name, ignore_not_found=True, grace_period=1) 42 | 43 | 44 | def simple_http_server_resources(name, port=8080, create_service=False, create_route=False): 45 | """ 46 | Returns a list representing resources which, if instantiated, will run 47 | a simple pod, running a python-based http server on a specified port. If 48 | requested, a kube Service and Route can be created. 
49 | :param name: The name of the pod to create 50 | :param port: The port the pod should expose 51 | :param create_service: If True, a Service will be created for the server pod 52 | :param create_route: If True, a Service & Route will be created for the server port 53 | :return: 54 | """ 55 | 56 | objs = [ 57 | oc.build_pod_simple( 58 | name, 59 | 'python:3', 60 | port=port, 61 | command=['python', '-m', 'http.server', str(port)], 62 | labels={'simple-server-run': name} 63 | ), 64 | ] 65 | 66 | if create_service or create_route: 67 | objs.append(oc.build_service_simple(name, 68 | {'simple-server-run': name}, 69 | port, 70 | ), 71 | ) 72 | 73 | if create_route: 74 | objs.append({ 75 | 'apiVersion': 'v1', 76 | 'kind': 'Route', 77 | 'metadata': { 78 | 'name': name, 79 | }, 80 | 'spec': { 81 | 'host': '', 82 | 'port': { 83 | 'targetPort': port, 84 | }, 85 | 'to': { 86 | 'kind': 'Service', 87 | 'name': name, 88 | 'weight': None, 89 | } 90 | } 91 | }) 92 | 93 | return objs 94 | 95 | 96 | def check_online_project_constraints(): 97 | test_project_name = 'imperative-verify-test-project-constraints' 98 | 99 | with temp_project(test_project_name): 100 | time.sleep(2) 101 | 102 | oc.selector('limitrange').object() 103 | report_verified('New projects contain limit ranges') 104 | 105 | oc.selector('networkpolicy').objects() 106 | report_verified('New projects contain network policies') 107 | 108 | oc.selector('resourcequota').objects() 109 | report_verified('New projects contain resource quotas') 110 | 111 | report_verified("Template based project constraints are being created!") 112 | 113 | 114 | def check_prevents_cron_jobs(): 115 | """ 116 | In our cluster configuration, cronjobs can only be created 117 | in privileged projects. Validate this. 
118 | """ 119 | 120 | cronjob = { 121 | 'apiVersion': 'batch/v1beta1', 122 | 'kind': 'CronJob', 123 | 'metadata': { 124 | 'name': 'prohibited-cron', 125 | }, 126 | 'spec': { 127 | 'schedule': '@weekly', 128 | 'jobTemplate': { 129 | 'spec': { 130 | 'template': { 131 | 'spec': { 132 | 'containers': [ 133 | { 134 | 'name': 'container0', 135 | 'image': 'busybox', 136 | } 137 | ], 138 | 'restartPolicy': 'Never', 139 | } 140 | } 141 | } 142 | } 143 | } 144 | } 145 | 146 | user_project_name = 'imperative-verify-test-project-scheduled-jobs' 147 | with temp_project(user_project_name): 148 | try: 149 | report_progress('Creating cron job in normal project') 150 | oc.create(cronjob) 151 | assert False, 'Cronjob created but should have been prohibited' 152 | except: 153 | report_verified('Could not create cronjob in user project') 154 | pass 155 | 156 | priv_project_name = 'openshift-imperative-verify-test-project-scheduled-jobs' 157 | with temp_project(priv_project_name, adm=True): 158 | # In openshift-*, we should be able to create the cronjob. 
159 | report_progress('Creating cron job in privileged project') 160 | oc.create(cronjob) 161 | report_verified('Able to create cronjob in privileged project') 162 | 163 | 164 | def check_online_network_multitenant(): 165 | def create_test_project(suffix, port): 166 | project_name = 'imperative-verify-test-project-network-{}'.format(suffix) 167 | 168 | # Delete any existing resources 169 | oc.delete_project(project_name, ignore_not_found=True, grace_period=1) 170 | 171 | server_name = 'server-{}'.format(suffix) 172 | client_name = 'client-{}'.format(suffix) 173 | 174 | with oc.new_project(project_name): 175 | # Create a simple http server running in project-A 176 | # It will be exposed by a service and route of the same name 177 | report_progress("Creating server in: " + project_name) 178 | server_sel = oc.create( 179 | simple_http_server_resources(server_name, port, create_service=True, create_route=True) 180 | ) 181 | report_progress("Created: {}".format(server_sel.qnames())) 182 | report_progress("Waiting for resources readiness...") 183 | server_sel.narrow('pod').until_all(1, success_func=oc.status.is_pod_running) 184 | server_sel.narrow('route').until_all(1, success_func=oc.status.is_route_admitted) 185 | 186 | # Create a passive pod that blocks forever so we exec commands within it 187 | client_sel = oc.create( 188 | oc.build_pod_simple(client_name, image='python:3', command=['tail', '-f', '/dev/null'])) 189 | client_sel.until_all(1, success_func=oc.status.is_pod_running) 190 | 191 | server_pod = server_sel.narrow('pod').object() 192 | service = server_sel.narrow('service').object() 193 | route = server_sel.narrow('route').object() 194 | client_pod = client_sel.narrow('pod').object() 195 | 196 | report_progress('Ensure client pod can communicate to server pod IP in same namespace') 197 | client_pod.execute(cmd_to_exec=['curl', 'http://{}:{}'.format(server_pod.model.status.podIP, port)], 198 | auto_raise=True) 199 | 200 | report_progress('Ensure client pod can 
communicate to server service IP in same namespace') 201 | client_pod.execute(cmd_to_exec=['curl', 'http://{}:{}'.format(service.model.spec.clusterIP, port)], 202 | auto_raise=True) 203 | 204 | report_progress('Ensure client pod can communicate to server service DNS in same namespace') 205 | client_pod.execute(cmd_to_exec=['curl', 'http://{}:{}'.format(server_name, port)], 206 | auto_raise=True) 207 | 208 | report_progress('Ensure client pod can communicate to server route in same namespace') 209 | client_pod.execute(cmd_to_exec=['curl', 'http://{}'.format(route.model.spec.host)], 210 | auto_raise=True) 211 | 212 | # Return a selector for server resources and client resources 213 | return project_name, server_pod, service, route, client_pod 214 | 215 | port_a = 4444 216 | port_b = 4555 217 | 218 | # Create two projects, A and B. Both will self test to make sure they can communicate within 219 | # pods in the same namespace. 220 | proj_a_name, server_pod_a, service_a, route_a, client_pod_a = create_test_project('a', port_a) 221 | proj_b_name, server_pod_b, service_b, route_b, client_pod_b = create_test_project('b', port_b) 222 | 223 | report_progress('Ensure client pod A cannot communicate to server service in another namespace') 224 | assert client_pod_a.execute(cmd_to_exec=['nc', '-z', service_b.model.spec.clusterIP, port_b], 225 | auto_raise=False).status() != 0, 'Expected error 1' 226 | 227 | report_progress('Ensure client pod B cannot communicate to server service in another namespace') 228 | assert client_pod_b.execute(cmd_to_exec=['nc', '-z', service_a.model.spec.clusterIP, port_a], 229 | auto_raise=False).status() != 0, 'Expected error 2' 230 | 231 | report_progress('Ensure client pod A cannot communicate to server pod IP in another namespace') 232 | assert client_pod_a.execute(cmd_to_exec=['nc', '-z', server_pod_b.model.status.podIP, port_b], 233 | auto_raise=False).status() != 0, 'Expected error 1' 234 | 235 | report_progress('Ensure client pod B cannot 
communicate to server pod IP in another namespace') 236 | assert client_pod_b.execute(cmd_to_exec=['nc', '-z', server_pod_a.model.status.podIP, port_a], 237 | auto_raise=False).status() != 0, 'Expected error 1' 238 | 239 | report_progress('Ensure client pod A can communicate to server route in another namespace') 240 | client_pod_a.execute(cmd_to_exec=['curl', 'http://{}'.format(route_b.model.spec.host)]) 241 | 242 | report_progress('Ensure client pod B can communicate to server route in another namespace') 243 | client_pod_b.execute(cmd_to_exec=['curl', 'http://{}'.format(route_a.model.spec.host)]) 244 | 245 | report_progress("Deleting project: " + proj_a_name) 246 | oc.delete_project(proj_a_name, grace_period=1) 247 | 248 | report_progress("Deleting project: " + proj_b_name) 249 | oc.delete_project(proj_b_name, grace_period=1) 250 | 251 | report_verified("Network policy for multitenant seems solid!") 252 | 253 | 254 | if __name__ == '__main__': 255 | logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) 256 | 257 | parser = argparse.ArgumentParser(description='Run a series of tests against a cluster') 258 | parser.add_argument('-b', '--bastion', default=None, 259 | help='user@host, hostname, or IP on which to execute oc (oc is executed locally if not specified)', 260 | required=False) 261 | parser.add_argument('--insecure-skip-tls-verify', action='store_true', 262 | help='Skip TLS verify during oc interations (recommended when replacing api certs)') 263 | parser.set_defaults(insecure_skip_tls_verify=False) 264 | 265 | args = vars(parser.parse_args()) 266 | 267 | skip_tls_verify = args['insecure_skip_tls_verify'] 268 | 269 | if skip_tls_verify: 270 | oc.set_default_skip_tls_verify(True) 271 | 272 | bastion_hostname = args['bastion'] 273 | if not bastion_hostname: 274 | print('Running in local mode. 
Expecting "oc" in PATH') 275 | 276 | with oc.client_host(hostname=bastion_hostname, username="root", 277 | auto_add_host=True, load_system_host_keys=False): 278 | # Ensure tests complete within 30 minutes and track all oc invocations 279 | with oc.timeout(60*30), oc.tracking() as t: 280 | try: 281 | check_online_network_multitenant() 282 | check_prevents_cron_jobs() 283 | check_online_project_constraints() 284 | except: 285 | logging.fatal('Error occurred during tests') 286 | traceback.print_exc() 287 | # print out all oc interactions and do not redact secret information 288 | print("Tracking:\n{}\n\n".format(t.get_result().as_json(redact_streams=False))) 289 | 290 | -------------------------------------------------------------------------------- /examples/coverage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | from __future__ import absolute_import 5 | 6 | import openshift_client as oc 7 | from openshift_client import null, Missing, OpenShiftPythonException 8 | 9 | try: 10 | 11 | print("Projects created by users:", oc.selector("projects").narrow( 12 | lambda prj: prj.metadata.annotations["openshift.io/requester"] is not Missing 13 | ).qnames()) 14 | 15 | oc.selector("projects").narrow( 16 | # Eliminate any projects created by the system 17 | lambda prj: prj.metadata.annotations["openshift.io/requester"] is not Missing 18 | ).narrow( 19 | # Select from user projects any which violate privileged naming convention 20 | lambda prj: 21 | prj.metadata.qname == "openshift" or 22 | prj.metadata.qname.startswith("openshift-") or 23 | prj.metadata.qname == "kubernetes" or 24 | prj.metadata.qname.startswith("kube-") or 25 | prj.metadata.qname.startswith("kubernetes-") 26 | ).for_each( 27 | lambda prj: oc.error("Invalid project: %s" % prj.metadata.qname) 28 | ) 29 | 30 | with oc.timeout(5): 31 | success, obj = oc.selector("pods").until_any(lambda pod: pod.status.phase 
== "Succeeded") 32 | if success: 33 | print("Found one pod was successful: " + str(obj)) 34 | 35 | with oc.timeout(5): 36 | success, obj = oc.selector("pods").narrow("pod").until_any( 37 | lambda pod: pod.status.conditions.can_match({"type": "Ready", "status": False, "reason": "PodCompleted"})) 38 | if success: 39 | print("Found one pod was successful: " + str(obj)) 40 | 41 | with oc.project("myproject") as project: 42 | 43 | project.create_if_absent( 44 | { 45 | "apiVersion": "v1", 46 | "kind": "User", 47 | "fullName": "Jane Doe", 48 | "groups": null, 49 | "identities": [ 50 | "github:19783215" 51 | ], 52 | "metadata": { 53 | "name": "jane" 54 | } 55 | } 56 | ) 57 | 58 | project.create_if_absent( 59 | { 60 | "apiVersion": "v1", 61 | "kind": "User", 62 | "fullName": "John Doe", 63 | "groups": null, 64 | "identities": [ 65 | "github:19783216" 66 | ], 67 | "metadata": { 68 | "name": "john" 69 | } 70 | } 71 | ) 72 | 73 | pods = oc.selector("pod") 74 | print("Pods: " + str(pods.qnames())) 75 | 76 | users = oc.selector("user/john", "user/jane") 77 | 78 | print("Describing users:\n") 79 | users.describe() 80 | 81 | for user in users: 82 | print(str(user)) 83 | 84 | john = oc.selector("user/john") 85 | john.label({"mylabel": null}) # remove a label 86 | 87 | label_selector = oc.selector("users", labels={"mylabel": "myvalue"}) 88 | 89 | print("users with label step 1: " + str(label_selector.qnames())) 90 | 91 | john.label({"mylabel": "myvalue"}) # add the label back 92 | 93 | print("users with label step 2: " + str(label_selector.qnames())) 94 | 95 | assert(label_selector.qnames()[0] == u'users/john') 96 | 97 | users.label({"another_label": "another_value"}) 98 | 99 | john.object().patch({ 100 | "groups": null, 101 | "identities": [ 102 | "github: 19783215" 103 | ] 104 | }, 105 | ) 106 | 107 | # Unmarshal json into py objects 108 | user_objs = users.objects() 109 | 110 | print("Unmarshalled %d objects" % len(user_objs)) 111 | 112 | for user in user_objs: 113 | if 
user.metadata.labels.another_label is not Missing: 114 | print("Value of label: " + user.metadata.labels.another_label) 115 | if user.notthere.dontcare.wontbreak is not Missing: 116 | print("Should see this, but also shouldn't see exception") 117 | 118 | project.delete_if_present("user/bark", "user/bite") 119 | 120 | bark_obj = { 121 | "apiVersion": "v1", 122 | "kind": "User", 123 | "fullName": "Bark Doe", 124 | "groups": null, 125 | "identities": [ 126 | "github:9999" 127 | ], 128 | "metadata": { 129 | "name": "bark" 130 | } 131 | } 132 | 133 | bite_obj = { 134 | "apiVersion": "v1", 135 | "kind": "User", 136 | "fullName": "Bite Doe", 137 | "groups": null, 138 | "identities": [ 139 | "github:10000" 140 | ], 141 | "metadata": { 142 | "name": "bite" 143 | } 144 | } 145 | 146 | bark_bite_sel = oc.create([bark_obj, bite_obj]) 147 | 148 | print("How were they created?\n" + str(bark_bite_sel)) 149 | 150 | try: 151 | oc.create(bark_obj) # Should create an error 152 | assert False 153 | except OpenShiftPythonException as create_err: 154 | print("What went wrong?: " + str(create_err)) 155 | 156 | bark_bite_sel.until_any(lambda obj: obj.metadata.qname == "bite") 157 | 158 | except OpenShiftPythonException as e: 159 | print("An exception occurred: " + str(e)) 160 | -------------------------------------------------------------------------------- /examples/custom_apiobjects.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import openshift_client as oc 4 | from openshift_client import APIObject 5 | 6 | 7 | class MyCustomPodClass(APIObject): 8 | def __init__(self, *args, **kwargs): 9 | super().__init__(*args, **kwargs) 10 | 11 | def super_cool_awesomeness(self): 12 | print('Calling: super_cool_awesomeness() on pod: {}/{}'.format(self.model.metadata.namespace, self.model.metadata.name)) 13 | 14 | 15 | if __name__ == '__main__': 16 | with oc.client_host(): 17 | with oc.project('openshift-monitoring'): 18 | 19 | 
objs = oc.selector('pods', labels={'app': 'prometheus'}).objects(cls=MyCustomPodClass) 20 | 21 | for obj in objs: 22 | print(type(obj)) 23 | obj.super_cool_awesomeness() 24 | -------------------------------------------------------------------------------- /examples/dump.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import absolute_import 4 | import openshift_client as oc 5 | 6 | if __name__ == '__main__': 7 | with oc.client_host(): 8 | oc.dumpinfo_system('dumps/', num_combined_journal_entries=50, num_critical_journal_entries=50, logs_since='1h', logs_tail=500) 9 | -------------------------------------------------------------------------------- /examples/ephemeral_project.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | 5 | import argparse 6 | import logging 7 | import traceback 8 | import openshift_client as oc 9 | from openshift_client import OpenShiftPythonException 10 | from openshift_client.decorators import ephemeral_project 11 | 12 | logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 13 | logger = logging.getLogger('EphemeralProject') 14 | logger.setLevel(logging.INFO) 15 | 16 | 17 | @ephemeral_project 18 | def run_pods(pod_count=5, *, project_name=None): 19 | logger.info('Running in namespace: {}'.format(project_name)) 20 | 21 | for i in range(pod_count): 22 | pod_name = 'pod-{}'.format(i) 23 | logger.info('Creating: {}'.format(pod_name)) 24 | 25 | pod_selector = oc.create(oc.build_pod_simple(pod_name, image='python:3', command=['tail', '-f', '/dev/null'])) 26 | pod_selector.until_all(1, success_func=oc.status.is_pod_running) 27 | 28 | pods = oc.selector('pods').objects() 29 | logger.info('Found {} pods'.format(len(pods))) 30 | assert len(pods) == pod_count 31 | 32 | 33 | if __name__ == '__main__': 34 | parser = 
argparse.ArgumentParser(description='Demonstrate the ephemeral_project decorator') 35 | parser.add_argument('-b', '--bastion', default=None, 36 | help='user@host, hostname, or IP on which to execute oc (oc is executed locally if not specified)', 37 | required=False) 38 | parser.add_argument('--insecure-skip-tls-verify', action='store_true', 39 | help='Skip TLS verify during oc interactions (recommended when replacing api certs)') 40 | parser.set_defaults(insecure_skip_tls_verify=False) 41 | 42 | params = vars(parser.parse_args()) 43 | 44 | skip_tls_verify = params['insecure_skip_tls_verify'] 45 | 46 | if skip_tls_verify: 47 | oc.set_default_skip_tls_verify(True) 48 | 49 | bastion_hostname = params['bastion'] 50 | if not bastion_hostname: 51 | logging.info('Running in local mode. Expecting "oc" in PATH') 52 | 53 | with oc.client_host(hostname=bastion_hostname, username="root", auto_add_host=True, load_system_host_keys=False): 54 | # Ensure tests complete within 5 minutes and track all oc invocations 55 | with oc.timeout(60 * 5), oc.tracking() as t: 56 | try: 57 | run_pods() 58 | except (ValueError, OpenShiftPythonException, Exception): 59 | # Print out exception stack trace via the traceback module 60 | logger.info('Traceback output:\n{}\n'.format(traceback.format_exc())) 61 | 62 | # Print out all oc interactions and do not redact secret information 63 | logger.info("OC tracking output:\n{}\n".format(t.get_result().as_json(redact_streams=False))) 64 | -------------------------------------------------------------------------------- /examples/etcd_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import openshift_client as oc 4 | 5 | if __name__ == '__main__': 6 | options = { 7 | 'as': 'system:admin', 8 | } 9 | 10 | with oc.client_host(): 11 | with oc.timeout(60 * 5): 12 | with oc.options(options): 13 | with oc.project("openshift-etcd"): 14 | pods = oc.selector("pods", labels={'app': 
'etcd'}).objects() 15 | print(f'Found: {len(pods)} pods') 16 | result = pods[0].execute(cmd_to_exec=['etcdctl', 'endpoint', 'status', '--cluster', '-w', 'table']) 17 | print(f'Result:\n{result.out()}') 18 | -------------------------------------------------------------------------------- /examples/exception_tracking.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | 5 | import argparse 6 | import logging 7 | import traceback 8 | import openshift_client as oc 9 | from openshift_client import OpenShiftPythonException 10 | 11 | logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') 12 | logger = logging.getLogger('ExceptionTracking') 13 | logger.setLevel(logging.INFO) 14 | 15 | if __name__ == '__main__': 16 | parser = argparse.ArgumentParser(description='Demonstrate oc tracking') 17 | parser.add_argument('-b', '--bastion', default=None, 18 | help='user@host, hostname, or IP on which to execute oc (oc is executed locally if not specified)', 19 | required=False) 20 | parser.add_argument('--insecure-skip-tls-verify', action='store_true', 21 | help='Skip TLS verify during oc interations (recommended when replacing api certs)') 22 | parser.set_defaults(insecure_skip_tls_verify=False) 23 | 24 | args = vars(parser.parse_args()) 25 | 26 | skip_tls_verify = args['insecure_skip_tls_verify'] 27 | 28 | if skip_tls_verify: 29 | oc.set_default_skip_tls_verify(True) 30 | 31 | bastion_hostname = args['bastion'] 32 | if not bastion_hostname: 33 | logging.info('Running in local mode. 
Expecting "oc" in PATH') 34 | 35 | with oc.client_host(hostname=bastion_hostname, username="root", auto_add_host=True, load_system_host_keys=False): 36 | # Ensure tests complete within 30 minutes and track all oc invocations 37 | with oc.timeout(60 * 30), oc.tracking() as t: 38 | try: 39 | with oc.project('default'): 40 | bc = oc.selector('bc/does-not-exist') 41 | bc.start_build() 42 | except (ValueError, OpenShiftPythonException, Exception): 43 | # Print out exception stack trace via the traceback module 44 | logger.info('Traceback output:\n{}\n'.format(traceback.format_exc())) 45 | 46 | # Print out all oc interactions and do not redact secret information 47 | logger.info("OC tracking output:\n{}\n".format(t.get_result().as_json(redact_streams=False))) 48 | -------------------------------------------------------------------------------- /examples/login.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import argparse 4 | import traceback 5 | 6 | import openshift_client as oc 7 | from openshift_client import OpenShiftPythonException, Context 8 | 9 | if __name__ == '__main__': 10 | parser = argparse.ArgumentParser(description='OpenShift Client Login Example') 11 | parser.add_argument('-k', '--kubeconfig', help='The kubeconfig to create', required=True) 12 | parser.add_argument('-s', '--server', help='The API Server to communicate with', required=True) 13 | parser.add_argument('-t', '--token', help='The login token', required=True) 14 | args = vars(parser.parse_args()) 15 | 16 | my_context = Context() 17 | my_context.token = args["token"] 18 | my_context.api_server = args["server"] 19 | my_context.kubeconfig_path = args["kubeconfig"] 20 | 21 | with oc.timeout(60 * 30), oc.tracking() as t, my_context: 22 | if oc.get_config_context() is None: 23 | print(f'Current context not set! 
Logging into API server: {my_context.api_server}\n') 24 | try: 25 | oc.invoke('login') 26 | except OpenShiftPythonException: 27 | print('error occurred logging into API Server') 28 | traceback.print_exc() 29 | print(f'Tracking:\n{t.get_result().as_json(redact_streams=False)}\n\n') 30 | exit(1) 31 | 32 | print(f'Current context: {oc.get_config_context()}') 33 | 34 | try: 35 | pods = oc.selector('pods').objects() 36 | print(f'Found: {len(pods)} pods') 37 | except OpenShiftPythonException: 38 | print('Error occurred getting pods') 39 | traceback.print_exc() 40 | print(f'Tracking:\n{t.get_result().as_json(redact_streams=False)}\n\n') 41 | -------------------------------------------------------------------------------- /examples/modify_and_apply.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import argparse 4 | import json 5 | import logging 6 | import sys 7 | import traceback 8 | 9 | import openshift_client as oc 10 | from openshift_client import OpenShiftPythonException 11 | from openshift_client.decorators import ephemeral_project 12 | 13 | logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(message)s') 14 | logger = logging.getLogger('ModifyAndApply') 15 | 16 | 17 | def validate_server_connection(ctx): 18 | with oc.options(ctx), oc.tracking(), oc.timeout(60): 19 | try: 20 | username = oc.whoami() 21 | version = oc.get_server_version() 22 | logger.debug(f'Connected to APIServer running version: {version}, as: {username}') 23 | except (ValueError, OpenShiftPythonException, Exception) as e: 24 | logger.error(f"Unable to verify cluster connection using context: \"{ctx['context']}\"") 25 | raise e 26 | 27 | 28 | def test_update_dynamic_keyword_args(obj): 29 | def update_dynamic_keyword_args(apiobj, **kwargs): 30 | logger.info(f'Updating object: {apiobj.name()} with: {json.dumps(kwargs, indent=4, default=str)}') 31 | return False 32 | 33 | r, success = 
obj.modify_and_apply(update_dynamic_keyword_args, retries=0) 34 | assert len(r.actions()) == 0 35 | assert success == False 36 | 37 | r, success = obj.modify_and_apply(update_dynamic_keyword_args, retries=0, param1='foo') 38 | assert len(r.actions()) == 0 39 | assert success == False 40 | 41 | r, success = obj.modify_and_apply(update_dynamic_keyword_args, retries=0, param1='foo', param2='bar') 42 | assert len(r.actions()) == 0 43 | assert success == False 44 | 45 | r, success = obj.modify_and_apply(update_dynamic_keyword_args, retries=0, random1='foo', modnar1='bar') 46 | assert len(r.actions()) == 0 47 | assert success == False 48 | 49 | 50 | def test_update_named_keyword_args(obj): 51 | def update_named_keyword_args(apiobj, param1=None, param2=None): 52 | logger.info(f'Updating object: {apiobj.name()} with "param1={param1}" and "param2={param2}"') 53 | return False 54 | 55 | r, success = obj.modify_and_apply(update_named_keyword_args, retries=0) 56 | assert len(r.actions()) == 0 57 | assert success == False 58 | 59 | r, success = obj.modify_and_apply(update_named_keyword_args, retries=0, param1='foo') 60 | assert len(r.actions()) == 0 61 | assert success == False 62 | 63 | r, success = obj.modify_and_apply(update_named_keyword_args, retries=0, param1='foo', param2='bar') 64 | assert len(r.actions()) == 0 65 | assert success == False 66 | 67 | try: 68 | obj.modify_and_apply(update_named_keyword_args, retries=0, param3='bip') 69 | except TypeError as e: 70 | if 'got an unexpected keyword argument' in e.__str__(): 71 | logger.info(f'Unknown parameter specified: {e}') 72 | else: 73 | raise e 74 | 75 | 76 | @ephemeral_project 77 | def run(*, project_name=None): 78 | logger.info('Running in namespace: {}'.format(project_name)) 79 | obj = oc.selector('serviceaccount/default').object() 80 | test_update_named_keyword_args(obj) 81 | test_update_dynamic_keyword_args(obj) 82 | 83 | 84 | if __name__ == '__main__': 85 | parser = argparse.ArgumentParser(description='Backup 
namespace resources') 86 | 87 | config_group = parser.add_argument_group('Configuration Options') 88 | config_group.add_argument('-v', '--verbose', help='Enable verbose output', action='store_true') 89 | 90 | ocp_group = parser.add_argument_group('Openshift Cluster Configuration Options') 91 | ocp_group.add_argument('-c', '--context', help='The OC context to use', default=None) 92 | ocp_group.add_argument('-k', '--kubeconfig', help='The kubeconfig to use (default is "~/.kube/config")', default=None) 93 | ocp_group.add_argument('-n', '--namespace', help='The namespace to process', default=None) 94 | 95 | args = vars(parser.parse_args()) 96 | 97 | if args['verbose']: 98 | logger.setLevel(logging.DEBUG) 99 | 100 | # Validate the connection to the respective cluster 101 | context = {} 102 | if args['context'] is not None: 103 | context.update({'context': args['context']}) 104 | 105 | if args['kubeconfig'] is not None: 106 | context.update({'kubeconfig': args['kubeconfig']}) 107 | 108 | validate_server_connection(context) 109 | 110 | with oc.client_host(): 111 | with oc.timeout(60 * 10), oc.tracking() as t: 112 | with oc.options(context): 113 | try: 114 | run() 115 | except (ValueError, OpenShiftPythonException, Exception): 116 | # Print out exception stack trace via the traceback module 117 | logger.info('Traceback output:\n{}\n'.format(traceback.format_exc())) 118 | 119 | # Print out all oc interactions and do not redact secret information 120 | logger.info("OC tracking output:\n{}\n".format(t.get_result().as_json(redact_streams=False))) 121 | -------------------------------------------------------------------------------- /examples/multiple_contexts.py: -------------------------------------------------------------------------------- 1 | import openshift_client as oc 2 | 3 | if __name__ == '__main__': 4 | context1 = { 5 | 'context': 'cluster1', 6 | } 7 | context2 = { 8 | 'context': 'cluster2', 9 | } 10 | with oc.client_host(): 11 | with oc.timeout(60 * 5): 12 | for 
context in [context1, context2]: 13 | with oc.options(context): 14 | with oc.project('my-project'): 15 | jobs_list = oc.selector('pods').objects() 16 | print(f'Found: {len(jobs_list)} pods in: {context["context"]}') 17 | -------------------------------------------------------------------------------- /examples/oc_action.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import print_function 4 | from __future__ import absolute_import 5 | 6 | from openshift_client import Result 7 | import openshift_client as oc 8 | 9 | ''' 10 | This example illustrates how you can utilize the "oc_action" method to perform any "oc" operations that are not 11 | explicitly supported by the openshift-client-python library. 12 | ''' 13 | if __name__ == '__main__': 14 | with oc.tracking() as tracker: 15 | try: 16 | r = Result("run-test") 17 | r.add_action(oc.oc_action(oc.cur_context(), "run", cmd_args=["nginx", "--image=nginx", "--dry-run=client", None])) 18 | r.fail_if("Unable to run nginx (dry-run)") 19 | print("Output: {}".format(r.out().strip())) 20 | except Exception as e: 21 | print(e) 22 | 23 | print("Tracker: {}".format(tracker.get_result())) 24 | -------------------------------------------------------------------------------- /examples/quotas.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import openshift_client as oc 4 | 5 | if __name__ == '__main__': 6 | with oc.client_host(): 7 | with oc.timeout(60 * 5): 8 | with oc.project('openshift-client-python'): 9 | resource_quotas = oc.selector('resourcequotas').objects() 10 | print(f'Found: {len(resource_quotas)} ResourceQuotas') 11 | 12 | for resource_quota in resource_quotas: 13 | print(f'Processing ResourceQuota: {resource_quota.name()}') 14 | for key in resource_quota.model.spec.hard: 15 | print(f' - {key}: {resource_quota.model.spec.hard[key]}') 16 | 17 | limit_ranges = 
oc.selector('limitranges').objects() 18 | print(f'\nFound: {len(limit_ranges)} LimitRanges') 19 | 20 | for limit_range in limit_ranges: 21 | print(f'Processing LimitRange: {limit_range.name()}') 22 | for limit in limit_range.model.spec.limits: 23 | print(f' Type: {limit.type}') 24 | print(f' Default CPU Limit: {limit.default.cpu}') 25 | print(f' Default Memory Limit: {limit.default.memory}') 26 | print(f' Default CPU Request: {limit.defaultRequest.cpu}') 27 | print(f' Default Memory Request: {limit.defaultRequest.memory}') 28 | 29 | pods = oc.selector('pods').objects() 30 | print(f'\nFound: {len(pods)} Pods') 31 | 32 | for pod in pods: 33 | print(f'Processing Pod: {pod.name()}') 34 | for container in pod.model.spec.containers: 35 | print(f' Processing Container: {container.name}') 36 | print(f' CPU Limit: {container.resources.limits.cpu}') 37 | print(f' CPU Request: {container.resources.requests.cpu}') 38 | print(f' Memory Limit: {container.resources.limits.memory}') 39 | print(f' Memory Request: {container.resources.requests.memory}') 40 | -------------------------------------------------------------------------------- /examples/report.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import absolute_import 4 | import openshift_client as oc 5 | 6 | if __name__ == '__main__': 7 | with oc.client_host(): 8 | with oc.project('openshift-monitoring'): 9 | oc.selector(['dc', 'build', 'configmap']).print_report() 10 | 11 | -------------------------------------------------------------------------------- /examples/simple.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from __future__ import absolute_import 4 | from __future__ import print_function 5 | 6 | import openshift_client as oc 7 | from openshift_client import OpenShiftPythonException 8 | 9 | if __name__ == '__main__': 10 | with oc.tracking() as tracker: 11 | try: 
12 | print('Current server: {}'.format(oc.api_url())) 13 | print('Current project: {}'.format(oc.get_project_name())) 14 | print('Current user: {}'.format(oc.whoami())) 15 | except OpenShiftPythonException as e: 16 | print('Error acquiring details about project/user: {}'.format(e)) 17 | 18 | # Print out details about the invocations made within this context. 19 | print(tracker.get_result()) 20 | -------------------------------------------------------------------------------- /examples/templates.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import openshift_client as oc 4 | 5 | ''' 6 | This example will scan all the templates, on the cluster, and look specifically for the openshift/nginx-example 7 | template. If the template is located, it clears the namespace (to prevent an error when calling 'oc process'), 8 | updates any template parameter(s), processes the template, and then creates the objects in the current namespace. 
9 | ''' 10 | if __name__ == '__main__': 11 | with oc.client_host(): 12 | templates = oc.selector('templates', all_namespaces=True) 13 | 14 | for template in templates.objects(): 15 | if template.model.metadata.namespace == 'openshift' and template.model.metadata.name == 'nginx-example': 16 | template.model.metadata.namespace = '' 17 | 18 | obj = oc.APIObject(dict_to_model=template.as_dict()) 19 | 20 | parameters = { 21 | 'NAME': 'my-nginx', 22 | } 23 | 24 | processed_template = obj.process(parameters=parameters) 25 | obj_sel = oc.create(processed_template) 26 | 27 | for obj in obj_sel.objects(): 28 | print('Created: {}/{}'.format(obj.model.kind, obj.model.metadata.name)) 29 | print(obj.as_json(indent=4)) 30 | -------------------------------------------------------------------------------- /hack/verify-ansible-module.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | 7 | TEMP_DIR="$( mktemp -d )" 8 | OUTPUT_DIR="${ARTIFACT_DIR:=${TEMP_DIR}}" 9 | echo -e "Artifacts will be written to: ${OUTPUT_DIR}" 10 | 11 | BASE_DIR="$( readlink -e $( dirname "${BASH_SOURCE[0]}" )/..)" 12 | 13 | echo -e "\nVerifying ansible module" 14 | 15 | cd ${BASE_DIR}/ansible 16 | 17 | ./rebuild_module.sh 18 | git diff --exit-code rebuild_module.digest || (echo 'You need to run ansible/rebuild_module.sh and include changes in this PR' && exit 1) 19 | 20 | exit 0 21 | -------------------------------------------------------------------------------- /images/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.ci.openshift.org/origin/4.16:cli as cli 2 | 3 | FROM centos:stream9 4 | LABEL maintainer="brawilli@redhat.com" 5 | 6 | COPY --from=cli /usr/bin/oc /usr/bin/ 7 | 8 | ADD . 
/openshift-client-python 9 | 10 | RUN yum install -y python3 python3-pip git diffutils && python3 -m pip install --upgrade pip && python3 -m pip install -r /openshift-client-python/requirements.txt 11 | 12 | ENV PYTHONPATH=/openshift-client-python/packages:$PYTHONPATH PYTHONUNBUFFERED=1 13 | 14 | WORKDIR /openshift-client-python 15 | 16 | ENTRYPOINT ["/usr/bin/bash"] 17 | -------------------------------------------------------------------------------- /jobs/ci/pr.groovy: -------------------------------------------------------------------------------- 1 | 2 | node('bastion2') { 3 | stage('repo-setup') { 4 | sh "rm -rf git" 5 | dir('git') { 6 | dir('openshift-client-python') { 7 | // This will also create the git/openshift-client-python directory 8 | checkout scm 9 | env.PYTHONPATH = "${pwd()}/packages" 10 | ocp_dir = pwd() 11 | } 12 | } 13 | } 14 | 15 | 16 | stage('ansible') { 17 | dir("${ocp_dir}/ansible") { 18 | sh "./rebuild_module.sh" 19 | echo "Verifying that you submitted your PR after running ./rebuild_module.sh" 20 | sh "git diff --exit-code rebuild_module.digest || (echo 'You need to run ansible/rebuild_module.sh and include changes in this PR' && exit 1)" 21 | } 22 | } 23 | 24 | stage('unittests') { 25 | dir("${ocp_dir}") { 26 | echo "Running run tests" 27 | sh "./run_unittests.sh" 28 | } 29 | } 30 | 31 | } 32 | -------------------------------------------------------------------------------- /lambda/model/python/model.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import six 3 | from six.moves import range 4 | 5 | 6 | class OpenShiftPythonException(Exception): 7 | 8 | def __init__(self, msg, result=None, **kwargs): 9 | super(self.__class__, self).__init__(msg) 10 | self.msg = msg 11 | self.result = result 12 | self.kwargs = kwargs 13 | 14 | def attributes(self): 15 | return dict(self.kwargs) 16 | 17 | def get_result(self): 18 | """ 19 | :return: Returns the Result object 
class ModelError(Exception):
    """Raised when an invalid operation is attempted against a model branch."""

    def __init__(self, msg, **kwargs):
        super(self.__class__, self).__init__(msg)
        self.msg = msg
        self.kwargs = kwargs


class MissingModel(dict):
    """
    Stands in for any model attribute/key that is not defined. A missing
    branch is false-y and empty, absorbs attribute/item reads (returning
    itself so chained lookups like `model.a.b.c` never raise), and rejects
    any attempt to write or delete through it.
    """

    def __init__(self):
        super(self.__class__, self).__init__()

    def __getattr__(self, attr):
        # Any attribute of a missing branch is also missing.
        return self

    def __setattr__(self, key, value):
        raise ModelError("Invalid attempt to set key(%s) in missing branch of model" % key)

    def __delattr__(self, key):
        raise ModelError("Invalid attempt to delete key(%s) in missing branch of model" % key)

    def __getitem__(self, attr):
        return self

    def __setitem__(self, key, value):
        raise ModelError("Invalid attempt to set key(%s) in missing branch of model" % key)

    def __delitem__(self, key):
        raise ModelError("Invalid attempt to delete key(%s) in missing branch of model" % key)

    # Express false-y
    def __bool__(self):
        return False

    # Express false-y
    def __len__(self):
        return 0

    def __str__(self):
        return "(MissingModelBranch)"

    def __repr__(self):
        return "(MissingModelBranch)"

    # Arithmetic on a missing branch yields the missing branch itself.
    # __div__ only exists on Python 2; __truediv__ is its Python 3
    # counterpart (without it, `Missing / x` raised TypeError on py3,
    # breaking the "absorb everything" contract of this type).
    def __div__(self, other):
        return self

    def __truediv__(self, other):
        return self

    def __add__(self, other):
        return self

    def __sub__(self, other):
        return self

    def __mul__(self, other):
        return self

    def can_match(self, *vals):
        """A missing branch never matches anything."""
        return False


# Singleton which indicates if any model attribute was not defined
Missing = MissingModel()
def to_model_or_val(v, case_insensitive=False):
    """
    Wrap a raw value for storage in a model: dicts become Model, lists become
    ListModel, existing model types and all other values pass through untouched.
    """
    if isinstance(v, (ListModel, Model)):
        return v
    if isinstance(v, list):
        return ListModel(v, case_insensitive=case_insensitive)
    if isinstance(v, dict):
        return Model(v, case_insensitive=case_insensitive)
    return v


def _element_can_match(master, test, case_insensitive=False):
    """
    Answers whether `test` is matched by (i.e. is a subset of) `master`.
    Primitives compare by equality with a string-form fallback; dicts and
    lists recurse via the subset helpers. Raises ModelError for types this
    matcher does not understand.
    """
    if master is Missing:
        return False

    # None only matches None.
    if master is None or test is None:
        return master is test

    # Normalize both sides: str -> unicode, optionally lowercased.
    if isinstance(master, str):
        master = six.text_type(master)
        if case_insensitive:
            master = master.lower()

    if isinstance(test, str):
        test = six.text_type(test)
        if case_insensitive:
            test = test.lower()

    for prim in (bool, int, six.text_type, float):
        if isinstance(master, prim):
            # The str() fallback allows cross-type matches like 5 vs "5".
            return master == test or str(master) == str(test)

    if isinstance(master, dict):
        return isinstance(test, dict) and _dict_is_subset(master, test, case_insensitive=case_insensitive)

    if isinstance(master, list):
        return isinstance(test, list) and _list_is_subset(master, test, case_insensitive=case_insensitive)

    raise ModelError("Don't know how to compare %s and %s" % (str(type(master)), str(type(test))))


def _element_in_list(master, e, case_insensitive=False):
    """True if any element of `master` can match `e`."""
    return any(_element_can_match(m, e, case_insensitive=case_insensitive) for m in master)


def _list_is_subset(master, test, case_insensitive=False):
    """True if every element of `test` can match some element of `master`."""
    return all(_element_in_list(master, e, case_insensitive=case_insensitive) for e in test)
case_insensitive: 167 | k = k.lower() 168 | m = master.get(k, Missing) 169 | if not _element_can_match(m, v, case_insensitive=case_insensitive): 170 | return False 171 | 172 | return True 173 | 174 | 175 | class ListModel(list): 176 | 177 | def __init__(self, list_to_model, case_insensitive=False): 178 | super(ListModel, self).__init__() 179 | self.__case_insensitive = case_insensitive 180 | if list_to_model is not None: 181 | self.extend(list_to_model) 182 | 183 | def __setitem__(self, key, value): 184 | super(self.__class__, self).__setitem__(key, value) 185 | 186 | def __delitem__(self, key): 187 | super(self.__class__, self).__delitem__(key) 188 | 189 | def __getitem__(self, index): 190 | if super(self.__class__, self).__len__() > index: 191 | v = super(self.__class__, self).__getitem__(index) 192 | if isinstance(v, Model): 193 | return v 194 | v = to_model_or_val(v, case_insensitive=self.__case_insensitive) 195 | self.__setitem__(index, v) 196 | return v 197 | 198 | # Otherwise, trigger out of bounds exception 199 | return super(self.__class__, self).__getitem__(index) 200 | 201 | def __iter__(self): 202 | for i in range(0, super(self.__class__, self).__len__()): 203 | yield self[i] 204 | 205 | def _primitive(self): 206 | """ 207 | :return: Returns the ListModel as a python list 208 | :rtype: list 209 | """ 210 | l = [] 211 | for e in self: 212 | l.append(e) 213 | return l 214 | 215 | def can_match(self, list_or_entry): 216 | """ 217 | Answers whether this list is a subset of the specified list. If the argument is not a list, 218 | it placed into one for comparison purposes. 219 | Elements of the argument list can be primitives, lists, or dicts. In the case of non-primitives, the list or 220 | dicts must ultimately be subsets of at least one element in the receiver list. 221 | :param list_or_entry: The list to compare or a primitive/dict that must exist in the receiver's list. 222 | :return: Returns true if all of the elements specify can match (i.e. 
are subsets of) elements of this list. 223 | """ 224 | if not isinstance(list_or_entry, (list, tuple, ListModel)): 225 | # If we were not passed a list, turn it into one 226 | list_or_entry = [list_or_entry] 227 | 228 | return _list_is_subset(self, list_or_entry, case_insensitive=self.__case_insensitive) 229 | 230 | 231 | class Model(dict): 232 | 233 | def __init__(self, dict_to_model=None, case_insensitive=False): 234 | super(Model, self).__init__() 235 | 236 | self.__case_insensitive = case_insensitive 237 | 238 | if dict_to_model is not None: 239 | for k, v in dict_to_model.items(): 240 | if self.__case_insensitive: 241 | k = k.lower() 242 | self[k] = to_model_or_val(v, case_insensitive=case_insensitive) 243 | 244 | def __getattr__(self, attr): 245 | 246 | if isinstance(attr, six.string_types): 247 | if attr.startswith('_Model__'): # e.g. _Model__case_insensitive 248 | raise AttributeError 249 | 250 | if self.__case_insensitive: 251 | attr = attr.lower() 252 | 253 | if super(Model, self).__contains__(attr): 254 | v = super(self.__class__, self).get(attr) 255 | if isinstance(v, Model) or isinstance(v, ListModel): 256 | return v 257 | v = to_model_or_val(v, self.__case_insensitive) 258 | self.__setattr__(attr, v) 259 | return v 260 | else: 261 | return Missing 262 | 263 | def __setattr__(self, key, value): 264 | if key.startswith('_Model__'): # e.g. 
_Model__case_insensitive 265 | return super(Model, self).__setattr__(key, value) 266 | 267 | if self.__case_insensitive: 268 | key = key.lower() 269 | 270 | self.__setitem__(key, value) 271 | 272 | def __getitem__(self, key): 273 | return self.__getattr__(key) 274 | 275 | def __setitem__(self, key, value): 276 | super(Model, self).__setitem__(key, to_model_or_val(value, case_insensitive=self.__case_insensitive)) 277 | 278 | def __delitem__(self, key): 279 | if self.__is_case_sensitive__(): 280 | key = key.lower() 281 | super(Model, self).__delitem__(key) 282 | 283 | def _primitive(self): 284 | """ 285 | :return: Returns the Model as a python dict 286 | :rtype: dict 287 | """ 288 | d = {} 289 | for k, v in six.iteritems(self): 290 | if isinstance(v, Model) or isinstance(v, ListModel): 291 | v = v._primitive() 292 | d[k] = v 293 | return d 294 | 295 | def can_match(self, val): 296 | """ 297 | Answers whether this Model matches all elements of the argument. 298 | :param val: A dict or Model with elements set that must be found within this model. 299 | :return: Returns true if all of the elements can match (i.e. are subsets of) elements of this list. 
300 | """ 301 | return _dict_is_subset(self, val, case_insensitive=self.__case_insensitive) 302 | -------------------------------------------------------------------------------- /lambda/package.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | F=$(pwd)/lambda-layer-model.zip 6 | rm -f $F 7 | 8 | pushd model 9 | zip -r $F python/* 10 | 11 | -------------------------------------------------------------------------------- /packages/monitoring.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from __future__ import print_function 4 | from __future__ import absolute_import 5 | import openshift_client as oc 6 | from openshift_client import Missing 7 | import traceback 8 | 9 | with oc.tracking() as t: 10 | with oc.client_host(hostname="18.222.71.125", username="root", auto_add_host=True): # free-stg 11 | with oc.project("openshift-monitoring"): 12 | try: 13 | 14 | result = oc.selector('pod/alertmanager-main-0').object().execute(['cat'], 15 | container_name='alertmanager', 16 | stdin='stdin for cat') 17 | print(result.out()) 18 | exit(0) 19 | 20 | cr_rules = oc.selector("prometheusrules") 21 | print("CR has the following rule sets: {}".format(cr_rules.qnames())) 22 | 23 | if cr_rules.object().model.metadata.labels.cr_generated is Missing: 24 | print("Rule was not generated by CR") 25 | 26 | oc.selector('pods').annotate(annotations={ 27 | 'cr_annotation_test': None, 28 | }) 29 | 30 | oc.selector('node/pod_ip-172-31-79-85.us-east-2.compute.internal').object().patch({ 31 | 'metadata': { 32 | 'annotations': { 33 | 'cr_patch': 'yes' 34 | } 35 | } 36 | }, strategy="strategic", cmd_args=['-o=yaml']) 37 | 38 | # cr_rules.object().label({ 39 | # 'cr_test': 'yes', 40 | # }) 41 | 42 | print("Result:\n{}".format(t.get_result())) 43 | except Exception: 44 | traceback.print_exc() 45 | 
-------------------------------------------------------------------------------- /packages/openshift_client/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .context import * 4 | from .base_verbs import * 5 | from .model import OpenShiftPythonException 6 | from .model import Model, Missing 7 | from .selector import * 8 | from .apiobject import * 9 | from . import naming 10 | from . import status 11 | from . import config 12 | from .ansible import ansible 13 | 14 | # Single source for module version 15 | __VERSION__ = '2.0.5' 16 | 17 | null = None # Allow scripts to specify null in object definitions 18 | 19 | 20 | # Allows modules to trigger errors 21 | def error(msg, **kwargs): 22 | raise OpenShiftPythonException(msg, **kwargs) 23 | 24 | 25 | # Convenience method for accessing the module version 26 | def get_module_version(): 27 | return __VERSION__ 28 | -------------------------------------------------------------------------------- /packages/openshift_client/action.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import subprocess 4 | import time 5 | import socket 6 | import json 7 | import os 8 | import re 9 | import datetime 10 | import traceback 11 | import six 12 | 13 | from .util import TempFile, is_collection_type 14 | 15 | 16 | # Three base64 encoded components, '.' delimited is a token. First, find any such match. 
17 | # You can find examples of these tokens with `oc sa get-token ` 18 | token_regex = re.compile(r"[a-zA-Z0-9+/_\-]{10,}\.[a-zA-Z0-9+/_\-]{100,}\.[a-zA-Z0-9+/_\-]{20,}") 19 | 20 | # Find any semblance of kind..Secret 21 | secret_regex = re.compile(r"\W*kind\W+Secret\W*", re.IGNORECASE) 22 | 23 | # OAuthAccessTokens are 43 char base64 encoded strings 24 | oauth_regex = re.compile(r"[a-zA-Z0-9+/_\-]{43}") 25 | 26 | 27 | def _is_sensitive(content_str): 28 | 29 | if token_regex.findall(content_str): 30 | return True 31 | 32 | if secret_regex.findall(content_str): 33 | return True 34 | 35 | if oauth_regex.findall(content_str): 36 | return True 37 | 38 | return False 39 | 40 | 41 | def _redaction_string(): 42 | return u'**REDACTED**' 43 | 44 | 45 | def _redact_content(content_str): 46 | 47 | content_str = token_regex.sub(_redaction_string(), content_str, 0) 48 | content_str = oauth_regex.sub(_redaction_string(), content_str, 0) 49 | 50 | if secret_regex.match(content_str): 51 | return 'Secret: {}'.format(_redaction_string()) 52 | 53 | return content_str 54 | 55 | 56 | class Action(object): 57 | 58 | def __init__(self, verb, cmd_list, out, err, references, status, stdin_str=None, 59 | last_attempt=True, internal=False, elapsed_time=0, 60 | timeout=False, 61 | exec_time=0): 62 | self.status = status 63 | self.verb = verb 64 | self.cmd = cmd_list 65 | self.out = out or '' 66 | self.err = err or '' 67 | self.stdin_str = stdin_str 68 | self.references = references 69 | self.timeout = timeout 70 | self.last_attempt = last_attempt 71 | self.internal = internal 72 | self.elapsed_time = elapsed_time 73 | self.exec_time = exec_time 74 | 75 | if self.references is None: 76 | self.references = {} 77 | 78 | def as_dict(self, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True): 79 | 80 | d = { 81 | 'timestamp': self.exec_time, 82 | 'elapsed_time': self.elapsed_time, 83 | 'success': (self.status == 0), # allows an easy grep in tracking output 84 | 
'status': self.status, 85 | 'verb': self.verb, 86 | 'cmd': self.cmd, 87 | 'out': self.out, 88 | 'err': self.err, 89 | 'in': self.stdin_str, 90 | 'references': self.references, 91 | 'timeout': self.timeout, 92 | 'last_attempt': self.last_attempt, 93 | 'internal': self.internal, 94 | } 95 | 96 | if redact_tokens: 97 | redacted = [] 98 | next_is_token = False 99 | for arg in self.cmd: 100 | if next_is_token: 101 | redacted.append(_redaction_string()) 102 | next_is_token = False 103 | elif arg == '--token': 104 | next_is_token = True 105 | redacted.append(arg) 106 | elif arg.startswith('--token'): 107 | redacted.append(u'--token=**REDACTED**') 108 | else: 109 | redacted.append(arg) 110 | d['cmd'] = redacted 111 | 112 | if redact_references: 113 | refs = {} 114 | for (key, value) in six.iteritems(self.references): 115 | 116 | # pass through references starting with . since those are internal and designed not to 117 | # contain private values. 118 | if key.startswith('.'): 119 | refs[key] = value 120 | continue 121 | 122 | # References van be string or complex structures. 123 | if isinstance(value, six.string_types): 124 | value_str = value 125 | else: 126 | # If a structure of some type, serialize into a string to 127 | # check the entire thing for sensitivity. 128 | value_str = json.dumps(value) 129 | 130 | if _is_sensitive(value_str): 131 | refs[key] = _redact_content(value_str) 132 | else: 133 | # If not sensitive, make sure to keep structure. 
134 | refs[key] = value 135 | 136 | d['references'] = refs 137 | 138 | if redact_streams: 139 | if _is_sensitive(self.err): 140 | d['err'] = _redact_content(self.err) 141 | else: 142 | d['err'] = self.err 143 | 144 | if self.stdin_str: 145 | if redact_streams and _is_sensitive(self.stdin_str): 146 | d['in'] = _redaction_string() 147 | else: 148 | try: 149 | # If the input can be parsed as json, do so 150 | if self.stdin_str.strip().startswith('{'): 151 | d['in_obj'] = json.loads(self.stdin_str) 152 | del d['in'] 153 | except: 154 | pass 155 | 156 | if redact_streams and _is_sensitive(self.out): 157 | d['out'] = _redact_content(self.out) 158 | else: 159 | if len(self.out) > truncate_stdout > -1: 160 | d['out'] = (self.out[:truncate_stdout] + '...truncated...') 161 | else: 162 | try: 163 | # If the output can be parsed as json, do so 164 | if self.out.startswith('{'): 165 | d['out_obj'] = json.loads(self.out) 166 | del d['out'] 167 | except: 168 | pass 169 | 170 | return d 171 | 172 | def as_json(self, indent=4, redact_tokens=True, redact_streams=True, redact_references=True): 173 | return json.dumps( 174 | self.as_dict(redact_tokens=redact_tokens, redact_references=redact_references, 175 | redact_streams=redact_streams), indent=indent) 176 | 177 | 178 | def escape_arg(arg): 179 | # https://stackoverflow.com/questions/3163236/escape-arguments-for-paramiko-sshclient-exec-command 180 | return "'%s'" % (str(arg).replace(r"'", r"'\''"),) 181 | 182 | 183 | def _flatten_list(l): 184 | """ 185 | Flattens a list of elements (which can themselves be lists) into a single list 186 | of strings. 187 | :param l: A list which may contain other lists. Elements of that list may be None. 188 | :return: A single, flat list. None elements found in the argument will not be included. 
189 | """ 190 | 191 | if l is None: 192 | return [] 193 | 194 | agg = [] 195 | if is_collection_type(l): 196 | for e in l: 197 | agg.extend(_flatten_list(e)) 198 | else: 199 | if isinstance(l, bool): # bools are lowercase for things like labels 200 | l = '{}'.format(l).lower() 201 | else: # Make sure everything is a string 202 | l = '{}'.format(l) 203 | agg.append(l) 204 | 205 | return agg 206 | 207 | 208 | def oc_action(context, verb, cmd_args=None, all_namespaces=False, no_namespace=False, namespace=None, 209 | references=None, stdin_obj=None, stdin_str=None, last_attempt=True, 210 | **kwargs): 211 | """ 212 | Executes oc client verb with arguments. Returns an Action with result information. 213 | :param context: context information for the execution 214 | :param verb: The name of the verb to execute 215 | :param cmd_args: A list of strings|list which will be flattened into oc arguments 216 | :param all_namespaces: If true, --all-namespaces will be included in the invocation 217 | :param no_namespace: If true, namespace will not be included in invocation 218 | :param namespace: Namespace which will override context namespace if specified 219 | :param references: A dict of values to include in the tracking information for this action 220 | :param stdin_obj: A json serializable object to supply to stdin for the oc invocation 221 | :param stdin_str: If stdin is not a json serializable object. Cannot be specified in conjunction with stdin_obj. 222 | :param last_attempt: If False, implies that this action will be retried by higher level control on failure. 223 | :param kwargs: 224 | :return: An Action object. 
225 | :rtype: Action 226 | """ 227 | cmds = [context.get_oc_path(), verb] 228 | 229 | if references is None: 230 | references = {} 231 | 232 | if context.get_kubeconfig_path() is not None: 233 | cmds.append("--kubeconfig=%s" % context.get_kubeconfig_path()) 234 | 235 | if context.get_api_server() is not None: 236 | url = context.get_api_server() 237 | 238 | # If insecure:// is specified, skip TLS verification 239 | if url.startswith("insecure://"): 240 | url = "https://" + url[len("insecure://"):] 241 | cmds.append("--insecure-skip-tls-verify") 242 | 243 | cmds.append("--server=%s" % url) 244 | 245 | if context.get_token() is not None: 246 | cmds.append('--token={}'.format(context.get_token())) 247 | 248 | if context.get_ca_cert_path() is not None: 249 | cmds.append('--cacert={}'.format(context.get_ca_cert_path())) 250 | 251 | if all_namespaces: 252 | cmds.append("--all-namespaces") 253 | elif namespace: 254 | cmds.append("--namespace=%s" % namespace) 255 | elif context.get_project() is not None and not no_namespace: 256 | cmds.append("--namespace=%s" % context.get_project()) 257 | 258 | for k, v in six.iteritems(context.get_options()): 259 | # If a value was set to None, it should not impact the command line 260 | if not v: 261 | continue 262 | 263 | if not k.startswith('-'): 264 | if len(k) > 1: 265 | k = '--{}'.format(k) 266 | else: 267 | k = '-{}'.format(k) 268 | 269 | cmds.append('{}={}'.format(k, v).lower()) 270 | 271 | if context.get_loglevel() is not None: 272 | cmds.append("--loglevel=%s" % context.get_loglevel()) 273 | 274 | if context.get_skip_tls_verify(): 275 | cmds.append("--insecure-skip-tls-verify") 276 | 277 | # Arguments which are lists are flattened into the command list 278 | cmds.extend(_flatten_list(cmd_args)) 279 | 280 | period = 0.01 281 | 282 | timeout = False 283 | 284 | # If stdin_object is specified, serialize into the string. 
285 | if stdin_obj: 286 | stdin_str = json.dumps(stdin_obj, indent=None) 287 | 288 | # Set defaults in case is_out_of_time is true 289 | stdout = "" 290 | stderr = "" 291 | return_code = -1 292 | 293 | start_time = time.time() 294 | exec_time = int((datetime.datetime.now(datetime.timezone.utc) - datetime.datetime(1970, 1, 1, tzinfo=datetime.timezone.utc)).total_seconds() * 1000) 295 | 296 | if context.get_ssh_client() is not None: 297 | references['.client_host'] = '{}@{}'.format(context.get_ssh_username() or '', context.get_ssh_hostname()) 298 | 299 | # If we are out of time, don't even try to execute. 300 | expired, timeout_context = context.get_out_of_time() 301 | if not expired: 302 | 303 | if context.get_ssh_client() is not None: 304 | command_string = "" 305 | 306 | for i, c in enumerate(cmds): 307 | # index zero is 'oc' -- no need to escape 308 | if i > 0: 309 | c = " {}".format(escape_arg(c)) 310 | 311 | command_string += c 312 | 313 | try: 314 | pathed_command = 'PATH=$PATH:$HOME/bin {}'.format(command_string) 315 | 316 | # This timeout applies to individual read / write channel operations which follow. 317 | # If paramiko fails to timeout, consider using polling: https://stackoverflow.com/a/45844203 318 | remaining, timeout_context = context.get_min_remaining_seconds() 319 | ssh_stdin, ssh_stdout, ssh_stderr = context.get_ssh_client().exec_command(command=pathed_command, 320 | timeout=remaining, 321 | environment={ 322 | 'LC_ALL': 'en_US.UTF-8', 323 | } 324 | ) 325 | if stdin_str: 326 | ssh_stdin.write(stdin_str) 327 | ssh_stdin.flush() 328 | ssh_stdin.channel.shutdown_write() 329 | 330 | # In python2, read() returns type:str. In python3, I believe it will return type:bytes. 331 | # By decoding, we are making the assumption that openshift-client-python will be 332 | # useful for text based interactions (e.g. we don't support oc exec with 333 | # binary output). 
By converting into a real unicode string type, hopefully we prevent 334 | # a raft of incompatibilities between 2 and 3. 335 | stdout = ssh_stdout.read().decode('utf-8', errors='ignore') 336 | stderr = ssh_stderr.read().decode('utf-8', errors='ignore') 337 | return_code = ssh_stdout.channel.recv_exit_status() 338 | 339 | except socket.timeout as error: 340 | timeout = True 341 | return_code = -1 342 | 343 | else: 344 | 345 | with TempFile(content=stdin_str) as stdin_file: 346 | with TempFile() as out: 347 | with TempFile() as err: 348 | # When only python3 is supported, change to using standard timeout 349 | env = os.environ.copy() 350 | env['LC_ALL'] = 'en_US.UTF-8' 351 | process = subprocess.Popen(cmds, stdin=stdin_file.file, 352 | stdout=out.file, stderr=err.file, env=env) 353 | 354 | while process.poll() is None: 355 | expired, timeout_context = context.get_out_of_time() 356 | if expired: 357 | try: 358 | timeout = True 359 | process.kill() 360 | break 361 | except OSError: 362 | pass # ignore 363 | time.sleep(period) 364 | period = min(1, period + period) # Poll fast at first, but slow down to 1/sec over time 365 | 366 | # See note in paramiko flow on decoding 367 | stdout = out.read().decode('utf-8', errors='ignore') 368 | stderr = err.read().decode('utf-8', errors='ignore') 369 | 370 | return_code = process.returncode 371 | if timeout: 372 | return_code = -1 373 | 374 | end_time = time.time() 375 | elapsed_time = (end_time - start_time) 376 | 377 | else: 378 | timeout = True 379 | return_code = -1 380 | elapsed_time = -1 # Indicate we never tried to run the process 381 | 382 | # If there is an error, collect a stack for debug purposes 383 | if return_code != 0: 384 | references['.stack'] = traceback.format_stack() 385 | 386 | if timeout and timeout_context and timeout_context.frame_info: 387 | references['.timeout_context'] = '{}:{}'.format(timeout_context.frame_info[0], timeout_context.frame_info[1]) 388 | 389 | internal = kwargs.get("internal", False) 390 
# Thread-local used by the openshift-client-python ansible module to
# accumulate new facts/variables and to flag cluster changes while a
# playbook task executes.
ansible = local()


def ansible_context_reset():
    """Return the thread-local ansible context to its pristine state."""
    # Facts placed in this dict are set as new facts when the task exits.
    ansible.new_facts = dict()
    # Populated with the variables passed into the task.
    ansible.vars = dict()
    # Set True by module scripts that made changes to the cluster.
    ansible.changed = False


# Expose reset on the context object itself and initialize it immediately.
ansible.reset = ansible_context_reset
ansible.reset()
_kc_model: Internally used to cache kubeconfig info. 30 | :return: Returns a list of all the cluster names in the kubeconfig. 31 | """ 32 | kc = _get_kubeconfig_model(_kc_model=_kc_model) 33 | names = [] 34 | for cluster_entry in kc.clusters: 35 | names.append(cluster_entry.name) 36 | return names 37 | 38 | 39 | def get_kubeconfig_current_context_name(_kc_model=None): 40 | """ 41 | :param _kc_model: Internally used to cache kubeconfig info. 42 | :return: Returns the name of the current context in your kubeconfig 43 | """ 44 | kc = _get_kubeconfig_model(_kc_model=_kc_model) 45 | return kc['current-context'] 46 | 47 | 48 | def get_kubeconfig_context(context_name=None, _kc_model=None): 49 | """ 50 | :param _kc_model: Internally used to cache kubeconfig info. 51 | :param context_name: The context to retrieve or None to retrieve the current context. 52 | :return: Returns a dict of the specified context or current context. e.g. {cluster:..., namespace:...., user:....} 53 | """ 54 | kc = _get_kubeconfig_model(_kc_model=_kc_model) 55 | if context_name is None: 56 | context_name = get_kubeconfig_current_context_name(_kc_model=kc) 57 | 58 | for context_entry in kc.contexts: 59 | if context_entry.name == context_name: 60 | return context_entry.context._primitive() 61 | 62 | return None 63 | 64 | 65 | def get_kubeconfig_current_cluster_name(_kc_model=None): 66 | """ 67 | :param _kc_model: Internally used to cache kubeconfig info. 68 | :return: Returns the cluster associated with the current context. 69 | """ 70 | kc = _get_kubeconfig_model(_kc_model=_kc_model) 71 | current_context_name = get_kubeconfig_current_context_name(_kc_model=kc) 72 | return get_kubeconfig_context(context_name=current_context_name, _kc_model=kc)['cluster'] 73 | 74 | 75 | def get_kubeconfig_cluster(cluster_name=None, _kc_model=None): 76 | """ 77 | :param cluster_name: The context to retrieve or None for current context dict 78 | :param _kc_model: Internally used to cache kubeconfig info. 
def set_kubeconfig_insecure_skip_tls_verify(active, cluster_name=None, _kc_model=None):
    """
    Sets or removes insecure-skip-tls-verify for the specified cluster (or the current cluster if
    not specified).
    :param active: If True, enable insecure-skip-tls-verify for the cluster
    :param cluster_name: The cluster name to modify. If not specified, the current context's cluster will be modified.
    :param _kc_model: Internally used to cache kubeconfig info.
    """
    if not cluster_name:
        cluster_name = get_kubeconfig_current_cluster_name(_kc_model=_kc_model)

    oc.invoke('config',
              cmd_args=['set-cluster',
                        cluster_name,
                        '--insecure-skip-tls-verify={}'.format(str(active).lower()),
                        ],
              no_namespace=True)


def remove_kubeconfig_certifcate_authority(cluster_name=None, _kc_model=None):
    """
    When you install a valid certificate for your api endpoint, you may want to
    use your host's local certificate authorities. To do that, references to certificate
    authorities must be removed from your kubeconfig.

    NOTE: the function name retains a historical misspelling of 'certificate' to remain
    backwards compatible with existing callers.
    :param cluster_name: The cluster name to modify. If not specified, the current context's cluster will be modified.
    :param _kc_model: Internally used to cache kubeconfig info.
    """
    if not cluster_name:
        cluster_name = get_kubeconfig_current_cluster_name(_kc_model=_kc_model)

    # Setting insecure will remove any other certificate-authority data from the cluster's entry
    set_kubeconfig_insecure_skip_tls_verify(True, cluster_name=cluster_name, _kc_model=_kc_model)

    # Now set it back to false, removing the insecure-skip-tls-verify entry from kubeconfig
    set_kubeconfig_insecure_skip_tls_verify(False, cluster_name=cluster_name, _kc_model=_kc_model)


def get_kubeconfig_certificate_authority_data(cluster_name=None, _kc_model=None):
    """
    Returns the certificate authority data (if any) for the specified cluster.
    :param cluster_name: The cluster name to inspect. If not specified, the ca data will be
        returned for the current context's cluster.
    :param _kc_model: Internally used to cache kubeconfig info.
    :return: The PEM encoded x509 data or None if the cluster did not possess a
        certificate-authority-data field.
    """
    kc = _get_kubeconfig_model(_kc_model=_kc_model)
    if not cluster_name:
        cluster_name = get_kubeconfig_current_cluster_name(_kc_model=kc)

    cluster_dict = get_kubeconfig_cluster(cluster_name, _kc_model=kc)
    data = cluster_dict.get('certificate-authority-data', None)

    if data:
        # the data is base64 encoded PEM, so decode it.
        return base64.b64decode(data)

    return None


def set_kubeconfig_certificate_authority_data(ca_data, cluster_name=None, _kc_model=None):
    """
    Sets the certificate authority data for one or more clusters in the kubeconfig.
    :param ca_data: The certificate authority data (PEM format), as str or bytes. The chain
        will be encoded into base64 before being set in the kubeconfig.
    :param cluster_name: The cluster name to affect. If not specified, the ca data will be
        set for the current context.
    :param _kc_model: Internally used to cache kubeconfig info.
    :return: n/a
    """
    kc = _get_kubeconfig_model(_kc_model=_kc_model)
    if not cluster_name:
        cluster_name = get_kubeconfig_current_cluster_name(_kc_model=kc)

    # The kubeconfig cluster entry may have an existing certificate-authority file or have
    # insecure-skip-tls-verify set to true. Having ca-data set alongside either of these is
    # an invalid state for the kubeconfig, so we use a trick: toggling insecure-skip-tls-verify
    # will clear existing certificate authority entries. When we set it back to false, we can
    # safely poke in the ca-data.
    remove_kubeconfig_certifcate_authority(cluster_name=cluster_name, _kc_model=kc)

    # base64.b64encode requires bytes and returns bytes on Python 3; accept PEM supplied as
    # either str or bytes and pass a str to the oc command line.
    if isinstance(ca_data, str):
        ca_data = ca_data.encode('utf-8')
    b64_data = base64.b64encode(ca_data).decode('ascii')

    # Now we can poke in the value that we need
    oc.invoke('config',
              # https://github.com/kubernetes/kubectl/issues/501#issuecomment-406890261
              cmd_args=['set',
                        'clusters.{}.certificate-authority-data'.format(cluster_name),
                        b64_data
                        ],
              no_namespace=True)
| "yes", "true", "t", "y", "1") 18 | DEFAULT_LOAD_SYSTEM_HOST_KEYS = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_LOAD_SYSTEM_HOST_KEYS", "true").lower() in ( 19 | "yes", "true", "t", "y", "1") 20 | 21 | # If set, --insecure-skip-tls-verify will be included on all oc invocations 22 | GLOBAL_SKIP_TLS_VERIFY = os.getenv("OPENSHIFT_CLIENT_PYTHON_SKIP_TLS_VERIFY", "false").lower() in ( 23 | "yes", "true", "t", "y", "1") 24 | 25 | # Environment variable can specify generally how long openshift operations can execute before an exception 26 | MASTER_TIMEOUT = int(os.getenv("OPENSHIFT_CLIENT_PYTHON_MASTER_TIMEOUT", -1)) 27 | 28 | 29 | def cur_context(): 30 | return context.stack[-1] 31 | 32 | 33 | class Context(object): 34 | def __init__(self): 35 | self.parent = None 36 | self.oc_path = None 37 | self.kubeconfig_path = None 38 | self.api_server = None 39 | self.token = None 40 | self.ca_cert_path = None 41 | self.project_name = None 42 | self.loglevel_value = None 43 | self.skip_tls_verify = None 44 | self.tracking_strategy = None 45 | self.no_tracking = False 46 | self.timeout_datetime = None 47 | self.options = None 48 | 49 | # ssh configuration 50 | self.ssh_client = None 51 | self.ssh_hostname = None 52 | 53 | self.ssh_port = 22 54 | self.ssh_username = None 55 | self.ssh_password = None 56 | self.ssh_timeout = 600 57 | self.ssh_auto_add_host = False 58 | self.ssh_load_system_host_keys = True 59 | 60 | # Find the source code that appears to have created this Context 61 | self.frame_info = None 62 | for frame in inspect.stack(): 63 | module = inspect.getmodule(frame[0]) 64 | if module and (module.__name__ == 'openshift_client' or module.__name__.startswith('openshift_client.')): 65 | # The source appears to be within this module; skip this frame 66 | continue 67 | 68 | self.frame_info = inspect.getframeinfo(frame[0]) 69 | break 70 | 71 | def __enter__(self): 72 | if len(context.stack) > 0: 73 | self.parent = context.stack[-1] 74 | context.stack.append(self) 75 | 
self.reconnect_ssh() 76 | return self 77 | 78 | def close_ssh(self): 79 | # Shutdown ssh if it is in use 80 | if self.ssh_client: 81 | try: 82 | self.ssh_client.close() 83 | except: 84 | pass 85 | self.ssh_client = None 86 | 87 | def reconnect_ssh(self): 88 | """ 89 | If you lose a connection to the bastion, you can restablish it. 90 | :return: 91 | """ 92 | self.close_ssh() 93 | if self.ssh_hostname: 94 | 95 | # Just-in-time import to avoid hard dependency. Allows 96 | # you to use local 'oc' without having paramiko installed. 97 | import paramiko 98 | 99 | # https://github.com/paramiko/paramiko/issues/175#issuecomment-24125451 100 | paramiko.packet.Packetizer.REKEY_BYTES = pow(2, 40) 101 | paramiko.packet.Packetizer.REKEY_PACKETS = pow(2, 40) 102 | 103 | self.ssh_client = paramiko.SSHClient() 104 | 105 | # Should we load known_hosts? 106 | if self.ssh_load_system_host_keys: 107 | self.ssh_client.load_system_host_keys() 108 | 109 | # Should we trust an unknown host? 110 | if self.ssh_auto_add_host: 111 | self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 112 | 113 | self.ssh_client.connect(hostname=self.ssh_hostname, port=self.ssh_port, username=self.ssh_username, 114 | password=self.ssh_password, timeout=self.ssh_timeout) 115 | 116 | # Enable agent fowarding 117 | transport = self.ssh_client.get_transport() 118 | paramiko.agent.AgentRequestHandler(transport.open_session()) 119 | 120 | def __exit__(self, type, value, traceback): 121 | context.stack.pop() 122 | self.close_ssh() 123 | 124 | # TODO: deprecate this API (flagged on: 4/28/2023) 125 | def get_api_url(self): 126 | """ 127 | The API has been flagged for deprecation. 
Please use get_api_server instead 128 | """ 129 | return self.get_api_server() 130 | 131 | def get_api_server(self): 132 | 133 | if self.api_server is not None: 134 | return self.api_server 135 | if self.parent is not None: 136 | return self.parent.get_api_server() 137 | return context.default_api_server 138 | 139 | def get_token(self): 140 | 141 | if self.token is not None: 142 | return self.token 143 | if self.parent is not None: 144 | return self.parent.get_token() 145 | return context.default_token 146 | 147 | def get_ca_cert_path(self): 148 | 149 | if self.ca_cert_path is not None: 150 | return self.ca_cert_path 151 | if self.parent is not None: 152 | return self.parent.get_ca_cert_path() 153 | return context.default_ca_cert_path 154 | 155 | def get_oc_path(self): 156 | if self.oc_path is not None: 157 | return self.oc_path 158 | if self.parent is not None: 159 | return self.parent.get_oc_path() 160 | return context.default_oc_path 161 | 162 | def get_kubeconfig_path(self): 163 | if self.kubeconfig_path is not None: 164 | return self.kubeconfig_path 165 | if self.parent is not None: 166 | return self.parent.get_kubeconfig_path() 167 | return context.default_kubeconfig_path 168 | 169 | def get_ssh_client(self): 170 | """ 171 | :rtype SSHClient: 172 | """ 173 | if self.ssh_client is not None: 174 | return self.ssh_client 175 | if self.parent is not None: 176 | return self.parent.get_ssh_client() 177 | return None 178 | 179 | def get_ssh_username(self): 180 | if self.ssh_username is not None: 181 | return self.ssh_username 182 | if self.parent is not None: 183 | return self.parent.get_ssh_username() 184 | return None 185 | 186 | def get_ssh_password(self): 187 | if self.ssh_password is not None: 188 | return self.ssh_password 189 | if self.parent is not None: 190 | return self.parent.get_ssh_password() 191 | return None 192 | 193 | def get_ssh_hostname(self): 194 | if self.ssh_hostname is not None: 195 | return self.ssh_hostname 196 | if self.parent is not None: 
197 | return self.parent.get_ssh_hostname() 198 | return None 199 | 200 | def get_project(self): 201 | if self.project_name is not None: 202 | return self.project_name 203 | # if cluster is changing, don't check parent for project 204 | # with project must always be inside with cluster. 205 | if self.api_server is None and self.parent is not None: 206 | return self.parent.get_project() 207 | return context.default_project 208 | 209 | def get_options(self, add_to=None): 210 | 211 | if add_to is None: 212 | add_to = {} 213 | 214 | aggregate = add_to 215 | 216 | # If we are the top context, apply default options 217 | if not self.parent: 218 | aggregate.update(context.default_options) 219 | else: 220 | # Otherwise, aggregate our ancestor options recursively 221 | self.parent.get_options(add_to=aggregate) 222 | 223 | # Contribute the options of this context (override anything from ancestors) 224 | if self.options: 225 | aggregate.update(self.options) 226 | 227 | return aggregate 228 | 229 | def get_loglevel(self): 230 | if self.loglevel_value is not None: 231 | return self.loglevel_value 232 | if self.parent is not None: 233 | return self.parent.get_loglevel() 234 | return context.default_loglevel 235 | 236 | def get_skip_tls_verify(self): 237 | if self.skip_tls_verify is not None: 238 | return self.skip_tls_verify 239 | if self.parent is not None: 240 | return self.parent.get_skip_tls_verify() 241 | return context.default_skip_tls_verify 242 | 243 | def get_out_of_time(self): 244 | """ 245 | :return: Returns any Context which claims it is timed out. Returns (True,Context) if any surrounding timeout context is expired. If not, returns (False,None) 246 | """ 247 | 248 | # Unlike most context methods, timeout methods use cur_context instead of self. 249 | # This allows selectors/apiobjects captured in one timeout block to be used in another. 
250 | c = cur_context() 251 | now = datetime.now(timezone.utc) 252 | while c is not None: 253 | if c.timeout_datetime is not None and now > c.timeout_datetime: 254 | return True, c 255 | c = c.parent 256 | return False, None 257 | 258 | def get_min_remaining_seconds(self): 259 | """ 260 | :return: Returns the number of seconds a command needs to finish to satisfy 261 | existing timeout contexts and the Context which possessed the minimum; i.e. (secs, Context). 262 | A minimum of 1 second is always returned if a timeout context exists. If no timeout context exists, 263 | (None,None) is returned. 264 | """ 265 | 266 | # Unlike most context methods, timeout methods use cur_context instead of self. 267 | # This allows selectors/apiobjects captured in one timeout block to be used in another. 268 | c = cur_context() 269 | min_secs = None 270 | now = datetime.now(timezone.utc) 271 | limiting_context = None 272 | while c is not None: 273 | if c.timeout_datetime is not None: 274 | if now > c.timeout_datetime: 275 | return 1, c 276 | elif min_secs is None: 277 | min_secs = (c.timeout_datetime - now).total_seconds() 278 | limiting_context = c 279 | elif (c.timeout_datetime - now).total_seconds() < min_secs: 280 | limiting_context = c 281 | min_secs = (c.timeout_datetime - now).total_seconds() 282 | c = c.parent 283 | 284 | if min_secs is not None and min_secs < 1: 285 | return 1, limiting_context 286 | 287 | return min_secs, limiting_context 288 | 289 | def get_result(self): 290 | """ 291 | :return: If this contextmanager was returned by `with tracking()`, returns 292 | the Result object which has tracked all internal oc invocations. Otherwise, 293 | returns None. 294 | """ 295 | 296 | # Check instance type since this could also be a user's callable. 297 | if isinstance(self.tracking_strategy, Result): 298 | return self.tracking_strategy 299 | else: 300 | return None 301 | 302 | # Add an actions to any tracking 303 | # contexts enclosing the current context. 
304 | # Adds will be terminated if a no_tracking context is encountered. 305 | def register_action(self, action): 306 | c = self 307 | while c is not None: 308 | if c.no_tracking: 309 | return 310 | 311 | if c.tracking_strategy: 312 | if isinstance(c.tracking_strategy, Result): 313 | c.tracking_strategy.add_action(action) 314 | else: 315 | c.tracking_strategy(action) 316 | 317 | c = c.parent 318 | 319 | def set_timeout(self, seconds): 320 | """ 321 | Sets the internal timeout for this context the specified number of 322 | seconds in the future from the time it is called. Internal use only. 323 | :param seconds: The number of seconds from now to start timing out oc invocations. If None, timeout 324 | for this context is cleared. 325 | :return: N/A 326 | """ 327 | if seconds and seconds > 0: 328 | self.timeout_datetime = datetime.now(timezone.utc) + timedelta(seconds=seconds) 329 | else: 330 | self.timeout_datetime = None 331 | 332 | 333 | def set_default_oc_path(path): 334 | """ 335 | Sets the default full path of the oc binary to execute for this thread. 336 | If no client_path() context is in use, this path will be used. 337 | """ 338 | context.default_oc_path = path 339 | 340 | 341 | def set_default_kubeconfig_path(path): 342 | context.default_kubeconfig_path = path 343 | 344 | 345 | # TODO: deprecate this API (flagged on: 4/28/2023) 346 | def set_default_api_url(url): 347 | """ 348 | The API has been flagged for deprecation. 
Please use set_default_api_server instead 349 | """ 350 | set_default_api_server(url) 351 | 352 | 353 | def set_default_api_server(server): 354 | context.default_api_server = server 355 | 356 | 357 | def set_default_project(name): 358 | context.default_project = name 359 | 360 | 361 | def set_default_token(v): 362 | context.default_token = v 363 | 364 | 365 | def set_default_loglevel(v): 366 | context.default_loglevel = v 367 | 368 | 369 | def set_default_skip_tls_verify(do_skip): 370 | context.default_skip_tls_verify = do_skip 371 | 372 | 373 | def blank(): 374 | """ 375 | :return: Returns a blank context which can be used to temporarily replace a real context in a with statement. 376 | Mostly useful for debugging programs without having to tab/untab a large amount of code. 377 | """ 378 | c = Context() 379 | return c 380 | 381 | 382 | def client_host(hostname=None, port=DEFAULT_SSH_PORT, username=DEFAULT_SSH_USERNAME, password=None, 383 | auto_add_host=DEFAULT_SSH_AUTO_ADD, load_system_host_keys=DEFAULT_LOAD_SYSTEM_HOST_KEYS, 384 | connect_timeout=600): 385 | """ 386 | Will ssh to the specified host to in order to run oc commands. If hostname is not specified, 387 | the environment variable OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_HOSTNAME will be used. If the environment variable is 388 | not defined, this context will have no effect and the current host will be assumed to be the 389 | host on which oc will be run. 390 | :param hostname: The hostname or IP address. Defaults to environment variable OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_HOSTNAME if None. 391 | If the hostname is of the form 'user@host', the string will be split and the user will take precedence over 392 | any argument / environment variable supplied. 393 | :param port: The ssh port. Defaults to OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_PORT, then None. 394 | :param username: The username to use. Defaults to OPENSHIFT_CLIENT_PYTHON_DEFAULT_USERNAME, then None. 
395 | :param password: The username's password 396 | :param auto_add_host: Whether to auto accept host certificates. Defaults to OPENSHIFT_CLIENT_PYTHON_DEFAULT_SSH_AUTO_ADD, then false. 397 | :param load_system_host_keys: Whether load known_hosts. Defaults to DEFAULT_LOAD_SYSTEM_HOST_KEYS, then true. 398 | :param connect_timeout: Connection timeout 399 | :return: 400 | """ 401 | c = Context() 402 | 403 | if hostname is None: 404 | hostname = DEFAULT_SSH_HOSTNAME 405 | 406 | if hostname and '@' in hostname: 407 | c.ssh_username, c.ssh_hostname = hostname.split('@', 1) 408 | else: 409 | c.ssh_hostname = hostname 410 | c.ssh_username = username 411 | 412 | c.ssh_port = port 413 | c.ssh_password = password 414 | c.ssh_timeout = connect_timeout 415 | c.ssh_auto_add_host = auto_add_host 416 | c.ssh_load_system_host_keys = load_system_host_keys 417 | 418 | return c 419 | 420 | 421 | def client_path(oc_path): 422 | """ 423 | Specifies the full path to the oc binary in this context. If unspecified, 'oc' is invoked and 424 | it should be in $PATH. 425 | :param oc_path: Fully path to executable oc binary 426 | :return: 427 | """ 428 | c = Context() 429 | 430 | c.oc_path = oc_path 431 | return c 432 | 433 | 434 | def api_server(api_url=None, ca_cert_path=None, kubeconfig_path=None): 435 | """ 436 | Establishes a context in which inner oc interactions 437 | will target the specified OpenShift API server (--server arguments). 438 | Contexts can be nested. The most immediate ancestor cluster context 439 | will define the API server targeted by an action. 440 | :param api_url: The oc --server argument to use. 441 | :param kubeconfig_path: The oc --kubeconfig argument to use. 442 | :return: The context object. Can be safely ignored. 
443 | """ 444 | 445 | c = Context() 446 | c.kubeconfig_path = kubeconfig_path 447 | c.api_server = api_url 448 | c.ca_cert_path = ca_cert_path 449 | return c 450 | 451 | 452 | def token(val=None): 453 | """ 454 | Establishes a context in which inner oc interactions 455 | will include the specified token on the command line with --token. 456 | :param val: The oc --token argument to use. 457 | :return: The context object. Can be safely ignored. 458 | """ 459 | 460 | c = Context() 461 | c.token = val 462 | return c 463 | 464 | 465 | def project(name): 466 | """ 467 | Establishes a context in which inner oc interactions 468 | will impact the named OpenShift project. project contexts 469 | can be nested. The most immediate ancestor project context 470 | will define the project used by an action. 471 | :param name: The name of the project. If None, parent context project will be used. 472 | :return: The context object. Can be safely ignored. 473 | """ 474 | c = Context() 475 | if not name: 476 | return c 477 | 478 | # split is to strip qualifier off if specified ('project/test' -> 'test') 479 | c.project_name = name.split("/")[-1] 480 | return c 481 | 482 | 483 | def tracking(action_handler=None, limit=None): 484 | """ 485 | Establishes a context in which all inner actions will 486 | be tracked (unless a inner no_tracking context prevents 487 | tracking). Trackers can be nested -- all actions 488 | performed within a tracker's context will be tracked unless 489 | there is a descendant no_tracking context which blocks tracking 490 | from propagating to this ancestor. 491 | :param action_handler: If specified, after each oc action is 492 | performed, this method will be called with the Action object. 493 | If not specified, all Actions will aggregate into a internally 494 | managed Result object which can be accessed with get_result. 495 | :param limit: If specified, it allows to specify a limit on the 496 | number of actions stored by a given tracking context. 
If not 497 | specified or given a value less than 0, it will store unlimited number of oc 498 | interactions, and the limit value will be stored in the Result object. 499 | :return: The tracker contextmanager. If action_handler is not 500 | specified, call get_result to receive a Result object with all 501 | tracked Action objects. 502 | """ 503 | c = Context() 504 | if action_handler: 505 | if not callable(action_handler): 506 | raise ValueError('Expected action_handler to be callable') 507 | c.tracking_strategy = action_handler 508 | else: 509 | c.tracking_strategy = Result('tracking', limit) 510 | 511 | return c 512 | 513 | 514 | def no_tracking(): 515 | """ 516 | Prevent outer tracker contexts from registering 517 | oc actions in their tracker objects. This is useful 518 | when a large amount of data is going to be transferred 519 | via stdout/stderr OR when certain actions make carry 520 | confidential data that should not appear in trackers. 521 | :return: The context object. Can be safely ignored. 522 | """ 523 | c = Context() 524 | c.no_tracking = True 525 | return c 526 | 527 | 528 | def options(*args): 529 | """ 530 | Establishes a context in which inner oc invocations will be passed 531 | an arbitrary set of options. This is most useful in ensuring, for 532 | example, that a certain --token, --as, --context, etc, is passed to each 533 | oc invocation. 534 | 535 | Keys should be long form option names, without preceding hyphens. e.g. 536 | { 'token': '.....' } . 537 | 538 | Unlike most other contexts, .options is additive. If on oc invocation is 539 | embedded within two .options, it will include both sets. Inner option 540 | contexts will override the same key specified at outer levels. A value 541 | of None will prevent the option from being passed. 542 | 543 | Tip for flags: Even flags like --insecure-skip-tls-verify can be 544 | specified as key=value: --insecure-skip-tls-verify=true 545 | 546 | :param args: A vararg list of dicts. 
547 | Keys in dicts will be pre-pended with '-' if single letter or 548 | '--' if multiple letter not already preceded with a hyphen. 549 | 550 | :return: The context object. Can be safely ignored. 551 | """ 552 | 553 | c = Context() 554 | c.options = {} 555 | 556 | for d in args: 557 | c.options.update(d) 558 | 559 | return c 560 | 561 | 562 | # Example: with loglevel(x): 563 | # Creates a new context with the specified log level. 564 | def loglevel(v): 565 | """ 566 | Establishes a context in which inner oc interactions 567 | will execute with the specified loglevel. loglevel contexts 568 | can be nested. The most immediate ancestor loglevel context 569 | will define the loglevel used by an action. 570 | :param v: The loglevel to use (0-9). 571 | :return: The context object. Can be safely ignored. 572 | """ 573 | c = Context() 574 | c.loglevel_value = v 575 | return c 576 | 577 | 578 | def tls_verify(enable=True): 579 | """ 580 | Establishes a context in which inner oc interactions 581 | will pass honor/ignore tls verification. 582 | :param enable: If false, --insecure-skip-tls-verify will be passed to oc invocations 583 | :return: The context object. Can be safely ignored. 584 | """ 585 | c = Context() 586 | c.skip_tls_verify = not enable 587 | return c 588 | 589 | 590 | def timeout(seconds): 591 | """ 592 | Establishes a context in which inner oc interactions 593 | must terminate within a specified period. timeout contexts 594 | can be nested and each nested layer will be enforced. 595 | If actions run longer than the specified timeout, an exception 596 | will be thrown. 597 | :param seconds: The number of seconds before actions should time out. 598 | :return: The context object. Can be safely ignored. 
599 | """ 600 | c = Context() 601 | c.set_timeout(seconds) 602 | return c 603 | 604 | 605 | class ThreadLocalContext(local): 606 | def __init__(self): 607 | self.default_oc_path = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_OC_PATH", "oc") # Assume oc is in $PATH by default 608 | self.default_kubeconfig_path = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_CONFIG_PATH", None) 609 | self.default_api_server = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_API_SERVER", None) 610 | self.default_token = None # Does not support environment variable injection to discourage this insecure practice 611 | self.default_ca_cert_path = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_CA_CERT_PATH", None) 612 | self.default_project = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_PROJECT", None) 613 | self.default_options = {} 614 | self.default_loglevel = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_OC_LOGLEVEL", None) 615 | self.default_skip_tls_verify = os.getenv("OPENSHIFT_CLIENT_PYTHON_DEFAULT_SKIP_TLS_VERIFY", None) 616 | 617 | root_context = Context() 618 | root_context.set_timeout(MASTER_TIMEOUT) 619 | 620 | # Ensure stack always has at least one member to simplify getting last 621 | # with [-1] 622 | self.stack = [root_context] 623 | 624 | 625 | # All threads will have a context which is 626 | # managed by a stack of Context objects. As 627 | # a thread establish additional context using 628 | # 'with' statements, the stack will push/grow. As 629 | # 'with' blocks end, the stack will pop/shrink. 630 | context = ThreadLocalContext() 631 | -------------------------------------------------------------------------------- /packages/openshift_client/decorators.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import functools 4 | import random 5 | import string 6 | 7 | from . 
import new_project, delete_project 8 | 9 | 10 | def _id_generator(size=6, chars=string.ascii_lowercase + string.digits): 11 | return ''.join(random.choice(chars) for _ in range(size)) 12 | 13 | 14 | def _generate_project_name(): 15 | return "ephemeral-project-{}".format(_id_generator()) 16 | 17 | 18 | def ephemeral_project(_func=None, *, project_name=_generate_project_name()): 19 | def decorator(func): 20 | @functools.wraps(func) 21 | def wrapper(*args, **kwargs): 22 | with new_project(project_name): 23 | value = func(*args, project_name=project_name, **kwargs) 24 | delete_project(project_name) 25 | return value 26 | return wrapper 27 | 28 | if _func is None: 29 | return decorator 30 | else: 31 | return decorator(_func) 32 | -------------------------------------------------------------------------------- /packages/openshift_client/model.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import six 4 | 5 | from six.moves import range 6 | 7 | 8 | class OpenShiftPythonException(Exception): 9 | 10 | def __init__(self, msg, result=None, **kwargs): 11 | super(self.__class__, self).__init__(msg) 12 | self.msg = msg 13 | self.result = result 14 | self.kwargs = kwargs 15 | 16 | def attributes(self): 17 | return dict(self.kwargs) 18 | 19 | def get_result(self): 20 | """ 21 | :return: Returns the Result object associated with 22 | this exception if any. Might be None. 
23 | """ 24 | return self.result 25 | 26 | def as_dict(self): 27 | d = dict(self.kwargs) 28 | d["msg"] = self.msg 29 | if self.result is not None: 30 | d["result"] = self.result 31 | return d 32 | 33 | def __str__(self): 34 | if self.result is None: 35 | return self.msg 36 | return "[" + self.msg + "]\n" + repr(self.result) 37 | 38 | 39 | class ModelError(Exception): 40 | 41 | def __init__(self, msg, **kwargs): 42 | super(self.__class__, self).__init__(msg) 43 | self.msg = msg 44 | self.kwargs = kwargs 45 | 46 | 47 | class MissingModel(dict): 48 | 49 | def __init__(self): 50 | super(self.__class__, self).__init__() 51 | pass 52 | 53 | def __getattr__(self, attr): 54 | return self 55 | 56 | def __setattr__(self, key, value): 57 | raise ModelError("Invalid attempt to set key(%s) in missing branch of model" % key) 58 | 59 | def __delattr__(self, key): 60 | raise ModelError("Invalid attempt to delete key(%s) in missing branch of model" % key) 61 | 62 | def __getitem__(self, attr): 63 | return self 64 | 65 | def __setitem__(self, key, value): 66 | raise ModelError("Invalid attempt to set key(%s) in missing branch of model" % key) 67 | 68 | def __delitem__(self, key): 69 | raise ModelError("Invalid attempt to delete key(%s) in missing branch of model" % key) 70 | 71 | # Express false-y 72 | def __bool__(self): 73 | return False 74 | 75 | # Express false-y 76 | def __len__(self): 77 | return 0 78 | 79 | def __str__(self): 80 | return "(MissingModelBranch)" 81 | 82 | def __repr__(self): 83 | return "(MissingModelBranch)" 84 | 85 | def __div__(self, other): 86 | return self 87 | 88 | def __add__(self, other): 89 | return self 90 | 91 | def __sub__(self, other): 92 | return self 93 | 94 | def __mul__(self, other): 95 | return self 96 | 97 | def can_match(self, *vals): 98 | return False 99 | 100 | 101 | # Singleton which indicates if any model attribute was not defined 102 | Missing = MissingModel() 103 | 104 | 105 | def to_model_or_val(v, case_insensitive=False): 106 | if 
isinstance(v, ListModel) or isinstance(v, Model): 107 | return v 108 | if isinstance(v, list): 109 | return ListModel(v, case_insensitive=case_insensitive) 110 | elif isinstance(v, dict): 111 | return Model(v, case_insensitive=case_insensitive) 112 | else: 113 | return v 114 | 115 | 116 | def _element_can_match(master, test, case_insensitive=False): 117 | if master is Missing: 118 | return False 119 | 120 | if master is None or test is None: 121 | return master is test 122 | 123 | if isinstance(master, str): 124 | master = six.text_type(master) # Turn str into unicode 125 | if case_insensitive: 126 | master = master.lower() 127 | 128 | if isinstance(test, str): 129 | test = six.text_type(test) # Turn str into unicode 130 | if case_insensitive: 131 | test = test.lower() 132 | 133 | for prim in [bool, int, six.text_type, float]: 134 | if isinstance(master, prim): 135 | return master == test or str(master) == str(test) 136 | 137 | if isinstance(master, dict): 138 | if isinstance(test, dict): 139 | return _dict_is_subset(master, test, case_insensitive=case_insensitive) 140 | else: 141 | return False 142 | 143 | if isinstance(master, list): 144 | if isinstance(test, list): 145 | return _list_is_subset(master, test, case_insensitive=case_insensitive) 146 | else: 147 | return False 148 | 149 | raise ModelError("Don't know how to compare %s and %s" % (str(type(master)), str(type(test)))) 150 | 151 | 152 | def _element_in_list(master, e, case_insensitive=False): 153 | for m in master: 154 | if _element_can_match(m, e, case_insensitive=case_insensitive): 155 | return True 156 | return False 157 | 158 | 159 | def _list_is_subset(master, test, case_insensitive=False): 160 | for e in test: 161 | if not _element_in_list(master, e, case_insensitive=case_insensitive): 162 | return False 163 | return True 164 | 165 | 166 | def _dict_is_subset(master, subset, case_insensitive=False): 167 | for k, v in subset.items(): 168 | if case_insensitive: 169 | k = k.lower() 170 | m = 
master.get(k, Missing) 171 | if not _element_can_match(m, v, case_insensitive=case_insensitive): 172 | return False 173 | 174 | return True 175 | 176 | 177 | class ListModel(list): 178 | 179 | def __init__(self, list_to_model, case_insensitive=False): 180 | super(ListModel, self).__init__() 181 | self.__case_insensitive = case_insensitive 182 | if list_to_model is not None: 183 | self.extend(list_to_model) 184 | 185 | def __setitem__(self, key, value): 186 | super(self.__class__, self).__setitem__(key, value) 187 | 188 | def __delitem__(self, key): 189 | super(self.__class__, self).__delitem__(key) 190 | 191 | def __getitem__(self, index): 192 | if super(self.__class__, self).__len__() > index: 193 | v = super(self.__class__, self).__getitem__(index) 194 | if isinstance(v, Model): 195 | return v 196 | v = to_model_or_val(v, case_insensitive=self.__case_insensitive) 197 | self.__setitem__(index, v) 198 | return v 199 | 200 | # Otherwise, trigger out of bounds exception 201 | return super(self.__class__, self).__getitem__(index) 202 | 203 | def __iter__(self): 204 | for i in range(0, super(self.__class__, self).__len__()): 205 | yield self[i] 206 | 207 | def _primitive(self): 208 | """ 209 | :return: Returns the ListModel as a python list 210 | :rtype: list 211 | """ 212 | l = [] 213 | for e in self: 214 | if isinstance(e, Model) or isinstance(e, ListModel): 215 | e = e._primitive() 216 | l.append(e) 217 | return l 218 | 219 | def can_match(self, list_or_entry): 220 | """ 221 | Answers whether this list is a subset of the specified list. If the argument is not a list, 222 | it placed into one for comparison purposes. 223 | Elements of the argument list can be primitives, lists, or dicts. In the case of non-primitives, the list or 224 | dicts must ultimately be subsets of at least one element in the receiver list. 225 | :param list_or_entry: The list to compare or a primitive/dict that must exist in the receiver's list. 
class Model(dict):
    """
    A dict subclass supporting attribute-style access to keys
    (``m.metadata.name``). Missing keys resolve to the ``Missing`` singleton
    rather than raising, so deep navigation through absent structure is safe.
    Values are converted to Model / ListModel via ``to_model_or_val`` so
    nested dicts and lists receive the same treatment.
    """

    def __init__(self, dict_to_model=None, case_insensitive=False):
        """
        :param dict_to_model: Optional dict whose entries seed this Model.
        :param case_insensitive: If True, keys are stored and looked up lowercased.
        """
        super(Model, self).__init__()

        # Routed through __setattr__, which special-cases names carrying the
        # mangled '_Model__' prefix so this lands on the instance itself
        # rather than becoming a dict entry.
        self.__case_insensitive = case_insensitive

        if dict_to_model is not None:
            for k, v in dict_to_model.items():
                if self.__case_insensitive:
                    k = k.lower()
                self[k] = to_model_or_val(v, case_insensitive=case_insensitive)

    def __getattr__(self, attr):
        # Invoked only when normal attribute lookup fails (i.e. for dict keys).

        if isinstance(attr, six.string_types):
            if attr.startswith('_Model__'):  # e.g. _Model__case_insensitive
                # Internal attributes must raise AttributeError (not return
                # Missing); this also prevents recursion before __init__ runs.
                raise AttributeError

            if self.__case_insensitive:
                attr = attr.lower()

        if super(Model, self).__contains__(attr):
            # NOTE: was super(self.__class__, self).get(attr) — normalized to
            # super(Model, self) to stay safe under subclassing.
            v = super(Model, self).get(attr)
            if isinstance(v, Model) or isinstance(v, ListModel):
                return v
            # Convert lazily on first access and cache the converted value.
            v = to_model_or_val(v, self.__case_insensitive)
            self.__setattr__(attr, v)
            return v
        else:
            return Missing

    def __setattr__(self, key, value):
        if key.startswith('_Model__'):  # e.g. _Model__case_insensitive
            # Internal/private attributes are stored on the instance.
            return super(Model, self).__setattr__(key, value)

        if self.__case_insensitive:
            key = key.lower()

        self.__setitem__(key, value)

    def __getitem__(self, key):
        # Delegate so m['a'] and m.a behave identically (including Missing).
        return self.__getattr__(key)

    def __setitem__(self, key, value):
        super(Model, self).__setitem__(key, to_model_or_val(value, case_insensitive=self.__case_insensitive))

    def __delitem__(self, key):
        # FIX: previously `if self.__is_case_sensitive__():` — no such method
        # exists anywhere, so lookup fell through __getattr__ to Missing and
        # every deletion raised; the intent (and what the other accessors do)
        # is to lowercase the key only when this Model is case-insensitive.
        if self.__case_insensitive:
            key = key.lower()
        super(Model, self).__delitem__(key)

    def _primitive(self):
        """
        :return: Returns the Model as a python dict
        :rtype: dict
        """
        d = {}
        for k, v in six.iteritems(self):
            if isinstance(v, Model) or isinstance(v, ListModel):
                v = v._primitive()
            d[k] = v
        return d

    def can_match(self, val):
        """
        Answers whether this Model matches all elements of the argument.
        :param val: A dict or Model with elements set that must be found within this model.
        :return: Returns True if all of the elements can match (i.e. are subsets of) elements of this Model.
        """
        return _dict_is_subset(self, val, case_insensitive=self.__case_insensitive)
45 | """ 46 | for action in self.__actions: 47 | if action.timeout: 48 | return action 49 | return None 50 | 51 | # Returns aggregate stderr from all underlying actions 52 | def err(self): 53 | s = u'' 54 | for action in self.__actions: 55 | if action.err: 56 | s += action.err 57 | if not s.endswith("\n"): 58 | s += u'\n' 59 | return s 60 | 61 | def as_dict(self, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True): 62 | 63 | m = { 64 | "operation": self.high_level_operation, 65 | "status": self.status(), 66 | "actions": [action.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens, 67 | redact_references=redact_references, 68 | redact_streams=redact_streams) for action in self.__actions] 69 | } 70 | 71 | return m 72 | 73 | def as_json(self, indent=4, truncate_stdout=-1, redact_tokens=True, redact_streams=True, redact_references=True): 74 | return json.dumps( 75 | self.as_dict(truncate_stdout=truncate_stdout, redact_tokens=redact_tokens, 76 | redact_references=redact_references, redact_streams=redact_streams), 77 | indent=indent) 78 | 79 | def add_action(self, action): 80 | self.__actions.append(action) 81 | if self.limit_tracking_actions is not None and len(self.__actions) > self.limit_tracking_actions: 82 | self.__actions.pop(0) 83 | 84 | def add_result(self, result): 85 | self.__actions.extend(result.__actions) 86 | 87 | def __repr__(self): 88 | return self.as_json() 89 | 90 | def fail_if(self, msg): 91 | if self.get_timeout(): 92 | msg += " (Timeout during: {})".format(self.get_timeout().as_dict()['cmd']) 93 | 94 | if self.status() != 0: 95 | raise OpenShiftPythonException(msg, self) 96 | -------------------------------------------------------------------------------- /packages/openshift_client/status.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | from .model import Missing 4 | 5 | 6 | def is_route_admitted(apiobj): 7 | return 
def is_route_admitted(apiobj):
    """True when the route reports an ingress condition of type Admitted with status True."""
    admitted_subset = {
        'ingress': [
            {'conditions': [{'type': 'Admitted', 'status': 'True'}]}
        ]
    }
    return apiobj.model.status.can_match(admitted_subset)


def is_pod_running(apiobj):
    """True when the pod reports phase Running."""
    phase = apiobj.model.status.phase
    return phase == 'Running'


def is_pod_succeeded(apiobj):
    """True when the pod reports phase Succeeded."""
    phase = apiobj.model.status.phase
    return phase == 'Succeeded'


def is_node_ready(apiobj):
    """True when the node reports a condition of type Ready with status True."""
    ready_subset = {'type': 'Ready', 'status': 'True'}
    return apiobj.model.status.conditions.can_match(ready_subset)


def is_operator_ready(operator_apiobj):
    """
    True when the operator reports conditions and none of them indicate
    trouble: Progressing/Failing/Degraded with status True, or Available
    with status False.
    """
    conditions = operator_apiobj.model.status.conditions

    # Operator not reporting conditions yet?
    if not conditions:
        return False

    for condition in conditions:
        # 'Degraded' replaced 'Failing' in 4.1; treat both as unhealthy.
        if condition.status == "True" and condition.type in ("Progressing", "Failing", "Degraded"):
            return False
        if condition.type == "Available" and condition.status == "False":
            return False

    return True


def is_credentialsrequest_provisioned(apiobj):
    """Returns status.provisioned (a boolean) when reported; False otherwise."""
    provisioned = apiobj.model.status.provisioned
    if provisioned is Missing:
        return False
    return provisioned  # This is a boolean


def is_pvc_bound(apiobj):
    """True when the PVC reports phase Bound."""
    phase = apiobj.model.status.phase
    return phase == 'Bound'


def is_imagestream_imported(apiobj):
    """
    Returns False if an imagestream reports an issue
    importing images. Recommended that you run import-image --all
    against the imagestream.
    """
    import_failure_subset = {
        'conditions': [
            {'type': 'ImportSuccess', 'status': 'False'}
        ]
    }
    return not apiobj.model.status.tags.can_match(import_failure_subset)
76 | """ 77 | return not apiobj.model.status.tags.can_match( 78 | { 79 | 'conditions': [ 80 | { 81 | 'type': 'ImportSuccess', 82 | 'status': 'False' 83 | } 84 | ] 85 | } 86 | ) 87 | -------------------------------------------------------------------------------- /packages/openshift_client/test_apiobject.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from openshift_client import Context 4 | from .apiobject import APIObject 5 | 6 | 7 | class TestModel(unittest.TestCase): 8 | 9 | def test_empty(self): 10 | obj = APIObject() 11 | self.assertIs(len(obj.model), 0) 12 | self.assertEqual(obj.as_dict(), {}) 13 | self.assertEqual(obj.as_json(), '{}') 14 | self.assertIsNone(obj.context.project_name) 15 | 16 | def test_context(self): 17 | context = Context() 18 | context.project_name = "my-project" 19 | obj = APIObject(context=context) 20 | self.assertEqual(obj.context.project_name, context.project_name) 21 | 22 | 23 | if __name__ == '__main__': 24 | unittest.main() 25 | -------------------------------------------------------------------------------- /packages/openshift_client/test_model.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import unittest 4 | 5 | from .model import * 6 | 7 | 8 | class TestModel(unittest.TestCase): 9 | 10 | def test_empty(self): 11 | empty = Model() 12 | self.assertIs(empty.metadata, Missing) 13 | self.assertIs(empty["metadata"], Missing) 14 | self.assertIs(empty.metadata.a, Missing) 15 | self.assertIs(empty.metadata["a"], Missing) 16 | 17 | def test_falsey(self): 18 | miss = Model().something.missing 19 | self.assertTrue(miss is Missing) 20 | if miss: 21 | self.fail("Expected falsey value") 22 | 23 | if len(miss) != 0: 24 | self.fail("Expected zero length") 25 | 26 | def test_primitive(self): 27 | d = { 28 | "a": 1, 29 | "b": 2, 30 | "map1": { 31 | "c": 3, 32 | "d": 4 33 | }, 34 | "list1": [ 35 | 
5, 36 | 6, 37 | 7, 38 | ], 39 | "list2": [ 40 | { 41 | "e": 5, 42 | "f": 6 43 | }, 44 | { 45 | "g": 5, 46 | "h": 6 47 | }, 48 | ], 49 | } 50 | m = Model(dict_to_model=d) 51 | d2 = m._primitive() 52 | if d2 != d: 53 | self.fail('Primitive did not restore to expected state') 54 | 55 | self.assertTrue(isinstance(m, Model)) 56 | self.assertFalse(isinstance(d2['map1'], Model)) 57 | self.assertFalse(isinstance(d2['list2'], ListModel)) 58 | self.assertFalse(isinstance(d2['list2'][0], Model)) 59 | 60 | def test_access(self): 61 | m = Model() 62 | m.metadata = { 63 | "a": 1, 64 | "b": 2, 65 | "map1": { 66 | "c": 3, 67 | "d": 4 68 | }, 69 | "list1": [ 70 | 5, 71 | 6, 72 | 7, 73 | ], 74 | "list2": [ 75 | { 76 | "e": 5, 77 | "f": 6 78 | }, 79 | { 80 | "g": 5, 81 | "h": 6 82 | }, 83 | ], 84 | "anull": None, 85 | "astring": "thevalue" 86 | } 87 | 88 | self.assertIsNot(m.metadata, Missing) 89 | self.assertIsNot(m.metadata.a, Missing) 90 | self.assertIs(m.metadata.A, Missing) 91 | self.assertIs(m.metadata.B, Missing) 92 | self.assertEqual(m.metadata.b, 2) 93 | 94 | self.assertIsNot(m.metadata.map1, Missing) 95 | self.assertIsNot(m.metadata["map1"], Missing) 96 | 97 | self.assertIs(m.metadata["map_notthere"], Missing) 98 | self.assertIs(m.metadata.map_notthere, Missing) 99 | 100 | self.assertEqual(m.metadata.map1.c, 3) 101 | self.assertEqual(m.metadata.map1.d, 4) 102 | self.assertIs(m.metadata.map1.e, Missing) 103 | 104 | self.assertEqual(len(m.metadata.list1), 3) 105 | self.assertEqual(len(m.metadata["list1"]), 3) 106 | self.assertEqual(m.metadata.list1[0], 5) 107 | self.assertEqual(m.metadata.list1, [5,6,7]) 108 | self.assertEqual(m.metadata["list1"], [5,6,7]) 109 | 110 | try: 111 | m.metadata.list1[3] 112 | self.fail("Did not receive expected IndexError") 113 | except IndexError: 114 | pass 115 | 116 | self.assertIsNot(m.metadata.list2, Missing) 117 | self.assertIsNot(m.metadata.list2[0], Missing) 118 | self.assertIsNot(m.metadata.list2[1], Missing) 119 | 
self.assertIsNot(m.metadata.list2[1].g, Missing) 120 | self.assertIsNot(m.metadata.list2[1].h, Missing) 121 | self.assertIs(m.metadata.list2[1].notthere, Missing) 122 | self.assertIsNone(m.metadata.anull) 123 | 124 | self.assertEqual(m.metadata.astring, "thevalue") 125 | self.assertEqual(m.metadata["astring"], "thevalue") 126 | 127 | m.list3 = ['a', 'b'] 128 | self.assertIsNot(m.list3, Missing) 129 | self.assertIsNot(m["list3"], Missing) 130 | self.assertEqual(m["list3"][0], "a") 131 | 132 | m.a = 5 133 | m.b = "hello" 134 | m.c = True 135 | m.d = False 136 | m.e = None 137 | 138 | self.assertEqual(m.a, 5) 139 | self.assertEqual(m.b, "hello") 140 | self.assertEqual(m.c, True) 141 | self.assertEqual(m.d, False) 142 | self.assertEqual(m.e, None) 143 | 144 | def test_access_case_insensitive(self): 145 | m = Model(case_insensitive=True) 146 | m.metadata = { 147 | "A": 1, 148 | "b": 2, 149 | "mAp1": { 150 | "c": 3, 151 | "D": 4 152 | }, 153 | "lIst1": [ 154 | 5, 155 | 6, 156 | 7, 157 | ], 158 | "lisT2": [ 159 | { 160 | "e": 5, 161 | "F": 6 162 | }, 163 | { 164 | "g": 5, 165 | "h": 6 166 | }, 167 | ], 168 | "aNull": None, 169 | "aString": "thevalue" 170 | } 171 | 172 | self.assertIsNot(m.metadata, Missing) 173 | self.assertIsNot(m.metadata.a, Missing) 174 | self.assertEqual(m.metadata.b, 2) 175 | self.assertIsNot(m.metadata.A, Missing) 176 | self.assertEqual(m.metadata.B, 2) 177 | 178 | self.assertIsNot(m.metadata.map1, Missing) 179 | self.assertIsNot(m.metadata["map1"], Missing) 180 | self.assertIsNot(m.metadata.MAP1, Missing) 181 | 182 | self.assertIs(m.metadata["map_notthere"], Missing) 183 | self.assertIs(m.metadata.map_notthere, Missing) 184 | 185 | self.assertEqual(m.metadata.map1.c, 3) 186 | self.assertEqual(m.metadata.map1.d, 4) 187 | self.assertIs(m.metadata.map1.e, Missing) 188 | 189 | self.assertEqual(m.metadata.MAP1.C, 3) 190 | self.assertEqual(m.metadata.MAP1.D, 4) 191 | self.assertIs(m.metadata.MAP1.E, Missing) 192 | 193 | 
self.assertEqual(len(m.metadata.list1), 3) 194 | self.assertEqual(len(m.metadata["list1"]), 3) 195 | self.assertEqual(m.metadata.list1[0], 5) 196 | self.assertEqual(m.metadata.list1, [5,6,7]) 197 | self.assertEqual(m.metadata["list1"], [5,6,7]) 198 | 199 | self.assertEqual(len(m.METADATA.LIST1), 3) 200 | self.assertEqual(len(m.METADATA["LIST1"]), 3) 201 | self.assertEqual(m.METADATA.LIST1[0], 5) 202 | self.assertEqual(m.METADATA.LIST1, [5,6,7]) 203 | self.assertEqual(m.METADATA["LIST1"], [5,6,7]) 204 | 205 | try: 206 | m.metadata.list1[3] 207 | self.fail("Did not receive expected IndexError") 208 | except IndexError: 209 | pass 210 | 211 | self.assertIsNot(m.metadata.list2, Missing) 212 | self.assertIsNot(m.metadata.list2[0], Missing) 213 | self.assertIsNot(m.metadata.list2[1], Missing) 214 | self.assertIsNot(m.metadata.list2[1].g, Missing) 215 | self.assertIsNot(m.metadata.list2[1].h, Missing) 216 | self.assertIs(m.metadata.list2[1].notthere, Missing) 217 | self.assertIsNone(m.metadata.anull) 218 | 219 | self.assertIsNot(m.METADATA.LIST2, Missing) 220 | self.assertIsNot(m.METADATA.LIST2[0], Missing) 221 | self.assertIsNot(m.METADATA.LIST2[1], Missing) 222 | self.assertIsNot(m.METADATA.LIST2[1].G, Missing) 223 | self.assertIsNot(m.METADATA.LIST2[1].H, Missing) 224 | self.assertIs(m.METADATA.LIST2[1].notthere, Missing) 225 | self.assertIsNone(m.METADATA.anull) 226 | 227 | 228 | self.assertEqual(m.metadata.astring, "thevalue") 229 | self.assertEqual(m.metadata["astring"], "thevalue") 230 | 231 | m.list3 = ['a', 'b'] 232 | self.assertIsNot(m.list3, Missing) 233 | self.assertIsNot(m["list3"], Missing) 234 | self.assertEqual(m["list3"][0], "a") 235 | 236 | m.a = 5 237 | m.b = "hello" 238 | m.c = True 239 | m.d = False 240 | m.e = None 241 | 242 | self.assertEqual(m.a, 5) 243 | self.assertEqual(m.b, "hello") 244 | self.assertEqual(m.c, True) 245 | self.assertEqual(m.d, False) 246 | self.assertEqual(m.e, None) 247 | 248 | def test_dict_match(self): 249 | 250 | d = Model({ 
251 | 'a': 1, 252 | 'b': 2, 253 | 'c': { 254 | 'x': 1, 255 | 'y': 2, 256 | 'z': ['z1', 'z2', 'z3'] 257 | } 258 | }) 259 | 260 | self.assertTrue(d.can_match({'a': 1})) 261 | self.assertFalse(d.can_match({'a': 3})) 262 | 263 | self.assertTrue(d.can_match({'a': 1, 'b': 2})) 264 | self.assertFalse(d.can_match({'a': 1, 'b': 4})) 265 | self.assertFalse(d.can_match({'a': 1, 'r': 4})) 266 | 267 | self.assertTrue(d.can_match({'a': 1, 'b': 2, 'c': {}})) 268 | self.assertTrue(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1}})) 269 | self.assertFalse(d.can_match({'a': 1, 'b': 2, 'c': {'x': 2}})) 270 | self.assertTrue(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1, 'y': 2}})) 271 | self.assertFalse(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1, 'y': 3}})) 272 | 273 | self.assertTrue(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1, 'y': 2, 'z': []}})) 274 | self.assertTrue(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1, 'y': 2, 'z': ['z1']}})) 275 | self.assertTrue(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1, 'y': 2, 'z': ['z1', 'z2']}})) 276 | self.assertFalse(d.can_match({'a': 1, 'b': 2, 'c': {'x': 1, 'y': 2, 'z': ['z1', 'z5']}})) 277 | 278 | def test_list_match(self): 279 | 280 | l1 = ListModel(["a", "b", "c"]) 281 | self.assertTrue(l1.can_match(l1)) 282 | self.assertTrue(l1.can_match([])) 283 | self.assertTrue(l1.can_match(["b", "c"])) 284 | self.assertTrue(l1.can_match(["a", "c"])) 285 | self.assertTrue(l1.can_match("c")) 286 | self.assertTrue(l1.can_match("a")) 287 | 288 | nomatch_lm = ListModel(["1"]) 289 | self.assertFalse(l1.can_match(nomatch_lm)) 290 | self.assertFalse(l1.can_match("1")) 291 | self.assertFalse(l1.can_match(["1"])) 292 | self.assertFalse(l1.can_match(["1", "2"])) 293 | self.assertFalse(l1.can_match(True)) 294 | 295 | self.assertFalse(l1.can_match({"a": 2})) 296 | 297 | l2 = ListModel([True]) 298 | self.assertTrue(l2.can_match(True)) 299 | self.assertFalse(l2.can_match(False)) 300 | 301 | l3 = ListModel([ 302 | { 303 | "a": 1, 304 | "b": 2, 305 | "c": 3 306 | }, 307 | { 308 | "d": 
1, 309 | "e": 2, 310 | "f": 3 311 | }, 312 | { 313 | "d": True, 314 | "e": [2, 3, True], 315 | "f": 3 316 | } 317 | ]) 318 | 319 | self.assertTrue(l3.can_match( 320 | { 321 | "c": 3 322 | } 323 | )) 324 | self.assertTrue(l3.can_match( 325 | { 326 | "c": 3, 327 | "a": 1 328 | } 329 | )) 330 | self.assertTrue(l3.can_match( 331 | { 332 | "c": 3, 333 | "a": 1, 334 | "b": 2 335 | } 336 | )) 337 | self.assertFalse(l3.can_match( 338 | { 339 | "a": 1, 340 | "b": 3, 341 | } 342 | )) 343 | self.assertFalse(l3.can_match( 344 | { 345 | "b": 3, 346 | } 347 | )) 348 | self.assertTrue(l3.can_match( 349 | { 350 | "d": True, 351 | "f": 3, 352 | } 353 | )) 354 | self.assertFalse(l3.can_match( 355 | { 356 | "e": 3, 357 | } 358 | )) 359 | self.assertTrue(l3.can_match( 360 | { 361 | "e": [3], 362 | } 363 | )) 364 | self.assertTrue(l3.can_match( 365 | { 366 | "e": [2, 3], 367 | } 368 | )) 369 | self.assertTrue(l3.can_match( 370 | { 371 | "e": [2, 3, True], 372 | } 373 | )) 374 | self.assertFalse(l3.can_match( 375 | { 376 | "d": True, 377 | "e": [2, 3, False], 378 | } 379 | )) 380 | self.assertTrue(l3.can_match( 381 | { 382 | "d": True, 383 | "e": [2, 3, True], 384 | } 385 | )) 386 | 387 | l4 = ListModel([ 388 | { 389 | "a": 1, 390 | "b": { 391 | "a1": 5, 392 | "b1": { 393 | "a2": 6, 394 | "b2": { 395 | "a3": 7, 396 | "b3": 8 397 | } 398 | } 399 | }, 400 | "c": 3 401 | }, 402 | ]) 403 | 404 | self.assertTrue(l4.can_match( 405 | { 406 | "a": 1, 407 | } 408 | )) 409 | self.assertTrue(l4.can_match( 410 | { 411 | "a": 1, 412 | "b": { 413 | "a1": 5 414 | } 415 | } 416 | )) 417 | self.assertTrue(l4.can_match( 418 | { 419 | "a": 1, 420 | "b": { 421 | "a1": 5, 422 | "b1": { 423 | "a2": 6 424 | } 425 | } 426 | } 427 | )) 428 | self.assertTrue(l4.can_match( 429 | { 430 | "a": 1, 431 | "b": { 432 | "a1": 5, 433 | "b1": { 434 | "b2": { 435 | "b3": 8 436 | } 437 | } 438 | } 439 | } 440 | )) 441 | 442 | 443 | if __name__ == '__main__': 444 | unittest.main() 445 | 
-------------------------------------------------------------------------------- /packages/openshift_client/test_selector.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import unittest 4 | 5 | from .selector import selector 6 | from .naming import qname_matches 7 | 8 | 9 | class TestSelector(unittest.TestCase): 10 | 11 | def test_qname_matches(self): 12 | self.assertTrue(qname_matches('template/x', 'template/x')) 13 | self.assertTrue(qname_matches('template/x', ['template/x'])) 14 | self.assertTrue(qname_matches('template/x', ['template/y', 'template/x'])) 15 | self.assertFalse(qname_matches('template/x', ['template/y', 'template/x2'])) 16 | 17 | # See whether fuzzy matching of kinds is working 18 | 19 | self.assertTrue(qname_matches('template/django', ['template.template.openshift.io/django'])) 20 | self.assertFalse(qname_matches('template/django', ['template.template.openshift.io/django.2'])) 21 | self.assertTrue(qname_matches('template/django', ['template.template/django'])) 22 | self.assertFalse(qname_matches('template/django', ['template.template/django.2'])) 23 | 24 | self.assertFalse(qname_matches('template2/django', ['template.template.openshift.io/django'])) 25 | self.assertFalse(qname_matches('template/django2', ['template.template.openshift.io/django'])) 26 | 27 | def test_set_operations(self): 28 | s1 = selector([]) 29 | s2 = selector(['pod/abc', 'pod/xyz']) 30 | self.assertEqual(s1.subtract(s2).qnames(), []) 31 | self.assertEqual(s1.union(s2).qnames(), ['pod/abc', 'pod/xyz']) 32 | 33 | s3 = selector(['pod/abc2', 'pod/xyz']) 34 | self.assertEqual(s2.subtract(s3).qnames(), ['pod/abc']) 35 | self.assertEqual(s2.intersect(s3).qnames(), ['pod/xyz']) 36 | 37 | # See whether fuzzy matching of kinds is working 38 | 39 | t1 = selector(['template/django']) 40 | t2 = selector(['template.template.openshift.io/django', 'template.template.openshift.io/django2']) 41 | 
self.assertEqual(len(t1.union(t2).qnames()), 2) 42 | self.assertEqual(len(t1.intersect(t2).qnames()), 1) 43 | self.assertEqual(len(t1.subtract(t2).qnames()), 0) 44 | 45 | t1 = selector(['template/django']) 46 | t2 = selector(['template.template.openshift.io/django']) 47 | self.assertEqual(len(t1.union(t2).qnames()), 1) 48 | self.assertEqual(len(t1.intersect(t2).qnames()), 1) 49 | self.assertEqual(len(t1.subtract(t2).qnames()), 0) 50 | 51 | 52 | if __name__ == '__main__': 53 | unittest.main() 54 | -------------------------------------------------------------------------------- /packages/openshift_client/test_util.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import unittest 4 | 5 | from .util import extract_numerical_value 6 | from .model import Missing 7 | 8 | 9 | def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): 10 | return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) 11 | 12 | 13 | class TestSelector(unittest.TestCase): 14 | 15 | def test_extract_numerical_value(self): 16 | test_dict = { 17 | None: 0.0, 18 | '': 0.0, 19 | 'i': 0, 20 | 'M': 0.0, 21 | 'Mi': 0.0, 22 | '0': 0.0, 23 | '0i': 0.0, 24 | '0n': 0.0, 25 | '0ni': 0.0, 26 | '1e2': 100.0, 27 | '1e2Mi': 104857600.0, 28 | '1e2i': 100.0, 29 | '1e2M': 100000000.0, 30 | '.314ni': 2.9243528842926026e-10, 31 | '3.14n': 3.1400000000000003e-09, 32 | '3.14u': 3.14e-06, 33 | '3.14m': 0.00314, 34 | '3.14': 3.14, 35 | '3.14i': 3.14, 36 | '3.14K': 3140.0, 37 | '3.14k': 3140.0, 38 | '3.14M': 3140000.0, 39 | '3.14G': 3140000000.0, 40 | '3.14T': 3140000000000.0, 41 | '3.14P': 3140000000000000.0, 42 | '3.14E': 3.14e+18, 43 | '314.Ei': 3.6201735244654995e+20 44 | } 45 | 46 | for i in test_dict.keys(): 47 | self.assertTrue(isclose(test_dict[i], extract_numerical_value(i))) 48 | # test oc.Missing 49 | self.assertTrue(isclose(extract_numerical_value(Missing), 0.0)) 50 | 51 | 52 | if __name__ == '__main__': 53 | unittest.main() 54 | 
-------------------------------------------------------------------------------- /packages/openshift_client/util.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | 3 | import tempfile 4 | import sys 5 | import io 6 | import os 7 | import errno 8 | import json 9 | import six 10 | 11 | 12 | # Context manager that will swap stdout/stderr with buffers. 13 | # Anything the inner block prints will be captured in these 14 | # buffers and availed in the as: object. 15 | class OutputCapture(object): 16 | 17 | def __init__(self): 18 | self.out = io.BytesIO() 19 | self.err = io.BytesIO() 20 | 21 | def __enter__(self): 22 | sys.stdout = self.out 23 | sys.stderr = self.err 24 | return self 25 | 26 | def __exit__(self, exc_type, exc_value, traceback): 27 | sys.stdout = sys.__stdout__ 28 | sys.stderr = sys.__stderr__ 29 | 30 | 31 | class TempFile(object): 32 | """ 33 | Creates a temporary file, open for reading/writing within the context. 34 | If content is specified, it is written into the file when created and 35 | the file position is reset to 0. 
36 | """ 37 | 38 | def __init__(self, content=None, suffix=".tmp"): 39 | self.suffix = suffix 40 | self.file = None 41 | self.content = content 42 | 43 | def __enter__(self): 44 | self.file = tempfile.TemporaryFile(suffix=self.suffix, prefix="openshift-client-python") 45 | 46 | if self.content: 47 | try: 48 | self.file.write(self.content.encode('utf-8')) 49 | self.flush() 50 | self.file.seek(0, os.SEEK_SET) # seek to the beginning of the file 51 | except Exception: 52 | self.destroy() 53 | raise 54 | 55 | return self 56 | 57 | def flush(self): 58 | self.file.flush() 59 | 60 | def read(self): 61 | self.flush() 62 | self.file.seek(0, os.SEEK_SET) 63 | return self.file.read() 64 | 65 | def destroy(self): 66 | if self.file is not None: 67 | try: 68 | self.file.close() 69 | except Exception: 70 | pass 71 | self.file = None 72 | 73 | def __exit__(self, type, value, traceback): 74 | self.destroy() 75 | 76 | 77 | def split_names(output): 78 | """ 79 | Designed to split up output from -o=name into a 80 | simple list of qualified object names ['kind/name', 'kind/name', ...] 
81 | :param output: A single string containing all of the output to parse 82 | :return: A list of qualified object names 83 | """ 84 | if output is None: 85 | return [] 86 | return [x.strip() for x in output.strip().split("\n") if x.strip() != ""] 87 | 88 | 89 | def is_collection_type(obj): 90 | return isinstance(obj, (list, tuple, set)) 91 | 92 | 93 | def indent_lines(text, padding=' '): 94 | return ''.join(padding+line for line in text.splitlines(True)) 95 | 96 | 97 | def print_logs(stream, logs_dict, initial_indent_count=0, encoding='utf-8'): 98 | indent = ' ' * initial_indent_count 99 | next_indent = ' ' * (initial_indent_count + 2) 100 | for container_fqn, log in six.iteritems(logs_dict): 101 | stream.write(u'{}[logs:begin]{}========\n'.format(indent, container_fqn)) 102 | value_string = log.strip().replace('\r\n', '\n') 103 | stream.write(u'{}\n'.format(indent_lines(value_string, next_indent))) 104 | stream.write(u'{}[logs:end]{}========\n'.format(indent, container_fqn)) 105 | 106 | 107 | def print_report_entry(stream, d, initial_indent_count=0, encoding='utf-8'): 108 | indent = ' ' * initial_indent_count 109 | next_indent = ' ' * (initial_indent_count + 2) 110 | for entry, value in six.iteritems(d): 111 | stream.write(u'{}*{}:\n'.format(indent, entry)) 112 | 113 | if entry == 'logs': 114 | print_logs(stream, value, initial_indent_count + 2, encoding=encoding) 115 | else: 116 | if isinstance(value, dict): # for 'object' 117 | value_string = json.dumps(value, indent=2) 118 | elif isinstance(value, six.string_types): # for 'describe' 119 | value_string = value.strip().replace('\r\n', '\n') 120 | else: 121 | value_string = u'{}'.format(value) 122 | 123 | stream.write(u'{}\n'.format(indent_lines(value_string, next_indent))) 124 | 125 | 126 | def print_report(stream, report_dict, initial_indent_count=0, encoding='utf-8'): 127 | indent = ' ' * initial_indent_count 128 | for fqn, details in six.iteritems(report_dict): 129 | 
stream.write(u'\n{}[report:begin]{}========\n'.format(indent, fqn)) 130 | print_report_entry(stream, details, initial_indent_count + 2, encoding=encoding) 131 | stream.write(u'\n{}[report:end]{}========\n'.format(indent, fqn)) 132 | 133 | 134 | def mkdir_p(path): 135 | try: 136 | os.makedirs(path) 137 | except OSError as exc: # Python >2.5 138 | if exc.errno == errno.EEXIST and os.path.isdir(path): 139 | pass 140 | else: 141 | raise 142 | return path 143 | 144 | 145 | # unit scale used by kubernetes 146 | _unit_scales = {'n': -3, 'u': -2, 'm': -1, 'k': 1, 'K': 1, 'M': 2, 'G': 3, 'T': 4, 'P': 5, 'E': 6} 147 | 148 | 149 | def extract_numerical_value(val): 150 | """Extract numerical values from string, removing any units present 151 | e.g, 10K => 10000; 10Ki => 10240 """ 152 | if not val: 153 | return 0 154 | base = 10 155 | value = 0 156 | power = 0 157 | power_scale = 3 158 | unit_place = -1 159 | if val[-1] == 'i': 160 | if len(val) < 3: 161 | return 0 162 | base = 2 163 | power_scale = 10 164 | unit_place = -2 165 | unit = val[unit_place] 166 | if unit in _unit_scales: 167 | power = _unit_scales[unit] 168 | if len(val[:unit_place]) == 0: 169 | value = 0 170 | else: 171 | value = float(val[:unit_place]) 172 | elif unit_place == -2: 173 | value = float(val[:-1]) 174 | else: 175 | value = float(val) 176 | return value * pow(base, power*power_scale) 177 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [tool.setuptools.packages.find] 6 | where = ["packages"] 7 | 8 | [tool.setuptools.dynamic] 9 | version = {attr = "openshift_client.__VERSION__"} 10 | 11 | [project] 12 | name = "openshift-client" 13 | description = "OpenShift python client" 14 | keywords = ["OpenShift"] 15 | readme = "README.md" 16 | license = {file = "LICENSE"} 17 | authors 
= [ 18 | {name = "Justin Pierce", email = "jupierce@redhat.com"}, 19 | ] 20 | maintainers = [ 21 | {name = "Brad Williams", email = "brawilli@redhat.com"}, 22 | ] 23 | classifiers = [ 24 | "Development Status :: 4 - Beta", 25 | "Intended Audience :: Developers", 26 | "Intended Audience :: Information Technology", 27 | "License :: OSI Approved :: Apache Software License", 28 | "Operating System :: OS Independent", 29 | "Programming Language :: Python", 30 | "Programming Language :: Python :: 3", 31 | "Programming Language :: Python :: 3.12", 32 | "Topic :: Utilities", 33 | ] 34 | requires-python = ">= 3.0" 35 | dependencies = [ 36 | "build", 37 | "six", 38 | "pyyaml", 39 | ] 40 | dynamic = [ 41 | "version", 42 | ] 43 | 44 | [project.optional-dependencies] 45 | ssh = ["paramiko"] 46 | 47 | [project.urls] 48 | Homepage = "https://github.com/openshift/openshift-client-python" 49 | Issues = "https://github.com/openshift/openshift-client-python/issues" 50 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # This file can be used to install dependencies for local library development 3 | # 4 | build 5 | six 6 | pyyaml 7 | paramiko 8 | -------------------------------------------------------------------------------- /run_unittests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]:-$0}") 4 | 5 | export PYTHONPATH="${SCRIPT_ROOT}/packages" 6 | cd ${SCRIPT_ROOT}/packages 7 | 8 | python3 -m unittest discover 9 | --------------------------------------------------------------------------------