├── .dockerignore ├── .gitignore ├── .papr.yaml ├── Dockerfile ├── LICENSE ├── README.md ├── docs ├── DISTROS.md ├── RUNNING.md └── sample.papr.yml ├── experimental └── papr2kube.py ├── papr ├── __init__.py ├── main ├── provisioner ├── spawner.py ├── testrunner └── utils │ ├── __init__.py │ ├── common.py │ ├── common.sh │ ├── ext_schema.py │ ├── gh.py │ ├── index.j2 │ ├── indexer.py │ ├── os_provision.py │ ├── parser.py │ ├── required-index.j2 │ ├── schema.yml │ ├── sshwait │ └── user-data ├── requirements.txt ├── setup.py └── validator.py /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Distribution / packaging 7 | build/ 8 | dist/ 9 | *.egg-info/ 10 | 11 | # virtualenv 12 | .venv/ 13 | -------------------------------------------------------------------------------- /.papr.yaml: -------------------------------------------------------------------------------- 1 | branches: 2 | - master 3 | - auto 4 | - try 5 | 6 | required: true 7 | 8 | container: 9 | image: registry.fedoraproject.org/fedora:27 10 | 11 | packages: 12 | - '@buildsys-build' 13 | - python3-devel 14 | 15 | tests: 16 | - pip3 install flake8 pylint -r requirements.txt 17 | - flake8 *.py papr/*.py papr/utils/*.py 18 | - pylint -E *.py papr 19 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM registry.fedoraproject.org/fedora:25 2 | MAINTAINER Jonathan Lebon 3 | 4 | # NB: we install libyaml-devel so that we can use 5 | # CSafeLoader in PyYAML (see related comment in the parser) 6 | 7 | RUN dnf install -y \ 8 | git \ 9 | gcc \ 10 | sudo \ 11 | docker \ 12 | findutils \ 13 | python3-devel \ 14 | redhat-rpm-config \ 15 | python3-pip \ 16 | libyaml-devel \ 17 | nmap-ncat && \ 18 | dnf clean all 19 | 20 | # There's a tricky bit here. We mount $PWD at $PWD in the 21 | # container so that when we do the nested docker run in the 22 | # main script, the paths the daemon receives will still be 23 | # correct from the host perspective. 24 | 25 | # We use --net=host here to be able to communicate with the 26 | # internal OpenStack instance. For some reason, the default 27 | # bridge docker sets up causes issues. Will debug this 28 | # properly eventually. 29 | 30 | LABEL RUN="/usr/bin/docker run --rm --privileged \ 31 | -v /run/docker.sock:/run/docker.sock \ 32 | -v \"\$PWD:\$PWD\" --workdir \"\$PWD\" \ 33 | --net=host \ 34 | -e github_repo \ 35 | -e github_branch \ 36 | -e github_pull_id \ 37 | -e github_commit \ 38 | -e github_contexts \ 39 | -e github_token \ 40 | -e os_keyname \ 41 | -e os_privkey \ 42 | -e os_network \ 43 | -e os_floating_ip_pool \ 44 | -e s3_prefix \ 45 | -e site_repos \ 46 | -e OS_AUTH_URL \ 47 | -e OS_TENANT_ID \ 48 | -e OS_TENANT_NAME \ 49 | -e OS_USERNAME \ 50 | -e OS_PASSWORD \ 51 | -e AWS_ACCESS_KEY_ID \ 52 | -e AWS_SECRET_ACCESS_KEY \ 53 | -e BUILD_ID \ 54 | -e RHCI_DEBUG_NO_TEARDOWN \ 55 | -e RHCI_DEBUG_ALWAYS_RUN \ 56 | -e RHCI_DEBUG_USE_NODE \ 57 | \${OPT1} \ 58 | \${IMAGE}" 59 | 60 | COPY . 
/src 61 | 62 | RUN pip3 install -r /src/requirements.txt /src 63 | 64 | CMD ["/usr/bin/papr"] 65 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Jonathan Lebon 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ### ⚠️⚠️⚠️ PAPR has been decommissioned ⚠️⚠️⚠️ 2 | 3 | It has been replaced by 4 | [CoreOS CI](https://github.com/coreos/coreos-ci) and 5 | [Prow](https://github.com/openshift/release/). 6 | 7 | See also https://github.com/coreos/fedora-coreos-tracker/issues/764. 8 | See also https://github.com/projectatomic/papr/issues/105. 9 | 10 | ### PAPR (previously called redhat-ci) 11 | 12 | PAPR (pronounced like "paper") is a testing tool similar in 13 | workflow to Travis CI, but with an emphasis on enabling test 14 | environments useful for the Project Atomic effort. Only 15 | Fedora and CentOS-based test environments are supported for 16 | now (see [DISTROS](docs/DISTROS.md) for details). 17 | 18 | Configured projects have a `.papr.yml` file located in their 19 | repositories, detailing how to provision the environment and 20 | which tests should be run. Multiple testsuites can be 21 | defined, each with a different "context" (these refer to the 22 | names of the status checkmarks that appear on GitHub pull 23 | requests). A sample YAML file with allowed keys can be found 24 | [here](docs/sample.papr.yml). 25 | 26 | A running instance of this service is currently maintained 27 | in the internal Red Hat infrastructure and is set up to 28 | monitor a growing list of projects. The full list of 29 | monitored repos appears below. 30 | 31 | If you'd like to run *your own instance* of this service, 32 | please see [RUNNING](docs/RUNNING.md). 
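For a quick sense of what a configuration looks like, here is a minimal,
illustrative `.papr.yml` condensed from the sample linked above, using only
the `container` provisioner and a single test command:

```yaml
# Minimal illustrative configuration; see docs/sample.papr.yml for all keys.
branches:
  - master

container:
  image: registry.fedoraproject.org/fedora:27

tests:
  - make check

context: 'Fedora 27 container'
```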
33 | 34 | ### Monitored projects 35 | 36 | - [autotest/autotest-docker](https://github.com/autotest/autotest-docker.git) 37 | - [flatpak/flatpak](https://github.com/flatpak/flatpak) 38 | - [flatpak/flatpak-builder](https://github.com/flatpak/flatpak-builder) 39 | - [openshift/openshift-ansible](https://github.com/openshift/openshift-ansible) 40 | - [ostreedev/ostree](https://github.com/ostreedev/ostree) 41 | - [projectatomic/atomic](https://github.com/projectatomic/atomic) 42 | - [projectatomic/atomic-host-tests](http://github.com/projectatomic/atomic-host-tests) 43 | - [projectatomic/atomic-system-containers](https://github.com/projectatomic/atomic-system-containers) 44 | - [projectatomic/bubblewrap](https://github.com/projectatomic/bubblewrap) 45 | - [projectatomic/buildah](https://github.com/projectatomic/buildah) 46 | - [projectatomic/bwrap-oci](https://github.com/projectatomic/bwrap-oci) 47 | - [projectatomic/commissaire](https://github.com/projectatomic/commissaire) 48 | - [projectatomic/commissaire-http](https://github.com/projectatomic/commissaire-http) 49 | - [projectatomic/commissaire-service](https://github.com/projectatomic/commissaire-service) 50 | - [projectatomic/container-storage-setup](https://github.com/projectatomic/container-storage-setup) 51 | - [projectatomic/docker](https://github.com/projectatomic/docker) 52 | - [projectatomic/papr](https://github.com/projectatomic/papr) 53 | - [projectatomic/registries](https://github.com/projectatomic/registries) 54 | - [projectatomic/rpm-ostree](https://github.com/projectatomic/rpm-ostree) 55 | 56 | **If you would like to have a repository added, please open 57 | a pull request to update the list above.** 58 | 59 | ### More details about Project Atomic CI services 60 | 61 | In addition to PAPR, many of the projects above are also 62 | hooked up to 63 | [our instance of](https://homu-projectatomic-ci.apps.ci.centos.org/) 64 | the upstream [Homu](https://github.com/servo/homu/) project. 65 | 66 | While PAPR deals with automatic testing of branches and 67 | PRs, Homu is used as a merge bot. 68 | 69 | You only need to know a few commands to interact with these 70 | services: 71 | - If PR tests failed and you'd like to rerun them, use 72 | `bot, retest this please`. 73 | - If a PR is ready to be merged, use 74 | `@rh-atomic-bot r+ `. This will rebase the PR 75 | on the target branch, *rerun* the tests, and push the 76 | commits if the tests pass. 77 | - If the merge failed and you want to retest it, use 78 | `@rh-atomic-bot retry`. 79 | 80 | **NOTE: it is not required (but encouraged!) to use Homu as a merge 81 | bot when using PAPR to automatically run tests against your PRs. 
82 | If your repo is currently only using PAPR and would like to start using 83 | Homu, [open an issue here](https://github.com/projectatomic/papr/issues/new) 84 | to request usage of Homu.** 85 | -------------------------------------------------------------------------------- /docs/DISTROS.md: -------------------------------------------------------------------------------- 1 | # Supported distros 2 | 3 | Below is a full list of valid values for the `distro` key in 4 | the YAML: 5 | 6 | - `fedora/27/atomic` 7 | - `fedora/27/atomic/pungi` 8 | - `fedora/27/cloud` 9 | - `fedora/27/cloud/pungi` 10 | - `fedora/28/atomic` 11 | - `fedora/28/atomic/pungi` 12 | - `fedora/28/cloud` 13 | - `centos/7/atomic` 14 | - `centos/7/cloud` 15 | - `centos/7/atomic/smoketested` 16 | - `centos/7/atomic/continuous` 17 | 18 | It follows a consistent pattern: 19 | 20 | ``` 21 | $distro/$releasever/$variant[/$stream] 22 | ``` 23 | 24 | The images are automatically updated whenever the upstreams 25 | update them. 26 | 27 | ## Fedora streams: 28 | 29 | ### fedora/28/atomic 30 | 31 | This is the officially supported Atomic Host image from 32 | Fedora. They are usually updated 33 | [every two 34 | weeks](https://fedoraproject.org/wiki/Changes/Two_Week_Atomic). 35 | You can download the image yourself from 36 | https://getfedora.org/en/atomic/download/. 37 | 38 | ### fedora/28/atomic/pungi 39 | 40 | These are fetched from koji after the (currently daily) runs 41 | of [pungi](https://pagure.io/pungi). 42 | 43 | ### fedora/27/atomic 44 | 45 | This is the last officially released Fedora 27 Atomic Host 46 | image, which is no longer updated. It will be removed once 47 | projects have migrated to Fedora 28. 48 | 49 | ### fedora/27/cloud, fedora/28/cloud 50 | 51 | These are the officially supported Cloud images from Fedora. 52 | They are not usually updated after initial release. You can 53 | download the image yourself from 54 | https://alt.fedoraproject.org/en/cloud/. 55 | 56 | ## CentOS streams: 57 | 58 | ### centos/7/atomic 59 | 60 | This is the official CentOS Atomic Host release. It follows 61 | the component versions of Red Hat Enterprise Linux Atomic 62 | Host and is usually updated every 2 to 6 weeks after RHEL 63 | releases. You can download the image yourself from 64 | https://wiki.centos.org/SpecialInterestGroup/Atomic/Download. 65 | 66 | ### centos/7/cloud 67 | 68 | This is the official CentOS Cloud variant. Also usually 69 | updated every 2 to 6 weeks after RHEL releases. You can 70 | download the image yourself from 71 | https://wiki.centos.org/Download. 72 | 73 | ### centos/7/atomic/smoketested, centos/7/atomic/continuous 74 | 75 | These images are built much more frequently and closely 76 | track the master branch of the git repositories of many core 77 | components of Atomic Host. For more information about these 78 | streams, and to download the images yourself, see 79 | https://wiki.centos.org/SpecialInterestGroup/Atomic/Devel. 80 | -------------------------------------------------------------------------------- /docs/RUNNING.md: -------------------------------------------------------------------------------- 1 | ### Parameters 2 | 3 | The script takes no arguments, but expects the following 4 | environment vars to be set: 5 | 6 | - `github_repo` -- GitHub repo in `/` format. 7 | - `github_branch` -- Branch to test (incompatible with 8 | `gihub_pull_id`). 9 | - `github_pull_id` -- Pull request ID to test (incompatible 10 | with `github_branch`). 
11 | 12 | The following optional environment vars may be set: 13 | 14 | - `github_commit` -- SHA of commit to expect; this allows 15 | for handling of race conditions. 16 | - `github_token` -- If specified, update the commit status 17 | using GitHub's API, accessed with this repo-scoped token. 18 | - `github_contexts` -- A pipe-separated list of contexts. If 19 | specified, only the testsuites which set these contexts 20 | will be run. 21 | - `os_keyname` -- OpenStack keypair to use for provisioning, 22 | if you want to support virtualized tests. 23 | - `os_privkey` -- Private key corresponding to the OpenStack 24 | keyname, if you want to support virtualized tests. 25 | - `os_network` -- OpenStack network to use for provisioning, 26 | if you want to support virtualized tests. 27 | - `os_floating_ip_pool` -- If specified, assign a floating 28 | IP to the provisioned node from this pool and use the IP 29 | to communicate with it. This is required if not running on 30 | the same OpenStack network as the node. 31 | - `s3_prefix` -- If specified, artifacts will be uploaded to 32 | this S3 path, in `[/]` form. 33 | - `site_repos` -- If specified, pipe-separated list of 34 | repo files to inject. Each entry specifies the OS it is 35 | valid for. E.g.: 36 | 37 | ``` 38 | centos/7=http://example.com/centos.repo|fedora/*=repos/fedora.repo 39 | ``` 40 | 41 | If you want to support virtualized tests, it also implicitly 42 | expects the usual OpenStack variables needed for 43 | authentication. These can normally be sourced from an RC 44 | file downloadable from the OpenStack interface: 45 | 46 | - `OS_AUTH_URL` 47 | - `OS_TENANT_ID` 48 | - `OS_USERNAME` 49 | - `OS_PASSWORD` 50 | 51 | Finally, AWS credentials may additionally be specified if 52 | uploading artifacts to S3: 53 | 54 | - `AWS_ACCESS_KEY_ID` 55 | - `AWS_SECRET_ACCESS_KEY` 56 | 57 | ### Running 58 | 59 | The `main` script integrates nicely in Jenkins, though it 60 | can be run locally, which is useful for testing. The easiest 61 | way to get started is to run inside a Python virtualenv with 62 | python-novaclient, PyYAML, jinja2, and awscli installed (the 63 | latter only being required if artifact uploading is wanted). 64 | Docker is also expected to be up and running for 65 | containerized tests. 66 | 67 | The script checks out the repo in `checkouts/$repo` and will 68 | re-use it if available rather than cloning each time. No 69 | builds are done on the host; the repo is transferred to the 70 | test environment during provisioning. 71 | 72 | A `state` directory is created, in which all temporary 73 | files that need to be stored during a run are kept. 74 | 75 | ### Exit code 76 | 77 | We return non-zero *only* if there is an infrastructure 78 | error. In other words, no matter whether the PR/branch 79 | passes or fails the tests, we should always expect a clean 80 | exit. PR failures can be reported through the commit status 81 | API, or by looking at the state/rc file. 82 | -------------------------------------------------------------------------------- /docs/sample.papr.yml: -------------------------------------------------------------------------------- 1 | # This is a sample .papr.yml which documents all the 2 | # supported fields. You can validate your .papr.yml file by 3 | # using the validator.py script. It must be in UTF-8 format 4 | # (though only fields denoted to support UTF-8 can contain 5 | # non-ASCII values). 6 | 7 | # REQUIRED (only one of 'host' or 'container' or 'cluster') 8 | # Provision a single host. 
9 | host: 10 | # REQUIRED 11 | # Specify the distro to provision (see DISTROS.md for full list). 12 | distro: fedora/27/atomic 13 | 14 | # OPTIONAL 15 | # Specify the minimum requirements the host must 16 | # fulfill. The values defined below are the defaults 17 | # used when no minimums are specified. 18 | specs: 19 | # OPTIONAL 20 | # Minimum amount of RAM in MB. 21 | ram: 2048 22 | # OPTIONAL 23 | # Minimum number of CPUs. 24 | cpus: 1 25 | # OPTIONAL 26 | # Minimum size of primary disk in GB. 27 | disk: 20 28 | # OPTIONAL 29 | # Minimum size of secondary disk in GB. Set to 0 if no 30 | # secondary disk is needed. 31 | secondary-disk: 0 32 | 33 | # OPTIONAL 34 | # If using an Atomic Host variant, you can specify which 35 | # ostree to use. To use the latest tree on the current 36 | # branch, use: 37 | ostree: latest 38 | 39 | # Otherwise, you may specify a tree by providing a dict: 40 | ostree: 41 | # OPTIONAL 42 | # Specify the remote to which to rebase. If omitted, 43 | # the current remote is used. 44 | remote: http://example.com/remote/repo 45 | # OPTIONAL 46 | # Specify the branch to which to rebase. If omitted, 47 | # the current branch is used. 48 | branch: my/branch 49 | # OPTIONAL 50 | # Specify the version or checksum to deploy. If 51 | # omitted, the latest commit is used. 52 | revision: 7.145.42 53 | 54 | # REQUIRED (only one of 'host' or 'container' or 'cluster') 55 | # Provision a container. 56 | container: 57 | # REQUIRED 58 | # Specify an FQIN or Docker Hub image. 59 | image: registry.fedoraproject.org/fedora:27 60 | 61 | # REQUIRED (only one of 'host' or 'container' or 'cluster') 62 | # Provision multiple hosts. 63 | cluster: 64 | # REQUIRED 65 | # List of hosts to provision. The same keys as `host` 66 | # above are accepted. 67 | hosts: 68 | # REQUIRED 69 | # Node hostname. Also makes the environment variable 70 | # $RHCI_{name}_IP available (with dots and dashes 71 | # replaced by underscores). 72 | - name: host1 73 | distro: centos/7/atomic 74 | ostree: latest 75 | - name: host2 76 | distro: fedora/25/cloud 77 | specs: 78 | secondary-disk: 10 79 | # OPTIONAL 80 | # If specified, the scripts are run on this container. 81 | # If omitted, the scripts are run on the first host 82 | # listed in the 'hosts' list. 83 | container: 84 | image: fedora:27 85 | 86 | # OPTIONAL 87 | # List the branches to test. If omitted, only the master 88 | # branch is tested. 89 | branches: 90 | - master 91 | - dev 92 | 93 | # OPTIONAL 94 | # Whether this testsuite should run on pull requests. This 95 | # is useful for example if you would like longer tests to 96 | # only run on branches (for use with a merge bot). If 97 | # omitted, defaults to true. 98 | pulls: true 99 | 100 | # OPTIONAL 101 | # GitHub commit status context to use when reporting back 102 | # status updates to GitHub. If omitted, defaults to 103 | # 'Red Hat CI'. 104 | context: 'CI Tester' 105 | 106 | # OPTIONAL 107 | # Mark this testsuite as required. This causes a special 108 | # "required" context to be reported to GitHub. The result is 109 | # set to successful only if all testsuites marked as 110 | # required are also successful. This is useful for grouping 111 | # statuses under a single context to be used by the branch 112 | # protection settings or by a merge bot, rather than dealing 113 | # with multiple (possibly changing) statuses. 114 | required: true 115 | 116 | # OPTIONAL 117 | # Additional YUM repositories to inject during provisioning. 
118 | extra-repos: 119 | - name: my-repo-name # REQUIRED key 120 | baseurl: https://example.com/repo 121 | - name: my-other-repo-name # REQUIRED key 122 | metalink: https://example.com/metalink?repo=mydistro 123 | gpgcheck: 0 124 | 125 | # OPTIONAL 126 | # List of packages to install during provisioning. If 127 | # running on an Atomic Host variant, packages will be 128 | # layered. 129 | packages: 130 | - make 131 | - gcc 132 | 133 | # OPTIONAL 134 | # Environment variables to define for test procedure. Full 135 | # UTF-8 values are supported. 136 | env: 137 | VAR1: val1 138 | VAR2: val2 139 | SNOWMAN: ☃ 140 | 141 | # REQUIRED (at least one of 'build' or 'tests') 142 | # If the project follows the Build API (see 143 | # https://github.com/cgwalters/build-api), you may specify 144 | # the 'build' key to automatically build the code. 145 | build: true 146 | 147 | # You may customize the build further by providing a dict 148 | # instead. 149 | build: 150 | # OPTIONAL 151 | # Customize the configuration step. 152 | config-opts: > 153 | --enable-systemd 154 | CFLAGS='-fsanitize=undefined' 155 | # OPTIONAL 156 | # Customize the build step. 157 | build-opts: MYVAR=1 158 | # OPTIONAL 159 | # Customize the install step. 160 | install-opts: DESTDIR=$PWD/install 161 | 162 | # REQUIRED (at least one of 'build' or 'tests') 163 | # Put the tasks to be executed in the 'tests' key. They are 164 | # run from the root of the repo. If the 'build' key is also 165 | # present, tests will run after a successful build. Full 166 | # UTF-8 values are supported. 167 | tests: 168 | - make check 169 | - make installcheck LANG=français 170 | - ansible-playbook -i host1,$RHCI_host2_IP, playbook.yml 171 | 172 | # OPTIONAL 173 | # Time to allow before aborting tests. Must satisfy regex 174 | # '[0-9]+[smh]'. If omitted, defaults to '2h', which is the 175 | # maximum. 176 | timeout: 30m 177 | 178 | # OPTIONAL 179 | # List of files/directories to upload to Amazon S3. 180 | artifacts: 181 | - test-suite.log 182 | 183 | --- 184 | 185 | # It is possible to specify multiple testsuites by starting 186 | # a second document. All the same fields are supported, 187 | # though the 'context' key must be different. 188 | 189 | # OPTIONAL 190 | # Start off by inheriting all the keys from the previous 191 | # document. You may then override keys by simply redefining 192 | # them. At least the 'context' key must be overridden. If 193 | # omitted, defaults to false. 194 | inherit: true 195 | 196 | # REQUIRED 197 | # A distinct context must be used here. 198 | context: 'My other testsuite' 199 | 200 | # To unset an inherited key, simply leave off its value. 201 | artifacts: 202 | 203 | # As a convenience, specifying one of the 'host', 204 | # 'container' or 'cluster' keys automatically unsets the 205 | # other two if inherited, so that there is no need to 206 | # explicitly unset them. 207 | host: 208 | distro: fedora/27/atomic 209 | 210 | # There is no support to append to an inherited list. You 211 | # must redefine them as needed. 
212 | packages: 213 | - make 214 | - gcc 215 | - autoconf 216 | 217 | env: 218 | VAR1: newval1 219 | -------------------------------------------------------------------------------- /experimental/papr2kube.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Convert a papr YAML file into a Kubernetes Job 4 | 5 | import os 6 | import sys 7 | import re 8 | import time 9 | import yaml 10 | import traceback 11 | import argparse 12 | import subprocess 13 | 14 | # XXX: switch to relative imports when we're a proper module 15 | from papr import PKG_DIR 16 | import papr.utils.parser as paprparser 17 | 18 | GH_NAME_REGEX = re.compile('^[A-Za-z0-9_.-]+$') 19 | 20 | def paprsuite2kubejob(gh_org, gh_repo, commitsha, suiteidx, suite): 21 | commitsha_short = commitsha[0:10] 22 | name = 'papr-{}-{}-{}-{}'.format(gh_org, gh_repo, commitsha_short, suiteidx) 23 | metadata = {'name': name} 24 | cmd = 'set -xeuo pipefail\n' 25 | for test in suite['tests']: 26 | cmd += test + '\n' 27 | volumes = [ 28 | {'name': 'builddir', 'emptyDir': {}} 29 | ] 30 | containers = [{ 31 | 'name': name, 32 | 'image': suite['container']['image'], 33 | 'volumeMounts': [ 34 | { 'name': 'builddir', 35 | 'mountPath': '/srv', 36 | } 37 | ], 38 | 'securityContext': {'runAsUser': 0}, 39 | 'workingDir': '/srv/build', 40 | 'command': ["/usr/bin/bash", "-c", cmd], 41 | }] 42 | initContainers = [ 43 | { 'name': 'init-git', 44 | 'image': 'registry.centos.org/centos/centos:7', 45 | 'volumeMounts': [ 46 | { 'name': 'builddir', 47 | 'mountPath': '/srv', 48 | } 49 | ], 50 | 'securityContext': {'runAsUser': 0}, 51 | 'workingDir': '/srv/', 52 | 'command': ['/bin/sh', '-c', 53 | '''set -xeuo pipefail; yum -y install git 54 | git clone --depth=100 https://github.com/{gh_org}/{gh_repo} build 55 | cd build 56 | git checkout {commitsha}'''.format(gh_org=gh_org, gh_repo=gh_repo, commitsha=commitsha)] 57 | } 58 | ] 59 | r = {'apiVersion': 'batch/v1', 60 | 'kind': 'Job', 61 | 'metadata': metadata, 62 | 'spec': { 63 | 'template': { 64 | 'metadata': dict(metadata), 65 | 'spec': { 66 | 'volumes': volumes, 67 | 'initContainers': initContainers, 68 | 'containers': containers, 69 | 'restartPolicy': 'Never' 70 | }, 71 | }, 72 | }, 73 | } 74 | return r 75 | 76 | def main(): 77 | "Main entry point." 
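    # Illustrative usage (hypothetical repo/sha values): the generated Job
    # list is written to stdout and can be piped to kubectl, e.g.:
    #   python3 experimental/papr2kube.py projectatomic/papr <commit-sha> .papr.yaml \
    #       | kubectl create -f -
    # Note that only container-based testsuites are converted; host and
    # cluster suites are skipped in the loop below.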
78 | 79 | parser = argparse.ArgumentParser(description='Convert .papr.yml to Kuberentes Jobs') 80 | parser.add_argument('--limit', action='store', type=int, help='Emit at most N jobs') 81 | parser.add_argument('ghid', action='store', help='github repo') 82 | parser.add_argument('commitsha', action='store', help='commit sha') 83 | parser.add_argument('path', action='store', help='Path to papr YAML (normally .papr.yml)') 84 | args = parser.parse_args() 85 | 86 | (org,proj) = args.ghid.split('/', 1) 87 | assert GH_NAME_REGEX.match(org) 88 | assert GH_NAME_REGEX.match(proj) 89 | 90 | suite_parser = paprparser.SuiteParser(args.path) 91 | suites = suite_parser.parse() 92 | 93 | stream = sys.stdout 94 | stream.write('# Generated from papr YAML: {}\n'.format(os.path.basename(args.path))) 95 | jobs = [] 96 | joblist = {'apiVersion': 'v1', 97 | 'kind': 'List', 98 | 'items': jobs} 99 | for i,suite in enumerate(suites): 100 | if not suite.get('container'): 101 | continue 102 | jobs.append(paprsuite2kubejob(org, proj, args.commitsha, i, suite)) 103 | if len(jobs) == args.limit: 104 | break 105 | yaml.dump(joblist, stream=stream, explicit_start=True) 106 | 107 | if __name__ == '__main__': 108 | sys.exit(main()) 109 | -------------------------------------------------------------------------------- /papr/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import sys 5 | import subprocess 6 | 7 | PKG_DIR = os.path.dirname(os.path.realpath(__file__)) 8 | 9 | 10 | def main(): 11 | main_script = os.path.join(PKG_DIR, 'main') 12 | main_args = [main_script] + sys.argv[:1] 13 | return subprocess.run(main_args).returncode 14 | 15 | 16 | if __name__ == "__main__": 17 | sys.exit(main()) 18 | -------------------------------------------------------------------------------- /papr/main: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Exeuo pipefail 3 | 4 | # This is the bootstrapping script. It makes some validation 5 | # checks, checks out the repo, and starts the spawner, which 6 | # does the actual parsing and spawning of test runners. 7 | 8 | THIS_DIR=$(dirname $0) 9 | 10 | source $THIS_DIR/utils/common.sh 11 | 12 | main() { 13 | 14 | # NB: bash trickery: don't use any of the function calls 15 | # in if-statements, it will completely disable set -e 16 | # inside the function... Yet another reason to port this 17 | # to Python. 18 | 19 | # NB2: if you need to change directory, do it in a 20 | # subshell. 21 | 22 | # NB3: the use of eval is strictly forbidden. Never 23 | # directly run a user-provided variable. 24 | 25 | # we keep everything non-reusable for this run in state/ 26 | rm -rf state && mkdir state 27 | 28 | # Make sure we update GitHub if we exit due to errexit. 29 | # We also do a GitHub update on clean exit. 30 | ensure_err_github_update 31 | 32 | validate_vars 33 | 34 | checkout_ref 35 | 36 | # support the old name until projects migrate over 37 | if [ ! -f checkouts/$github_repo/.papr.yml ] && 38 | [ ! -f checkouts/$github_repo/.papr.yaml ] && 39 | [ ! -f checkouts/$github_repo/.redhat-ci.yml ]; then 40 | echo "INFO: No .papr.yml file found." 41 | exit 0 42 | fi 43 | 44 | export PYTHONUNBUFFERED=1 45 | 46 | exec python3 $THIS_DIR/spawner.py 47 | } 48 | 49 | validate_vars() { 50 | 51 | if [ -n "${github_branch:-}" ] && [ -n "${github_pull_id:-}" ]; then 52 | echo "ERROR: Can't specify both github_branch and github_pull_id." 
53 | return 1 54 | elif [ -z "${github_branch:-}" ] && [ -z "${github_pull_id:-}" ]; then 55 | echo "ERROR: One of github_branch or github_pull_id must be specified." 56 | return 1 57 | else 58 | # let's not print token information 59 | set +x 60 | for var in github_{repo,token}; do 61 | if [ -z "${!var:-}" ]; then 62 | echo "ERROR: Missing variable '${var}'." 63 | return 1 64 | fi 65 | done 66 | set -x 67 | fi 68 | } 69 | 70 | checkout_ref() { 71 | 72 | local repo=checkouts/$github_repo 73 | 74 | # let's re-use checkouts if available (this is safe since we don't actually 75 | # pollute these repos, we just rsync them) 76 | if [ ! -d $repo ]; then 77 | git clone https://github.com/$github_repo $repo 78 | else 79 | # update the origin refs for projects that expect a fresh clone 80 | git -C $repo fetch origin 81 | fi 82 | 83 | local sha_cmp 84 | 85 | # checkout target commit 86 | if [ -n "${github_branch:-}" ]; then 87 | git -C $repo fetch origin $github_branch 88 | sha_cmp=$(git -C $repo rev-parse FETCH_HEAD) 89 | export github_url=https://github.com/$github_repo/commits/$github_branch 90 | else 91 | if git -C $repo fetch origin refs/pull/$github_pull_id/merge; then 92 | touch state/is_merge_sha 93 | sha_cmp=$(git -C $repo rev-parse FETCH_HEAD^2) 94 | else 95 | git -C $repo fetch origin refs/pull/$github_pull_id/head 96 | sha_cmp=$(git -C $repo rev-parse FETCH_HEAD) 97 | fi 98 | export github_url=https://github.com/$github_repo/pull/$github_pull_id 99 | fi 100 | 101 | if [ -n "${github_commit:-}" ] && [ "$github_commit" != "$sha_cmp" ]; then 102 | echo "INFO: Expected commit $github_commit, but received $sha_cmp." 103 | echo "INFO: Most likely the ref was updated since this job (or parent" 104 | echo "INFO: job) was triggered. Silently exiting since another run with" 105 | echo "INFO: the updated SHA is underway." 106 | exit 0 107 | elif [ -z "${github_commit:-}" ]; then 108 | # define it as a global 109 | export github_commit=$sha_cmp 110 | fi 111 | 112 | git -C $repo checkout FETCH_HEAD 113 | git -C $repo rev-parse HEAD > state/sha 114 | } 115 | 116 | ensure_err_github_update() { 117 | # we don't have a context yet, so let's just use a generic one 118 | trap "common_update_github 'Red Hat CI' error 'An internal error occurred.'" ERR 119 | } 120 | 121 | main "$@" 122 | -------------------------------------------------------------------------------- /papr/provisioner: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Exeuo pipefail 3 | 4 | # This script provisions a node on OpenStack. It may be 5 | # called multiple times in parallel. 6 | 7 | THIS_DIR=$(dirname $0) 8 | 9 | source $THIS_DIR/utils/common.sh 10 | 11 | main() { 12 | 13 | # NB: see the various NBs in the main() of main. 
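    # Positional arguments (as passed by the testrunner):
    #   $1 -- per-suite state directory
    #   $2 -- directory with the parsed host definition (distro, min_ram, ...)
    #   $3 -- output directory for provisioning results (e.g. node_addr)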
14 | 15 | state=$1; shift 16 | parsedhost=$1; shift 17 | outdir=$1; shift 18 | 19 | [ -d $parsedhost ] 20 | mkdir $outdir 21 | 22 | provision_host 23 | } 24 | 25 | provision_host() { 26 | 27 | # include the BUILD_ID directly in the name to make it 28 | # easier to determine which nodes belong to which runs 29 | # when troubleshooting 30 | os_name_prefix=papr 31 | if [ -n "${BUILD_ID:-}" ]; then 32 | os_name_prefix=$os_name_prefix-$BUILD_ID 33 | fi 34 | 35 | env \ 36 | os_image="$(cat $parsedhost/distro)" \ 37 | os_min_ram=$(cat $parsedhost/min_ram) \ 38 | os_min_vcpus=$(cat $parsedhost/min_cpus) \ 39 | os_min_disk=$(cat $parsedhost/min_disk) \ 40 | os_min_ephemeral=$(cat $parsedhost/min_secondary_disk) \ 41 | os_name_prefix=$os_name_prefix \ 42 | os_user_data="$THIS_DIR/utils/user-data" \ 43 | python3 "$THIS_DIR/utils/os_provision.py" $outdir 44 | 45 | ssh_wait $(cat $outdir/node_addr) $state/node_key 46 | 47 | if [ -f $parsedhost/ostree_revision ]; then 48 | if ! on_atomic_host; then 49 | update_github error "Cannot specify 'ostree' on non-AH." 50 | touch $state/exit # signal testrunner to exit nicely 51 | exit 0 52 | fi 53 | deploy_ostree 54 | fi 55 | } 56 | 57 | deploy_ostree() { 58 | local remote=$(cat $parsedhost/ostree_remote) 59 | local branch=$(cat $parsedhost/ostree_branch) 60 | local revision=$(cat $parsedhost/ostree_revision) 61 | 62 | local rc=0 63 | local skip_reboot=0 64 | if [ -z "$remote" ] && [ -z "$branch" ]; then 65 | 66 | if [ -z "$revision" ]; then 67 | vmssh rpm-ostree upgrade --upgrade-unchanged-exit-77 || rc=$? 68 | else 69 | vmssh rpm-ostree deploy "$revision" || rc=$? 70 | fi 71 | 72 | if [ $rc == 77 ]; then 73 | skip_reboot=1 74 | elif [ $rc != 0 ]; then 75 | update_github error "Failed to upgrade or deploy." 76 | touch $state/exit # signal testrunner to exit nicely 77 | exit 0 78 | fi 79 | else 80 | local refspec= 81 | 82 | if [ -n "$remote" ]; then 83 | vmssh ostree remote add --no-gpg-verify papr "$remote" 84 | refspec=papr: 85 | fi 86 | 87 | if [ -n "$branch" ]; then 88 | refspec="${refspec}$branch" 89 | fi 90 | 91 | if [ -z "$revision" ]; then 92 | vmssh rpm-ostree rebase "$refspec" || rc=$? 93 | else 94 | vmssh rpm-ostree rebase "$refspec" "$revision" || rc=$? 95 | fi 96 | 97 | if [ $rc != 0 ]; then 98 | update_github error "Failed to rebase onto refspec." 99 | touch $state/exit # signal testrunner to exit nicely 100 | exit 0 101 | fi 102 | fi 103 | 104 | if [ $skip_reboot != 1 ]; then 105 | vmreboot 106 | fi 107 | } 108 | 109 | update_github() { 110 | local context=$(cat $state/parsed/context) 111 | common_update_github "$context" "$@" 112 | } 113 | 114 | vmssh() { 115 | ssh -q -n -i $state/node_key \ 116 | -o StrictHostKeyChecking=no \ 117 | -o PasswordAuthentication=no \ 118 | -o UserKnownHostsFile=/dev/null \ 119 | root@$(cat $outdir/node_addr) "$@" 120 | } 121 | 122 | vmreboot() { 123 | vmssh systemctl reboot || : 124 | sleep 3 # give time for port to go down 125 | ssh_wait $(cat $outdir/node_addr) $state/node_key 126 | } 127 | 128 | on_atomic_host() { 129 | vmssh test -f /run/ostree-booted 130 | } 131 | 132 | main "$@" 133 | -------------------------------------------------------------------------------- /papr/spawner.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # Parse the YAML file, start the testrunners in parallel, 4 | # and wait for them. 
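# The spawner is driven by environment variables exported by the `main`
# bootstrap script and by the caller (see docs/RUNNING.md): github_repo,
# github_commit, github_token, and either github_branch or github_pull_id;
# github_contexts, s3_prefix and RHCI_DEBUG_ALWAYS_RUN are consulted when set.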
5 | 6 | import os 7 | import sys 8 | import time 9 | import traceback 10 | import threading 11 | import subprocess 12 | 13 | import boto3 14 | import jinja2 15 | 16 | # XXX: switch to relative imports when we're a proper module 17 | from papr import PKG_DIR 18 | import papr.utils.parser as parser 19 | import papr.utils.common as common 20 | import papr.utils.gh as gh 21 | 22 | 23 | def main(): 24 | "Main entry point." 25 | 26 | try: 27 | suites = parse_suites() 28 | except parser.ParserError as e: 29 | # print the error to give feedback in the logs, but 30 | # exit nicely since this is not an infra failure 31 | traceback.print_exc() 32 | gh_status('error', "Red Hat CI", "Invalid YAML file.") 33 | if os.environ.get('github_pull_id'): 34 | gh_comment(':boom: Invalid `.papr.yml`: {}.'.format(e.msg)) 35 | else: 36 | n = len(suites) 37 | if n > 0: 38 | spawn_testrunners(n) 39 | inspect_suite_failures(suites) 40 | update_required_context(suites) 41 | else: 42 | print("INFO: No testsuites to run.") 43 | 44 | 45 | def parse_suites(): 46 | 47 | yml_file = os.path.join('checkouts', 48 | os.environ['github_repo'], 49 | '.papr.yml') 50 | 51 | # try new name, fall back to old name until projects migrate over 52 | for name in ['.papr.yml', '.papr.yaml', '.redhat-ci.yml']: 53 | yml_file = os.path.join('checkouts', os.environ['github_repo'], name) 54 | if os.path.isfile(yml_file): 55 | break 56 | else: 57 | # this should have been checked in main, so should never happen 58 | assert False, "No valid YAML file found" 59 | 60 | # are we supposed to run only some testsuites? 61 | only_contexts = os.environ.get('github_contexts') 62 | if only_contexts is not None: 63 | only_contexts = only_contexts.split('|') 64 | 65 | suites = [] 66 | branch = os.environ.get('github_branch') 67 | suite_parser = parser.SuiteParser(yml_file) 68 | for idx, suite in enumerate(suite_parser.parse()): 69 | if len(os.environ.get('RHCI_DEBUG_ALWAYS_RUN', '')) == 0: 70 | branches = suite.get('branches', ['master']) 71 | if branch and branch not in branches: 72 | print("INFO: %s suite not defined to run for branch %s." % 73 | (common.ordinal(idx + 1), branch)) 74 | continue 75 | if not branch and not suite.get('pulls', True): 76 | print("INFO: %s suite not defined to run on pull requests." % 77 | common.ordinal(idx + 1)) 78 | continue 79 | if only_contexts and suite['context'] not in only_contexts: 80 | print("INFO: %s suite not in github_contexts env var." % 81 | common.ordinal(idx + 1)) 82 | continue 83 | suite_dir = 'state/suite-%d/parsed' % len(suites) 84 | parser.flush_suite(suite, suite_dir) 85 | suites.append(suite) 86 | 87 | return suites 88 | 89 | 90 | def spawn_testrunners(n): 91 | 92 | testrunner = os.path.join(PKG_DIR, "testrunner") 93 | 94 | runners = [] 95 | threads = [] 96 | for i in range(n): 97 | p = subprocess.Popen([testrunner, str(i)], 98 | stdout=subprocess.PIPE, 99 | stderr=subprocess.STDOUT) 100 | t = threading.Thread(target=read_pipe, 101 | args=(i, p.stdout)) 102 | t.start() 103 | runners.append(p) 104 | threads.append(t) 105 | 106 | # We don't implement any fail fast here, so just do a 107 | # naive wait to collect them all. 108 | failed = [] 109 | for i, runner in enumerate(runners): 110 | if runner.wait() != 0: 111 | failed.append(i) 112 | 113 | for thread in threads: 114 | thread.join() 115 | 116 | # NB: When we say 'failed' here, we're talking about 117 | # infrastructure failure. Bad PR code should never cause 118 | # rc != 0. 
119 | if failed: 120 | raise Exception("the following runners failed: %s" % str(failed)) 121 | 122 | 123 | def read_pipe(idx, fd): 124 | # NB: We can't trust the output from the testrunner, so 125 | # just read it and write it back in binary mode. 126 | s = fd.readline() 127 | while s != b'': 128 | if not s.endswith(b'\n'): 129 | s += b'\n' 130 | # pylint: disable=no-member 131 | sys.stdout.buffer.write((b'[%d] ' % idx) + s) 132 | s = fd.readline() 133 | 134 | 135 | def inspect_suite_failures(suites): 136 | 137 | for i, suite in enumerate(suites): 138 | assert 'rc' not in suite 139 | 140 | # If the rc file doesn't exist but the runner exited 141 | # nicely, then it means there was a semantic error 142 | # in the YAML (e.g. bad Docker image, bad ostree 143 | # revision, etc...). 144 | if not os.path.isfile("state/suite-%d/rc" % i): 145 | suite['rc'] = 1 146 | else: 147 | with open("state/suite-%d/rc" % i) as f: 148 | suite['rc'] = int(f.read().strip()) 149 | 150 | # It's helpful to have an easy global way to figure out 151 | # if any of the suites failed, e.g. for integration in 152 | # Jenkins. Let's write a 'failures' file counting the 153 | # number of failed suites. 154 | with open("state/failures", "w") as f: 155 | f.write("%d" % count_failures(suites)) 156 | 157 | 158 | def count_failures(suites): 159 | return sum([int(suite['rc'] != 0) for suite in suites]) 160 | 161 | 162 | def update_required_context(suites): 163 | 164 | # don't send 'required' context if we're only targeting some testsuites 165 | if 'github_contexts' in os.environ: 166 | return 167 | 168 | required_suites = [suite for suite in suites if suite.get('required')] 169 | total = len(required_suites) 170 | 171 | if total == 0: 172 | return 173 | 174 | # OK, let's upload a very basic index file that just 175 | # links to the results of all the required suites 176 | 177 | results_suites = [] 178 | for i, suite in enumerate(suites): 179 | name = suite['context'] 180 | if os.path.isfile("state/suite-%d/url" % i): 181 | with open("state/suite-%d/url" % i) as f: 182 | url = f.read().strip() 183 | else: 184 | # something went really wrong in the tester, fallback to src url 185 | url = os.environ['github_url'] 186 | result = (suite['rc'] == 0) 187 | results_suites.append((name, result, url)) 188 | 189 | tpl_fname = os.path.join(PKG_DIR, 'utils', 'required-index.j2') 190 | 191 | s3_key = '%s/%s/%s.%s/%s' % (os.environ['s3_prefix'], 192 | os.environ['github_repo'], 193 | os.environ['github_commit'], 194 | # rough equivalent of date +%s%N 195 | int(time.time() * 1e9), 196 | 'index.html') 197 | 198 | with open(tpl_fname) as tplf: 199 | tpl = jinja2.Template(tplf.read(), autoescape=True) 200 | tpl.globals['url'] = os.environ.get('github_url', "N/A") 201 | tpl.globals['commit'] = os.environ.get('github_commit', "N/A") 202 | data = tpl.render(suites=results_suites) 203 | upload_to_s3(s3_key, data, 'text/html') 204 | 205 | url = 'https://s3.amazonaws.com/%s' % s3_key 206 | 207 | failed = count_failures(required_suites) 208 | gh_status('success' if failed == 0 else 'failure', 'required', 209 | "%d/%d PASSES" % (total - failed, total), url) 210 | 211 | 212 | def gh_status(state, context, description, url=None): 213 | 214 | try: 215 | args = {'repo': os.environ['github_repo'], 216 | 'commit': os.environ['github_commit'], 217 | 'token': os.environ['github_token'], 218 | 'state': state, 219 | 'context': context, 220 | 'description': description, 221 | 'url': url} 222 | 223 | gh.status(**args) 224 | 225 | if 
os.path.isfile('state/is_merge_sha'): 226 | with open('state/sha') as f: 227 | args['commit'] = f.read().strip() 228 | gh.status(**args) 229 | 230 | # it can happen that the commit doesn't even exist 231 | # anymore, so let's be tolerant of such errors 232 | except gh.CommitNotFoundException: 233 | pass 234 | 235 | 236 | def gh_comment(text): 237 | 238 | args = {'repo': os.environ['github_repo'], 239 | 'token': os.environ['github_token'], 240 | 'issue': int(os.environ['github_pull_id']), 241 | 'text': text} 242 | 243 | gh.comment(**args) 244 | 245 | 246 | def upload_to_s3(bucket_key, data, type): 247 | s3 = boto3.resource("s3") 248 | bucket, key = bucket_key.split('/', 1) 249 | s3.Object(bucket, key).put(Body=data, ContentType=type) 250 | 251 | 252 | if __name__ == '__main__': 253 | sys.exit(main()) 254 | -------------------------------------------------------------------------------- /papr/testrunner: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -Exeuo pipefail 3 | 4 | # This script is run multiple times in parallel: once for 5 | # each testsuite defined in the YAML file. 6 | 7 | THIS_DIR=$(dirname $0) 8 | 9 | source $THIS_DIR/utils/common.sh 10 | 11 | main() { 12 | 13 | # NB: see the various NBs in the main() of main. 14 | 15 | # We take a single argument; the state dir index to use. 16 | # But we still expect the global state dir to be in the 17 | # $PWD. 18 | state_idx=$1; shift 19 | 20 | state=state/suite-${state_idx} 21 | 22 | [ -d state ] && [ -d $state ] 23 | 24 | # Make sure we update GitHub if we exit due to errexit. 25 | # We also do a GitHub update on clean exit. 26 | ensure_err_github_update 27 | 28 | provision_env 29 | 30 | prepare_env 31 | 32 | build_and_test 33 | 34 | fetch_artifacts 35 | 36 | s3_upload 37 | 38 | final_github_update 39 | } 40 | 41 | provision_env() { 42 | if containerized; then 43 | ensure_teardown_container 44 | provision_container 45 | else 46 | ssh_setup_key 47 | if clustered; then 48 | ensure_teardown_cluster 49 | provision_cluster 50 | else 51 | ensure_teardown_node 52 | provision_node 53 | fi 54 | fi 55 | } 56 | 57 | provision_container() { 58 | local image=$(cat $state/parsed/image) 59 | 60 | if containerized; then 61 | update_github pending "Provisioning container..." 62 | fi 63 | 64 | # Let's pre-pull the image so that it doesn't count 65 | # as part of the test timeout. 66 | if ! sudo docker pull "$image"; then 67 | update_github error "Could not pull image '$image'." 68 | exit 0 69 | fi 70 | 71 | local name=papr-$(date +%s%N) 72 | if [ -n "${BUILD_ID:-}" ]; then 73 | name=$name-$BUILD_ID 74 | fi 75 | 76 | sudo docker run --name $name -d \ 77 | --device /dev/kvm \ 78 | --cidfile $state/cid \ 79 | "$image" sleep infinity 80 | 81 | if [ $UID != 0 ]; then 82 | sudo chown $UID:$UID $state/cid 83 | fi 84 | } 85 | 86 | provision_node() { 87 | 88 | update_github pending "Provisioning host..." 89 | 90 | # substitute node to use in ":" format 91 | if [ -n "${PAPR_DEBUG_USE_NODE:-}" ]; then 92 | echo "${PAPR_DEBUG_USE_NODE%:*}" > $state/host/node_name 93 | echo "${PAPR_DEBUG_USE_NODE#*:}" > $state/host/node_addr 94 | else 95 | $THIS_DIR/provisioner $state $state/parsed/host $state/host 96 | if [ -f $state/exit ]; then 97 | # the provisioner encountered a user error and already updated GH 98 | exit 0 99 | fi 100 | fi 101 | } 102 | 103 | provision_cluster() { 104 | local nhosts=$(cat $state/parsed/nhosts) 105 | 106 | update_github pending "Provisioning cluster..." 
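    # provision all hosts in parallel: one provisioner process per parsed
    # host-$i directory, with no concurrency limit (xargs -P 0)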
107 | 108 | seq 0 $((nhosts - 1)) | xargs -P 0 -n 1 -I {} \ 109 | $THIS_DIR/provisioner $state $state/parsed/host-{} $state/host-{} 110 | 111 | if [ -f $state/exit ]; then 112 | # a provisioner encountered a user error and already updated GH 113 | exit 0 114 | fi 115 | 116 | if container_controlled; then 117 | provision_container 118 | else 119 | # make the first host the controller 120 | ln -s host-0 $state/host 121 | fi 122 | 123 | local i=0 124 | while [ $i -lt $nhosts ]; do 125 | local name=$(cat $state/parsed/host-$i/name) 126 | local addr=$(cat $state/host-$i/node_addr) 127 | name=$(sed 's/[.-]/_/g' <<< "$name") 128 | # also export under the old name until projects migrate over 129 | echo "export RHCI_${name}_IP=${addr}" >> $state/parsed/envs 130 | echo "export PAPR_${name}_IP=${addr}" >> $state/parsed/envs 131 | i=$((i + 1)) 132 | done 133 | } 134 | 135 | prepare_env() { 136 | local upload_dir=$state/$github_commit.$state_idx.$(date +%s%N) 137 | echo $upload_dir > $state/upload_dir 138 | mkdir $upload_dir 139 | 140 | if [ -n "${site_repos:-}" ]; then 141 | env_inject_site_repos 142 | fi 143 | 144 | # https://github.com/projectatomic/rpm-ostree/issues/687 145 | if ! on_atomic_host; then 146 | env_make_rpmmd_cache 147 | fi 148 | 149 | # inject extra repos before installing packages 150 | if [ -f $state/parsed/papr-extras.repo ]; then 151 | envcmd mkdir -p /etc/yum.repos.d 152 | envcp $state/parsed/papr-extras.repo /etc/yum.repos.d 153 | fi 154 | 155 | if [ -f $state/parsed/packages ]; then 156 | if on_atomic_host; then 157 | overlay_packages 158 | else 159 | install_packages 160 | fi 161 | fi 162 | 163 | if clustered; then 164 | ssh_setup_cluster 165 | fi 166 | 167 | envcmd mkdir -p /var/tmp/checkout 168 | envcp checkouts/$github_repo/. /var/tmp/checkout 169 | 170 | # inject some helpful variables to allow projects to 171 | # more tightly integrate with redhat-ci 172 | echo "export RHCI_REPO=${github_repo}" >> $state/parsed/envs 173 | echo "export RHCI_COMMIT=${github_commit}" >> $state/parsed/envs 174 | if [ -n "${github_branch:-}" ]; then 175 | echo "export RHCI_BRANCH=${github_branch}" >> $state/parsed/envs 176 | else 177 | echo "export RHCI_PULL_ID=${github_pull_id}" >> $state/parsed/envs 178 | if [ -f state/is_merge_sha ]; then 179 | echo "export RHCI_MERGE_COMMIT=$(cat state/sha)" >> $state/parsed/envs 180 | fi 181 | fi 182 | 183 | # re-inject under the new name, delete the old name once projects are 184 | # migrated over 185 | echo "export PAPR_REPO=${github_repo}" >> $state/parsed/envs 186 | echo "export PAPR_COMMIT=${github_commit}" >> $state/parsed/envs 187 | if [ -n "${github_branch:-}" ]; then 188 | echo "export PAPR_BRANCH=${github_branch}" >> $state/parsed/envs 189 | else 190 | echo "export PAPR_PULL_ID=${github_pull_id}" >> $state/parsed/envs 191 | if [ -f state/is_merge_sha ]; then 192 | echo "export PAPR_MERGE_COMMIT=$(cat state/sha)" >> $state/parsed/envs 193 | fi 194 | local target_branch=$(query_github pulls/${github_pull_id} base ref) 195 | echo "export PAPR_PULL_TARGET_BRANCH=${target_branch}" >> $state/parsed/envs 196 | fi 197 | } 198 | 199 | ssh_setup_key() { 200 | set +x 201 | cat > $state/node_key <<< "$os_privkey" 202 | chmod 0600 $state/node_key 203 | set -x 204 | } 205 | 206 | ssh_setup_cluster() { 207 | local nhosts=$(cat $state/parsed/nhosts) 208 | 209 | # since the common case is to interact with the various 210 | # nodes by ssh, let's make sure it's all set up nicely 211 | # ahead of time 212 | 213 | # let's go through the hosts once to collect keys 
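    # (each keyscan entry is prefixed with the node name so that known_hosts
    # matches the hostnames injected into /etc/hosts below, not just the
    # raw addresses)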
214 | local i=0 215 | while [ $i -lt $nhosts ]; do 216 | local name=$(cat $state/parsed/host-$i/name) 217 | local addr=$(cat $state/host-$i/node_addr) 218 | ssh-keyscan $addr 2>/dev/null | \ 219 | sed "s/^/$name,/" >> $state/known_hosts 220 | echo $addr $name >> $state/hosts 221 | i=$((i + 1)) 222 | done 223 | 224 | # We use a different key than the one used to provision 225 | # the nodes here, since we don't want to expose the 226 | # private key of the OpenStack keypair used. NB: not in 227 | # a state dir; we don't want to regen a key on every 228 | # run. 229 | mkdir -p cluster_keypair 230 | flock --exclusive cluster_keypair sh -ec \ 231 | 'if [ ! -f cluster_keypair/id_rsa ]; then 232 | # just stick with RSA which is supported on all platforms 233 | ssh-keygen -t rsa -b 4096 -N "" -f cluster_keypair/id_rsa 234 | fi' 235 | 236 | if container_controlled; then 237 | # most base images don't have ssh 238 | if ! envcmd [ -x /bin/ssh ]; then 239 | envcmd yum install -y openssh-clients 240 | fi 241 | envcmd mkdir -m 0600 /root/.ssh 242 | envcp cluster_keypair/id_rsa /root/.ssh 243 | envcp $state/known_hosts /root/.ssh 244 | envcp $state/hosts /etc/hosts.append 245 | envcmd sh -c "cat /etc/hosts.append >> /etc/hosts" 246 | fi 247 | 248 | vmipssh() { 249 | ip=$1; shift 250 | ssh -q -i $state/node_key \ 251 | -o StrictHostKeyChecking=no \ 252 | -o PasswordAuthentication=no \ 253 | -o UserKnownHostsFile=/dev/null \ 254 | root@$ip "$@" 255 | } 256 | 257 | i=0 258 | while [ $i -lt $nhosts ]; do 259 | local name=$(cat $state/parsed/host-$i/name) 260 | local addr=$(cat $state/host-$i/node_addr) 261 | 262 | # some of these could be redone more cleanly through 263 | # cloud-init, though the dynamic aspect would 264 | # probably end up making it look similar 265 | 266 | vmipssh $addr hostnamectl set-hostname $name 267 | 268 | # we don't want to inject the public ip of this host 269 | # into its hosts file since programs might be 270 | # confused to see the hostname resolve to an address 271 | # that's not assigned to any of its interfaces. no 272 | # need to inject the private ip in its stead either; 273 | # the myhostname nss module already does the right 274 | # thing for us 275 | sed "/^${addr//./\\.} / d" $state/hosts | \ 276 | vmipssh $addr "cat >> /etc/hosts" 277 | 278 | vmipssh $addr "cat >> /root/.ssh/known_hosts" < $state/known_hosts 279 | vmipssh $addr "cat > /root/.ssh/id_rsa" < cluster_keypair/id_rsa 280 | vmipssh $addr chmod 0400 /root/.ssh/id_rsa 281 | vmipssh $addr "cat >> /root/.ssh/authorized_keys" \ 282 | < cluster_keypair/id_rsa.pub 283 | i=$((i + 1)) 284 | done 285 | 286 | unset -f vmipssh 287 | } 288 | 289 | overlay_packages() { 290 | local upload_dir=$(cat $state/upload_dir) 291 | 292 | local rc=0 293 | logged_envcmd $upload_dir/setup.log / - - \ 294 | rpm-ostree install "$(cat $state/parsed/packages)" || rc=$? 295 | 296 | if [ $rc != 0 ]; then 297 | s3_upload 298 | update_github error "Could not layer packages." "$(cat $state/url)" 299 | exit 0 300 | fi 301 | 302 | vmreboot 303 | } 304 | 305 | install_packages() { 306 | local upload_dir=$(cat $state/upload_dir) 307 | 308 | local mgr=yum 309 | if envcmd rpm -q dnf; then 310 | mgr=dnf 311 | fi 312 | 313 | local rc=0 314 | logged_envcmd $upload_dir/setup.log / - - \ 315 | $mgr install -y "$(cat $state/parsed/packages)" || rc=$? 316 | 317 | if [ $rc != 0 ]; then 318 | s3_upload 319 | update_github error "Could not install packages." 
"$(cat $state/url)" 320 | exit 0 321 | fi 322 | } 323 | 324 | env_inject_site_repos() { 325 | local os_id=$(get_env_os_info ID) 326 | local os_version_id=$(get_env_os_info VERSION_ID) 327 | 328 | local OLDIFS=$IFS; IFS='|' 329 | mkdir -p $state/site-repos 330 | for site_repo in $site_repos; do 331 | local repo_file=${site_repo#*=} 332 | local target_os=${site_repo%=*} 333 | local target_os_id=${target_os%/*} 334 | local target_os_version_id=${target_os#*/} 335 | 336 | [ -n "$repo_file" ] 337 | [ -n "$target_os_id" ] 338 | [ -n "$target_os_version_id" ] 339 | 340 | if [ "$target_os_id" != '*' ] && 341 | ([ -z "$os_id" ] || 342 | [ "$target_os_id" != "$os_id" ]); then 343 | continue 344 | fi 345 | 346 | if [ "$target_os_version_id" != '*' ] && 347 | ([ -z "$os_version_id" ] || 348 | [ "$target_os_version_id" != "$os_version_id" ]); then 349 | continue 350 | fi 351 | 352 | if [ "${repo_file::7}" == "http://" ] || 353 | [ "${repo_file::8}" == "https://" ]; then 354 | (cd $state/site-repos && curl -LO "$repo_file") 355 | else # assume local 356 | cp -f $repo_file $state/site-repos 357 | fi 358 | done 359 | IFS=$OLDIFS 360 | 361 | envcmd mkdir -p /etc/yum.repos.d 362 | envcp $state/site-repos/. /etc/yum.repos.d 363 | } 364 | 365 | env_make_rpmmd_cache() { 366 | local os_id=$(get_env_os_info ID) 367 | local os_version_id=$(get_env_os_info VERSION_ID) 368 | 369 | local cachedir= 370 | if [ -n "$os_id" ] && [ -n "$os_version_id" ]; then 371 | cachedir="cache/rpmmd/$os_id/$os_version_id" 372 | fi 373 | 374 | local mgr=yum 375 | if envcmd rpm -q dnf; then 376 | mgr=dnf 377 | fi 378 | 379 | # inject cache if we have it 380 | if [ -n "$cachedir" ] && [ -d "$cachedir" ]; then 381 | envcmd mkdir -p /var/cache/$mgr 382 | # copy under lock to the suite-local cache and rsync that 383 | mkdir -p "$state/$cachedir" 384 | flock --shared "$cachedir" cp -alT "$cachedir" "$state/$cachedir" 385 | envcp "$state/$cachedir/." /var/cache/$mgr 386 | fi 387 | 388 | # update the cache 389 | local has_cache=0 390 | local retries=5 391 | while [ $retries -gt 0 ]; do 392 | if envcmd $mgr makecache; then 393 | has_cache=1 394 | break 395 | fi 396 | retries=$((retries - 1)) 397 | done 398 | 399 | if [ $has_cache == 0 ]; then 400 | update_github error "Could not makecache." 401 | exit 0 402 | fi 403 | 404 | local can_trust_cache=0 405 | if host_controlled; then 406 | can_trust_cache=1 # we trust all the distros we offer ourselves 407 | else 408 | # only trust official containers 409 | local image=$(cat $state/parsed/image) 410 | if grep -q -E '^registry.fedoraproject.org' <<< "$image" || 411 | grep -q -E '^(fedora|centos)(:[[:alnum:]_.-]+)?$' <<< "$image"; then 412 | can_trust_cache=1 413 | fi 414 | fi 415 | 416 | # update our own cache with the updated cache 417 | if [ -n "$cachedir" ] && [ $can_trust_cache == 1 ]; then 418 | 419 | mkdir -p "$state/$cachedir" 420 | envfetch /var/cache/$mgr/. "$state/$cachedir" 421 | 422 | # but only keep non-system solvs & repodata 423 | # NB: yum only keeps repo metadata there, so no need to prune 424 | if [ $mgr == dnf ]; then 425 | find "$state/$cachedir" -maxdepth 1 \ 426 | ! -type d ! -name '*.solv' ! 
-name '*.solvx' -delete 427 | rm -f "$state/$cachedir/@System.solv" 428 | fi 429 | 430 | # update global cache under lock 431 | # NB: cp -l is smart enough to skip copying if the file hasn't changed 432 | # (though that only works if we rsync'ed -- docker cp always overwrites) 433 | mkdir -p "$cachedir" 434 | flock --exclusive "$cachedir" cp -alTf "$state/$cachedir" "$cachedir" 435 | fi 436 | } 437 | 438 | run_loop() { 439 | local timeout=$1; shift 440 | local logfile=$1; shift 441 | local workdir=$1; shift 442 | local testfile=$1; shift 443 | local envfile=$1; shift 444 | 445 | local max_date=$(($(date +%s) + $timeout)) 446 | while IFS='' read -r line || [[ -n $line ]]; do 447 | 448 | timeout=$(($max_date - $(date +%s))) 449 | if [ $timeout -le 0 ]; then 450 | echo "### TIMED OUT" >> $logfile 451 | rc=137 452 | break 453 | fi 454 | 455 | rc=0 456 | logged_envcmd $logfile $workdir $envfile $timeout "$line" || rc=$? 457 | 458 | if [ $rc != 0 ]; then 459 | break 460 | fi 461 | 462 | done < "$testfile" 463 | 464 | return $rc 465 | } 466 | 467 | build_and_test() { 468 | local upload_dir=$(cat $state/upload_dir) 469 | local timeout=$(cat $state/parsed/timeout) 470 | local checkout=checkouts/$github_repo 471 | local rc=0 472 | 473 | if [ -f $state/parsed/build ]; then 474 | local config_opts=$(cat $state/parsed/build.config_opts) 475 | local build_opts=$(cat $state/parsed/build.build_opts) 476 | local install_opts=$(cat $state/parsed/build.install_opts) 477 | 478 | update_github pending "Building..." 479 | 480 | touch $state/build.sh 481 | if [ ! -f $checkout/configure ]; then 482 | if [ -f $checkout/autogen.sh ]; then 483 | echo "NOCONFIGURE=1 ./autogen.sh" >> $state/build.sh 484 | elif [ -f $checkout/autogen ]; then 485 | echo "NOCONFIGURE=1 ./autogen" >> $state/build.sh 486 | fi 487 | fi 488 | 489 | local njobs=$(envcmd getconf _NPROCESSORS_ONLN) 490 | 491 | echo "./configure $config_opts" >> $state/build.sh 492 | echo "make all --jobs $njobs $build_opts" >> $state/build.sh 493 | echo "make install $install_opts" >> $state/build.sh 494 | 495 | local max_date=$(($(date +%s) + $timeout)) 496 | 497 | run_loop \ 498 | $timeout \ 499 | $upload_dir/build.log \ 500 | /var/tmp/checkout \ 501 | $state/build.sh \ 502 | $state/parsed/envs || rc=$? 503 | 504 | timeout=$(($max_date - $(date +%s))) 505 | fi 506 | 507 | if [ $rc = 0 ] && [ -f $state/parsed/tests ]; then 508 | 509 | update_github pending "Running tests..." 510 | 511 | run_loop \ 512 | $timeout \ 513 | $upload_dir/output.log \ 514 | /var/tmp/checkout \ 515 | $state/parsed/tests \ 516 | $state/parsed/envs || rc=$? 517 | fi 518 | 519 | echo "$rc" > $state/rc 520 | } 521 | 522 | fetch_artifacts() { 523 | local upload_dir=$(cat $state/upload_dir) 524 | 525 | # let's pull back the artifacts 526 | if [ -f $state/parsed/artifacts ]; then 527 | 528 | mkdir $upload_dir/artifacts 529 | 530 | local fetched_at_least_one=0 531 | if host_controlled; then 532 | local node_addr=$(cat $state/host/node_addr) 533 | 534 | while IFS='' read -r artifact || [[ -n $artifact ]]; do 535 | path="/var/tmp/checkout/$artifact" 536 | if vmssh [ -e "$path" ]; then 537 | # NB: We could use rsync here to be more efficient, though 538 | # we need the --ignore-missing-args switch, which is not 539 | # available on older platforms. Anyway, in this case, we're 540 | # always fetching from scratch and the test envs are not 541 | # far, so scp shouldn't be much slower. 
542 | vmscp -r "root@$node_addr:$path" $upload_dir/artifacts 543 | fetched_at_least_one=1 544 | fi 545 | done < $state/parsed/artifacts 546 | else 547 | local cid=$(cat $state/cid) 548 | while IFS='' read -r artifact || [[ -n $artifact ]]; do 549 | path="/var/tmp/checkout/$artifact" 550 | if sudo docker exec $cid [ -e "$path" ]; then 551 | sudo docker cp "$cid:$path" $upload_dir/artifacts 552 | fetched_at_least_one=1 553 | fi 554 | done < $state/parsed/artifacts 555 | fi 556 | 557 | if [ $fetched_at_least_one == 0 ]; then 558 | # we don't want it indexed if it's empty 559 | rm -rf $upload_dir/artifacts 560 | else 561 | # make sure indexer can access artifacts; docker copies as root 562 | if [ $UID != 0 ] && container_controlled; then 563 | sudo chown -R $UID:$UID $upload_dir/artifacts 564 | fi 565 | fi 566 | fi 567 | } 568 | 569 | s3_upload() { 570 | local indexer=$(realpath $THIS_DIR/utils/indexer.py) 571 | local upload_dir=$(cat $state/upload_dir) 572 | local s3_object=index.html 573 | 574 | # if we just have output.log or build.log, then just link directly to that 575 | if [ $(find $upload_dir -mindepth 1 | wc -l) = 1 ]; then 576 | if [ -f $upload_dir/output.log ]; then 577 | s3_object=output.log 578 | elif [ -f $upload_dir/build.log ]; then 579 | s3_object=build.log 580 | elif [ -f $upload_dir/setup.log ]; then 581 | s3_object=setup.log 582 | fi 583 | fi 584 | 585 | # go through every file we'll upload and make sure it's no more than the max 586 | # size, otherwise violently truncate it 587 | find $upload_dir -mindepth 1 -size +5M | while read f; do 588 | local orig_size=$(stat -c %s "$f") 589 | truncate -s 5M "$f" 590 | echo -e "\n### FILE TRUNCATED (ORIGINAL SIZE: $orig_size)" >> "$f" 591 | done 592 | 593 | if [ $s3_object = index.html ]; then 594 | # don't change directory in current session 595 | local context=$(cat $state/parsed/context) 596 | ( cd $upload_dir && github_context="$context" python3 $indexer ) 597 | fi 598 | 599 | # only actually upload if we're given $s3_prefix 600 | if [ -n "${s3_prefix:-}" ]; then 601 | 602 | local full_prefix=$s3_prefix/$github_repo/$(basename $upload_dir) 603 | 604 | # Upload logs separately so that we can set the MIME type properly. 605 | # Let's just always label the logs as UTF-8. If the data is not strict 606 | # ISO-8859-1, then it won't render properly anyway. If it's (even if 607 | # partially) UTF-8, then we made the best choice. If it's random 608 | # garbage, we're no worse off (plus, UTF-8 is pretty good at handling 609 | # that). 610 | aws s3 sync --exclude '*.log' \ 611 | $upload_dir s3://$full_prefix 612 | aws s3 sync --exclude '*' --include '*.log' \ 613 | --content-type 'text/plain; charset=utf-8' \ 614 | $upload_dir s3://$full_prefix 615 | 616 | # full address we'll use for the final commit status update 617 | printf "https://s3.amazonaws.com/%s/%s" \ 618 | $full_prefix $s3_object > $state/url 619 | fi 620 | } 621 | 622 | final_github_update() { 623 | local rc 624 | local ghstate 625 | local desc 626 | 627 | rc=$(cat $state/rc) 628 | if [ $rc == 124 ] || [ $rc == 137 ]; then 629 | ghstate=failure 630 | desc="Test timed out and was aborted." 631 | elif [ $rc != 0 ]; then 632 | ghstate=failure 633 | desc="Test failed with rc $rc." 634 | else 635 | ghstate=success 636 | desc="All tests passed" 637 | if [ -n "${github_pull_id:-}" ] && [ ! -f state/is_merge_sha ]; then 638 | desc="$desc, but merge commit could not be tested" 639 | fi 640 | desc="${desc}." 
641 | fi 642 | 643 | local url= 644 | if [ -f $state/url ]; then 645 | url=$(cat $state/url) 646 | fi 647 | 648 | update_github $ghstate "$desc" "$url" 649 | } 650 | 651 | # $1 -- log file 652 | # $2 -- workdir 653 | # $3 -- envfile or - 654 | # $4 -- timeout or - 655 | logged_envcmd() { 656 | local logfile=$1; shift 657 | local workdir=$1; shift 658 | local envfile=$1; shift 659 | local timeout=$1; shift 660 | 661 | # seed with standard info 662 | if [ ! -f $logfile ]; then 663 | 664 | echo "### $(date --utc)" > $logfile 665 | echo "### $github_url" >> $logfile 666 | echo "### $github_commit" >> $logfile 667 | 668 | # NB: is_merge_sha is in the top-level global state dir 669 | if [ -n "${github_pull_id:-}" ] && [ ! -f state/is_merge_sha ]; then 670 | echo "### (WARNING: not merge sha, check for conflicts)" >> $logfile 671 | fi 672 | 673 | local context=$(cat $state/parsed/context) 674 | echo "### TESTSUITE $context" >> $logfile 675 | 676 | if [ -n "${BUILD_ID:-}" ]; then 677 | echo "### BUILD_ID $BUILD_ID" >> $logfile 678 | fi 679 | fi 680 | 681 | echo '>>>' "$@" >> $logfile 682 | 683 | # we just create a script and run that to make 684 | # invocation and redirection easier 685 | echo "set -euo pipefail" > $state/worker.sh 686 | if [ $envfile != - ] && [ -f $envfile ]; then 687 | cat $envfile >> $state/worker.sh 688 | fi 689 | echo "exec 2>&1" >> $state/worker.sh 690 | echo "cd $workdir" >> $state/worker.sh 691 | echo "$@" >> $state/worker.sh 692 | 693 | envcp $state/worker.sh /var/tmp 694 | 695 | local start=$(date +%s) 696 | 697 | rc=0 698 | timed_envcmd $timeout sh /var/tmp/worker.sh >> $logfile || rc=$? 699 | 700 | local duration=$(($(date +%s) - $start)) 701 | 702 | if [ $rc == 0 ]; then 703 | echo "### COMPLETED IN ${duration}s" >> $logfile 704 | elif [ $rc == 137 ]; then 705 | echo "### TIMED OUT AFTER ${duration}s" >> $logfile 706 | else 707 | echo "### EXITED WITH CODE $rc AFTER ${duration}s" >> $logfile 708 | fi 709 | 710 | return $rc 711 | } 712 | 713 | # $1 -- timeout or - 714 | timed_envcmd() { 715 | timeout=$1; shift 716 | 717 | if [[ $timeout == - ]]; then 718 | timeout=infinity 719 | fi 720 | 721 | # There's a tricky bit to note here, docker exec and ssh 722 | # don't handle arguments exactly the same way. SSH will 723 | # effectively do a 'sh -c' on the passed command, which 724 | # means that quoting might be an issue. 725 | 726 | if container_controlled; then 727 | local cid=$(cat $state/cid) 728 | sudo timeout --signal=KILL $timeout \ 729 | docker exec $cid "$@" 730 | else 731 | local node_addr=$(cat $state/host/node_addr) 732 | timeout --signal=KILL $timeout \ 733 | ssh -q -n -i $state/node_key \ 734 | -o StrictHostKeyChecking=no \ 735 | -o PasswordAuthentication=no \ 736 | -o UserKnownHostsFile=/dev/null \ 737 | root@$node_addr "$@" 738 | fi 739 | } 740 | 741 | envcmd() { 742 | timed_envcmd infinity "$@" 743 | } 744 | 745 | envcp() { 746 | target=$1; shift 747 | remote=$1; shift 748 | 749 | # N.B.: docker cp and rsync have almost the same 750 | # semantics, which is nice. One exception is that docker 751 | # wants 'dir/.' to signify copying dir contents, whereas 752 | # rsync is happy with just 'dir/', so always use '/.'. 753 | # Also, rsync creates nonexistent dirs, whereas docker 754 | # does not, so explicitly mkdir beforehand. 
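    # Illustrative usage (hedged example mirroring the site-repos injection
    # earlier in this script; 'local-dir' and '/remote/dir' are placeholders):
    #   envcmd mkdir -p /remote/dir
    #   envcp local-dir/. /remote/dir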
755 | 756 | if container_controlled; then 757 | local cid=$(cat $state/cid) 758 | sudo docker cp $target $cid:$remote 759 | else 760 | local node_addr=$(cat $state/host/node_addr) 761 | rsync --quiet -az --no-owner --no-group \ 762 | -e "ssh -q -i $state/node_key \ 763 | -o StrictHostKeyChecking=no \ 764 | -o PasswordAuthentication=no \ 765 | -o UserKnownHostsFile=/dev/null" \ 766 | $target root@$node_addr:$remote 767 | fi 768 | } 769 | 770 | envfetch() { 771 | remote=$1; shift 772 | target=$1; shift 773 | 774 | if container_controlled; then 775 | local cid=$(cat $state/cid) 776 | sudo docker cp $cid:$remote $target 777 | 778 | if [ $UID != 0 ]; then 779 | sudo chown -R $UID:$UID $target 780 | fi 781 | else 782 | local node_addr=$(cat $state/host/node_addr) 783 | rsync --quiet -az --no-owner --no-group \ 784 | -e "ssh -q -i $state/node_key \ 785 | -o StrictHostKeyChecking=no \ 786 | -o PasswordAuthentication=no \ 787 | -o UserKnownHostsFile=/dev/null" \ 788 | root@$node_addr:$remote $target 789 | fi 790 | } 791 | 792 | vmssh() { 793 | # NB: we use -n because stdin may be in use (e.g. in a 794 | # bash while read loop) 795 | ssh -q -n -i $state/node_key \ 796 | -o StrictHostKeyChecking=no \ 797 | -o PasswordAuthentication=no \ 798 | -o UserKnownHostsFile=/dev/null \ 799 | root@$(cat $state/host/node_addr) "$@" 800 | } 801 | 802 | vmscp() { 803 | scp -q -i $state/node_key \ 804 | -o StrictHostKeyChecking=no \ 805 | -o PasswordAuthentication=no \ 806 | -o UserKnownHostsFile=/dev/null "$@" 807 | } 808 | 809 | vmreboot() { 810 | vmssh systemctl reboot || : 811 | sleep 3 # give time for port to go down 812 | ssh_wait $(cat $state/host/node_addr) $state/node_key 813 | } 814 | 815 | update_github() { 816 | local context=$(cat $state/parsed/context) 817 | common_update_github "$context" "$@" 818 | } 819 | 820 | ensure_err_github_update() { 821 | trap "update_github error 'An internal error occurred.'" ERR 822 | } 823 | 824 | teardown_node() { 825 | if [ -f $state/host/node_name ] && \ 826 | [ -f $state/host/node_addr ]; then 827 | teardown_node_impl \ 828 | $(cat $state/host/node_name) \ 829 | $(cat $state/host/node_addr) \ 830 | "$(cat $state/host/node_volid)" 831 | fi 832 | } 833 | 834 | teardown_node_impl() { 835 | local node_name=$1; shift 836 | local node_addr=$1; shift 837 | local node_volid=$1; shift 838 | 839 | if [ -z "$node_name" ]; then 840 | return 841 | fi 842 | 843 | if [ -n "$node_volid" ]; then 844 | nova volume-detach $node_name $node_volid 845 | sleep 5 # XXX: sleep for a bit so that the detach actually takes place 846 | cinder --os-volume-api-version=2 delete $node_volid 847 | fi 848 | 849 | if [ -n "$node_addr" ] && \ 850 | [ -n "${os_floating_ip_pool:-}" ]; then 851 | nova floating-ip-disassociate $node_name $node_addr 852 | nova floating-ip-delete $node_addr 853 | fi 854 | 855 | nova delete $node_name 856 | } 857 | 858 | ensure_teardown_node() { 859 | if [ -z "${PAPR_DEBUG_NO_TEARDOWN:-}" ]; then 860 | trap teardown_node EXIT 861 | fi 862 | } 863 | 864 | teardown_container() { 865 | if [ -f $state/cid ]; then 866 | sudo docker rm -f $(cat $state/cid) 867 | fi 868 | } 869 | 870 | ensure_teardown_container() { 871 | if [ -z "${PAPR_DEBUG_NO_TEARDOWN:-}" ]; then 872 | trap teardown_container EXIT 873 | fi 874 | } 875 | 876 | teardown_cluster() { 877 | if [ -f $state/parsed/nhosts ]; then 878 | local nhosts=$(cat $state/parsed/nhosts) 879 | 880 | local i=0 881 | while [ $i -lt $nhosts ]; do 882 | if [ -f $state/host-$i/node_name ] && \ 883 | [ -f $state/host-$i/node_addr ]; then 
884 | teardown_node_impl \ 885 | $(cat $state/host-$i/node_name) \ 886 | $(cat $state/host-$i/node_addr) \ 887 | "$(cat $state/host-$i/node_volid)" 888 | fi 889 | i=$((i + 1)) 890 | done 891 | fi 892 | 893 | if container_controlled; then 894 | teardown_container 895 | fi 896 | } 897 | 898 | ensure_teardown_cluster() { 899 | if [ -z "${PAPR_DEBUG_NO_TEARDOWN:-}" ]; then 900 | trap teardown_cluster EXIT 901 | fi 902 | } 903 | 904 | containerized() { 905 | [ "$(cat $state/parsed/envtype)" = container ] 906 | } 907 | 908 | virtualized() { 909 | [ "$(cat $state/parsed/envtype)" = host ] 910 | } 911 | 912 | clustered() { 913 | [ "$(cat $state/parsed/envtype)" = cluster ] 914 | } 915 | 916 | container_controlled() { 917 | [ "$(cat $state/parsed/controller)" = container ] 918 | } 919 | 920 | host_controlled() { 921 | [ "$(cat $state/parsed/controller)" = host ] 922 | } 923 | 924 | on_atomic_host() { 925 | host_controlled && vmssh test -f /run/ostree-booted 926 | } 927 | 928 | get_env_os_info() { 929 | (. <(envcmd cat /etc/os-release) && echo ${!1} || :) 930 | } 931 | 932 | main "$@" 933 | -------------------------------------------------------------------------------- /papr/utils/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | 5 | PKG_DIR = os.path.dirname(os.path.realpath(__file__)) 6 | -------------------------------------------------------------------------------- /papr/utils/common.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | # http://stackoverflow.com/a/39596504/308136 5 | def ordinal(n): 6 | suffix = ['th', 'st', 'nd', 'rd', 'th', 'th', 'th', 'th', 'th', 'th'] 7 | if n < 0: 8 | n *= -1 9 | n = int(n) 10 | 11 | if n % 100 in (11, 12, 13): 12 | s = 'th' 13 | else: 14 | s = suffix[n % 10] 15 | 16 | return str(n) + s 17 | 18 | 19 | # normalize timeout str to seconds 20 | def str_to_timeout(s): 21 | assert re.match('^[0-9]+[smh]$', s) 22 | timeout = int(s[:-1]) 23 | if s.endswith('m'): 24 | timeout *= 60 25 | if s.endswith('h'): 26 | timeout *= 60 * 60 27 | return timeout 28 | -------------------------------------------------------------------------------- /papr/utils/common.sh: -------------------------------------------------------------------------------- 1 | # Send a commit status update to GitHub 2 | # $1 context 3 | # $2 ghstate (pending, success, error, or failure) 4 | # $3 description (optional) 5 | # $4 url (optional) 6 | common_update_github() { 7 | 8 | local context=$1; shift 9 | local ghstate=$1; shift 10 | 11 | local description="" 12 | if [ $# -gt 0 ]; then 13 | description=$1; shift 14 | fi 15 | 16 | local url="" 17 | if [ $# -gt 0 ]; then 18 | url=$1; shift 19 | fi 20 | 21 | if [ -z "${github_commit:-}" ]; then 22 | echo "No github_commit defined, ignoring..." 23 | return 24 | fi 25 | 26 | if [ -z "${github_token:-}" ]; then 27 | echo "No github_token defined, punting on GitHub commit status update:" 28 | echo $github_repo $github_commit $ghstate "$context" "$description" "$url" 29 | return 30 | fi 31 | 32 | python3 $THIS_DIR/utils/gh.py \ 33 | --repo $github_repo \ 34 | --commit $github_commit \ 35 | --token env:github_token \ 36 | --state "$ghstate" \ 37 | --context "$context" \ 38 | --description "$description" \ 39 | --url "$url" 40 | 41 | # Also update the merge sha if we're testing a merge commit. 
42 | # This is useful for homu: https://github.com/servo/homu/pull/54 43 | if [ -f state/is_merge_sha ]; then 44 | python3 $THIS_DIR/utils/gh.py \ 45 | --repo $github_repo \ 46 | --commit $(cat state/sha) \ 47 | --token env:github_token \ 48 | --state "$ghstate" \ 49 | --context "$context" \ 50 | --description "$description" \ 51 | --url "$url" 52 | fi 53 | } 54 | 55 | # Block until a node is available through SSH 56 | # $1 node IP address 57 | # $2 private key 58 | ssh_wait() { 59 | local node_addr=$1; shift 60 | local node_key=$1; shift 61 | 62 | timeout 300s "$THIS_DIR/utils/sshwait" $node_addr 63 | 64 | # We have to be extra cautious here -- OpenStack 65 | # networking takes some time to settle, so we wait until 66 | # we can contact the node for 5 continuous seconds. 67 | 68 | local max_sleep=30 69 | local failed=1 70 | 71 | sustain_true() { 72 | local sustain=5 73 | while [ $sustain -gt 0 ]; do 74 | if ! ssh -q -n -i $node_key \ 75 | -o StrictHostKeyChecking=no \ 76 | -o PasswordAuthentication=no \ 77 | -o UserKnownHostsFile=/dev/null \ 78 | root@$node_addr true; then 79 | return 1 80 | fi 81 | sustain=$((sustain - 1)) 82 | max_sleep=$((max_sleep - 1)) 83 | sleep 1 84 | done 85 | failed=0 86 | } 87 | 88 | while ! sustain_true && [ $max_sleep -gt 0 ]; do 89 | max_sleep=$((max_sleep - 1)) 90 | sleep 1 91 | done 92 | 93 | unset -f sustain_true 94 | 95 | if [ $failed == 1 ]; then 96 | echo "ERROR: Timed out while waiting for SSH." 97 | return 1 98 | fi 99 | } 100 | 101 | # Generic query to the GitHub API 102 | # $1 resource 103 | # $2.. path to key to print 104 | query_github() { 105 | resource=$1; shift 106 | python3 -c " 107 | import sys 108 | import requests 109 | header = {'Authorization': 'token ${github_token}'} 110 | d = requests.get('https://api.github.com/repos/${github_repo}/$resource', headers=header) 111 | if (d.status_code != requests.codes.ok): 112 | raise Exception('API Error: {}'.format(d.content)) 113 | j = d.json() 114 | for q in sys.argv[1:]: 115 | if q.isdigit(): 116 | q = int(q) 117 | j = j[q] 118 | print(j)" "$@" 119 | } 120 | -------------------------------------------------------------------------------- /papr/utils/ext_schema.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | from pykwalify.core import Core 4 | from pykwalify.errors import SchemaError 5 | 6 | # we can't use pkg-relative imports here because pykwalify imports this file as 7 | # its own pkg 8 | from papr.utils import common 9 | 10 | 11 | # http://stackoverflow.com/questions/2532053/ 12 | def _valid_hostname(hostname): 13 | if len(hostname) > 253: 14 | return False 15 | if re.match(r"[\d.]+$", hostname): 16 | return False 17 | allowed = re.compile("(?!-)[A-Z\d-]{1,63}(? 
1: 28 | raise SchemaError("only one of 'host', 'container', " 29 | "or 'cluster' required") 30 | if 'build' not in value and 'tests' not in value: 31 | raise SchemaError("at least one of 'build' or 'tests' required") 32 | return True 33 | 34 | 35 | def ext_hosts(value, rule_obj, path): 36 | # Until this is fixed: 37 | # https://github.com/Grokzen/pykwalify/issues/67 38 | if type(value) is not list: 39 | raise SchemaError("expected list of dicts") 40 | for i, host in enumerate(value): 41 | if type(host) is not dict: 42 | raise SchemaError("host %d is not a dict" % i) 43 | if 'name' not in host: 44 | raise SchemaError("host %d missing key 'name'" % i) 45 | if 'distro' not in host: 46 | raise SchemaError("host %d missing key 'distro'" % i) 47 | if not _valid_hostname(host['name']): 48 | raise SchemaError("invalid hostname for host %d" % i) 49 | if 'ostree' in host: 50 | ext_ostree(host['ostree'], rule_obj, path) 51 | return True 52 | 53 | 54 | def ext_repos(value, rule_obj, path): 55 | # Until this is fixed: 56 | # https://github.com/Grokzen/pykwalify/issues/67 57 | if type(value) is not list: 58 | raise SchemaError("expected list of dicts") 59 | for i, repo in enumerate(value): 60 | if type(repo) is not dict: 61 | raise SchemaError("repo %d is not a dict" % i) 62 | if 'name' not in repo: 63 | raise SchemaError("repo %d missing key 'name'" % i) 64 | for key in repo: 65 | if type(repo[key]) not in [int, str]: 66 | raise SchemaError("key '%s' of repo %d is not str or int" 67 | % (key, i)) 68 | return True 69 | 70 | 71 | def ext_ostree(value, rule_obj, path): 72 | if type(value) is str: 73 | if value != "latest": 74 | raise SchemaError("expected string 'latest'") 75 | elif type(value) is dict: 76 | schema = {'mapping': 77 | {'remote': {'type': 'str'}, 78 | 'branch': {'type': 'str'}, 79 | 'revision': {'type': 'str'} 80 | } 81 | } 82 | c = Core(source_data=value, schema_data=schema) 83 | c.validate() 84 | else: 85 | raise SchemaError("expected str or map") 86 | return True 87 | 88 | 89 | def ext_timeout(value, rule_obj, path): 90 | if common.str_to_timeout(value) > (2 * 60 * 60): 91 | raise SchemaError("timeout cannot be greater than 2 hours") 92 | return True 93 | 94 | 95 | def ext_build(value, rule_obj, path): 96 | if type(value) not in [dict, bool]: 97 | raise SchemaError("expected bool or map") 98 | if type(value) is dict: 99 | schema = {'mapping': 100 | {'config-opts': {'type': 'str'}, 101 | 'build-opts': {'type': 'str'}, 102 | 'install-opts': {'type': 'str'} 103 | } 104 | } 105 | c = Core(source_data=value, schema_data=schema) 106 | c.validate() 107 | return True 108 | -------------------------------------------------------------------------------- /papr/utils/gh.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | """ 4 | Small utility to facilitate updating GitHub commit CI 5 | status: https://developer.github.com/v3/repos/statuses/ 6 | 7 | Parameters are passed on the command-line. They can be 8 | prefixed by 'env:' to denote a lookup in an environment 9 | variable. E.g. --token env:token. 10 | """ 11 | 12 | import os 13 | import sys 14 | import json 15 | import argparse 16 | import requests 17 | import datetime 18 | from simplejson.scanner import JSONDecodeError 19 | 20 | 21 | class CommitNotFoundException(Exception): 22 | pass 23 | 24 | 25 | def _main(): 26 | "Main entry point." 
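    # Illustrative invocation only (mirrors how common.sh drives this script;
    # the repo, sha and context values below are placeholders):
    #   github_token=<token> python3 gh.py --repo some-org/some-repo \
    #       --commit <sha> --token env:github_token --state pending \
    #       --context 'Red Hat CI' --description 'Running tests...' --url ''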
27 | 28 | args = _parse_args() 29 | status(args.repo, args.commit, args.token, args.state, 30 | args.context, args.description, args.url) 31 | 32 | 33 | def _parse_args(): 34 | """ 35 | Parses program arguments and optionally resolves 36 | pointers to environment variables. 37 | """ 38 | 39 | parser = argparse.ArgumentParser() 40 | required_args = ['repo', 'commit', 'token', 'state'] 41 | optional_args = ['context', 'description', 'url'] 42 | for arg in required_args: 43 | parser.add_argument('--' + arg, required=True) 44 | for arg in optional_args: 45 | parser.add_argument('--' + arg) 46 | args = parser.parse_args() 47 | 48 | # resolve env vars, possibly to None (only allowed for optional args) 49 | for arg in required_args + optional_args: 50 | val = getattr(args, arg) 51 | if val is not None: 52 | if val.startswith('env:'): 53 | new_val = os.environ.get(val[4:]) 54 | if new_val is None and arg in required_args: 55 | parser.error( 56 | "Parameter '%s' is required, but the given " 57 | "environment variable '%s' is missing." % ( 58 | arg, val[4:])) 59 | setattr(args, arg, new_val) 60 | # we allow users to pass "" for optional vars to mean None so that 61 | # they don't have to resort to e.g. eval 62 | elif val == "": 63 | if arg in required_args: 64 | parser.error( 65 | "Parameter '%s' is required, but the given " 66 | "argument is empty." % arg) 67 | setattr(args, arg, None) 68 | 69 | return args 70 | 71 | 72 | def status(repo, commit, token, state, 73 | context=None, description=None, url=None): 74 | data = _craft_data_dict(state, context, description, url) 75 | _update_status(repo, commit, token, data) 76 | 77 | 78 | def _craft_data_dict(state, context, description, url): 79 | "Creates the data dictionary as required by the API." 80 | 81 | data = {'state': state} 82 | if context is not None: 83 | data['context'] = context 84 | if description is not None: 85 | data['description'] = description 86 | if url is not None: 87 | data['target_url'] = url 88 | return data 89 | 90 | 91 | def _update_status(repo, commit, token, data): 92 | "Sends the status update's data using the GitHub API." 93 | 94 | header = {'Authorization': 'token ' + token} 95 | api_url = ("https://api.github.com/repos/%s/statuses/%s" % 96 | (repo, commit)) 97 | 98 | if __name__ == '__main__': 99 | eprint("Updating status of commit", commit, "with data", data) 100 | 101 | try: 102 | # use data= instead of json= in case we're running on an older requests 103 | resp = requests.post(api_url, data=json.dumps(data), headers=header) 104 | _print_ratelimit_info(resp) 105 | body = resp.json() 106 | except JSONDecodeError: 107 | eprint("Expected JSON, but received:") 108 | eprint("---") 109 | eprint(resp.content) 110 | eprint("---") 111 | eprint("Retrying...") 112 | resp = requests.post(api_url, data=json.dumps(data), headers=header) 113 | body = resp.json() 114 | 115 | # pylint: disable=no-member 116 | if resp.status_code != requests.codes.created: 117 | if (resp.status_code == requests.codes.unprocessable 118 | and body is not None and 'message' in body 119 | and "No commit found for SHA" in body['message']): 120 | raise CommitNotFoundException() 121 | 122 | # Some other error happened. 
123 | errmsg = "Failed to update commit status [HTTP %d]" % resp.status_code 124 | errmsg += "\n" + str(resp.headers) 125 | if body is not None: 126 | errmsg += "\n" + str(body) 127 | raise Exception(errmsg) 128 | 129 | 130 | # XXX: add CLI support and deduplicate with status() 131 | def comment(repo, token, issue, text): 132 | "Creates a comment using the GitHub API." 133 | 134 | token_header = {'Authorization': 'token ' + token} 135 | api_url = ("https://api.github.com/repos/%s/issues/%d/comments" % 136 | (repo, issue)) 137 | 138 | data = {'body': text} 139 | 140 | # use data= instead of json= in case we're running on an older requests 141 | resp = requests.post(api_url, data=json.dumps(data), headers=token_header) 142 | _print_ratelimit_info(resp) 143 | body = resp.json() 144 | 145 | # pylint: disable=no-member 146 | if resp.status_code != requests.codes.created: 147 | errmsg = "Failed to update commit status [HTTP %d]" % resp.status_code 148 | errmsg += "\n" + str(resp.headers) 149 | if body is not None: 150 | errmsg += "\n" + str(body) 151 | raise Exception(errmsg) 152 | 153 | 154 | def _print_ratelimit_info(resp): 155 | # informational; don't croak if somehow the keys are missing/not int 156 | try: 157 | utc_now = int(datetime.datetime.utcnow().strftime("%s")) 158 | utc_reset = int(resp.headers['X-RateLimit-Reset']) 159 | next_reset = int((utc_reset - utc_now) / 60) 160 | print("X-RateLimit-Remaining: %s/%s (resets in %s mins)" % 161 | (resp.headers['X-RateLimit-Remaining'], 162 | resp.headers['X-RateLimit-Limit'], next_reset)) 163 | except Exception as e: 164 | print("Can't print rate limit info: %s" % e) 165 | 166 | 167 | def eprint(*args): 168 | print(*args, file=sys.stderr) 169 | 170 | 171 | if __name__ == '__main__': 172 | sys.exit(_main()) 173 | -------------------------------------------------------------------------------- /papr/utils/index.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Listing 4 | 5 | 6 | {{ url }} ({{ commit[:7] }})
7 | Testsuite: {{ context }} 8 | {% if files|length > 1 %} 9 | {{ files|length }} entries 10 | {% else %} 11 | {{ files|length }} entry 12 | {% endif %} 13 | 14 | {%- if not at_top %}   • ..   • {% endif -%} 15 | {% for name, link in files|dictsort -%} 16 |   • {{ name }}   • 17 | {% endfor %} 18 |
19 | 20 | 21 | -------------------------------------------------------------------------------- /papr/utils/indexer.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # MIT License 4 | # 5 | # Copyright (c) 2016 Jonathan Lebon 6 | # 7 | # Permission is hereby granted, free of charge, to any person obtaining a copy 8 | # of this software and associated documentation files (the "Software"), to deal 9 | # in the Software without restriction, including without limitation the rights 10 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 11 | # copies of the Software, and to permit persons to whom the Software is 12 | # furnished to do so, subject to the following conditions: 13 | # 14 | # The above copyright notice and this permission notice shall be included in 15 | # all copies or substantial portions of the Software. 16 | # 17 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 20 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 22 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 23 | # SOFTWARE. 24 | 25 | """ 26 | Recursively create index.html file listings for 27 | directories that do not have any. 28 | """ 29 | 30 | import os 31 | import jinja2 32 | 33 | from os import getcwd, listdir 34 | from os.path import dirname, isfile, isdir, join, realpath 35 | 36 | 37 | def get_index(dirpath): 38 | "Attempts to find an index file" 39 | if isfile(join(dirpath, 'index.htm')): 40 | return 'index.htm' 41 | if isfile(join(dirpath, 'index.html')): 42 | return 'index.html' 43 | return None 44 | 45 | 46 | def create_index(dirpath, tpl, at_top): 47 | "Creates a new index.html file" 48 | 49 | # get children 50 | files = {} 51 | for name in listdir(dirpath): 52 | if isdir(join(dirpath, name)): 53 | name = name + '/' 54 | files[name] = name 55 | path = join(dirpath, name) 56 | 57 | # link to the index.html of the child 58 | if isdir(path): 59 | index = get_index(join(dirpath, name)) 60 | if index is None: 61 | index = 'index.html' 62 | files[name] = name + index 63 | 64 | # Render the template to index.html 65 | with open(join(dirpath, "index.html"), 'w') as f: 66 | f.write(tpl.render(files=files, at_top=at_top)) 67 | 68 | 69 | def recurse(dirpath, tpl): 70 | for name in listdir(dirpath): 71 | path = join(dirpath, name) 72 | if isdir(path): 73 | if get_index(path) is None: 74 | create_index(path, tpl, at_top=False) 75 | recurse(path, tpl) 76 | 77 | 78 | def main(): 79 | "Main entry point" 80 | 81 | tpl_fname = join(dirname(realpath(__file__)), 'index.j2') 82 | with open(tpl_fname, 'r') as tplf: 83 | tpl = jinja2.Template(tplf.read(), autoescape=True) 84 | 85 | tpl.globals['url'] = os.environ.get('github_url', "N/A") 86 | tpl.globals['commit'] = os.environ.get('github_commit', "N/A") 87 | tpl.globals['context'] = os.environ.get('github_context', "N/A") 88 | 89 | cwd = getcwd() 90 | if get_index(cwd) is None: 91 | create_index(cwd, tpl, at_top=True) 92 | recurse(cwd, tpl) 93 | 94 | 95 | if __name__ == '__main__': 96 | main() 97 | -------------------------------------------------------------------------------- /papr/utils/os_provision.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | ''' 4 | This script is not meant to be run manually. It is 5 | called from the main script. See the README for details. 6 | 7 | We assume that the usual OpenStack authentication env 8 | vars are defined. Addtionally, the following env vars 9 | are expected: 10 | - os_image 11 | - os_min_ram 12 | - os_min_vcpus 13 | - os_min_disk 14 | - os_min_ephemeral 15 | - os_keyname 16 | - os_network 17 | - os_user_data 18 | - os_name_prefix 19 | - os_floating_ip_pool (optional) 20 | ''' 21 | 22 | import os 23 | import sys 24 | import uuid 25 | import time 26 | from novaclient import client as novaclient 27 | from cinderclient import client as cinderclient 28 | 29 | # XXX: clean this up 30 | 31 | output_dir = sys.argv[1] 32 | 33 | nova = novaclient.Client(2, auth_url=os.environ['OS_AUTH_URL'], 34 | tenant_id=os.environ['OS_TENANT_ID'], 35 | username=os.environ['OS_USERNAME'], 36 | password=os.environ['OS_PASSWORD']) 37 | 38 | print("INFO: authenticating") 39 | nova.authenticate() 40 | 41 | # it's possible multiple images match, e.g. during automated 42 | # image uploads, in which case let's just pick the first one 43 | print("INFO: resolving image '%s'" % os.environ['os_image']) 44 | image = nova.images.findall(name=os.environ['os_image'])[0] 45 | 46 | # go through all the flavours and determine which one to use 47 | min_ram = int(os.environ['os_min_ram']) 48 | min_vcpus = int(os.environ['os_min_vcpus']) 49 | min_disk = int(os.environ['os_min_disk']) 50 | flavors = nova.flavors.findall() 51 | flavors = [f for f in flavors if (f.ram >= min_ram and 52 | f.vcpus >= min_vcpus and 53 | f.disk >= min_disk)] 54 | 55 | if len(flavors) == 0: 56 | print("ERROR: no flavor satisfies minimum requirements.") 57 | sys.exit(1) 58 | 59 | 60 | # OK, now we need to pick the *least* resource-hungry flavor 61 | # from the list of flavors that fit the min reqs. This is 62 | # inevitably subjective, but here we prioritize vcpus, then 63 | # ram, then disk. 64 | def filter_flavors(flavors, attr): 65 | minval = min([getattr(f, attr) for f in flavors]) 66 | return [f for f in flavors if getattr(f, attr) == minval] 67 | 68 | 69 | flavors = filter_flavors(flavors, 'vcpus') 70 | flavors = filter_flavors(flavors, 'ram') 71 | flavors = filter_flavors(flavors, 'disk') 72 | flavors = filter_flavors(flavors, 'ephemeral') 73 | 74 | flavor = flavors[0] 75 | print("INFO: choosing flavor '%s'" % flavor.name) 76 | 77 | print("INFO: resolving network '%s'" % os.environ['os_network']) 78 | network = nova.networks.find(label=os.environ['os_network']) 79 | 80 | # if BUILD_ID is defined, let's add it so that it's easy to 81 | # trace back a node to the exact Jenkins build. 82 | meta = None 83 | if 'BUILD_ID' in os.environ: 84 | meta = {'BUILD_ID': os.environ['BUILD_ID']} 85 | 86 | print("INFO: reading user-data file '%s'" % os.environ['os_user_data']) 87 | with open(os.environ['os_user_data']) as f: 88 | userdata = f.read() 89 | 90 | 91 | def gen_name(): 92 | return "%s-%s" % (os.environ['os_name_prefix'], uuid.uuid4().hex[:8]) 93 | 94 | 95 | def server_exists(name): 96 | return len(nova.servers.findall(name=name)) > 0 97 | 98 | 99 | max_tries = 10 100 | name = gen_name() 101 | while server_exists(name) and max_tries > 0: 102 | name = gen_name() 103 | max_tries -= 1 104 | 105 | if max_tries == 0: 106 | print("ERROR: can't find unique name. 
Something is probably broken.") 107 | sys.exit(1) 108 | 109 | print("INFO: booting server %s" % name) 110 | server = nova.servers.create(name, meta=meta, image=image, userdata=userdata, 111 | flavor=flavor, key_name=os.environ['os_keyname'], 112 | nics=[{'net-id': network.id}]) 113 | print("INFO: booted server %s (%s)" % (name, server.id)) 114 | 115 | 116 | def write_to_file(fn, s): 117 | with open(os.path.join(output_dir, fn), 'w') as f: 118 | f.write(s) 119 | 120 | 121 | write_to_file('node_name', name) 122 | 123 | # XXX: check if there's a more elegant way to do this 124 | # XXX: implement timeout 125 | print("INFO: waiting for server to become active...") 126 | while server.status == 'BUILD': 127 | time.sleep(1) 128 | server.get() 129 | 130 | if server.status != 'ACTIVE': 131 | print("ERROR: server is not ACTIVE (state: %s)" % server.status) 132 | print("ERROR: deleting server") 133 | server.delete() 134 | sys.exit(1) 135 | 136 | vol = None 137 | min_ephemeral = int(os.environ['os_min_ephemeral']) 138 | if min_ephemeral > 0: 139 | try: 140 | print("INFO: creating volume of size %dG" % min_ephemeral) 141 | cinder = cinderclient.Client(2, os.environ['OS_USERNAME'], 142 | os.environ['OS_PASSWORD'], 143 | os.environ['OS_TENANT_NAME'], 144 | os.environ['OS_AUTH_URL'],) 145 | cinder.authenticate() 146 | volname = name + '-vol' 147 | vol = cinder.volumes.create(name=volname, size=min_ephemeral) 148 | print("INFO: created volume %s (%s)" % (volname, vol.id)) 149 | 150 | print("INFO: waiting for volume to become active...") 151 | while vol.status == 'creating': 152 | time.sleep(1) 153 | vol.get() 154 | 155 | if vol.status != 'available': 156 | print("ERROR: volume is not available (state: %s)" % vol.status) 157 | server.delete() 158 | vol.delete() 159 | sys.exit(1) 160 | 161 | # now we can safely attach the volume 162 | nova.volumes.create_server_volume(server.id, vol.id) 163 | except Exception: 164 | server.delete() 165 | if vol is not None: 166 | vol.delete() 167 | raise 168 | 169 | ip = server.networks[network.label][0] 170 | print("INFO: network IP is %s" % ip) 171 | if 'os_floating_ip_pool' in os.environ: 172 | print("INFO: attaching floating ip") 173 | fip = nova.floating_ips.create(os.environ['os_floating_ip_pool']) 174 | server.add_floating_ip(fip) 175 | ip = fip.ip 176 | print("INFO: floating IP is %s" % ip) 177 | 178 | write_to_file('node_addr', ip) 179 | write_to_file('node_volid', vol.id if vol is not None else '') 180 | -------------------------------------------------------------------------------- /papr/utils/parser.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | import yaml 5 | import shlex 6 | 7 | import pykwalify.core 8 | import pykwalify.errors 9 | 10 | from . import PKG_DIR 11 | from . import common 12 | 13 | 14 | class ParserError(SyntaxError): 15 | ''' 16 | Handle all errors caused by a bad PAPR file by 17 | throwing a ParserError. This allows clients to 18 | differentiate between infra errors and bad user 19 | input. We inherit from SyntaxError for the msg 20 | field. 
21 | ''' 22 | pass 23 | 24 | 25 | class SuiteParser: 26 | 27 | def __init__(self, filepath): 28 | self.contexts = [] 29 | self.met_required = False 30 | try: 31 | with open(filepath, encoding='utf-8') as f: 32 | filedata = f.read() 33 | 34 | # use CSafeLoader, because the default loader arbitrarily thinks 35 | # code points >= 0x10000 (like 🐄) are not valid: 36 | # https://bitbucket.org/xi/pyyaml/issues/26 37 | yaml.SafeLoader = yaml.CSafeLoader 38 | 39 | # collapse generator to a list to force scanning of all the 40 | # documents and catch any invalid YAML syntax right away 41 | self.raw_suites = list(yaml.safe_load_all(filedata)) 42 | 43 | # catch exceptions due to a bad YAML and transform into ParserError 44 | except UnicodeDecodeError: 45 | raise ParserError("file is not valid UTF-8") 46 | except (yaml.scanner.ScannerError, yaml.parser.ParserError): 47 | raise ParserError("file could not be parsed as valid YAML") 48 | 49 | def parse(self): 50 | "Generator of testsuites parsed from the given YAML file." 51 | 52 | suite = None 53 | for idx, raw_suite in enumerate(self.raw_suites): 54 | try: 55 | suite = self._merge(suite, raw_suite) 56 | self._validate(suite) 57 | yield dict(suite) 58 | # tell users which suite exactly caused the error 59 | except ParserError as e: 60 | raise ParserError("failed to parse %s testsuite: %s" 61 | % (common.ordinal(idx + 1), e.msg)) 62 | 63 | def _merge(self, suite, new): 64 | "Merge the next document into the current one." 65 | 66 | if type(new) is not dict: 67 | raise ParserError("top-level type should be a dict") 68 | 69 | if suite is None: 70 | 71 | # The 'context' key is special. It's optional on the 72 | # first suite (defaulting to 'Red Hat CI'), but 73 | # required on subsequent suites. 74 | if 'context' not in new: 75 | new['context'] = "Red Hat CI" 76 | 77 | if 'inherit' in new and type(new['inherit']) is not bool: 78 | raise ParserError("expected 'bool' value for 'inherit' key") 79 | 80 | # if we're not inheriting, then let's just return the new suite itself 81 | if suite is None or not new.get('inherit', False): 82 | return self._normalize(new.copy()) 83 | 84 | assert type(suite) is dict 85 | 86 | # if the suite specifies an envtype, then make sure we 87 | # don't inherit the envtype of the old one 88 | envtypes = ['container', 'host', 'cluster'] 89 | if any([i in new for i in envtypes]): 90 | for i in envtypes: 91 | if i in suite: 92 | del suite[i] 93 | 94 | # we always expect a new context key 95 | del suite['context'] 96 | 97 | suite.update(new) 98 | 99 | return self._normalize(suite) 100 | 101 | def _normalize(self, suite): 102 | for k, v in list(suite.items()): 103 | if k == 'inherit' or v is None: 104 | del suite[k] 105 | return suite 106 | 107 | def _validate(self, suite): 108 | 109 | schema = os.path.join(PKG_DIR, "schema.yml") 110 | ext = os.path.join(PKG_DIR, "ext_schema.py") 111 | 112 | try: 113 | c = pykwalify.core.Core(source_data=suite, 114 | schema_files=[schema], 115 | extensions=[ext]) 116 | c.validate() 117 | except pykwalify.errors.PyKwalifyException as e: 118 | raise ParserError(e.msg) 119 | 120 | if suite['context'] in self.contexts: 121 | raise ParserError("duplicate 'context' value detected") 122 | 123 | self.met_required = self.met_required or suite.get('required', False) 124 | 125 | if suite['context'] == "required" and self.met_required: 126 | raise ParserError('context "required" forbidden when using the ' 127 | "'required' key") 128 | 129 | self.contexts.append(suite['context']) 130 | 131 | 132 | def 
_write_to_file(dir, fn, s, utf8=False): 133 | try: 134 | enc = 'utf-8' if utf8 else 'ascii' 135 | with open(os.path.join(dir, fn), 'w', encoding=enc) as f: 136 | f.write(s) 137 | except UnicodeEncodeError: 138 | # this can only happen if trying to encode in ascii 139 | # a value which contains non-ASCII chars in the YAML 140 | raise ParserError("non-ASCII characters found in ASCII-only field") 141 | 142 | 143 | def _flush_host(host, outdir): 144 | if 'ostree' in host: 145 | val = host['ostree'] 146 | assert type(val) in [str, dict] 147 | if type(val) is str: 148 | assert val == "latest" 149 | _write_to_file(outdir, "ostree_revision", "") 150 | else: 151 | _write_to_file(outdir, "ostree_remote", val.get('remote', '')) 152 | _write_to_file(outdir, "ostree_branch", val.get('branch', '')) 153 | _write_to_file(outdir, "ostree_revision", val.get('revision', '')) 154 | val = host.get("specs", {}) 155 | _write_to_file(outdir, "min_ram", str(val.get("ram", 2048))) 156 | _write_to_file(outdir, "min_cpus", str(val.get("cpus", 1))) 157 | _write_to_file(outdir, "min_disk", str(val.get("disk", 20))) 158 | _write_to_file(outdir, "min_secondary_disk", 159 | str(val.get("secondary-disk", 0))) 160 | _write_to_file(outdir, "distro", host['distro']) 161 | 162 | 163 | def flush_suite(suite, outdir): 164 | 165 | os.makedirs(outdir) 166 | 167 | if 'host' in suite: 168 | dir = os.path.join(outdir, "host") 169 | os.mkdir(dir) 170 | _flush_host(suite['host'], dir) 171 | _write_to_file(outdir, 'envtype', 'host') 172 | _write_to_file(outdir, 'controller', 'host') 173 | 174 | if 'container' in suite: 175 | _write_to_file(outdir, "image", suite['container']['image']) 176 | _write_to_file(outdir, 'envtype', 'container') 177 | _write_to_file(outdir, 'controller', 'container') 178 | 179 | if 'cluster' in suite: 180 | cluster = suite['cluster'] 181 | for i, host in enumerate(cluster['hosts']): 182 | dir = os.path.join(outdir, "host-%d" % i) 183 | os.mkdir(dir) 184 | _flush_host(host, dir) 185 | _write_to_file(dir, "name", host['name']) 186 | _write_to_file(outdir, 'nhosts', str(i+1)) 187 | if 'container' in cluster: 188 | _write_to_file(outdir, "image", cluster['container']['image']) 189 | _write_to_file(outdir, 'controller', 'container') 190 | else: 191 | _write_to_file(outdir, 'controller', 'host') 192 | _write_to_file(outdir, 'envtype', 'cluster') 193 | 194 | if 'tests' in suite: 195 | _write_to_file(outdir, "tests", '\n'.join(suite['tests']), utf8=True) 196 | 197 | _write_to_file(outdir, "branches", 198 | '\n'.join(suite.get('branches', ['master']))) 199 | 200 | timeout = common.str_to_timeout(suite.get('timeout', '2h')) 201 | _write_to_file(outdir, "timeout", str(timeout)) 202 | 203 | _write_to_file(outdir, "context", suite.get('context')) 204 | 205 | if 'extra-repos' in suite: 206 | repos = '' 207 | for repo in suite['extra-repos']: 208 | repos += "[%s]\n" % repo['name'] 209 | for key, val in repo.items(): 210 | repos += "%s=%s\n" % (key, val) 211 | if repos != "": 212 | _write_to_file(outdir, "papr-extras.repo", repos) 213 | 214 | if 'packages' in suite: 215 | packages = [] 216 | for pkg in suite['packages']: 217 | packages.append(shlex.quote(pkg)) 218 | _write_to_file(outdir, "packages", ' '.join(packages)) 219 | 220 | if 'artifacts' in suite: 221 | _write_to_file(outdir, "artifacts", '\n'.join(suite['artifacts'])) 222 | 223 | if 'env' in suite: 224 | envs = '' 225 | for k, v in suite['env'].items(): 226 | # NB: the schema already ensures that k is ASCII 227 | # only, so utf8=True will only affect the value 228 | 
envs += 'export %s="%s"\n' % (k, v) 229 | _write_to_file(outdir, "envs", envs, utf8=True) 230 | 231 | if 'build' in suite: 232 | v = suite['build'] 233 | if type(v) is bool and v: 234 | _write_to_file(outdir, "build", '') 235 | elif type(v) is dict: 236 | _write_to_file(outdir, "build", '') 237 | _write_to_file(outdir, "build.config_opts", 238 | v.get('config-opts', '')) 239 | _write_to_file(outdir, "build.build_opts", 240 | v.get('build-opts', '')) 241 | _write_to_file(outdir, "build.install_opts", 242 | v.get('install-opts', '')) 243 | -------------------------------------------------------------------------------- /papr/utils/required-index.j2: -------------------------------------------------------------------------------- 1 | 2 | 3 | Test results 4 | 5 | 6 | {{ url }} ({{ commit[:7] }})

7 | Test results 8 | 9 | {% for name, passed, link in suites -%} 10 | {%- if passed -%} 11 | {% set bgcolor = "#aae0aa" %} 12 | {%- else -%} 13 | {% set bgcolor = "#e0aaaa" %} 14 | {%- endif -%} 15 |   • 16 | 17 | 18 | {{ name }} 19 | 20 | 21 |   • 22 | {% endfor %} 23 |
24 | 25 | 26 | -------------------------------------------------------------------------------- /papr/utils/schema.yml: -------------------------------------------------------------------------------- 1 | type: map 2 | func: ext_testenv 3 | mapping: 4 | host: 5 | mapping: 6 | distro: 7 | type: str 8 | required: true 9 | ostree: 10 | type: any 11 | func: ext_ostree 12 | specs: 13 | mapping: 14 | ram: 15 | type: int 16 | cpus: 17 | type: int 18 | disk: 19 | type: int 20 | secondary-disk: 21 | type: int 22 | container: 23 | mapping: 24 | image: 25 | type: str 26 | required: true 27 | cluster: 28 | mapping: 29 | hosts: 30 | type: any 31 | func: ext_hosts 32 | container: 33 | mapping: 34 | image: 35 | type: str 36 | required: true 37 | branches: 38 | sequence: 39 | - type: str 40 | unique: true 41 | pulls: 42 | type: bool 43 | context: 44 | type: str 45 | required: true 46 | required: 47 | type: bool 48 | extra-repos: 49 | type: any 50 | func: ext_repos 51 | packages: 52 | sequence: 53 | - type: str 54 | unique: true 55 | env: 56 | mapping: 57 | regex;(^[a-zA-Z_][a-zA-Z0-9_]*$): 58 | type: str 59 | build: 60 | type: any 61 | func: ext_build 62 | tests: 63 | sequence: 64 | - type: str 65 | timeout: 66 | type: str 67 | pattern: '[0-9]+[smh]' 68 | func: ext_timeout 69 | artifacts: 70 | sequence: 71 | - type: str 72 | -------------------------------------------------------------------------------- /papr/utils/sshwait: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # from https://github.com/jlebon/files/blob/master/bin/sshwait 5 | 6 | if [ $# -ne 1 ]; then 7 | echo "Usage: $0 " 8 | fi 9 | 10 | # If it's already open, then just exit quietly 11 | if echo | nc -w 500ms $1 22 2>&1 | grep -q SSH; then 12 | exit 0 13 | fi 14 | 15 | echo -n "Waiting for open SSH port..." 16 | while true; do 17 | 18 | out=$(echo | nc -w 1 $1 22 2>&1 || :) 19 | if grep -q SSH <<< "$out"; then 20 | break 21 | fi 22 | 23 | echo -n '.' 24 | 25 | if [[ $out == "Ncat: Connection timed out." ]]; then 26 | continue 27 | fi 28 | 29 | if [[ $out == "Ncat: Connection refused." ]] || \ 30 | [[ $out == "Ncat: No route to host." ]] || \ 31 | [[ $out == "Ncat: Connection reset by peer." ]]; then 32 | sleep 1 33 | continue 34 | fi 35 | 36 | echo 37 | echo -n "Unknown error: " 38 | echo "$out" 39 | exit 1 40 | done 41 | 42 | echo " done!" 
43 | -------------------------------------------------------------------------------- /papr/utils/user-data: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | disable_root: 0 3 | 4 | users: 5 | - name: root 6 | lock-passwd: false 7 | inactive: false 8 | system: false 9 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # we're not compatible with the latest nova API 2 | python-novaclient==7.1.0 3 | python-cinderclient==3.2.0 4 | PyYAML==3.12 5 | jinja2==2.9.6 6 | awscli==1.11.72 7 | pykwalify==1.6.0 8 | boto3==1.4.4 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | from setuptools import setup, find_packages 4 | 5 | # We don't define any reqs here; we're just an app for now, not a library, and 6 | # we only support running in a dedicated container/virtualenv provisioned with 7 | # requirements.txt. Anyway, the setuptools dep solver is not as good as pip's. 8 | 9 | setup( 10 | name="papr", 11 | version="0.1", 12 | packages=find_packages(), 13 | entry_points={ 14 | "console_scripts": ["papr = papr:main"], 15 | }, 16 | # just copy the bash scripts for now until they're fully ported over 17 | package_data={"papr": ["main", "testrunner", "provisioner"], 18 | # we'll hoist utils out later to just be a module in papr 19 | "papr.utils": ["*.sh", "*.yml", "*.j2", "sshwait", 20 | "user-data"]} 21 | ) 22 | -------------------------------------------------------------------------------- /validator.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | ''' 4 | Simple script to validate a YAML file. 5 | Usage: ./validator.py /my/github/project/.papr.yml 6 | ''' 7 | 8 | import os 9 | import pprint 10 | import argparse 11 | import papr.utils.parser as parser 12 | 13 | argparser = argparse.ArgumentParser() 14 | argparser.add_argument('yml_file', help="YAML file to parse and validate") 15 | argparser.add_argument('--output-dir', metavar="DIR", 16 | help="directory to which to flush suites if desired") 17 | args = argparser.parse_args() 18 | 19 | suite_parser = parser.SuiteParser(args.yml_file) 20 | for idx, suite in enumerate(suite_parser.parse()): 21 | print("INFO: validated suite %d" % idx) 22 | pprint.pprint(suite, indent=4) 23 | if args.output_dir: 24 | suite_dir = os.path.join(args.output_dir, str(idx)) 25 | parser.flush_suite(suite, suite_dir) 26 | print("INFO: flushed to %s" % suite_dir) 27 | --------------------------------------------------------------------------------
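A hedged usage sketch for the validator above (the config path and output
directory are placeholders):

    ./validator.py ~/src/myproject/.papr.yml --output-dir /tmp/suites

Per flush_suite() in papr/utils/parser.py, each /tmp/suites/<n>/ directory
would then contain small state files such as 'context', 'envtype',
'controller', 'branches' and 'timeout', plus 'tests', 'packages', 'envs',
'artifacts' or the 'build.*' options when the corresponding keys appear in
that suite.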